Merge branch 'master' into clarify-IdP-support

clarify-IdP-support
Jason Stirnaman 2025-03-13 15:22:35 -05:00 committed by GitHub
commit 7d675078a2
211 changed files with 13321 additions and 12285 deletions

File diff suppressed because it is too large

View File

@ -0,0 +1,11 @@
extends: substitution
message: Did you mean '%s' instead of '%s'
level: warning
ignorecase: false
# swap maps tokens in the form bad: good
# NOTE: The left-hand (bad) side can match the right-hand (good) side;
# Vale ignores alerts that match the intended form.
swap:
'cloud-serverless|cloud-dedicated|clustered': core
'Cloud Serverless|Cloud Dedicated|Clustered': Core
'API token': database token
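To see how a substitution rule like this fires, you can lint a content file with the project's Vale configuration. A minimal sketch, assuming the Vale CLI is installed locally; the file path is hypothetical:

```bash
# Lint a hypothetical InfluxDB 3 Core page with the repo's Vale config.
# Text such as "Cloud Serverless" or "API token" raises a warning suggesting
# the preferred term from the swap map above.
vale --config=.vale.ini content/influxdb3/core/get-started/_index.md
```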

View File

@ -0,0 +1,10 @@
extends: substitution
message: Did you mean '%s' instead of '%s'
level: warning
ignorecase: false
# swap maps tokens in the form bad: good
# NOTE: The left-hand (bad) side can match the right-hand (good) side;
# Vale ignores alerts that match the intended form.
swap:
'(?i)bucket': database
'(?i)measurement': table

.frontmatter-schema.json Normal file
View File

@ -0,0 +1,40 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"title": {
"type": "string",
"description": "Title of the page"
},
"description": {
"type": "string",
"description": "Page description that supports multi-line text"
},
"menu": {
"type": "object",
"properties": {
"influxdb3_core": {
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "Menu item name"
}
},
"required": ["name"]
}
}
},
"weight": {
"type": "integer",
"description": "Order weight for menu items",
"minimum": 0
},
"source": {
"type": "string",
"description": "Path to source content file",
"pattern": "^/shared/.+\\.md$"
}
},
"required": ["title", "description", "menu", "weight"]
}
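As a quick check of the schema above, here is a hedged sketch of frontmatter that satisfies it; the file path, title, and values are hypothetical placeholders:

```bash
# Write a hypothetical page whose frontmatter satisfies .frontmatter-schema.json:
# required keys are title, description, menu (with influxdb3_core.name), and weight;
# source, when present, must match ^/shared/.+\.md$.
cat > content/influxdb3/core/example-page.md <<'EOF'
---
title: Example page
description: >
  A placeholder description used to illustrate the frontmatter schema.
menu:
  influxdb3_core:
    name: Example page
weight: 101
source: /shared/example-page.md
---
EOF
```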

.gitignore vendored
View File

@ -8,7 +8,7 @@ node_modules
*.log
/resources
.hugo_build.lock
/content/influxdb/*/api/**/*.html
/content/influxdb*/**/api/**/*.html
/api-docs/redoc-static.html*
.vscode/*
.idea

.vscode/settings.json vendored
View File

@ -1,4 +1,17 @@
{
"vale.valeCLI.config": " \"${workspaceFolder}/.vale.ini\"",
"commentAnchors.tags.anchors":
{ "SOURCE": {
"scope": "file",
"behavior": "link",
"iconColor": "#FF0000",
"highlightColor": "#FF0000",
"style": "bold"
}},
"commentAnchors.workspace.matchFiles": "**/*.{md,ini,json,yaml,yml}",
"commentAnchors.workspace.enabled": true,
"yaml.schemas": {
"./.frontmatter-schema.json": "${workspaceFolder}/content/**/*.md"
},
"vale.valeCLI.config": "${workspaceFolder}/.vale.ini",
"vale.valeCLI.minAlertLevel": "warning",
}

View File

@ -45,18 +45,22 @@ To install dependencies listed in package.json:
3. Run `yarn` to install dependencies (including Hugo).
4. Install the Yarn package manager and run `yarn` to install project dependencies.
`package.json` contains dependencies for linting and running Git hooks.
`package.json` contains dependencies used in `/assets/js` JavaScript code and
dev dependencies used in pre-commit hooks for linting, syntax-checking, and testing.
- **[husky](https://github.com/typicode/husky)**: manages Git hooks, including the pre-commit hook for linting and testing
- **[lint-staged](https://github.com/lint-staged/lint-staged)**: passes staged files to commands
- **[prettier](https://prettier.io/docs/en/)**: formats code, including Markdown, according to style rules for consistency
Dev dependencies include:
- [Lefthook](https://github.com/evilmartians/lefthook): configures and
manages pre-commit hooks for linting and testing Markdown content.
- [prettier](https://prettier.io/docs/en/): formats code, including Markdown, according to style rules for consistency
- [Cypress]: e2e testing for UI elements and URLs in content
### Install Docker
Install [Docker](https://docs.docker.com/get-docker/) for your system.
docs-v2 includes Docker configurations (`compose.yaml` and Dockerfiles) for running the Vale style linter and tests for code blocks (Shell, Bash, and Python) in Markdown files.
Install [Docker](https://docs.docker.com/get-docker/) for your system.
#### Build the test dependency image
After you have installed Docker, run the following command to build the test
@ -65,13 +69,24 @@ The tests defined in `compose.yaml` use the dependencies and execution
environment from this image.
```bash
docker build -t influxdata:docs-pytest -f Dockerfile.pytest .
docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest .
```
### Run the documentation locally (optional)
To run the documentation locally, follow the instructions provided in the README.
### Install Visual Studio Code extensions
If you use Microsoft Visual Studio (VS) Code, you can install extensions
to help you navigate, check, and edit files.
docs-v2 contains a `./.vscode/settings.json` that configures the following extensions:
- Comment Anchors: recognizes tags (for example, `//SOURCE`) and makes links and filepaths clickable in comments.
- Vale: shows linter errors and suggestions in the editor.
- YAML Schemas: validates frontmatter attributes.
### Make your changes
Make your suggested changes, being sure to follow the [style and formatting guidelines](#style--formatting) outlined below.
@ -80,15 +95,16 @@ Make your suggested changes being sure to follow the [style and formatting guide
### Automatic pre-commit checks
docs-v2 uses Husky to manage Git hook scripts.
When you try to commit your changes (for example, `git commit`), Git runs
scripts configured in `.husky/pre-commit`, including linting and tests for your **staged** files.
docs-v2 uses Lefthook to manage Git hooks, such as pre-commit hooks that lint Markdown and test code blocks.
When you try to commit changes (`git commit`), Git runs
the commands configured in `lefthook.yml`, which pass your **staged** files to Vale,
Prettier, Cypress (for UI tests and link-checking), and Pytest (for testing Python and shell code in code blocks).
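To exercise those hooks without creating a commit, you can invoke Lefthook directly. A sketch that assumes Lefthook is available through the project's dev dependencies and that the hook tasks are defined in `lefthook.yml`:

```bash
# Stage a file, then run the pre-commit tasks from lefthook.yml against staged files
# without committing.
git add path/to/changed-file.md
npx lefthook run pre-commit
```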
### Skip pre-commit hooks
**We strongly recommend running linting and tests**, but you can skip them
(and avoid installing dependencies)
by including the `HUSKY=0` environment variable or the `--no-verify` flag with
by including the `LEFTHOOK=0` environment variable or the `--no-verify` flag with
your commit--for example:
```sh
@ -96,13 +112,14 @@ git commit -m "<COMMIT_MESSAGE>" --no-verify
```
```sh
HUSKY=0 git commit
LEFTHOOK=0 git commit
```
For more options, see the [Lefthook documentation](https://github.com/evilmartians/lefthook).
### Set up test scripts and credentials
Tests for code blocks require your InfluxDB credentials and other typical
InfluxDB configuration.
To set up your docs-v2 instance to run tests locally, do the following:
1. **Set executable permissions on test scripts** in `./test/src`:
@ -118,18 +135,19 @@ To set up your docs-v2 instance to run tests locally, do the following:
Cloud Dedicated instance for testing in most cases. To avoid conflicts when
running tests, create separate Cloud Dedicated and Clustered databases.
3. **Create .env.test**: Copy the `./test/env.test.example` file into each
1. **Create .env.test**: Copy the `./test/env.test.example` file into each
product directory to test and rename the file as `.env.test`--for example:
```sh
./content/influxdb/cloud-dedicated/.env.test
```
4. Inside each product's `.env.test` file, assign your InfluxDB credentials to
environment variables.
2. Inside each product's `.env.test` file, assign your InfluxDB credentials to
environment variables:
In addition to the usual `INFLUX_` environment variables, in your
`cloud-dedicated/.env.test` and `clustered/.env.test` files define the
- Include the usual `INFLUX_` environment variables
- In
`cloud-dedicated/.env.test` and `clustered/.env.test` files, also define the
following variables (see the example `.env.test` sketch after this list):
- `ACCOUNT_ID`, `CLUSTER_ID`: You can find these values in your `influxctl`
@ -137,72 +155,18 @@ Cloud Dedicated instance for testing in most cases. To avoid conflicts when
- `MANAGEMENT_TOKEN`: Use the `influxctl management create` command to generate
a long-lived management token to authenticate Management API requests
For the full list of variables you'll need to include, see the substitution
patterns in `./test/src/prepare-content.sh`.
See the substitution
patterns in `./test/src/prepare-content.sh` for the full list of variables you may need to define in your `.env.test` files.
**Warning**: The database you configure in `.env.test` and any written data may
be deleted during test runs.
**Warning**: To prevent accidentally adding credentials to the docs-v2 repo,
Git is configured to ignore `.env*` files. Don't add your `.env.test` files to Git.
Consider backing them up on your local machine in case of accidental deletion.
5. For influxctl commands to run in tests, move or copy your `config.toml` file
3. For influxctl commands to run in tests, move or copy your `config.toml` file
to the `./test` directory.
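A hedged sketch of what a `cloud-dedicated/.env.test` might contain. Variable names other than `ACCOUNT_ID`, `CLUSTER_ID`, and `MANAGEMENT_TOKEN` are illustrative; the substitution patterns in `./test/src/prepare-content.sh` are the authoritative list:

```bash
# ./content/influxdb/cloud-dedicated/.env.test -- example values only; never commit this file.
INFLUX_HOST=https://example-cluster.influxdb.io   # hypothetical cluster URL
INFLUX_DATABASE=docs-test                         # database that tests may write to and delete from
INFLUX_TOKEN=<your-database-token>
ACCOUNT_ID=<your-account-id>                      # from your influxctl config.toml
CLUSTER_ID=<your-cluster-id>                      # from your influxctl config.toml
MANAGEMENT_TOKEN=<token-from-influxctl-management-create>
```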
### Pre-commit linting and testing
When you try to commit your changes using `git commit` or your editor,
the project automatically runs pre-commit checks for spelling, punctuation,
and style on your staged files.
`.husky/pre-commit` script runs Git pre-commit hook commands, including
[`lint-staged`](https://github.com/lint-staged/lint-staged).
The `.lintstagedrc.mjs` lint-staged configuration maps product-specific glob
patterns to lint and test commands and passes a product-specific
`.env.test` file to a test runner Docker container.
The container then loads the `.env` file into the container's environment variables.
To test or troubleshoot testing and linting scripts and configurations before
committing, choose from the following:
- To run pre-commit scripts without actually committing, append `exit 1` to the
`.husky/pre-commit` script--for example:
```sh
./test/src/monitor-tests.sh start
npx lint-staged --relative
./test/src/monitor-tests.sh kill
exit 1
```
And then run `git commit`.
The `exit 1` status fails the commit, even if all the tasks succeed.
- Use `yarn` to run one of the lint or test scripts configured in
`package.json`--for example:
```sh
yarn run test
```
- Run `lint-staged` directly and specify options:
```sh
npx lint-staged --relative --verbose
```
The pre-commit linting configuration checks for _error-level_ problems.
An error-level rule violation fails the commit and you must do one of the following before you can commit your changes:
- fix the reported problem in the content
- edit the linter rules to permanently allow the content.
See **Configure style rules**.
- temporarily override the hook (using `git commit --no-verify`)
> [!Warning]
>
> - The database you configure in `.env.test` and any written data may
>   be deleted during test runs.
> - Don't add your `.env.test` files to Git. To prevent accidentally adding
>   credentials to the docs-v2 repo, Git is configured to ignore `.env*` files.
>   Consider backing them up on your local machine in case of accidental deletion.
#### Test shell and python code blocks
@ -249,7 +213,7 @@ You probably don't want to display this syntax in the docs, which unfortunately
means you'd need to include the test block separately from the displayed code
block.
To hide it from users, wrap the code block inside an HTML comment.
Pytest-codeblocks will still collect and run the code block.
pytest-codeblocks will still collect and run the code block.
##### Mark tests to skip
@ -546,6 +510,25 @@ Insert warning markdown content here.
{{% /warn %}}
```
### Product data
Display the full product name and version name for the current page--for example:
- InfluxDB 3 Core
- InfluxDB 3 Cloud Dedicated
```md
{{% product-name %}}
```
Display the short version name (part of the key used in `products.yml`) from the current page URL--for example:
- `/influxdb3/core` returns `core`
```md
{{% product-key %}}
```
### Enterprise content
For content that relates specifically to InfluxDB Enterprise, use the `{{% enterprise %}}` shortcode.

View File

@ -32,7 +32,13 @@ RUN apt-get update && apt-get upgrade -y && apt-get install -y \
python3-venv \
rsync \
telegraf \
wget
wget \
yq
# Install InfluxDB 3 Core
RUN curl -O https://www.influxdata.com/d/install_influxdb3.sh \
&& chmod +x install_influxdb3.sh \
&& bash -c yes | ./install_influxdb3.sh
RUN ln -s /usr/bin/python3 /usr/bin/python
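After the image builds, a quick hedged check that the newly added dependencies (`yq` and InfluxDB 3 Core) are available inside it; this assumes the installer puts `influxdb3` on the `PATH` and that overriding the entrypoint with `sh` is acceptable for the image:

```bash
# Build the test image, then confirm the tools installed above are reachable.
docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest .
docker run --rm --entrypoint sh influxdata/docs-pytest:latest -c 'yq --version && influxdb3 --version'
```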

View File

@ -237,7 +237,7 @@ InfluxDB Cloud releases are frequent and not versioned, so the Cloud API spec is
We regenerate API reference docs from `influxdata/openapi`
**master** branch as features are released.
### InfluxDB OSS version
### InfluxDB OSS v2 version
Given that
`influxdata/openapi` **master** may contain OSS spec changes not implemented

View File

@ -41,7 +41,6 @@ function generateHtml {
local productName="$3"
local api="$4"
local configPath="$5"
local isDefault=$6
# Use the product version to define the menu key for the Hugo template
local menu="$(echo $productVersion | sed 's/\./_/g;s/-/_/g;s/\//_/g;')"
@ -55,7 +54,18 @@ function generateHtml {
# Use the title and summary defined in the product API's info.yml file.
local title=$(yq '.title' $productVersion/$apiName/content/info.yml)
local menuTitle=$(yq '.x-influxdata-short-title' $productVersion/$apiName/content/info.yml)
local description=$(yq '.summary' $productVersion/$apiName/content/info.yml)
# Get the shortened description to use for metadata.
local shortDescription=$(yq '.x-influxdata-short-description' $productVersion/$apiName/content/info.yml)
# Get the aliases array from the configuration file.
local aliases=$(yq e ".apis | .$api | .x-influxdata-docs-aliases" "$configPath")
# If aliases is null, set it to an empty YAML array.
if [[ "$aliases" == "null" ]]; then
aliases='[]'
fi
local weight=102
if [[ $apiName == "v1-compatibility" ]]; then
weight=304
fi
# Define the file name for the Redoc HTML output.
local specbundle=redoc-static_index.html
# Define the temporary file for the Hugo template and Redoc HTML.
@ -70,75 +80,31 @@ function generateHtml {
npm_config_yes=true npx redoc-cli@0.12.3 bundle $specPath \
--config $configPath \
-t template.hbs \
--title=$title \
--title="$title" \
--options.sortPropsAlphabetically \
--options.menuToggle \
--options.hideDownloadButton \
--options.hideHostname \
--options.noAutoAuth \
--output=$specbundle \
--templateOptions.description=$description \
--templateOptions.description="$shortDescription" \
--templateOptions.product="$productVersion" \
--templateOptions.productName="$productName"
if [[ $apiName == "v1-compatibility" ]]; then
local frontmatter=$(yq eval -n \
".title = \"$title\" |
.description = \"$shortDescription\" |
.layout = \"api\" |
.weight = $weight |
.menu.[\"$menu\"].parent = \"InfluxDB HTTP API\" |
.menu.[\"$menu\"].name = \"$menuTitle\" |
.menu.[\"$menu\"].identifier = \"api-reference-$apiName\" |
.aliases = \"$aliases\"")
frontmatter="---
title: $title
description: $description
layout: api
menu:
$menu:
parent: InfluxDB HTTP API
name: $menuTitle
identifier: api-reference-$apiName
weight: 304
aliases:
- /influxdb/$versionDir/api/v1/
$frontmatter
---
"
elif [[ $apiVersion == "0" ]]; then
echo $productName $apiName
frontmatter="---
title: $title
description: $description
layout: api
weight: 102
menu:
$menu:
parent: InfluxDB HTTP API
name: $menuTitle
identifier: api-reference-$apiName
---
"
elif [[ $isDefault == true ]]; then
frontmatter="---
title: $title
description: $description
layout: api
menu:
$menu:
parent: InfluxDB HTTP API
name: $menuTitle
identifier: api-reference-$apiName
weight: 102
aliases:
- /influxdb/$versionDir/api/
---
"
else
frontmatter="---
title: $title
description: $description
layout: api
menu:
$menu:
parent: InfluxDB HTTP API
name: $menuTitle
identifier: api-reference-$apiName
weight: 102
---
"
fi
# Create the Hugo template file with the frontmatter and Redoc HTML
echo "$frontmatter" >> $tmpfile
@ -174,9 +140,10 @@ function build {
# Get the version API configuration file.
local configPath="$version/.config.yml"
if [ ! -f "$configPath" ]; then
configPath=".config.yml"
# Skip to the next version if the configuration file doesn't exist.
continue
fi
echo "Using config $configPath"
echo "Using config $version $configPath"
# Get the product name from the configuration.
local versionName
versionName=$(yq e '.x-influxdata-product-name' "$configPath")
@ -198,13 +165,7 @@ function build {
if [ -d "$specPath" ] || [ ! -f "$specPath" ]; then
echo "OpenAPI spec $specPath doesn't exist."
fi
# Get default status from the configuration.
local isDefault=false
local defaultStatus
defaultStatus=$(yq e ".apis | .$api | .x-influxdata-default" "$configPath")
if [[ $defaultStatus == "true" ]]; then
isDefault=true
fi
# If the spec file differs from master, regenerate the HTML.
local update=0
@ -218,9 +179,9 @@ function build {
if [[ $update -eq 0 ]]; then
echo "Regenerating $version $api"
generateHtml "$specPath" "$version" "$versionName" "$api" "$configPath" "$isDefault"
generateHtml "$specPath" "$version" "$versionName" "$api" "$configPath"
fi
echo "========Done with $version $api========"
echo -e "========Finished $version $api========\n\n"
done <<< "$apis"
done
}
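The refactor above replaces the per-case heredoc frontmatter with a single document built by `yq`. A minimal standalone sketch of that technique, with placeholder values:

```bash
# Build Hugo frontmatter as a YAML document with yq, then wrap it in "---" delimiters.
title="InfluxDB HTTP API"
menuTitle="v2 API"
menu="influxdb_v2"
weight=102
frontmatter=$(yq eval -n \
  ".title = \"$title\" |
   .layout = \"api\" |
   .weight = $weight |
   .menu.[\"$menu\"].parent = \"InfluxDB HTTP API\" |
   .menu.[\"$menu\"].name = \"$menuTitle\"")
printf -- '---\n%s\n---\n' "$frontmatter"
```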

View File

@ -62,7 +62,7 @@ function showHelp {
subcommand=$1
case "$subcommand" in
cloud-dedicated-v2|cloud-dedicated-management|cloud-serverless-v2|clustered-v2|cloud-v2|v2|v1-compat|all)
cloud-dedicated-v2|cloud-dedicated-management|cloud-serverless-v2|clustered-v2|cloud-v2|v2|v1-compat|core-v3|enterprise-v3|all)
product=$1
shift
@ -176,17 +176,6 @@ function updateCloudDedicatedV2 {
postProcess $outFile 'influxdb3/cloud-dedicated/.config.yml' v2@2
}
function updateClusteredV2 {
outFile="influxdb3/clustered/v2/ref.yml"
if [[ -z "$baseUrl" ]];
then
echo "Using existing $outFile"
else
curl $UPDATE_OPTIONS ${baseUrl}/contracts/ref/cloud.yml -o $outFile
fi
postProcess $outFile 'influxdb3/clustered/.config.yml' v2@2
}
function updateCloudServerlessV2 {
outFile="influxdb3/cloud-serverless/v2/ref.yml"
if [[ -z "$baseUrl" ]];
@ -198,15 +187,50 @@ function updateCloudServerlessV2 {
postProcess $outFile 'influxdb3/cloud-serverless/.config.yml' v2@2
}
function updateClusteredV2 {
outFile="influxdb3/clustered/v2/ref.yml"
if [[ -z "$baseUrl" ]];
then
echo "Using existing $outFile"
else
curl $UPDATE_OPTIONS ${baseUrl}/contracts/ref/cloud.yml -o $outFile
fi
postProcess $outFile 'influxdb3/clustered/.config.yml' v2@2
}
function updateCoreV3 {
outFile="influxdb3/core/v3/ref.yml"
if [[ -z "$baseUrl" ]];
then
echo "Using existing $outFile"
else
local url="${baseUrl}/TO_BE_DECIDED"
curl $UPDATE_OPTIONS $url -o $outFile
fi
postProcess $outFile 'influxdb3/core/.config.yml' v3@3
}
function updateEnterpriseV3 {
outFile="influxdb3/enterprise/v3/ref.yml"
if [[ -z "$baseUrl" ]];
then
echo "Using existing $outFile"
else
local url="${baseUrl}/TO_BE_DECIDED"
curl $UPDATE_OPTIONS $url -o $outFile
fi
postProcess $outFile 'influxdb3/enterprise/.config.yml' v3@3
}
function updateOSSV2 {
outFile="influxdb/v2/ref.yml"
outFile="influxdb/v2/v2/ref.yml"
if [[ -z "$baseUrlOSS" ]];
then
echo "Using existing $outFile"
else
curl $UPDATE_OPTIONS ${baseUrlOSS}/contracts/ref/oss.yml -o $outFile
fi
postProcess $outFile 'influxdb/v2/.config.yml' '@2'
postProcess $outFile 'influxdb/v2/.config.yml' 'v2@2'
}
function updateV1Compat {
@ -220,7 +244,7 @@ function updateV1Compat {
postProcess $outFile 'influxdb/cloud/.config.yml' 'v1-compatibility'
outFile="influxdb/v2/v1-compatibility/swaggerV1Compat.yml"
cp cloud/v1-compatibility/swaggerV1Compat.yml $outFile
cp influxdb/cloud/v1-compatibility/swaggerV1Compat.yml $outFile
postProcess $outFile 'influxdb/v2/.config.yml' 'v1-compatibility'
outFile="influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml"
@ -257,6 +281,12 @@ then
elif [ "$product" = "clustered-v2" ];
then
updateClusteredV2
elif [ "$product" = "core-v3" ];
then
updateCoreV3
elif [ "$product" = "enterprise-v3" ];
then
updateEnterpriseV3
elif [ "$product" = "v2" ];
then
updateOSSV2
@ -270,9 +300,11 @@ then
updateCloudDedicatedManagement
updateCloudServerlessV2
updateClusteredV2
updateCoreV3
updateEnterpriseV3
updateOSSV2
updateV1Compat
else
echo "Provide a product argument: cloud-v2, cloud-serverless-v2, cloud-dedicated-v2, clustered-v2, v2, v1-compat, or all."
echo "Provide a product argument: cloud-v2, cloud-serverless-v2, cloud-dedicated-v2, cloud-dedicated-management, clustered-v2, core-v3, enterprise-v3, v2, v1-compat, or all."
showHelp
fi
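With the new product arguments wired in, regenerating a single spec (or all of them) would look roughly like this. The script name `getswagger.sh` is an assumption based on this diff; use the actual spec-update script in `api-docs`:

```bash
# Fetch and post-process the InfluxDB 3 Core spec; without a base URL the script
# reuses the existing influxdb3/core/v3/ref.yml.
./getswagger.sh core-v3

# Or update every product's spec in one pass.
./getswagger.sh all
```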

View File

@ -8,6 +8,9 @@ x-influxdata-product-name: InfluxDB v2 Cloud
apis:
v2@2:
root: v2/ref.yml
x-influxdata-default: true
x-influxdata-docs-aliases:
- /influxdb/cloud/api/
v1-compatibility@2:
root: v1-compatibility/swaggerV1Compat.yml
x-influxdata-docs-aliases:
- /influxdb/cloud/api/v1/

View File

@ -1,11 +1,17 @@
title: InfluxDB v1 HTTP API for InfluxDB Cloud
title: InfluxDB v1 HTTP API for InfluxDB Cloud (TSM)
x-influxdata-short-title: v1 Compatibility API
summary: The InfluxDB v1 compatibility API provides a programmatic interface for interactions with InfluxDB Cloud using InfluxDB v1-compatible endpoints.
x-influxdata-short-description: The v1-compatibility HTTP API provides compatibility for writing and querying data in an InfluxDB v2 bucket using InfluxDB v1 endpoints.
description: |
The InfluxDB 1.x compatibility /write and /query endpoints work with InfluxDB 1.x client libraries and third-party integrations like Grafana and others.
The v1-compatibility HTTP API provides compatibility for writing and querying data in an InfluxDB v2 bucket using InfluxDB v1 endpoints.
The `/write` and `/query` endpoints support InfluxDB 1.x client libraries and third-party integrations such as Grafana.
This documentation is generated from the
[InfluxDB OpenAPI specification](https://raw.githubusercontent.com/influxdata/openapi/master/contracts/swaggerV1Compat.yml).
license:
name: MIT
url: 'https://opensource.org/licenses/MIT'
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com

View File

@ -1,16 +1,21 @@
openapi: 3.0.0
info:
title: InfluxDB v1 HTTP API for InfluxDB Cloud
title: InfluxDB v1 HTTP API for InfluxDB Cloud (TSM)
version: ''
description: |
The InfluxDB 1.x compatibility /write and /query endpoints work with InfluxDB 1.x client libraries and third-party integrations like Grafana and others.
The v1-compatibility HTTP API provides compatibility for writing and querying data in an InfluxDB v2 bucket using InfluxDB v1 endpoints.
The `/write` and `/query` endpoints support InfluxDB 1.x client libraries and third-party integrations such as Grafana.
This documentation is generated from the
[InfluxDB OpenAPI specification](https://raw.githubusercontent.com/influxdata/openapi/master/contracts/swaggerV1Compat.yml).
license:
name: MIT
url: https://opensource.org/licenses/MIT
summary: The InfluxDB v1 compatibility API provides a programmatic interface for interactions with InfluxDB Cloud using InfluxDB v1-compatible endpoints.
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com
servers:
- url: /
security:

View File

@ -1,8 +1,9 @@
title: InfluxDB Cloud API Service
x-influxdata-short-title: v2 API
summary: The InfluxDB v2 HTTP API provides a programmatic interface for all interactions with InfluxDB v2.
x-influxdata-short-description: The InfluxDB v2 HTTP API provides a programmatic interface for all interactions with InfluxDB v2.
description: |
The InfluxDB v2 HTTP API provides a programmatic interface for all interactions with InfluxDB v2. Access the InfluxDB API using `/api/v2/` and InfluxDB v1-compatible endpoints.
The InfluxDB v2 HTTP API provides a programmatic interface for all interactions with InfluxDB v2.
Access the InfluxDB API using `/api/v2/` and InfluxDB v1-compatible endpoints.
This documentation is generated from the
[InfluxDB OpenAPI specification](https://raw.githubusercontent.com/influxdata/openapi/master/contracts/ref/cloud.yml).
@ -10,3 +11,7 @@ version: 2.x
license:
name: MIT
url: 'https://opensource.org/licenses/MIT'
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com

View File

@ -6,8 +6,5 @@
- Headers
- Pagination
- Response codes
- Data I/O endpoints
- Security and access endpoints
- System information endpoints
- name: All endpoints
tags: []

View File

@ -3,14 +3,18 @@ info:
title: InfluxDB Cloud API Service
version: 2.x
description: |
The InfluxDB v2 HTTP API provides a programmatic interface for all interactions with InfluxDB v2. Access the InfluxDB API using `/api/v2/` and InfluxDB v1-compatible endpoints.
The InfluxDB v2 HTTP API provides a programmatic interface for all interactions with InfluxDB v2.
Access the InfluxDB API using `/api/v2/` and InfluxDB v1-compatible endpoints.
This documentation is generated from the
[InfluxDB OpenAPI specification](https://raw.githubusercontent.com/influxdata/openapi/master/contracts/ref/cloud.yml).
license:
name: MIT
url: https://opensource.org/licenses/MIT
summary: The InfluxDB v2 HTTP API provides a programmatic interface for all interactions with InfluxDB v2.
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com
servers:
- url: https://{baseurl}
description: InfluxDB Cloud API URL
@ -97,8 +101,8 @@ tags:
- name: Dashboards
- name: Data I/O endpoints
- description: |
The InfluxDB 1.x data model includes [databases](/influxdb/v1.8/concepts/glossary/#database)
and [retention policies](/influxdb/v1.8/concepts/glossary/#retention-policy-rp).
The InfluxDB 1.x data model includes [databases](/influxdb/cloud/reference/glossary/#database)
and [retention policies](/influxdb/cloud/reference/glossary/#retention-policy-rp).
InfluxDB 2.x replaces databases and retention policies with buckets.
To support InfluxDB 1.x query and write patterns in InfluxDB 2.x,
databases and retention policies are mapped to buckets using the
@ -410,7 +414,7 @@ paths:
Specifies an authorization by its `token` property value
and returns the authorization.
#### InfluxDB OSS
#### InfluxDB OSS v2
- Doesn't support this parameter. InfluxDB OSS ignores the `token=` parameter,
applies other parameters, and then returns the result.
@ -437,11 +441,11 @@ paths:
token used in the request has `read-user` permission for the users (`userID` property value)
in those authorizations.
#### InfluxDB OSS
#### InfluxDB OSS v2
- **Warning**: The response body contains authorizations with their
[API token](/influxdb/cloud/reference/glossary/#token) values in clear text.
- If the request uses an _[operator token](/influxdb/latest/security/tokens/#operator-token)_,
- If the request uses an _[operator token](/influxdb/v2/security/tokens/#operator-token)_,
InfluxDB OSS returns authorizations for all organizations in the instance.
'400':
$ref: '#/components/responses/GeneralServerError'
@ -593,11 +597,11 @@ paths:
Use this endpoint to retrieve information about an API token, including
the token's permissions and the user that the token is scoped to.
#### InfluxDB OSS
#### InfluxDB OSS v2
- InfluxDB OSS returns
[API token](/influxdb/cloud/reference/glossary/#token) values in authorizations.
- If the request uses an _[operator token](/influxdb/latest/security/tokens/#operator-token)_,
- If the request uses an _[operator token](/influxdb/v2/security/tokens/#operator-token)_,
InfluxDB OSS returns authorizations for all organizations in the instance.
#### Related guides
@ -710,7 +714,7 @@ paths:
If no query parameters are passed, InfluxDB returns all buckets up to the
default `limit`.
#### InfluxDB OSS
#### InfluxDB OSS v2
- If you use an _[operator token](/influxdb/cloud/security/tokens/#operator-token)_
to authenticate your request, InfluxDB retrieves resources for _all
@ -742,7 +746,7 @@ paths:
- Doesn't use the `org` parameter or `orgID` parameter.
- Lists buckets for the organization associated with the authorization (API token).
#### InfluxDB OSS
#### InfluxDB OSS v2
- Lists buckets for the specified organization.
in: query
@ -757,7 +761,7 @@ paths:
- Doesn't use the `org` parameter or `orgID` parameter.
- Lists buckets for the organization associated with the authorization (API token).
#### InfluxDB OSS
#### InfluxDB OSS v2
- Requires either the `org` parameter or `orgID` parameter.
- Lists buckets for the specified organization.
@ -842,7 +846,7 @@ paths:
[retention period](/influxdb/cloud/reference/glossary/#retention-period)
is 30 days.
#### InfluxDB OSS
#### InfluxDB OSS v2
- A single InfluxDB OSS instance supports active writes or queries for
approximately 20 buckets across all organizations at a given time.
@ -985,7 +989,7 @@ paths:
2. Returns an HTTP `204` status code if queued; _error_ otherwise.
3. Handles the delete asynchronously.
#### InfluxDB OSS
#### InfluxDB OSS v2
- Validates the request, handles the delete synchronously,
and then responds with success or failure.
@ -1016,7 +1020,7 @@ paths:
#### InfluxDB Cloud
- The bucket is queued for deletion.
#### InfluxDB OSS
#### InfluxDB OSS v2
- The bucket is deleted.
'400':
content:
@ -1153,7 +1157,7 @@ paths:
- Requires the `retentionRules` property in the request body. If you don't
provide `retentionRules`, InfluxDB responds with an HTTP `403` status code.
#### InfluxDB OSS
#### InfluxDB OSS v2
- Doesn't require `retentionRules`.
@ -3452,7 +3456,7 @@ paths:
schema:
type: string
- description: |
A [retention policy](/influxdb/v1.8/concepts/glossary/#retention-policy-rp).
A [retention policy](/influxdb/cloud/reference/glossary/#retention-policy-rp).
Specifies the 1.x retention policy to filter on.
in: query
name: rp
@ -3857,7 +3861,7 @@ paths:
Because writes and deletes are asynchronous, your change might not yet be readable
when you receive the response.
#### InfluxDB OSS
#### InfluxDB OSS v2
- Validates the request, handles the delete synchronously,
and then responds with success or failure.
@ -3891,7 +3895,7 @@ paths:
- Deletes data from the bucket in the organization
associated with the authorization (API token).
#### InfluxDB OSS
#### InfluxDB OSS v2
- Requires either the `org` parameter or the `orgID` parameter.
- Deletes data from the bucket in the specified organization.
@ -3919,7 +3923,7 @@ paths:
- Deletes data from the bucket in the organization
associated with the authorization (API token).
#### InfluxDB OSS
#### InfluxDB OSS v2
- Requires either the `org` parameter or the `orgID` parameter.
- Deletes data from the bucket in the specified organization.
@ -3973,7 +3977,7 @@ paths:
Because writes are asynchronous, data might not yet be written
when you receive the response.
#### InfluxDB OSS
#### InfluxDB OSS v2
- Deleted the data.
'400':
@ -3991,7 +3995,7 @@ paths:
Bad request.
The response body contains detail about the error.
#### InfluxDB OSS
#### InfluxDB OSS v2
- Returns this error if the `org` parameter or `orgID` parameter doesn't match an organization.
'401':
@ -4031,16 +4035,12 @@ paths:
1. Use [token authentication](#section/Authentication/TokenAuthentication) or a [user session](#tag/Signin) with this endpoint to retrieve
feature flags and their values.
2. Follow the instructions to [enable, disable, or override values for feature flags](/influxdb/cloud/reference/config-options/#feature-flags).
2. Follow the instructions to enable, disable, or override values for feature flags.
3. **Optional**: To confirm that your change is applied, do one of the following:
- Send a request to this endpoint to retrieve the current feature flag values.
- Send a request to the [`GET /api/v2/config` endpoint](#operation/GetConfig) to retrieve the
current runtime server configuration.
#### Related guides
- [InfluxDB configuration options](/influxdb/cloud/reference/config-options/)
operationId: GetFlags
parameters:
- $ref: '#/components/parameters/TraceSpan'
@ -4262,7 +4262,7 @@ paths:
#### Related guides
- [InfluxDB Cloud - Change your password](/influxdb/cloud/account-management/change-password/)
- [InfluxDB OSS - Change your password](/influxdb/latest/users/change-password/)
- [InfluxDB OSS - Change your password](/influxdb/v2/users/change-password/)
operationId: PutMePassword
parameters:
- $ref: '#/components/parameters/TraceSpan'
@ -4298,7 +4298,7 @@ paths:
- Doesn't let you manage user passwords through the API; always responds with this status.
#### InfluxDB OSS
#### InfluxDB OSS v2
- Doesn't understand a value passed in the request.
'401':
@ -5111,7 +5111,7 @@ paths:
2. Returns an HTTP `204` status code if queued; _error_ otherwise.
3. Handles the delete asynchronously.
#### InfluxDB OSS
#### InfluxDB OSS v2
- Validates the request, handles the delete synchronously,
and then responds with success or failure.
@ -5141,7 +5141,7 @@ paths:
#### InfluxDB Cloud
- The organization is queued for deletion.
#### InfluxDB OSS
#### InfluxDB OSS v2
- The organization is deleted.
'400':
$ref: '#/components/responses/BadRequestError'
@ -6076,7 +6076,7 @@ paths:
- Doesn't use the `org` parameter or `orgID` parameter.
- Queries the bucket in the organization associated with the authorization (API token).
#### InfluxDB OSS
#### InfluxDB OSS v2
- Requires either the `org` parameter or `orgID` parameter.
- Queries the bucket in the specified organization.
@ -6092,7 +6092,7 @@ paths:
- Doesn't use the `org` parameter or `orgID` parameter.
- Queries the bucket in the organization associated with the authorization (API token).
#### InfluxDB OSS
#### InfluxDB OSS v2
- Requires either the `org` parameter or `orgID` parameter.
- Queries the bucket in the specified organization.
@ -6156,7 +6156,7 @@ paths:
Bad request.
The response body contains detail about the error.
#### InfluxDB OSS
#### InfluxDB OSS v2
- Returns this error if the `org` parameter or `orgID` parameter doesn't match an organization.
'401':
@ -6172,7 +6172,7 @@ paths:
[global limit](/influxdb/cloud/account-management/limits/#global-limits)
- returns `Retry-After` header that describes when to try the write again.
#### InfluxDB OSS:
#### InfluxDB OSS v2:
- doesn't return this error.
headers:
Retry-After:
@ -8508,7 +8508,7 @@ paths:
Bad request.
The response body contains detail about the error.
#### InfluxDB OSS
#### InfluxDB OSS v2
- Returns this error if an incorrect value is passed in the `org` parameter or `orgID` parameter.
'401':
@ -9857,7 +9857,7 @@ paths:
- Always returns this error; doesn't support cancelling tasks.
#### InfluxDB OSS
#### InfluxDB OSS v2
- Doesn't return this error.
'500':
@ -11184,7 +11184,7 @@ paths:
#### Related guides
- [InfluxDB Cloud - Change your password](/influxdb/cloud/account-management/change-password/)
- [InfluxDB OSS - Change your password](/influxdb/latest/users/change-password/)
- [InfluxDB OSS - Change your password](/influxdb/v2/users/change-password/)
operationId: PostUsersIDPassword
parameters:
- $ref: '#/components/parameters/TraceSpan'
@ -11222,7 +11222,7 @@ paths:
- Doesn't allow you to manage passwords through the API; always responds with this status.
#### InfluxDB OSS
#### InfluxDB OSS v2
- Doesn't understand a value passed in the request.
default:
@ -11259,7 +11259,7 @@ paths:
#### Related guides
- [InfluxDB Cloud - Change your password](/influxdb/cloud/account-management/change-password/)
- [InfluxDB OSS - Change your password](/influxdb/latest/users/change-password/)
- [InfluxDB OSS - Change your password](/influxdb/v2/users/change-password/)
operationId: PutUsersIDPassword
parameters:
- $ref: '#/components/parameters/TraceSpan'
@ -11297,7 +11297,7 @@ paths:
- Doesn't allow you to manage passwords through the API; always responds with this status.
#### InfluxDB OSS
#### InfluxDB OSS v2
- Doesn't understand a value passed in the request.
default:
@ -11600,7 +11600,7 @@ paths:
Because writes and deletes are asynchronous, your change might not yet be readable
when you receive the response.
#### InfluxDB OSS
#### InfluxDB OSS v2
- Validates the request and handles the write synchronously.
- If all points were written successfully, responds with HTTP `2xx` status code;
@ -11671,7 +11671,7 @@ paths:
- Returns only `application/json` for format and limit errors.
- Returns only `text/html` for some quota limit errors.
#### InfluxDB OSS
#### InfluxDB OSS v2
- Returns only `application/json` for format and limit errors.
@ -11695,7 +11695,7 @@ paths:
- Writes data to the bucket in the organization
associated with the authorization (API token).
#### InfluxDB OSS
#### InfluxDB OSS v2
- Requires either the `org` parameter or the `orgID` parameter.
- If you pass both `orgID` and `org`, they must both be valid.
@ -11715,7 +11715,7 @@ paths:
- Writes data to the bucket in the organization
associated with the authorization (API token).
#### InfluxDB OSS
#### InfluxDB OSS v2
- Requires either the `org` parameter or the `orgID` parameter.
- If you pass both `orgID` and `org`, they must both be valid.
@ -11772,7 +11772,7 @@ paths:
- Validated and queued the request.
- Handles the write asynchronously - the write might not have completed yet.
#### InfluxDB OSS
#### InfluxDB OSS v2
- Successfully wrote all points in the batch.
@ -11807,7 +11807,7 @@ paths:
- Returns this error for bucket schema conflicts.
#### InfluxDB OSS
#### InfluxDB OSS v2
- Returns this error if the `org` parameter or `orgID` parameter doesn't match an organization.
'401':
@ -11848,7 +11848,7 @@ paths:
- Returns this error if the payload exceeds the 50MB size limit.
- Returns `Content-Type: text/html` for this error.
#### InfluxDB OSS:
#### InfluxDB OSS v2:
- Returns this error only if the [Go (golang) `ioutil.ReadAll()`](https://pkg.go.dev/io/ioutil#ReadAll) function raises an error.
- Returns `Content-Type: application/json` for this error.
@ -11866,7 +11866,7 @@ paths:
Rates (data-in (writes), queries (reads), and deletes) accrue within a fixed five-minute window.
Once a rate limit is exceeded, InfluxDB returns an error response until the current five-minute window resets.
#### InfluxDB OSS
#### InfluxDB OSS v2
- Doesn't return this error.
headers:
@ -12186,7 +12186,7 @@ paths:
- description: |
The database to query data from.
This is mapped to an InfluxDB [bucket](/influxdb/cloud/reference/glossary/#bucket).
For more information, see [Database and retention policy mapping](/influxdb/cloud/api/influxdb-1x/dbrp/).
For more information, see [Database and retention policy mapping](/influxdb/cloud/api-guide/influxdb-1x/dbrp/).
in: query
name: db
required: true
@ -12195,7 +12195,7 @@ paths:
- description: |
The retention policy to query data from.
This is mapped to an InfluxDB [bucket](/influxdb/cloud/reference/glossary/#bucket).
For more information, see [Database and retention policy mapping](/influxdb/cloud/api/influxdb-1x/dbrp/).
For more information, see [Database and retention policy mapping](/influxdb/cloud/api-guide/influxdb-1x/dbrp/).
in: query
name: rp
schema:
@ -12208,7 +12208,7 @@ paths:
type: string
- description: |
A unix timestamp precision.
Formats timestamps as [unix (epoch) timestamps](/influxdb/cloud/reference/glossary/#unix-timestamp) the specified precision
Formats timestamps as [unix (epoch) timestamps](/influxdb/cloud/reference/glossary/#unix-timestamp) with the specified precision
instead of [RFC3339 timestamps](/influxdb/cloud/reference/glossary/#rfc3339-timestamp) with nanosecond precision.
in: query
name: epoch
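For context on how the `db`, `rp`, and `epoch` parameters above fit together, here is a hedged example of a v1-compatibility query; the host, token, and measurement name are placeholders:

```bash
# Query a v1-mapped database and retention policy, returning millisecond epoch timestamps.
curl --get "https://$INFLUX_HOST/query" \
  --header "Authorization: Token $INFLUX_TOKEN" \
  --data-urlencode "db=mydb" \
  --data-urlencode "rp=autogen" \
  --data-urlencode "epoch=ms" \
  --data-urlencode "q=SELECT * FROM home LIMIT 3"
```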
@ -12271,7 +12271,7 @@ paths:
[global limit](/influxdb/cloud/account-management/limits/#global-limits)
- returns `Retry-After` header that describes when to try the write again.
#### InfluxDB OSS:
#### InfluxDB OSS v2:
- doesn't return this error.
headers:
Retry-After:
@ -12550,7 +12550,7 @@ components:
Bad request.
The response body contains detail about the error.
#### InfluxDB OSS
#### InfluxDB OSS v2
- Returns this error if an incorrect value is passed in the `org` parameter or `orgID` parameter.
GeneralServerError:
@ -13448,7 +13448,7 @@ components:
type: string
retention_policy:
description: |
A [retention policy](/influxdb/v1.8/concepts/glossary/#retention-policy-rp) name.
A [retention policy](/influxdb/cloud/reference/glossary/#retention-policy-rp) name.
Identifies the InfluxDB v1 retention policy mapping.
type: string
virtual:
@ -13491,7 +13491,7 @@ components:
type: string
retention_policy:
description: |
A [retention policy](/influxdb/v1.8/concepts/glossary/#retention-policy-rp) name.
A [retention policy](/influxdb/cloud/reference/glossary/#retention-policy-rp) name.
Identifies the InfluxDB v1 retention policy mapping.
type: string
required:
@ -13515,7 +13515,7 @@ components:
type: boolean
retention_policy:
description: |
A [retention policy](/influxdb/v1.8/concepts/glossary/#retention-policy-rp) name.
A [retention policy](/influxdb/cloud/reference/glossary/#retention-policy-rp) name.
Identifies the InfluxDB v1 retention policy mapping.
type: string
DBRPs:
@ -15997,7 +15997,7 @@ components:
- Doesn't use `shardGroupDurationsSeconds`.
#### InfluxDB OSS
#### InfluxDB OSS v2
- Default value depends on the [bucket retention period](/influxdb/cloud/reference/internals/shards/#shard-group-duration).
@ -16130,7 +16130,7 @@ components:
If you need compatibility with InfluxDB 1.x, specify a value for the `rp` property;
otherwise, see the `retentionRules` property.
[Retention policy](/influxdb/v1.8/concepts/glossary/#retention-policy-rp)
[Retention policy](/influxdb/cloud/reference/glossary/#retention-policy-rp)
is an InfluxDB 1.x concept.
The InfluxDB 2.x and Cloud equivalent is
[retention period](/influxdb/cloud/reference/glossary/#retention-period).
@ -16147,7 +16147,7 @@ components:
- Use `explicit` to enforce column names, tags, fields, and data types for
your data.
#### InfluxDB OSS
#### InfluxDB OSS v2
- Doesn't support `explicit` bucket schemas.
required:
@ -16440,7 +16440,7 @@ components:
- Does not use `shardGroupDurationsSeconds`.
#### InfluxDB OSS
#### InfluxDB OSS v2
- Default value depends on the
[bucket retention period](/influxdb/cloud/reference/internals/shards/#shard-group-duration).
@ -16463,7 +16463,7 @@ components:
- `retentionRules` is required.
#### InfluxDB OSS
#### InfluxDB OSS v2
- `retentionRules` isn't required.
items:
@ -19156,9 +19156,6 @@ x-tagGroups:
- Headers
- Pagination
- Response codes
- Data I/O endpoints
- Security and access endpoints
- System information endpoints
- name: All endpoints
tags:
- Authorizations (API tokens)

View File

@ -6,8 +6,11 @@ extends:
x-influxdata-product-name: InfluxDB v2 OSS
apis:
'@2':
root: ref.yml
x-influxdata-default: true
v2@2:
root: v2/ref.yml
x-influxdata-docs-aliases:
- /influxdb/v2/api/
v1-compatibility@2:
root: v1-compatibility/swaggerV1Compat.yml
x-influxdata-docs-aliases:
- /influxdb/v2/api/v1/

View File

@ -1,12 +0,0 @@
title: InfluxDB OSS API Service
x-influxdata-short-title: v2 API
summary: The InfluxDB v2 HTTP API provides a programmatic interface for all interactions with an InfluxDB v2 instance.
description: |
The InfluxDB v2 HTTP API provides a programmatic interface for all interactions with an InfluxDB v2 instance. Access the InfluxDB API using `/api/v2/` and InfluxDB v1-compatible endpoints.
This documentation is generated from the
[InfluxDB OpenAPI specification](https://github.com/influxdata/openapi/blob/influxdb-oss-v2.7.0/contracts/ref/oss.yml).
version: 2.x
license:
name: MIT
url: 'https://opensource.org/licenses/MIT'

View File

@ -1,12 +1,15 @@
title: InfluxDB v1 HTTP API for InfluxDB v2 OSS
x-influxdata-short-title: v1 Compatibility API
summary: The InfluxDB v1 compatibility API provides a programmatic interface for interactions with InfluxDB v2 using InfluxDB v1-compatible endpoints.
description: |
The v1-compatibility HTTP API provides compatibility for writing and querying data in an InfluxDB v2 bucket using InfluxDB v1 endpoints.
The InfluxDB 1.x compatibility `/write` and `/query` endpoints work with InfluxDB 1.x client libraries and third-party integrations like Grafana and others.
This documentation is generated from the
[InfluxDB OpenAPI specification](https://raw.githubusercontent.com/influxdata/openapi/master/contracts/swaggerV1Compat.yml).
license:
name: MIT
url: 'https://opensource.org/licenses/MIT'
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com

View File

@ -3,14 +3,18 @@ info:
title: InfluxDB v1 HTTP API for InfluxDB v2 OSS
version: ''
description: |
The InfluxDB 1.x compatibility /write and /query endpoints work with InfluxDB 1.x client libraries and third-party integrations like Grafana and others.
The v1-compatibility HTTP API provides compatibility for writing and querying data in an InfluxDB v2 bucket using InfluxDB v1 endpoints.
The InfluxDB 1.x compatibility `/write` and `/query` endpoints work with InfluxDB 1.x client libraries and third-party integrations like Grafana and others.
This documentation is generated from the
[InfluxDB OpenAPI specification](https://raw.githubusercontent.com/influxdata/openapi/master/contracts/swaggerV1Compat.yml).
license:
name: MIT
url: https://opensource.org/licenses/MIT
summary: The InfluxDB v1 compatibility API provides a programmatic interface for interactions with InfluxDB v2 using InfluxDB v1-compatible endpoints.
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com
servers:
- url: /
security:
@ -463,19 +467,3 @@ components:
For examples and more information, see how to [authenticate with a username and password](/influxdb/cloud/reference/api/influxdb-1x/).
x-tagGroups:
- name: Using the InfluxDB HTTP API
tags:
- Quick start
- Authentication
- Supported operations
- Headers
- Pagination
- Response codes
- Data I/O endpoints
- Security and access endpoints
- System information endpoints
- name: All endpoints
tags:
- Query
- Write

View File

@ -0,0 +1,17 @@
title: InfluxDB OSS API Service
x-influxdata-short-title: v2 API
description: |
The InfluxDB v2 HTTP API provides a programmatic interface for all interactions with an InfluxDB v2 instance.
The InfluxDB v2 HTTP API provides a programmatic interface for all interactions with an InfluxDB v2 instance. Access the InfluxDB API using `/api/v2/` and InfluxDB v1-compatible endpoints.
This documentation is generated from the
[InfluxDB OpenAPI specification](https://github.com/influxdata/openapi/blob/influxdb-oss-v2.7.0/contracts/ref/oss.yml).
version: 2.x
license:
name: MIT
url: 'https://opensource.org/licenses/MIT'
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com

View File

@ -6,8 +6,5 @@
- Headers
- Pagination
- Response codes
- Data I/O endpoints
- Security and access endpoints
- System information endpoints
- name: All endpoints
tags: []

View File

@ -10,6 +10,9 @@ apis:
root: management/openapi.yml
v2@2:
root: v2/ref.yml
x-influxdata-default: true
x-influxdata-docs-aliases:
- /influxdb3/cloud-dedicated/api/
v1-compatibility@2:
root: v1-compatibility/swaggerV1Compat.yml
x-influxdata-docs-aliases:
- /influxdb3/cloud-dedicated/api/v1/

View File

@ -1,12 +1,15 @@
title: InfluxDB 3 Cloud Dedicated Management API
x-influxdata-short-title: Management API
summary: |
The Management API for InfluxDB 3 Cloud Dedicated provides a programmatic interface for managing a Cloud Dedicated instance.
description: |
The Management API lets you manage an InfluxDB 3 Cloud Dedicated instance and integrate functions such as creating and managing databases, permissions, and tokens into your workflow or application.
The Management API for InfluxDB 3 Cloud Dedicated provides a programmatic interface for managing a Cloud Dedicated cluster.
The Management API lets you integrate functions such as creating and managing databases, permissions, and tokens into your workflow or application.
This documentation is generated from the
InfluxDB OpenAPI specification.
license:
name: MIT
url: 'https://opensource.org/licenses/MIT'
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com

View File

@ -2,16 +2,19 @@ openapi: 3.1.0
info:
title: InfluxDB 3 Cloud Dedicated Management API
description: |
The Management API lets you manage an InfluxDB 3 Cloud Dedicated instance and integrate functions such as creating and managing databases, permissions, and tokens into your workflow or application.
The Management API for InfluxDB 3 Cloud Dedicated provides a programmatic interface for managing a Cloud Dedicated cluster.
The Management API lets you integrate functions such as creating and managing databases, permissions, and tokens into your workflow or application.
This documentation is generated from the
InfluxDB OpenAPI specification.
summary: |
The Management API for InfluxDB 3 Cloud Dedicated provides a programmatic interface for managing a Cloud Dedicated instance.
license:
name: MIT
url: https://opensource.org/licenses/MIT
version: ''
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com
servers:
- url: https://{baseurl}/api/v0
description: InfluxDB 3 Cloud Dedicated Management API URL
@ -37,8 +40,6 @@ tags:
See how to [create a management token](/influxdb3/cloud-dedicated/admin/tokens/management/).
By default, management tokens in InfluxDB 3 are short-lived tokens issued by an OAuth2 identity provider that grant a specific user administrative access to your InfluxDB cluster. However, for automation purposes, you can manually create management tokens that authenticate directly with your InfluxDB cluster and do not require human interaction with your identity provider.
- name: Database tokens
description: Manage database read/write tokens for a cluster
- name: Databases
@ -1020,7 +1021,6 @@ paths:
For example, see how to [authenticate Telegraf using tokens in your OS secret store](https://github.com/influxdata/telegraf/tree/master/plugins/secretstores/os).
If you lose a token, [delete the token from InfluxDB](/influxdb3/cloud-dedicated/admin/tokens/database/delete/) and create a new one.
parameters:
- name: accountId
in: path

View File

@ -1,7 +1,9 @@
title: InfluxDB v1 HTTP API for InfluxDB 3 Cloud Dedicated
x-influxdata-short-title: v1 Compatibility API
summary: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Cloud Dedicated database.
x-influxdata-short-description: The v1-compatibility HTTP API provides compatibility for writing and querying data in an InfluxDB 3 Cloud Dedicated database using InfluxDB v1 endpoints.
description: |
The v1-compatibility HTTP API provides compatibility for writing and querying data in an InfluxDB 3 Cloud Dedicated database using InfluxDB v1 endpoints.
The InfluxDB 1.x `/write` and `/query` endpoints work with InfluxDB 1.x client libraries and third-party integrations like Grafana and others.
This documentation is generated from the
@ -13,3 +15,7 @@ description: |
license:
name: MIT
url: 'https://opensource.org/licenses/MIT'
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com

View File

@ -3,6 +3,8 @@ info:
title: InfluxDB v1 HTTP API for InfluxDB 3 Cloud Dedicated
version: ''
description: |
The v1-compatibility HTTP API provides compatibility for writing and querying data in an InfluxDB 3 Cloud Dedicated database using InfluxDB v1 endpoints.
The InfluxDB 1.x `/write` and `/query` endpoints work with InfluxDB 1.x client libraries and third-party integrations like Grafana and others.
This documentation is generated from the
@ -14,7 +16,10 @@ info:
license:
name: MIT
url: https://opensource.org/licenses/MIT
summary: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Cloud Dedicated database.
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com
servers:
- url: /
security:
@ -35,6 +40,7 @@ tags:
<!-- ReDoc-Inject: <security-definitions> -->
x-traitTag: true
- name: Ping
- name: Query
- name: Write
paths:
@ -290,7 +296,6 @@ paths:
To check the health of ingesters before writing data, send a request to one of the [write endpoints](/influxdb3/cloud-dedicated/api/v2/#tag/Write).
This endpoint doesn't require authentication.
operationId: HeadPing
responses:
'204':

View File

@ -1,12 +1,17 @@
title: InfluxDB v2 HTTP API for InfluxDB 3 Cloud Dedicated
x-influxdata-short-title: v2 API
summary: The InfluxDB v2 HTTP API for InfluxDB 3 Cloud Dedicated provides a v2-compatible programmatic interface for writing data stored in an InfluxDB 3 Cloud Dedicated database.
x-influxdata-short-description: The InfluxDB v2 HTTP API provides a v2-compatible programmatic interface for writing and managing data stored in an InfluxDB 3 Cloud Dedicated database.
description: |
The InfluxDB v2 HTTP API lets you use `/api/v2` endpoints for managing retention policy mappings and writing data stored in an InfluxDB 3 instance.
The InfluxDB v2 HTTP API provides a v2-compatible programmatic interface for writing and managing data stored in an InfluxDB 3 Cloud Dedicated database.
Use the InfluxDB v2 HTTP API `/api/v2` endpoints to manage retention policy mappings and write data to an InfluxDB 3 instance.
This documentation is generated from the
[InfluxDB OpenAPI specification](https://raw.githubusercontent.com/influxdata/openapi/master/contracts/ref/cloud.yml).
license:
name: MIT
url: 'https://opensource.org/licenses/MIT'
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com

View File

@ -6,7 +6,6 @@
- Headers
- Pagination
- Response codes
- System information endpoints
- name: All endpoints
tags:
- Ping

View File

@ -2,15 +2,20 @@ openapi: 3.0.0
info:
title: InfluxDB v2 HTTP API for InfluxDB 3 Cloud Dedicated
description: |
The InfluxDB v2 HTTP API lets you use `/api/v2` endpoints for managing retention policy mappings and writing data stored in an InfluxDB 3 instance.
The InfluxDB v2 HTTP API provides a v2-compatible programmatic interface for writing and managing data stored in an InfluxDB 3 Cloud Dedicated database.
Use the InfluxDB v2 HTTP API `/api/v2` endpoints to manage retention policy mappings and write data to an InfluxDB 3 instance.
This documentation is generated from the
[InfluxDB OpenAPI specification](https://raw.githubusercontent.com/influxdata/openapi/master/contracts/ref/cloud.yml).
license:
name: MIT
url: https://opensource.org/licenses/MIT
summary: The InfluxDB v2 HTTP API for InfluxDB 3 Cloud Dedicated provides a v2-compatible programmatic interface for writing data stored in an InfluxDB 3 Cloud Dedicated database.
version: ''
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com
servers:
- url: https://{baseurl}
description: InfluxDB 3 Cloud Dedicated API URL
@ -499,7 +504,7 @@ paths:
type: string
- description: |
A unix timestamp precision.
Formats timestamps as [unix (epoch) timestamps](/influxdb3/cloud-dedicated/reference/glossary/#unix-timestamp) the specified precision
Formats timestamps as [unix (epoch) timestamps](/influxdb3/cloud-dedicated/reference/glossary/#unix-timestamp) with the specified precision
instead of [RFC3339 timestamps](/influxdb3/cloud-dedicated/reference/glossary/#rfc3339-timestamp) with nanosecond precision.
in: query
name: epoch
@ -932,7 +937,7 @@ components:
type: string
retention_policy:
description: |
A [retention policy](/influxdb/v1.8/concepts/glossary/#retention-policy-rp) name.
A [retention policy](/influxdb3/cloud-dedicated/reference/glossary/#retention-policy-rp) name.
Identifies the InfluxDB v1 retention policy mapping.
type: string
virtual:
@ -975,7 +980,7 @@ components:
type: string
retention_policy:
description: |
A [retention policy](/influxdb/v1.8/concepts/glossary/#retention-policy-rp) name.
A [retention policy](/influxdb3/cloud-dedicated/reference/glossary/#retention-policy-rp) name.
Identifies the InfluxDB v1 retention policy mapping.
type: string
required:
@ -999,7 +1004,7 @@ components:
type: boolean
retention_policy:
description: |
A [retention policy](/influxdb/v1.8/concepts/glossary/#retention-policy-rp) name.
A [retention policy](/influxdb3/cloud-dedicated/reference/glossary/#retention-policy-rp) name.
Identifies the InfluxDB v1 retention policy mapping.
type: string
DBRPs:
@ -2138,7 +2143,6 @@ x-tagGroups:
- Headers
- Pagination
- Response codes
- System information endpoints
- name: All endpoints
tags:
- Ping

View File

@ -8,6 +8,9 @@ x-influxdata-product-name: InfluxDB 3 Serverless
apis:
v2@2:
root: v2/ref.yml
x-influxdata-default: true
x-influxdata-docs-aliases:
- /influxdb3/cloud-serverless/api/
v1-compatibility@2:
root: v1-compatibility/swaggerV1Compat.yml
x-influxdata-docs-aliases:
- /influxdb3/cloud-serverless/api/v1/

View File

@ -1,7 +1,9 @@
title: InfluxDB v1 HTTP API for InfluxDB 3 Cloud Serverless
x-influxdata-short-title: v1 Compatibility API
summary: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Cloud Serverless bucket.
x-influxdata-short-description: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Cloud Serverless bucket.
description: |
The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Cloud Serverless bucket.
The InfluxDB 1.x `/write` and `/query` endpoints work with InfluxDB 1.x client libraries and third-party integrations like Grafana and others.
This documentation is generated from the
@ -13,3 +15,7 @@ description: |
license:
name: MIT
url: 'https://opensource.org/licenses/MIT'
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com

View File

@ -3,6 +3,8 @@ info:
title: InfluxDB v1 HTTP API for InfluxDB 3 Cloud Serverless
version: ''
description: |
The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Cloud Serverless bucket.
The InfluxDB 1.x `/write` and `/query` endpoints work with InfluxDB 1.x client libraries and third-party integrations like Grafana and others.
This documentation is generated from the
@ -14,7 +16,10 @@ info:
license:
name: MIT
url: https://opensource.org/licenses/MIT
summary: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Cloud Serverless bucket.
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com
servers:
- url: /
security:

View File

@ -1,8 +1,9 @@
title: InfluxDB 3 Cloud Serverless API Service
x-influxdata-short-title: v2 API
summary: |
The InfluxDB v2 HTTP API for InfluxDB 3 Cloud Serverless provides a programmatic interface for writing data stored in an InfluxDB 3 Cloud Serverless bucket.
x-influxdata-short-description: The InfluxDB v2 HTTP API for InfluxDB 3 Cloud Serverless provides a programmatic interface for writing data stored in an InfluxDB 3 Cloud Serverless bucket.
description: |
The InfluxDB v2 HTTP API for InfluxDB 3 Cloud Serverless provides a programmatic interface for writing data stored in an InfluxDB 3 Cloud Serverless bucket.
The InfluxDB v2 HTTP API lets you use `/api/v2` endpoints for managing retention policy mappings and writing data stored in an InfluxDB 3 instance.
This documentation is generated from the
@ -10,3 +11,7 @@ description: |
license:
name: MIT
url: 'https://opensource.org/licenses/MIT'
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com

View File

@ -6,9 +6,6 @@
- Headers
- Pagination
- Response codes
- Data I/O endpoints
- Security and access endpoints
- System information endpoints
- name: All endpoints
tags:
- Authorizations (API tokens)

View File

@ -2,6 +2,8 @@ openapi: 3.0.0
info:
title: InfluxDB 3 Cloud Serverless API Service
description: |
The InfluxDB v2 HTTP API for InfluxDB 3 Cloud Serverless provides a programmatic interface for writing data stored in an InfluxDB 3 Cloud Serverless bucket.
The InfluxDB v2 HTTP API lets you use `/api/v2` endpoints for managing retention policy mappings and writing data stored in an InfluxDB 3 instance.
This documentation is generated from the
@ -9,9 +11,11 @@ info:
license:
name: MIT
url: https://opensource.org/licenses/MIT
summary: |
The InfluxDB v2 HTTP API for InfluxDB 3 Cloud Serverless provides a programmatic interface for writing data stored in an InfluxDB 3 Cloud Serverless bucket.
version: ''
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com
servers:
- url: https://{baseurl}
description: InfluxDB 3 Cloud Serverless API URL
@ -122,16 +126,16 @@ tags:
|:------------------------ |:--------------------- |:-------------------------------------------|
| `bucket` | string | The bucket name or ID ([find your bucket](/influxdb3/cloud-serverless/admin/buckets/view-buckets/). |
| `bucketID` | string | The bucket ID ([find your bucket](/influxdb3/cloud-serverless/admin/buckets/view-buckets/). |
| `org` | string | The organization name or ID ([find your organization](/influxdb3/cloud-serverless/organizations/view-orgs/). |
| `orgID` | 16-byte string | The organization ID ([find your organization](/influxdb3/cloud-serverless/organizations/view-orgs/). |
| `org` | string | The organization name or ID ([find your organization](/influxdb3/cloud-serverless/admin/organizations/view-orgs/). |
| `orgID` | 16-byte string | The organization ID ([find your organization](/influxdb3/cloud-serverless/admin/organizations/view-orgs/). |
name: Common parameters
x-traitTag: true
- name: Config
- name: Dashboards
- name: Data I/O endpoints
- description: |
The InfluxDB 1.x data model includes [databases](/influxdb/v1.8/concepts/glossary/#database)
and [retention policies](/influxdb/v1.8/concepts/glossary/#retention-policy-rp).
The InfluxDB 1.x data model includes [databases](/influxdb3/cloud-serverless/reference/glossary/#database)
and [retention policies](/influxdb3/cloud-serverless/reference/glossary/#retention-policy-rp).
InfluxDB 2.x replaces databases and retention policies with buckets.
To support InfluxDB 1.x query and write patterns in InfluxDB 2.x,
databases and retention policies are mapped to buckets using the
@ -439,11 +443,6 @@ paths:
Specifies an authorization by its `token` property value
and returns the authorization.
#### InfluxDB OSS
- Doesn't support this parameter. InfluxDB OSS ignores the `token=` parameter,
applies other parameters, and then returns the result.
#### Limitations
- The parameter is non-repeatable. If you specify more than one,
@ -465,13 +464,6 @@ paths:
If the response body is missing authorizations that you expect, check that the API
token used in the request has `read-user` permission for the users (`userID` property value)
in those authorizations.
#### InfluxDB OSS
- **Warning**: The response body contains authorizations with their
[API token](/influxdb3/cloud-serverless/reference/glossary/#token) values in clear text.
- If the request uses an _[operator token](/influxdb/latest/security/tokens/#operator-token)_,
InfluxDB OSS returns authorizations for all organizations in the instance.
'400':
$ref: '#/components/responses/GeneralServerError'
description: Invalid request
@ -621,14 +613,6 @@ paths:
Use this endpoint to retrieve information about an API token, including
the token's permissions and the user that the token is scoped to.
#### InfluxDB OSS
- InfluxDB OSS returns
[API token](/influxdb3/cloud-serverless/reference/glossary/#token) values in authorizations.
- If the request uses an _[operator token](/influxdb/latest/security/tokens/#operator-token)_,
InfluxDB OSS returns authorizations for all organizations in the instance.
#### Related guides
- [View tokens](/influxdb3/cloud-serverless/security/tokens/view-tokens/)
@ -736,14 +720,6 @@ paths:
If no query parameters are passed, InfluxDB returns all buckets up to the
default `limit`.
#### InfluxDB OSS
- If you use an _[operator token](/influxdb3/cloud-serverless/security/tokens/#operator-token)_
to authenticate your request, InfluxDB retrieves resources for _all
organizations_ in the instance.
To retrieve resources for only a specific organization, use the
`org` parameter or the `orgID` parameter to specify the organization.
#### Required permissions
| Action | Permission required |
@ -859,13 +835,6 @@ paths:
[retention period](/influxdb3/cloud-serverless/reference/glossary/#retention-period)
is 30 days.
#### InfluxDB OSS
- A single InfluxDB OSS instance supports active writes or queries for
approximately 20 buckets across all organizations at a given time.
Reading or writing to more than 20 buckets at a time can adversely affect
performance.
#### Limitations
- InfluxDB Cloud Free Plan allows users to create up to two buckets.
@ -2381,7 +2350,7 @@ paths:
schema:
type: string
- description: |
A [retention policy](/influxdb/v1.8/concepts/glossary/#retention-policy-rp).
A [retention policy](/influxdb3/cloud-serverless/reference/glossary/#retention-policy-rp).
Specifies the 1.x retention policy to filter on.
in: query
name: rp
@ -2851,10 +2820,6 @@ paths:
description: |
Bad request.
The response body contains detail about the error.
#### InfluxDB OSS
- Returns this error if the `org` parameter or `orgID` parameter doesn't match an organization.
'401':
$ref: '#/components/responses/AuthorizationError'
'404':
@ -2906,7 +2871,7 @@ paths:
#### Related guides
- [View organizations](/influxdb3/cloud-serverless/organizations/view-orgs/)
- [View organizations](/influxdb3/cloud-serverless/admin/organizations/view-orgs/)
operationId: GetOrgs
parameters:
- $ref: '#/components/parameters/TraceSpan'
@ -3071,11 +3036,6 @@ paths:
2. Returns an HTTP `204` status code if queued; _error_ otherwise.
3. Handles the delete asynchronously.
#### InfluxDB OSS
- Validates the request, handles the delete synchronously,
and then responds with success or failure.
#### Limitations
- Only one organization can be deleted per request.
@ -3100,9 +3060,6 @@ paths:
#### InfluxDB Cloud
- The organization is queued for deletion.
#### InfluxDB OSS
- The organization is deleted.
'400':
$ref: '#/components/responses/BadRequestError'
'401':
@ -3141,7 +3098,7 @@ paths:
#### Related guides
- [View organizations](/influxdb3/cloud-serverless/organizations/view-orgs/)
- [View organizations](/influxdb3/cloud-serverless/admin/organizations/view-orgs/)
operationId: GetOrgsID
parameters:
- $ref: '#/components/parameters/TraceSpan'
@ -3905,10 +3862,6 @@ paths:
description: |
Bad request.
The response body contains detail about the error.
#### InfluxDB OSS
- Returns this error if the `org` parameter or `orgID` parameter doesn't match an organization.
'401':
$ref: '#/components/responses/AuthorizationError'
'404':
@ -4890,10 +4843,6 @@ paths:
description: |
Bad request.
The response body contains detail about the error.
#### InfluxDB OSS
- Returns this error if an incorrect value is passed in the `org` parameter or `orgID` parameter.
'401':
$ref: '#/components/responses/AuthorizationError'
'500':
@ -6230,10 +6179,6 @@ paths:
#### InfluxDB Cloud
- Always returns this error; doesn't support cancelling tasks.
#### InfluxDB OSS
- Doesn't return this error.
'500':
$ref: '#/components/responses/InternalServerError'
default:
@ -7823,7 +7768,7 @@ paths:
type: string
- description: |
A unix timestamp precision.
Formats timestamps as [unix (epoch) timestamps](/influxdb3/cloud-serverless/reference/glossary/#unix-timestamp) the specified precision
Formats timestamps as [unix (epoch) timestamps](/influxdb3/cloud-serverless/reference/glossary/#unix-timestamp) with the specified precision
instead of [RFC3339 timestamps](/influxdb3/cloud-serverless/reference/glossary/#rfc3339-timestamp) with nanosecond precision.
in: query
name: epoch
@ -8229,10 +8174,6 @@ components:
description: |
Bad request.
The response body contains detail about the error.
#### InfluxDB OSS
- Returns this error if an incorrect value is passed in the `org` parameter or `orgID` parameter.
GeneralServerError:
content:
application/json:
@ -9128,7 +9069,7 @@ components:
type: string
retention_policy:
description: |
A [retention policy](/influxdb/v1.8/concepts/glossary/#retention-policy-rp) name.
A [retention policy](/influxdb3/cloud-serverless/reference/glossary/#retention-policy-rp) name.
Identifies the InfluxDB v1 retention policy mapping.
type: string
virtual:
@ -9171,7 +9112,7 @@ components:
type: string
retention_policy:
description: |
A [retention policy](/influxdb/v1.8/concepts/glossary/#retention-policy-rp) name.
A [retention policy](/influxdb3/cloud-serverless/reference/glossary/#retention-policy-rp) name.
Identifies the InfluxDB v1 retention policy mapping.
type: string
required:
@ -9195,7 +9136,7 @@ components:
type: boolean
retention_policy:
description: |
A [retention policy](/influxdb/v1.8/concepts/glossary/#retention-policy-rp) name.
A [retention policy](/influxdb3/cloud-serverless/reference/glossary/#retention-policy-rp) name.
Identifies the InfluxDB v1 retention policy mapping.
type: string
DBRPs:
@ -11677,10 +11618,6 @@ components:
- Doesn't use `shardGroupDurationsSeconds`.
#### InfluxDB OSS
- Default value depends on the [bucket retention period](/influxdb3/cloud-serverless/reference/internals/shards/#shard-group-duration).
#### Related guides
- InfluxDB [shards and shard groups](/influxdb3/cloud-serverless/reference/internals/shards/)
@ -11810,7 +11747,7 @@ components:
If you need compatibility with InfluxDB 1.x, specify a value for the `rp` property;
otherwise, see the `retentionRules` property.
[Retention policy](/influxdb/v1.8/concepts/glossary/#retention-policy-rp)
[Retention policy](/influxdb3/cloud-serverless/reference/glossary/#retention-policy-rp)
is an InfluxDB 1.x concept.
The InfluxDB 2.x and Cloud equivalent is
[retention period](/influxdb3/cloud-serverless/reference/glossary/#retention-period).
@ -12128,10 +12065,6 @@ components:
#### InfluxDB 3 Cloud Serverless
- `retentionRules` is required.
#### InfluxDB OSS
- `retentionRules` isn't required.
items:
$ref: '#/components/schemas/RetentionRule'
type: array
@ -13396,7 +13329,7 @@ components:
The organization owns all resources created by the template.
To find your organization, see how to
[view organizations](/influxdb3/cloud-serverless/organizations/view-orgs/).
[view organizations](/influxdb3/cloud-serverless/admin/organizations/view-orgs/).
type: string
remotes:
description: |
@ -14813,9 +14746,6 @@ x-tagGroups:
- Headers
- Pagination
- Response codes
- Data I/O endpoints
- Security and access endpoints
- System information endpoints
- name: All endpoints
tags:
- Authorizations (API tokens)

View File

@ -8,6 +8,9 @@ x-influxdata-product-name: InfluxDB 3 Clustered
apis:
v2@2:
root: v2/ref.yml
x-influxdata-default: true
x-influxdata-docs-aliases:
- /influxdb3/clustered/api/
v1-compatibility@2:
root: v1-compatibility/swaggerV1Compat.yml
x-influxdata-docs-aliases:
- /influxdb3/clustered/api/v1/

View File

@ -1,7 +1,9 @@
title: InfluxDB v1 HTTP API for InfluxDB 3 Clustered
x-influxdata-short-title: v1 Compatibility API
summary: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Clustered database.
x-influxdata-short-description: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Clustered database.
description: |
The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Clustered database.
The InfluxDB 1.x `/write` and `/query` endpoints work with InfluxDB 1.x client libraries and third-party integrations like Grafana and others.
This documentation is generated from the
@ -13,3 +15,7 @@ description: |
license:
name: MIT
url: 'https://opensource.org/licenses/MIT'
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com

View File

@ -3,6 +3,8 @@ info:
title: InfluxDB v1 HTTP API for InfluxDB 3 Clustered
version: ''
description: |
The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Clustered database.
The InfluxDB 1.x `/write` and `/query` endpoints work with InfluxDB 1.x client libraries and third-party integrations like Grafana and others.
This documentation is generated from the
@ -14,7 +16,10 @@ info:
license:
name: MIT
url: https://opensource.org/licenses/MIT
summary: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Clustered database.
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com
servers:
- url: /
security:

View File

@ -1,7 +1,9 @@
title: InfluxDB 3 Clustered API Service
x-influxdata-short-title: v2 API
summary: The InfluxDB v2 HTTP API for InfluxDB 3 Clustered provides a v2-compatible programmatic interface for writing data stored in an InfluxDB 3 Clustered database.
x-influxdata-short-description: The InfluxDB v2 HTTP API for InfluxDB 3 Clustered provides a v2-compatible programmatic interface for writing data stored in an InfluxDB 3 Clustered database.
description: |
The InfluxDB v2 HTTP API for InfluxDB 3 Clustered provides a v2-compatible programmatic interface for writing data stored in an InfluxDB 3 Clustered database.
The InfluxDB v2 HTTP API lets you use `/api/v2` endpoints for managing retention policy mappings and writing data stored in an InfluxDB 3 instance.
This documentation is generated from the
@ -9,3 +11,7 @@ description: |
license:
name: MIT
url: 'https://opensource.org/licenses/MIT'
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com

View File

@ -5,7 +5,6 @@
- Headers
- Pagination
- Response codes
- System information endpoints
- name: All endpoints
tags:
- Ping

View File

@ -3,6 +3,8 @@ info:
title: InfluxDB 3 Clustered API Service
version: ''
description: |
The InfluxDB v2 HTTP API for InfluxDB 3 Clustered provides a v2-compatible programmatic interface for writing data stored in an InfluxDB 3 Clustered database.
The InfluxDB v2 HTTP API lets you use `/api/v2` endpoints for managing retention policy mappings and writing data stored in an InfluxDB 3 instance.
This documentation is generated from the
@ -10,7 +12,10 @@ info:
license:
name: MIT
url: https://opensource.org/licenses/MIT
summary: The InfluxDB v2 HTTP API for InfluxDB 3 Clustered provides a v2-compatible programmatic interface for writing data stored in an InfluxDB 3 Clustered database.
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com
servers:
- url: https://{baseurl}
description: InfluxDB 3 Clustered API URL
@ -412,16 +417,6 @@ paths:
'429':
description: |
Too many requests.
#### InfluxDB Cloud
- Returns this error if a **read** or **write** request exceeds your plan's [adjustable service quotas](/influxdb3/clustered/account-management/limits/#adjustable-service-quotas)
or if a **delete** request exceeds the maximum [global limit](/influxdb3/clustered/account-management/limits/#global-limits).
- For rate limits that reset automatically, returns a `Retry-After` header that describes when to try the write again.
- For limits that can't reset (for example, **cardinality limit**), doesn't return a `Retry-After` header.
Rates (data-in (writes), queries (reads), and deletes) accrue within a fixed five-minute window.
Once a rate limit is exceeded, InfluxDB returns an error response until the current five-minute window resets.
headers:
Retry-After:
description: Non-negative decimal integer indicating seconds to wait before retrying the request.
@ -517,7 +512,7 @@ paths:
type: string
- description: |
A unix timestamp precision.
Formats timestamps as [unix (epoch) timestamps](/influxdb3/clustered/reference/glossary/#unix-timestamp) the specified precision
Formats timestamps as [unix (epoch) timestamps](/influxdb3/clustered/reference/glossary/#unix-timestamp) with the specified precision
instead of [RFC3339 timestamps](/influxdb3/clustered/reference/glossary/#rfc3339-timestamp) with nanosecond precision.
in: query
name: epoch
@ -573,12 +568,7 @@ paths:
type: string
'429':
description: |
#### InfluxDB Cloud:
- returns this error if a **read** or **write** request exceeds your
plan's [adjustable service quotas](/influxdb3/clustered/account-management/limits/#adjustable-service-quotas)
or if a **delete** request exceeds the maximum
[global limit](/influxdb3/clustered/account-management/limits/#global-limits)
- returns `Retry-After` header that describes when to try the write again.
Token is temporarily over quota. The Retry-After header describes when to try the write again.
headers:
Retry-After:
description: A non-negative decimal integer indicating the seconds to delay after the response is received.
@ -924,7 +914,7 @@ components:
type: string
retention_policy:
description: |
A [retention policy](/influxdb/v1.8/concepts/glossary/#retention-policy-rp) name.
A [retention policy](/influxdb3/clustered/reference/glossary/#retention-policy-rp) name.
Identifies the InfluxDB v1 retention policy mapping.
type: string
virtual:
@ -967,7 +957,7 @@ components:
type: string
retention_policy:
description: |
A [retention policy](/influxdb/v1.8/concepts/glossary/#retention-policy-rp) name.
A [retention policy](/influxdb3/clustered/reference/glossary/#retention-policy-rp) name.
Identifies the InfluxDB v1 retention policy mapping.
type: string
required:
@ -991,7 +981,7 @@ components:
type: boolean
retention_policy:
description: |
A [retention policy](/influxdb/v1.8/concepts/glossary/#retention-policy-rp) name.
A [retention policy](/influxdb3/clustered/reference/glossary/#retention-policy-rp) name.
Identifies the InfluxDB v1 retention policy mapping.
type: string
DBRPs:
@ -2129,7 +2119,6 @@ x-tagGroups:
- Headers
- Pagination
- Response codes
- System information endpoints
- name: All endpoints
tags:
- Ping

View File

@ -0,0 +1,34 @@
title: InfluxDB 3 Core API Service
x-influxdata-short-title: InfluxDB 3 API
x-influxdata-version-matrix:
v1: Compatibility layer for InfluxDB 1.x clients (supported)
v2: Compatibility layer for InfluxDB 2.x clients (supported)
v3: Native API for InfluxDB 3.x (current)
x-influxdata-short-description: The InfluxDB 3 HTTP API provides a programmatic interface for interacting with InfluxDB, including writing, querying, and processing data, and managing an InfluxDB 3 instance.
description: |
The InfluxDB HTTP API for InfluxDB 3 Core provides a programmatic interface for
interacting with InfluxDB 3 Core databases and resources.
Use this API to:
- Write data to InfluxDB 3 Core databases
- Query data using SQL or InfluxQL
- Process data using Processing engine plugins
- Manage databases, tables, and Processing engine triggers
- Perform administrative tasks and access system information
The API includes endpoints under the following paths:
- `/api/v3`: InfluxDB 3 Core native endpoints
- `/`: Compatibility endpoints for InfluxDB v1 workloads and clients
- `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients
<!-- TODO: verify where to host the spec that users can download.
This documentation is generated from the
[InfluxDB OpenAPI specification](https://raw.githubusercontent.com/influxdata/).
-->
license:
name: MIT
url: 'https://opensource.org/licenses/MIT'
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com
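To make the endpoint paths listed in the description above concrete, the following is a minimal, hedged sketch of writing and querying a local InfluxDB 3 Core instance on the default port. The `/api/v3/write_lp` and `/api/v3/query_sql` paths, the `sensors` database name, and the parameter names are assumptions used for illustration and aren't defined in the spec file above.

```bash
# Hedged sketch: write one line-protocol point, then query it back with SQL.
# Endpoint paths, parameter names, and the database name are assumptions.
curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors" \
  --header "Content-Type: text/plain" \
  --data-binary 'home,room=kitchen temp=22.5'

curl --get "http://localhost:8181/api/v3/query_sql" \
  --data-urlencode "db=sensors" \
  --data-urlencode "q=SELECT * FROM home"
```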

View File

@ -0,0 +1,8 @@
- url: https://{baseurl}
description: InfluxDB 3 Core API URL
variables:
baseurl:
enum:
- 'localhost:8181'
default: 'localhost:8181'
description: InfluxDB 3 Core URL

View File

@ -0,0 +1,12 @@
- name: Using the InfluxDB HTTP API
tags:
- Quick start
- Authentication
- Common parameters
- Response codes
- Compatibility endpoints
- Data I/O
- Databases
- Processing engine
- Server information
- Tables

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,34 @@
title: InfluxDB 3 Enterprise API Service
x-influxdata-short-title: InfluxDB 3 API
x-influxdata-version-matrix:
v1: Compatibility layer for InfluxDB 1.x clients (supported)
v2: Compatibility layer for InfluxDB 2.x clients (supported)
v3: Native API for InfluxDB 3.x (current)
x-influxdata-short-description: The InfluxDB 3 HTTP API provides a programmatic interface for interacting with InfluxDB, including writing, querying, and processing data, and managing an InfluxDB 3 instance.
description: |
The InfluxDB HTTP API for InfluxDB 3 Enterprise provides a programmatic interface for
interacting with InfluxDB 3 Enterprise databases and resources.
Use this API to:
- Write data to InfluxDB 3 Enterprise databases
- Query data using SQL or InfluxQL
- Process data using Processing engine plugins
- Manage databases, tables, and Processing engine triggers
- Perform administrative tasks and access system information
The API includes endpoints under the following paths:
- `/api/v3`: InfluxDB 3 Enterprise native endpoints
- `/`: Compatibility endpoints for InfluxDB v1 workloads and clients
- `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients
<!-- TODO: verify where to host the spec that users can download.
This documentation is generated from the
[InfluxDB OpenAPI specification](https://raw.githubusercontent.com/influxdata/).
-->
license:
name: MIT
url: 'https://opensource.org/licenses/MIT'
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com

View File

@ -0,0 +1,8 @@
- url: https://{baseurl}
description: InfluxDB 3 Enterprise API URL
variables:
baseurl:
enum:
- 'localhost:8181'
default: 'localhost:8181'
description: InfluxDB 3 Enterprise URL

View File

@ -0,0 +1,12 @@
- name: Using the InfluxDB HTTP API
tags:
- Quick start
- Authentication
- Common parameters
- Response codes
- Compatibility endpoints
- Data I/O
- Databases
- Processing engine
- Server information
- Tables

File diff suppressed because it is too large Load Diff

View File

@ -16,12 +16,19 @@ function SetInfo(data) {
}
if(data.hasOwnProperty('summary')) {
info.summary = data.summary;
} else {
// Remove summary if not provided.
// info.summary isn't a valid OpenAPI 3.0 property, but it's used by Redocly.
info['summary'] = undefined;
}
if(data.hasOwnProperty('description')) {
info.description = data.description;
}
if(data.hasOwnProperty('license')) {
info.license = data.license;
}
if(data.hasOwnProperty('contact')) {
info.contact = data.contact;
}
}
}

View File

@ -6,5 +6,8 @@
"license": "MIT",
"dependencies": {
"js-yaml": "^4.1.0"
},
"devDependencies": {
"spectral": "^0.0.0"
}
}

View File

@ -1,22 +1,30 @@
<!DOCTYPE html>
<html>
<head>
<head>
<!-- Google Tag Manager -->
<script>(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':
new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
<script>window.location.hostname.endsWith('influxdata.com') &&
(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start': new
Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
})(window,document,'script','dataLayer','GTM-WXRH9C');</script>
<!-- End Google Tag Manager -->
<meta charset="utf8" />
<meta charset='utf8' />
<title>{{title}}</title>
<meta name="description" content="{{templateOptions.description}}.">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="shortcut icon" href="/img/favicon.png" type="image/png" sizes="32x32">
<meta name='description' content='{{templateOptions.description}}' />
<meta name='viewport' content='width=device-width, initial-scale=1' />
<link
rel='shortcut icon'
href='/img/favicon.png'
type='image/png'
sizes='32x32'
/>
<meta name="google-site-verification" content="_V6CNhaIIgVsTO9max_ECw7DUfPL-ZGE7G03MQgEGMU" />
<meta
name='google-site-verification'
content='_V6CNhaIIgVsTO9max_ECw7DUfPL-ZGE7G03MQgEGMU'
/>
<style>
body {
@ -24,36 +32,87 @@
margin: 0;
}
</style>
{{#unless disableGoogleFont}}<link href="https://fonts.googleapis.com/css?family=Roboto+Mono:500,500i,700,700i|Roboto:400,400i,700,700i|Rubik:400,400i,500,500i,700,700i" rel="stylesheet">{{/unless}}
{{#unless disableGoogleFont}}<link
href="https://fonts.googleapis.com/css?family=Roboto+Mono:500,500i,700,700i|Roboto:400,400i,700,700i|Rubik:400,400i,500,500i,700,700i"
rel="stylesheet"
/>{{/unless}}
{{{redocHead}}}
<link rel="stylesheet" type="text/css" href="/api.css">
</head>
<link rel="stylesheet" type="text/css" href="/api.css" />
<script type="text/javascript"
async="true"
src="https://widget.kapa.ai/kapa-widget.bundle.js"
data-website-id="a02bca75-1dd3-411e-95c0-79ee1139be4d"
data-project-name="InfluxDB"
data-project-color="#d30971"
data-project-logo="/img/influx-logo-cubo-white.png"
data-modal-disclaimer="This AI can access [documentation for InfluxDB, clients, and related tools](https://docs.influxdata.com). Information you submit is used in accordance with our [Privacy Policy](https://www.influxdata.com/legal/privacy-policy/)."
data-modal-example-questions="How do I write and query data with the {{title}}?, How do I use client libraries for the {{title}}?" data-button-height="65px" data-button-width="65px"
data-button-text="Ask AI" data-conversation-button-icons-only="true"
data-font-family="Proxima Nova, sans-serif"
data-modal-example-questions-col-span="8"
data-modal-full-screen-on-mobile="true"
data-modal-header-bg-color="#d30971"
data-modal-header-border-bottom="none" data-modal-header-padding=".5rem"
data-modal-header-text-color="#ffffff" data-modal-x-offset="0"
data-modal-y-offset="0" data-modal-with-overlay="false"
data-modal-inner-flex-direction="column"
data-modal-inner-justify-content="end" data-modal-inner-max-width="600px"
data-modal-inner-position-left="auto"
data-modal-inner-position-right="20px"
data-modal-inner-position-bottom="calc(2.5rem + 25px)"
data-modal-size="640px"
data-modal-title-color="#fff"
data-modal-title-font-size="1.25rem"
data-modal-lock-scroll="false" ></script>
</head>
<body>
<body>
{{! <script>
document.addEventListener('DOMContentLoaded', function() {
// Prevent throwing errors when the Google Tag Manager script is blocked
if (!window.hasOwnProperty('fcdsc')) {
window.fcdsc = (function() {
// Return nothing for any function call chained off of fcdsc
return new Proxy({}, {
get: function(target, prop) {
return function() {};
}
});
})();
}
});
</script> }}
<!-- Google Tag Manager (noscript) -->
<noscript>
<iframe src="https://www.googletagmanager.com/ns.html?id=GTM-WXRH9C" height="0" width="0" style="display:none;visibility:hidden"></iframe>
<iframe
src='https://www.googletagmanager.com/ns.html?id=GTM-WXRH9C'
height='0'
width='0'
style='display:none;visibility:hidden'
></iframe>
</noscript>
<!-- End Google Tag Manager (noscript) -->
<div id="loading">
<div class="spinner"></div>
<div id='loading'>
<div class='spinner'></div>
</div>
<div id="influx-header">
<a class="back" href="/influxdb/{{templateOptions.product}}/"><span class="version">{{templateOptions.productName}}</span> Docs</a>
<a class="btn" href="https://github.com/influxdata/influxdb/issues/new/choose/" target="_blank">Submit API issue</a>
<div id='influx-header'>
<a class='back' href='/{{templateOptions.product}}/'><span
class='version'
>{{templateOptions.productName}}</span>
Docs</a>
<a
class='btn'
href='https://github.com/influxdata/influxdb/issues/new/choose/'
target='_blank'
>Submit API issue</a>
</div>
{{{redocHTML}}}
<script type="text/javascript">
function removeFadeOut( el, speed ) {
var seconds = speed/1000;
el.style.transition = "opacity "+seconds+"s ease";
el.style.opacity = 0;
setTimeout(function() {
el.parentNode.removeChild(el);
}, speed);
}
<script type='text/javascript'>
function removeFadeOut( el, speed ) { var seconds = speed/1000;
el.style.transition = "opacity "+seconds+"s ease"; el.style.opacity = 0;
setTimeout(function() { el.parentNode.removeChild(el); }, speed); }
removeFadeOut(document.getElementById('loading'), 500);
</script>
</body>
</body>
</html>

View File

@ -2,14 +2,90 @@
# yarn lockfile v1
ansi-regex@^2.0.0:
version "2.1.1"
resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df"
integrity sha512-TIGnTpdo+E3+pCyAluZvtED5p5wCqLdezCyhPZzKPcxvFplEt4i+W7OONCKgeZFT3+y5NZZfOOS/Bdcanm1MYA==
ansi-styles@^2.2.1:
version "2.2.1"
resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe"
integrity sha512-kmCevFghRiWM7HB5zTPULl4r9bVFSWjz62MhqizDGUrq2NWuNMQyuv4tHHoKJHs69M/MF64lEcHdYIocrdWQYA==
argparse@^2.0.1:
version "2.0.1"
resolved "https://registry.yarnpkg.com/argparse/-/argparse-2.0.1.tgz#246f50f3ca78a3240f6c997e8a9bd1eac49e4b38"
resolved "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz"
integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==
chalk@^1.0.0:
version "1.1.3"
resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98"
integrity sha512-U3lRVLMSlsCfjqYPbLyVv11M9CPW4I728d6TCKMAOJueEeB9/8o+eSsMnxPJD+Q+K909sdESg7C+tIkoH6on1A==
dependencies:
ansi-styles "^2.2.1"
escape-string-regexp "^1.0.2"
has-ansi "^2.0.0"
strip-ansi "^3.0.0"
supports-color "^2.0.0"
commander@^2.8.1:
version "2.20.3"
resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33"
integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==
escape-string-regexp@^1.0.2:
version "1.0.5"
resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4"
integrity sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==
extend@^2.0.1:
version "2.0.2"
resolved "https://registry.yarnpkg.com/extend/-/extend-2.0.2.tgz#1b74985400171b85554894459c978de6ef453ab7"
integrity sha512-AgFD4VU+lVLP6vjnlNfF7OeInLTyeyckCNPEsuxz1vi786UuK/nk6ynPuhn/h+Ju9++TQyr5EpLRI14fc1QtTQ==
has-ansi@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91"
integrity sha512-C8vBJ8DwUCx19vhm7urhTuUsr4/IyP6l4VzNQDv+ryHQObW3TTTp9yB68WpYgRe2bbaGuZ/se74IqFeVnMnLZg==
dependencies:
ansi-regex "^2.0.0"
js-yaml@^4.1.0:
version "4.1.0"
resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602"
resolved "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz"
integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==
dependencies:
argparse "^2.0.1"
moment@^2.10.3:
version "2.30.1"
resolved "https://registry.yarnpkg.com/moment/-/moment-2.30.1.tgz#f8c91c07b7a786e30c59926df530b4eac96974ae"
integrity sha512-uEmtNhbDOrWPFS+hdjFCBfy9f2YoyzRpwcl+DqpC6taX21FzsTLQVbMV/W7PzNSX6x/bhC1zA3c2UQ5NzH6how==
spectral@^0.0.0:
version "0.0.0"
resolved "https://registry.yarnpkg.com/spectral/-/spectral-0.0.0.tgz#a244b28c0726a7907374ad39c58024f934b9e8a1"
integrity sha512-tJamrVCLdpHt3geQn9ypWLlcS7K02+TZV5hj1bnPjGcjQs5N0dtxzJVitcmHbR9tZQgjwj2hAO1f8v1fzzwF1Q==
dependencies:
chalk "^1.0.0"
commander "^2.8.1"
extend "^2.0.1"
moment "^2.10.3"
string-etc "^0.2.0"
string-etc@^0.2.0:
version "0.2.0"
resolved "https://registry.yarnpkg.com/string-etc/-/string-etc-0.2.0.tgz#a0f84a2d8816082266384a3c7229acbb8064eda5"
integrity sha512-J9RfI2DvBDlnISBhfOBOAXPFxE4cpEgNC6zJTjULmagQaMuu2sYrE44H8h5Paxf3Bm9Wcer92DJv9n77OAHIRg==
strip-ansi@^3.0.0:
version "3.0.1"
resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf"
integrity sha512-VhumSSbBqDTP8p2ZLKj40UjBCV4+v8bUSEpUb4KjRgWk9pbqGF4REFj6KEagidb2f/M6AzC0EmFyDNGaw9OCzg==
dependencies:
ansi-regex "^2.0.0"
supports-color@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7"
integrity sha512-KKNVtd6pCYgPIKU4cp2733HWYCpplQhddZLBUryaAHou723x+FRzQ5Df824Fj+IyyuiQTRoub4SnIFfIcrp70g==

View File

@ -19,6 +19,7 @@ function initializeChat({onChatLoad, chatAttributes}) {
* available configuration options.
* All values are strings.
*/
// If you make changes to data attributes here, you also need to port the changes to the api-docs/template.hbs API reference template.
const requiredAttributes = {
websiteId: 'a02bca75-1dd3-411e-95c0-79ee1139be4d',
projectName: 'InfluxDB',
@ -27,6 +28,7 @@ function initializeChat({onChatLoad, chatAttributes}) {
}
const optionalAttributes = {
modalDisclaimer: 'This AI can access [documentation for InfluxDB, clients, and related tools](https://docs.influxdata.com). Information you submit is used in accordance with our [Privacy Policy](https://www.influxdata.com/legal/privacy-policy/).',
modalExampleQuestions: 'Use Python to write data to InfluxDB 3,How do I query using SQL?,How do I use MQTT with Telegraf?',
buttonHide: 'true',

View File

@ -244,6 +244,7 @@
&.blue {color: $b-dodger;}
&.green {color: $gr-viridian;}
&.magenta {color: $p-comet;}
&.pink {color: $br-new-magenta;}
}
h2,

View File

@ -904,6 +904,244 @@ table tr.point{
}
}
//////////////////////// SQL WINDOW FRAME UNITS EXAMPLES ///////////////////////
table.window-frame-units {
&.groups {
.group {
position: relative;
outline-style: solid;
outline-width: 3px;
outline-offset: -5px;
border-radius: 10px;
&::before {
content: "Row Group";
display: block;
padding: .25rem .5rem;
position: absolute;
top: 3px;
left: 3px;
border-radius: 4px;
color: #fff;
font-size: .8rem;
font-weight: bold;
text-transform: uppercase;
letter-spacing: .02em;
box-shadow: 4px 4px 4px $article-bg;
}
td:nth-child(2), td:nth-child(3) {
font-weight: bold;
text-decoration: underline;
text-decoration-thickness: 2px;
text-underline-offset: 5px;
}
&:nth-of-type(1) {
&::before {background: $br-new-magenta;}
outline-color: $br-new-magenta;
td:nth-child(2), td:nth-child(3) {
text-decoration-color: $br-new-magenta;
}
}
&:nth-of-type(2) {
&::before {background: $br-new-purple;}
outline-color: $br-new-purple;
td:nth-child(2), td:nth-child(3) {
text-decoration-color: $br-new-purple;
}
}
&:nth-of-type(3) {
&::before {background: $b-dodger;}
outline-color: $b-dodger;
td:nth-child(2), td:nth-child(3) {
text-decoration-color: $b-dodger;
}
}
&:nth-of-type(4) {
&::before {background: $b-sapphire;}
outline-color: $b-sapphire;
td:nth-child(2), td:nth-child(3) {
text-decoration-color: $b-sapphire;
}
}
}
}
&.groups-with-frame {
.frame, tr.current-row {
position: relative;
outline-style: solid;
outline-width: 3px;
outline-offset: -5px;
border-radius: 10px;
&::after {
display: block;
padding: .25rem .5rem;
position: absolute;
top: 3px;
left: 3px;
border-radius: 4px;
color: #fff;
font-size: .8rem;
font-weight: bold;
text-transform: uppercase;
letter-spacing: .02em;
box-shadow: 4px 4px 4px $article-bg;
}
tr:nth-child(n + 1):nth-child(-n + 3) {
td {text-decoration-color: $br-new-magenta;}
}
tr:nth-child(n + 4):nth-child(-n + 6) {
td {text-decoration-color: $br-magenta;}
}
tr:nth-child(n + 7):nth-child(-n + 8) {
td {text-decoration-color: $b-dodger;}
}
td:nth-child(n + 2):nth-child(-n + 3) {
font-weight: bold;
text-decoration: underline;
text-decoration-thickness: 2px;
text-underline-offset: 5px;
}
}
tr.current-row {
outline-color: $br-new-magenta;
&::after {
content: "Current Row";
background: $br-new-magenta;
}
td {text-decoration-color: $b-dodger !important;}
}
.frame {
outline-color: $br-new-purple;
&::after {
content: "Frame";
background: $br-new-purple;
}
}
.group {
position: relative;
outline-color: $b-sapphire;
td:nth-child(2), td:nth-child(3) {
font-weight: bold;
text-decoration: underline;
text-decoration-thickness: 2px;
text-underline-offset: 5px;
text-decoration-color: $b-sapphire;
}
}
}
&.range-interval {
.frame, tr.current-row {
position: relative;
outline-style: solid;
outline-width: 3px;
outline-offset: -5px;
border-radius: 10px;
td:first-child {
font-weight: bold;
text-decoration: underline;
text-decoration-thickness: 2px;
text-underline-offset: 5px;
text-decoration-color: $br-new-purple;
}
&::after {
display: block;
padding: .25rem .5rem;
position: absolute;
top: 3px;
right: 3px;
border-radius: 4px;
color: #fff;
font-size: .8rem;
font-weight: bold;
text-transform: uppercase;
letter-spacing: .02em;
box-shadow: -4px 4px 4px $article-bg;
}
}
tr.current-row {
outline-color: $br-new-magenta;
td:first-child {text-decoration-color: $br-new-magenta;}
&::after {
content: "Current Row";
background: $br-new-magenta;
box-shadow: -4px 4px 4px $article-table-row-alt;
}
}
.frame {
outline-color: $br-new-purple;
&::after {
content: "Frame";
background: $br-new-purple;
}
}
}
&.range-numeric, &.rows {
.frame, tr.current-row {
position: relative;
outline-style: solid;
outline-width: 3px;
outline-offset: -5px;
border-radius: 10px;
&::after {
display: block;
padding: .25rem .5rem;
position: absolute;
top: 3px;
left: 3px;
border-radius: 4px;
color: #fff;
font-size: .8rem;
font-weight: bold;
text-transform: uppercase;
letter-spacing: .02em;
box-shadow: 4px 4px 4px $article-bg;
}
}
tr.current-row {
outline-color: $br-new-magenta;
&::after {
content: "Current Row";
background: $br-new-magenta;
}
}
.frame {
outline-color: $br-new-purple;
&::after {
content: "Frame";
background: $br-new-purple;
}
}
}
&.range-numeric {
.frame {
td:nth-child(3) {
font-weight: bold;
text-decoration: underline;
text-decoration-thickness: 2px;
text-underline-offset: 5px;
text-decoration-color: $br-new-purple;
}
tr.current-row {
td:nth-child(3) {text-decoration-color: $br-new-magenta;}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////// MEDIA QUERIES ////////////////////////////////
////////////////////////////////////////////////////////////////////////////////

View File

@ -84,6 +84,9 @@ services:
- type: volume
source: test-content
target: /app/content
- type: bind
source: ./test/shared/influxdb-templates
target: /root/influxdb-templates
working_dir: /app
cloud-dedicated-pytest:
container_name: cloud-dedicated-pytest
@ -245,6 +248,68 @@ services:
source: test-content
target: /app/content
working_dir: /app
influxdb3-core-pytest:
container_name: influxdb3-core-pytest
image: influxdata/docs-pytest
build:
context: .
dockerfile: Dockerfile.pytest
entrypoint:
- /bin/bash
- /src/test/scripts/run-tests.sh
- pytest
command:
# In the command, pass file paths to test.
# The container preprocesses the files for testing and runs the tests.
- content/influxdb3/core/**/*.md
- content/shared/**/*.md
environment:
- CONTENT_PATH=content/influxdb3/core
profiles:
- test
- influxdb3
stdin_open: true
tty: true
volumes:
# Site configuration files.
- type: bind
source: .
target: /src
read_only: true
# Files shared between host and container and writeable by both.
- type: bind
source: ./test/shared
target: /shared
- type: bind
source: ./content/influxdb3/core/.env.test
target: /app/.env.test
read_only: true
# In your code samples, use `/app/data/<FILE.lp>` or `data/<FILE.lp>` to access sample data files from the `static/downloads` directory.
- type: bind
source: ./static/downloads
target: /app/data
read_only: true
# In your code samples, use `/app/iot-starter` to store example modules or project files.
- type: volume
source: influxdb3-core-tmp
target: /app/iot-starter
# Target directory for the content under test.
# Files are copied from /src/content/<productpath> to /app/content/<productpath> before running tests.
- type: volume
source: test-content
target: /app/content
working_dir: /app
influxdb3-core:
container_name: influxdb3-core
image: quay.io/influxdb/influxdb3-core:latest
ports:
- 8181:8181
command:
- serve
- --node-id=sensors_node0
- --log-filter=debug
- --object-store=file
- --data-dir=/var/lib/influxdb3
telegraf-pytest:
container_name: telegraf-pytest
image: influxdata/docs-pytest

View File

@ -303,6 +303,29 @@ Very useful for troubleshooting, but will log any sensitive data contained withi
Environment variable: `INFLUXDB_DATA_QUERY_LOG_ENABLED`
#### query-log-path
An absolute path to the query log file.
The default is `""` (queries aren't logged to a file).
Query logging supports SIGHUP-based log rotation.
The following is an example of a `logrotate` configuration:
```
/var/log/influxdb/queries.log {
rotate 5
daily
compress
missingok
notifempty
create 644 root root
postrotate
/bin/kill -HUP `pgrep -x influxd`
endscript
}
```
#### wal-fsync-delay
Default is `"0s"`.

View File

@ -162,7 +162,7 @@ curl -XGET "localhost:8086/health"
### `/api/v2/buckets/` HTTP endpoint
The [/api/v2/buckets](/influxdb/latest/api/#tag/Buckets) endpoint accepts `GET`, `POST` and `DELETE` HTTP requests. Use this endpoint to [create](/influxdb/latest/api/#operation/PostBuckets), [delete](/influxdb/latest/api/#operation/DeleteBucketsID), [list](/influxdb/latest/api/#operation/GetBuckets), [update](/influxdb/latest/api/#operation/PatchBucketsID) and [retrieve](/influxdb/latest/api/#operation/GetBucketsID) buckets in your InfluxDB instance. Note that InfluxDB 2.x uses organizations and buckets instead of databases and retention policies.
The [/api/v2/buckets](/influxdb/v2/api/#tag/Buckets) endpoint accepts `GET`, `POST` and `DELETE` HTTP requests. Use this endpoint to [create](/influxdb/v2/api/#operation/PostBuckets), [delete](/influxdb/v2/api/#operation/DeleteBucketsID), [list](/influxdb/v2/api/#operation/GetBuckets), [update](/influxdb/v2/api/#operation/PatchBucketsID) and [retrieve](/influxdb/v2/api/#operation/GetBucketsID) buckets in your InfluxDB instance. Note that InfluxDB 2.x uses organizations and buckets instead of databases and retention policies.
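For example, a minimal sketch of listing buckets on a local instance (this assumes authentication isn't enabled; depending on your setup, you may also need to pass the URL parameters listed below):
```bash
# List buckets in the instance; add an Authorization header if auth is enabled.
curl --request GET "http://localhost:8086/api/v2/buckets"
```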
**Include the following URL parameters:**
@ -201,7 +201,7 @@ curl --request DELETE "http://localhost:8086/api/v2/buckets/test/autogen"
### `/api/v2/delete/` HTTP endpoint
The [`/api/v2/delete`](/influxdb/latest/api/#tag/Delete) endpoint accepts `POST` HTTP requests. Use this endpoint to delete points from InfluxDB, including points with specific tag values, timestamps and measurements.
The [`/api/v2/delete`](/influxdb/v2/api/#tag/Delete) endpoint accepts `POST` HTTP requests. Use this endpoint to delete points from InfluxDB, including points with specific tag values, timestamps and measurements.
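For example, a hedged sketch of deleting points from the `mymeas` measurement within a time range (the `bucket` value and the predicate syntax shown here are assumptions; see the URL parameters listed below for what your instance requires):
```bash
# Delete points from "mymeas" between two timestamps.
# The bucket value ("mydb/autogen") and the predicate syntax are assumptions.
curl --request POST "http://localhost:8086/api/v2/delete?bucket=mydb/autogen" \
  --header "Content-Type: application/json" \
  --data '{
    "start": "2023-01-01T00:00:00Z",
    "stop": "2023-01-02T00:00:00Z",
    "predicate": "_measurement=\"mymeas\""
  }'
```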
**Include the following URL parameters:**
@ -353,8 +353,12 @@ curl http://localhost:8086/debug/requests
##### Track requests over a ten-second interval
```bash
$ curl http://localhost:8086/debug/requests
curl http://localhost:8086/debug/requests
```
The response body contains data in JSON format:
```JSON
{
"user1:123.45.678.91": {"writes":1,"queries":0},
}
@ -365,8 +369,12 @@ The response shows that, over the past ten seconds, the `user1` user sent one re
##### Track requests over a one-minute interval
```bash
$ curl http://localhost:8086/debug/requests?seconds=60
curl http://localhost:8086/debug/requests?seconds=60
```
The response body contains data in JSON format:
```JSON
{
"user1:123.45.678.91": {"writes":3,"queries":0},
"user1:000.0.0.0": {"writes":0,"queries":16},
@ -468,8 +476,12 @@ Those `SELECT` queries require a `POST` request.
###### Query data with a `SELECT` statement
```bash
$ curl -G 'http://localhost:8086/query?db=mydb' --data-urlencode 'q=SELECT * FROM "mymeas"'
curl -G 'http://localhost:8086/query?db=mydb' --data-urlencode 'q=SELECT * FROM "mymeas"'
```
The response body contains data in JSON format:
```JSON
{"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag1","mytag2"],"values":[["2017-03-01T00:16:18Z",33.1,null,null],["2017-03-01T00:17:18Z",12.4,"12","14"]]}]}]}
```
@ -490,8 +502,12 @@ time myfield mytag1 mytag2
##### Query data with a `SELECT` statement and an `INTO` clause
```bash
$ curl -XPOST 'http://localhost:8086/query?db=mydb' --data-urlencode 'q=SELECT * INTO "newmeas" FROM "mymeas"'
curl -XPOST 'http://localhost:8086/query?db=mydb' --data-urlencode 'q=SELECT * INTO "newmeas" FROM "mymeas"'
```
The response body contains data in JSON format:
```JSON
{"results":[{"statement_id":0,"series":[{"name":"result","columns":["time","written"],"values":[["1970-01-01T00:00:00Z",2]]}]}]}
```
@ -503,8 +519,12 @@ Note that the system uses epoch 0 (`1970-01-01T00:00:00Z`) as a [null timestamp
##### Create a database
```bash
$ curl -XPOST 'http://localhost:8086/query' --data-urlencode 'q=CREATE DATABASE "mydb"'
curl -XPOST 'http://localhost:8086/query' --data-urlencode 'q=CREATE DATABASE "mydb"'
```
The response data is similar to the following:
```JSON
{"results":[{"statement_id":0}]}
```
@ -536,7 +556,7 @@ See below for an [example](#create-a-database-using-basic-authentication) of bas
##### Query data with a `SELECT` statement and return pretty-printed JSON
```bash
$ curl -G 'http://localhost:8086/query?db=mydb&pretty=true' --data-urlencode 'q=SELECT * FROM "mymeas"'
curl -G 'http://localhost:8086/query?db=mydb&pretty=true' --data-urlencode 'q=SELECT * FROM "mymeas"'
{
"results": [
@ -575,8 +595,12 @@ $ curl -G 'http://localhost:8086/query?db=mydb&pretty=true' --data-urlencode 'q=
##### Query data with a `SELECT` statement and return second precision epoch timestamps
```bash
$ curl -G 'http://localhost:8086/query?db=mydb&epoch=s' --data-urlencode 'q=SELECT * FROM "mymeas"'
curl -G 'http://localhost:8086/query?db=mydb&epoch=s' --data-urlencode 'q=SELECT * FROM "mymeas"'
```
The response body data is similar to the following:
```bash
{"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag1","mytag2"],"values":[[1488327378,33.1,null,null],[1488327438,12.4,"12","14"]]}]}]}
```
@ -586,7 +610,7 @@ The following example shows how to authenticate with v1.x credentials in the que
create a database:
```bash
$ curl -XPOST 'http://localhost:8086/query?u=myusername&p=mypassword' --data-urlencode 'q=CREATE DATABASE "mydb"'
curl -XPOST 'http://localhost:8086/query?u=myusername&p=mypassword' --data-urlencode 'q=CREATE DATABASE "mydb"'
```
The response body contains the following:
@ -698,26 +722,37 @@ Delimit multiple placeholder key-value pairs with comma `,`.
##### Send multiple queries
```bash
$ curl -G 'http://localhost:8086/query?db=mydb&epoch=s' --data-urlencode 'q=SELECT * FROM "mymeas";SELECT mean("myfield") FROM "mymeas"'
curl -G 'http://localhost:8086/query?db=mydb&epoch=s' --data-urlencode 'q=SELECT * FROM "mymeas";SELECT mean("myfield") FROM "mymeas"'
```
The response body contains results for both queries:
```JSON
{"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag1","mytag2"],"values":[[1488327378,33.1,null,null],[1488327438,12.4,"12","14"]]}]},{"statement_id":1,"series":[{"name":"mymeas","columns":["time","mean"],"values":[[0,22.75]]}]}]}
```
The request includes two queries: `SELECT * FROM "mymeas"` and `SELECT mean("myfield") FROM "mymeas"'`.
In the results, the system assigns a statement identifier to each query return.
The first query's result has a `statement_id` of `0` and the second query's result has a `statement_id` of `1`.
- The request includes two queries: `SELECT * FROM "mymeas"` and `SELECT mean("myfield") FROM "mymeas"`.
- In the results, InfluxDB assigns a statement identifier to each query:
- `"statement_id": 0`: the first query
- `"statement_id": 1`: the second query
##### Request query results in CSV format
```bash
$ curl -H "Accept: application/csv" -G 'http://localhost:8086/query?db=mydb' --data-urlencode 'q=SELECT * FROM "mymeas"'
To format results in CSV, specify `application/csv` in the HTTP `Accept` header--for example:
```bash
curl -H "Accept: application/csv" -G 'http://localhost:8086/query?db=mydb' --data-urlencode 'q=SELECT * FROM "mymeas"'
```
The response body contains data in CSV format:
```csv
name,tags,time,myfield,mytag1,mytag2
mymeas,,1488327378000000000,33.1,mytag1,mytag2
mymeas,,1488327438000000000,12.4,12,14
```
The first point has no [tag values](/enterprise_influxdb/v1/concepts/glossary/#tag-value) for the `mytag1` and `mytag2` [tag keys](/enterprise_influxdb/v1/concepts/glossary/#tag-key).
- In the sample data, the first point doesn't contain [tag values](/enterprise_influxdb/v1/concepts/glossary/#tag-value) for the `mytag1` and `mytag2` [tag keys](/enterprise_influxdb/v1/concepts/glossary/#tag-key).
##### Submit queries from a file
@ -734,57 +769,77 @@ CREATE RETENTION POLICY four_weeks ON mydb DURATION 4w REPLICATION 1;
##### Bind a parameter in the `WHERE` clause to specific tag value
```bash
$ curl -G 'http://localhost:8086/query?db=mydb' --data-urlencode 'q=SELECT * FROM "mymeas" WHERE "mytag1" = $tag_value' --data-urlencode 'params={"tag_value":"12"}'
Use the `params` option to pass arguments for a parameterized query--for example:
```bash
curl -G 'http://localhost:8086/query?db=mydb' --data-urlencode 'q=SELECT * FROM "mymeas" WHERE "mytag1" = $tag_value' --data-urlencode 'params={"tag_value":"12"}'
```
The response data is similar to the following:
```JSON
{"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag1","mytag2"],"values":[["2017-03-01T00:17:18Z",12.4,"12","14"]]}]}]}
```
The request maps `$tag_value` to `12`.
InfluxDB stores [tag values](/enterprise_influxdb/v1/concepts/glossary/#tag-value) as strings they and must be double quoted in the request.
- In the request, `params` maps `$tag_value` to `"12"`.
Because InfluxDB stores [tag values](/enterprise_influxdb/v1/concepts/glossary/#tag-value) as strings, you must double-quote them in parameter values.
- During query execution, InfluxDB substitutes the parameter values for the associated keys in the query.
##### Bind a parameter in the `WHERE` clause to a numerical field value
```bash
$ curl -G 'http://localhost:8086/query?db=mydb' --data-urlencode 'q=SELECT * FROM "mymeas" WHERE "myfield" > $field_value' --data-urlencode 'params={"field_value":30}'
curl -G 'http://localhost:8086/query?db=mydb' --data-urlencode 'q=SELECT * FROM "mymeas" WHERE "myfield" > $field_value' --data-urlencode 'params={"field_value":30}'
```
The response data is similar to the following:
```JSON
{"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag1","mytag2"],"values":[["2017-03-01T00:16:18Z",33.1,null,null]]}]}]}
```
The request maps `$field_value` to `30`.
The value `30` does not require double quotes because `myfield` stores numerical [field values](/enterprise_influxdb/v1/concepts/glossary/#field-value).
- In the request, `params` maps `$field_value` to `30`. Because `myfield` stores numerical [field values](/enterprise_influxdb/v1/concepts/glossary/#field-value), the parameter value `30` does not require double quotes.
- During query execution, InfluxDB substitutes the parameter values for the associated keys in the query.
##### Bind two parameters in the `WHERE` clause to a specific tag value and numerical field value
```bash
$ curl -G 'http://localhost:8086/query?db=mydb' --data-urlencode 'q=SELECT * FROM "mymeas" WHERE "mytag1" = $tag_value AND "myfield" < $field_value' --data-urlencode 'params={"tag_value":"12","field_value":30}'
curl -G 'http://localhost:8086/query?db=mydb' --data-urlencode 'q=SELECT * FROM "mymeas" WHERE "mytag1" = $tag_value AND "myfield" < $field_value' --data-urlencode 'params={"tag_value":"12","field_value":30}'
```
The response data is similar to the following:
```JSON
{"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag1","mytag2"],"values":[["2017-03-01T00:17:18Z",12.4,"12","14"]]}]}]}
```
The request maps `$tag_value` to `12` and `$field_value` to `30`.
- In the request, `params` maps `$tag_value` to `12` and `$field_value` to `30`.
- During query execution, InfluxDB substitutes the parameter values for the associated keys in the query.
#### Status codes and responses
The API response body contains results or error messages in JSON format.
To pretty-print JSON for viewing, include the query string parameter `pretty=true`
or pipe the response to a JSON-processor like [**jq**](https://stedolan.github.io/jq/).
or pipe the response to a JSON-processor, such as [**jq**](https://stedolan.github.io/jq/).
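For example, to pretty-print a response with `jq` (assuming `jq` is installed locally):
```bash
# Pipe the JSON response through jq to pretty-print it.
curl -sG 'http://localhost:8086/query?db=mydb' \
  --data-urlencode 'q=SELECT * FROM "mymeas"' | jq .
```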
##### Summary table
| HTTP status code | Description |
| :--------------- | :---------- |
| 200 OK | Success. Response body contains data in JSON format. |
| 400 Bad Request | Unacceptable request. Can occur with a syntactically incorrect query. Response body contains an error message with additional information in JSON format. |
| 401 Unauthorized | Unacceptable request. Can occur with invalid authentication credentials. |
| `200 OK` | Success. Response body contains data in JSON format. |
| `400 Bad Request` | Unacceptable request. Can occur with a syntactically incorrect query. Response body contains an error message with additional information in JSON format. |
| `401 Unauthorized` | Unacceptable request. Can occur with invalid authentication credentials. |
#### Examples
##### A successful request that returns data
```bash
$ curl -i -G 'http://localhost:8086/query?db=mydb' --data-urlencode 'q=SELECT * FROM "mymeas"'
curl -i -G 'http://localhost:8086/query?db=mydb' --data-urlencode 'q=SELECT * FROM "mymeas"'
```
The response is HTTP status `200 OK` and the body contains data in JSON format:
```
HTTP/1.1 200 OK
Connection: close
Content-Type: application/json
@ -799,8 +854,11 @@ Transfer-Encoding: chunked
##### A query that contains an error
```bash
$ curl -i -G 'http://localhost:8086/query?db=mydb1' --data-urlencode 'q=SELECT * FROM "mymeas"'
curl -i -G 'http://localhost:8086/query?db=mydb1' --data-urlencode 'q=SELECT * FROM "mymeas"'
```
The response body contains details about the error:
```
HTTP/1.1 200 OK
Connection: close
Content-Type: application/json
@ -815,8 +873,13 @@ Transfer-Encoding: chunked
##### An incorrectly formatted query
```bash
$ curl -i -G 'http://localhost:8086/query?db=mydb' --data-urlencode 'q=SELECT *'
curl -i -G 'http://localhost:8086/query?db=mydb' --data-urlencode 'q=SELECT *'
```
The response is HTTP status `400 Bad Request` and the body contains details
about the error:
```
HTTP/1.1 400 Bad Request
Content-Type: application/json
Request-Id: [...]
@ -830,8 +893,12 @@ Content-Length: 76
##### A request with invalid authentication credentials
```bash
$ curl -i -XPOST 'http://localhost:8086/query?u=myusername&p=notmypassword' --data-urlencode 'q=CREATE DATABASE "mydb"'
curl -i -XPOST 'http://localhost:8086/query?u=myusername&p=notmypassword' --data-urlencode 'q=CREATE DATABASE "mydb"'
```
The response is HTTP status `401 Unauthorized` and the body contains the error message.
```
HTTP/1.1 401 Unauthorized
Content-Type: application/json
Request-Id: [...]
@ -878,8 +945,12 @@ in significant improvements in compression.
##### Write a point to the database `mydb` with a timestamp in seconds
```bash
$ curl -i -XPOST "http://localhost:8086/write?db=mydb&precision=s" --data-binary 'mymeas,mytag=1 myfield=90 1463683075'
curl -i -XPOST "http://localhost:8086/write?db=mydb&precision=s" --data-binary 'mymeas,mytag=1 myfield=90 1463683075'
```
A successful write returns HTTP status `204 No Content`--for example:
```
HTTP/1.1 204 No Content
Content-Type: application/json
Request-Id: [...]
@ -890,13 +961,7 @@ Date: Wed, 08 Nov 2017 17:33:23 GMT
##### Write a point to the database `mydb` and the retention policy `myrp`
```bash
$ curl -i -XPOST "http://localhost:8086/write?db=mydb&rp=myrp" --data-binary 'mymeas,mytag=1 myfield=90'
HTTP/1.1 204 No Content
Content-Type: application/json
Request-Id: [...]
X-Influxdb-Version: {{< latest-patch >}}
Date: Wed, 08 Nov 2017 17:34:31 GMT
curl -i -XPOST "http://localhost:8086/write?db=mydb&rp=myrp" --data-binary 'mymeas,mytag=1 myfield=90'
```
##### Write a point to the database `mydb` using HTTP authentication
@ -904,20 +969,25 @@ Date: Wed, 08 Nov 2017 17:34:31 GMT
Valid credentials:
```bash
$ curl -i -XPOST "http://localhost:8086/write?db=mydb&u=myusername&p=mypassword" --data-binary 'mymeas,mytag=1 myfield=91'
curl -i -XPOST "http://localhost:8086/write?db=mydb&u=myusername&p=mypassword" --data-binary 'mymeas,mytag=1 myfield=91'
```
A successful write returns HTTP status `204 No Content`.
```
HTTP/1.1 204 No Content
Content-Type: application/json
Request-Id: [...]
X-Influxdb-Version: {{< latest-patch >}}
Date: Wed, 08 Nov 2017 17:34:56 GMT
```
Invalid credentials:
```bash
$ curl -i -XPOST "http://localhost:8086/write?db=mydb&u=myusername&p=notmypassword" --data-binary 'mymeas,mytag=1 myfield=91'
curl -i -XPOST "http://localhost:8086/write?db=mydb&u=myusername&p=notmypassword" --data-binary 'mymeas,mytag=1 myfield=91'
```
If the username or password is incorrect, the response status is `401 Unauthorized`
and the response body contains the error message--for example:
```
HTTP/1.1 401 Unauthorized
Content-Type: application/json
Request-Id: [...]
@ -931,23 +1001,20 @@ Content-Length: 33
##### Write a point to the database `mydb` using basic authentication
Valid credentials:
```bash
$ curl -i -XPOST -u myusername:mypassword "http://localhost:8086/write?db=mydb" --data-binary 'mymeas,mytag=1 myfield=91'
HTTP/1.1 204 No Content
Content-Type: application/json
Request-Id: [...]
X-Influxdb-Version: {{< latest-patch >}}
Date: Wed, 08 Nov 2017 17:36:40 GMT
curl -i -XPOST -u myusername:mypassword "http://localhost:8086/write?db=mydb" --data-binary 'mymeas,mytag=1 myfield=91'
```
Invalid credentials:
```bash
$ curl -i -XPOST -u myusername:notmypassword "http://localhost:8086/write?db=mydb" --data-binary 'mymeas,mytag=1 myfield=91'
curl -i -XPOST -u myusername:notmypassword "http://localhost:8086/write?db=mydb" --data-binary 'mymeas,mytag=1 myfield=91'
```
If the username or password is incorrect, the response status is `401 Unauthorized`
and the response body contains the error message--for example:
```
HTTP/1.1 401 Unauthorized
Content-Type: application/json
Request-Id: [...]
@ -965,79 +1032,65 @@ Content-Length: 33
--data-binary '<Data in InfluxDB line protocol format>'
```
All data must be binary encoded and in the
Data to write must be binary encoded and in the
[InfluxDB line protocol](/enterprise_influxdb/v1/concepts/glossary/#influxdb-line-protocol) format.
Our example shows the `--data-binary` parameter from curl, which we will use in
all examples on this page.
Examples on this page use `curl` with the `--data-binary` parameter to encode
line protocol in the request.
Using any encoding method other than `--data-binary` will likely lead to issues;
`-d`, `--data-urlencode`, and `--data-ascii` may strip out newlines or
introduce new, unintended formatting.
introduce unintended formatting.
Options:
* Write several points to the database with one request by separating each point
- Write several points to the database with one request by separating each point
by a new line.
* Write points from a file with the `@` flag.
The file should contain a batch of points in the InfluxDB line protocol format.
- Write points from a file with the `@` flag.
The file should contain a batch of points in line protocol format.
Individual points must be on their own line and separated by newline characters
(`\n`).
Files containing carriage returns will cause parser errors.
Files containing carriage returns cause parser errors.
We recommend writing points in batches of 5,000 to 10,000 points.
Smaller batches, and more HTTP requests, will result in sub-optimal performance.
> [!Important]
> #### Batch writes for optimal performance
> Write points in batches of 5,000 to 10,000 points.
> Smaller batches, and more HTTP requests, will result in sub-optimal performance.
#### Examples
##### Write a point to the database `mydb` with a nanosecond timestamp
##### Write a point with a nanosecond timestamp to the `mydb` database
```bash
$ curl -i -XPOST "http://localhost:8086/write?db=mydb" --data-binary 'mymeas,mytag=1 myfield=90 1463683075000000000'
HTTP/1.1 204 No Content
Content-Type: application/json
Request-Id: [...]
X-Influxdb-Version: {{< latest-patch >}}
Date: Wed, 08 Nov 2017 18:02:57 GMT
curl -i -XPOST "http://localhost:8086/write?db=mydb" --data-binary 'mymeas,mytag=1 myfield=90 1463683075000000000'
```
##### Write a point to the database `mydb` with the local server's nanosecond timestamp
```bash
$ curl -i -XPOST "http://localhost:8086/write?db=mydb" --data-binary 'mymeas,mytag=1 myfield=90'
If successful, the response status is HTTP `204 No Content`.
```
HTTP/1.1 204 No Content
Content-Type: application/json
Request-Id: [...]
X-Influxdb-Version: {{< latest-patch >}}
Date: Wed, 08 Nov 2017 18:03:44 GMT
```
##### Write several points to the database `mydb` by separating points with a new line
##### Write a point with the local server's nanosecond timestamp to the `mydb` database
```bash
$ curl -i -XPOST "http://localhost:8086/write?db=mydb" --data-binary 'mymeas,mytag=3 myfield=89 1463689152000000000
curl -i -XPOST "http://localhost:8086/write?db=mydb" --data-binary 'mymeas,mytag=1 myfield=90'
```
##### Write several points to the database by separating points with a new line
```bash
curl -i -XPOST "http://localhost:8086/write?db=mydb" --data-binary 'mymeas,mytag=3 myfield=89 1463689152000000000
mymeas,mytag=2 myfield=34 1463689152000000000'
HTTP/1.1 204 No Content
Content-Type: application/json
Request-Id: [...]
X-Influxdb-Version: {{< latest-patch >}}
Date: Wed, 08 Nov 2017 18:04:02 GMT
```
##### Write several points to the database `mydb` from the file `data.txt`
```bash
$ curl -i -XPOST "http://localhost:8086/write?db=mydb" --data-binary @data.txt
HTTP/1.1 204 No Content
Content-Type: application/json
Request-Id: [...]
X-Influxdb-Version: {{< latest-patch >}}
Date: Wed, 08 Nov 2017 18:08:11 GMT
curl -i -XPOST "http://localhost:8086/write?db=mydb" --data-binary @data.txt
```
A sample of the data in `data.txt`:
`data.txt` contains the following sample data:
```
mymeas,mytag1=1 value=21 1463689680000000000
mymeas,mytag1=1 value=34 1463689690000000000
@ -1132,7 +1185,7 @@ Requests to `/shard-status` return the following information in JSON format:
- `size`: the size on disk of the shard in bytes
- `is_hot`: whether the time range from the shard includes `now`
{{% note %}}
An *idle* shard is fully compacted and not receiving new (potentially historical) writes.
An _idle_ shard is fully compacted and not receiving new (potentially historical) writes.
A hot shard may or may not be idle.
{{% /note %}}
- `state`: the anti-entropy status of the shard can be one of the following:

View File

@ -4,7 +4,7 @@ description: >
Use [`join.time()`](/flux/v0/stdlib/join/time/) to join two streams of data
based on time values in the `_time` column.
This type of join operation is common when joining two streams of
[time series data](/influxdb/latest/reference/glossary/#time-series-data).
[time series data](/influxdb/v2/reference/glossary/#time-series-data).
menu:
flux_v0:
parent: Join data
@ -31,7 +31,7 @@ list_code_example: |
Use [`join.time()`](/flux/v0/stdlib/join/time/) to join two streams of data
based on time values in the `_time` column.
This type of join operation is common when joining two streams of
[time series data](/influxdb/latest/reference/glossary/#time-series-data).
[time series data](/influxdb/v2/reference/glossary/#time-series-data).
`join.time()` can use any of the available join methods.
Which method you use depends on your desired behavior:

View File

@ -86,7 +86,7 @@ Durations are relative to `now()`.
URL of the InfluxDB instance to query.
See [InfluxDB OSS URLs](/influxdb/latest/reference/urls/)
See [InfluxDB OSS URLs](/influxdb/v2/reference/urls/)
or [InfluxDB Cloud regions](/influxdb/cloud/reference/regions/).
### org
@ -97,7 +97,7 @@ Organization name.
### token
InfluxDB [API token](/influxdb/latest/security/tokens/).
InfluxDB [API token](/influxdb/v2/security/tokens/).

View File

@ -119,7 +119,7 @@ Records that evaluate to _null_ or `false` are not included in the output tables
URL of the InfluxDB instance to query.
See [InfluxDB OSS URLs](/influxdb/latest/reference/urls/)
See [InfluxDB OSS URLs](/influxdb/v2/reference/urls/)
or [InfluxDB Cloud regions](/influxdb/cloud/reference/regions/).
### org
@ -130,7 +130,7 @@ Organization name.
### token
InfluxDB [API token](/influxdb/latest/security/tokens/).
InfluxDB [API token](/influxdb/v2/security/tokens/).

View File

@ -62,6 +62,7 @@ Default is the `location` option.
- [Return the hour of a time value](#return-the-hour-of-a-time-value)
- [Return the hour of a relative duration](#return-the-hour-of-a-relative-duration)
- [Return the current hour](#return-the-current-hour)
- [Return the hour in local time](#return-the-hour-in-local-time)
### Return the hour of a time value
@ -96,3 +97,11 @@ date.hour(t: now())
```
### Return the hour in local time
```js
import "date"
import "timezone"
date.hour(t: r._time, location: timezone.location(name: "Europe/Berlin"))
```

View File

@ -1,7 +1,7 @@
---
title: csv.from() function
description: >
`csv.from()` retrieves [annotated CSV](/influxdb/latest/reference/syntax/annotated-csv/) **from a URL**.
`csv.from()` retrieves [annotated CSV](/influxdb/v2/reference/syntax/annotated-csv/) **from a URL**.
menu:
flux_v0_ref:
name: csv.from
@ -26,7 +26,7 @@ Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md
------------------------------------------------------------------------------->
`csv.from()` retrieves [annotated CSV](/influxdb/latest/reference/syntax/annotated-csv/) **from a URL**.
`csv.from()` retrieves [annotated CSV](/influxdb/v2/reference/syntax/annotated-csv/) **from a URL**.
{{% warn %}}
#### Deprecated

View File

@ -63,7 +63,7 @@ Geometry Library to generate `s2_cell_id` tags.
Specify your [S2 Cell ID level](https://s2geometry.io/resources/s2cell_statistics.html).
**Note:** To filter more quickly, use higher S2 Cell ID levels, but know that
higher levels increase [series cardinality](/influxdb/latest/reference/glossary/#series-cardinality).
higher levels increase [series cardinality](/influxdb/v2/reference/glossary/#series-cardinality).
Language-specific implementations of the S2 Geometry Library provide methods for
generating S2 Cell ID tokens. For example:

View File

@ -29,7 +29,7 @@ Fluxdoc syntax: https://github.com/influxdata/flux/blob/master/docs/fluxdoc.md
`prometheus.histogramQuantile()` calculates a quantile on a set of Prometheus histogram values.
This function supports [Prometheus metric parsing formats](/influxdb/latest/reference/prometheus-metrics/)
This function supports [Prometheus metric parsing formats](/influxdb/v2/reference/prometheus-metrics/)
used by `prometheus.scrape()`, the Telegraf `prometheus` input plugin, and
InfluxDB scrapers available in InfluxDB OSS.
@ -53,7 +53,7 @@ Quantile to compute. Must be a float value between 0.0 and 1.0.
### metricVersion
[Prometheus metric parsing format](/influxdb/latest/reference/prometheus-metrics/)
[Prometheus metric parsing format](/influxdb/v2/reference/prometheus-metrics/)
used to parse queried Prometheus data.
Available versions are `1` and `2`.
Default is `2`.

View File

@ -86,7 +86,7 @@ _`bucket` and `bucketID` are mutually exclusive_.
URL of the InfluxDB instance to write to.
See [InfluxDB Cloud regions](/influxdb/cloud/reference/regions/)
or [InfluxDB OSS URLs](/influxdb/latest/reference/urls/).
or [InfluxDB OSS URLs](/influxdb/v2/reference/urls/).
`host` is required when writing to a remote InfluxDB instance.
If specified, `token` is also required.

View File

@ -72,7 +72,7 @@ _`org` and `orgID` are mutually exclusive_.
URL of the InfluxDB instance.
See [InfluxDB Cloud regions](/influxdb/cloud/reference/regions/)
or [InfluxDB OSS URLs](/influxdb/latest/reference/urls/).
or [InfluxDB OSS URLs](/influxdb/v2/reference/urls/).
_`host` is required when `org` or `orgID` are specified._
### token

View File

@ -90,7 +90,7 @@ String-encoded organization ID.
URL of the InfluxDB instance to query.
See [InfluxDB Cloud regions](/influxdb/cloud/reference/regions/)
or [InfluxDB OSS URLs](/influxdb/latest/reference/urls/).
or [InfluxDB OSS URLs](/influxdb/v2/reference/urls/).
### token

View File

@ -81,7 +81,7 @@ _`bucket` and `bucketID` are mutually exclusive_.
URL of the InfluxDB instance to query.
See [InfluxDB Cloud regions](/influxdb/cloud/reference/regions/)
or [InfluxDB OSS URLs](/influxdb/latest/reference/urls/).
or [InfluxDB OSS URLs](/influxdb/v2/reference/urls/).
### org

View File

@ -91,7 +91,7 @@ _`bucket` and `bucketID` are mutually exclusive_.
URL of the InfluxDB instance to write to.
See [InfluxDB Cloud regions](/influxdb/cloud/reference/regions/)
or [InfluxDB OSS URLs](/influxdb/latest/reference/urls/).
or [InfluxDB OSS URLs](/influxdb/v2/reference/urls/).
`host` is required when writing to a remote InfluxDB instance.
If specified, `token` is also required.

View File

@ -81,7 +81,7 @@ _`bucket` and `bucketID` are mutually exclusive_.
URL of the InfluxDB instance to write to.
See [InfluxDB Cloud regions](/influxdb/cloud/reference/regions/)
or [InfluxDB OSS URLs](/influxdb/latest/reference/urls/).
or [InfluxDB OSS URLs](/influxdb/v2/reference/urls/).
`host` is required when writing to a remote InfluxDB instance.
If specified, `token` is also required.

View File

@ -24,7 +24,7 @@ UI and export the resources as a template.
{{< youtube 714uHkxKM6U >}}
{{% note %}}
#### InfluxDB OSS for creating templates
#### InfluxDB OSS v2 for creating templates
Templatable resources are scoped to a single organization, so the simplest way to create a
template is to create a new organization, build the template within the organization,
and then [export all resources](#export-all-resources) as a template.
@ -54,15 +54,21 @@ Provide the following:
**JSON** (`.json`) are supported.
###### Export all resources to a template
```sh
# Syntax
influx export all -o <INFLUX_ORG> -f <FILE_PATH> -t <INFLUX_TOKEN>
<!--pytest.mark.skip-->
```bash
# Syntax
influx export all --org <INFLUX_ORG> --file <FILE_PATH> --token <INFLUX_TOKEN>
```
<!--The following fails due to an apparent missing task query in the account-->
<!--pytest.mark.skip-->
```bash
# Example
influx export all \
-o my-org \
-f ~/templates/awesome-template.yml \
-t $INFLUX_TOKEN
--org $INFLUX_ORG \
--file /path/to/TEMPLATE_FILE.yml \
--token $INFLUX_TOKEN
```
#### Export resources filtered by labelName or resourceKind
@ -81,9 +87,9 @@ and
```sh
influx export all \
-o my-org \
-f ~/templates/awesome-template.yml \
-t $INFLUX_TOKEN \
--org $INFLUX_ORG \
--file /path/to/TEMPLATE_FILE.yml \
--token $INFLUX_TOKEN \
--filter=resourceKind=Bucket \
--filter=resourceKind=Dashboard \
--filter=labelName=Example1 \
@ -94,12 +100,14 @@ For information about flags, see the
[`influx export all` documentation](/influxdb/cloud/reference/cli/influx/export/all/).
### Export specific resources
To export specific resources within an organization to a template manifest,
use the `influx export` command with resource flags for each resource to include.
The command uses the API token to filter resources for the organization.
Provide the following:
- **Organization name** or **ID**
- **API token** with read access to the organization
- **API token** with read access to the organization.
- **Destination path and filename** for the template manifest.
The filename extension determines the template format—both **YAML** (`.yml`) and
**JSON** (`.json`) are supported.
@ -108,15 +116,20 @@ Provide the following:
[`influx export` documentation](/influxdb/cloud/reference/cli/influx/export/).
###### Export specific resources to a template
```sh
# Syntax
influx export all -o <INFLUX_ORG> -f <FILE_PATH> -t <INFLUX_TOKEN> [resource-flags]
<!--pytest.mark.skip-->
```bash
# Syntax
influx export --file <FILE_PATH> --token <INFLUX_TOKEN> [resource-flags]
```
<!-- Fails due to resource ID placeholders -->
<!--pytest.mark.xfail-->
```bash
# Example
influx export all \
-o my-org \
-f ~/templates/awesome-template.yml \
-t $INFLUX_TOKEN \
influx export \
--file /path/to/TEMPLATE_FILE.yml \
--token $INFLUX_TOKEN \
--buckets=00x000ooo0xx0xx,o0xx0xx00x000oo \
--dashboards=00000xX0x0X00x000 \
--telegraf-configs=00000x0x000X0x0X0
@ -125,9 +138,10 @@ influx export all \
### Export a stack
To export a stack and all its associated resources as a template, use the
`influx export stack` command.
The command uses the API token to filter resources for the organization.
Provide the following:
- **Organization name** or **ID**
- **API token** with read access to the organization
- **Destination path and filename** for the template manifest.
The filename extension determines the template format—both **YAML** (`.yml`) and
@ -135,19 +149,23 @@ Provide the following:
- **Stack ID**
###### Export a stack as a template
```sh
<!--pytest.mark.skip-->
```bash
# Syntax
influx export stack \
-o <INFLUX_ORG> \
-t <INFLUX_TOKEN> \
-f <FILE_PATH> \
--token <INFLUX_TOKEN> \
--file <FILE_PATH> \
<STACK_ID>
```
<!-- Fails due to non-existent STACK_ID -->
<!--pytest.mark.xfail-->
```bash
# Example
influx export stack \
-o my-org \
-t mYSuP3RS3CreTt0K3n
-f ~/templates/awesome-template.yml \
-t $INFLUX_TOKEN \
-f /path/to/TEMPLATE_FILE.yml \
05dbb791a4324000
```
@ -206,11 +224,47 @@ when [applying the template](/influxdb/cloud/tools/influxdb-templates/use/#apply
Users can also include the `--env-ref` flag with the appropriate key-value pair
when installing the template.
<!-- //REVIEW I can't get this to work with environment reference substitution
-- Skipping the test for now, but we should review it and fix it.
-->
<!--pytest.mark.skip-->
<!--test:setup
```sh
# Set bucket-name-1 to "myBucket"
jq -n '{
apiVersion: "influxdata.com/v2alpha1",
kind: "Bucket",
metadata: {
name: {
envRef: {
key: "bucket-name-1"
}
}
}
}' > /path/to/TEMPLATE_FILE.json
chmod +rx /path/to/TEMPLATE_FILE.json
# View formatted JSON
jq '.' /path/to/TEMPLATE_FILE.json
```
-->
For example, to set a custom bucket name when applying a template with an environment reference:
<!--pytest-codeblocks:cont-->
```sh
# The template, edited to include an environment reference:
# apiVersion: influxdata.com/v2alpha1
# kind: Bucket
# metadata:
# name:
# envRef: bucket-name-1
# Apply template, set bucket-name-1 to "myBucket", and skip verification
influx apply \
-f /path/to/template.yml \
--env-ref=bucket-name-1=myBucket
--file /path/to/TEMPLATE_FILE.json \
--env-ref bucket-name-1=myBucket \
--force yes \
--org $INFLUX_ORG \
--token $INFLUX_TOKEN
```
_If sharing your template, we recommend documenting what environment references

View File

@ -8,7 +8,7 @@ menu:
weight: 70
parent: Concepts
aliases:
- /influxdb/latest/concepts/time-series-index/
- /influxdb/v2/concepts/time-series-index/
---
Find overview and background information on Time Series Index (TSI) in this topic. For detail, including how to enable and configure TSI, see [Time Series Index (TSI) details](/influxdb/v1/concepts/tsi-details/).

View File

@ -12,7 +12,7 @@ aliases:
- /influxdb/v2/introduction/getting_started/
- /influxdb/v2/introduction/getting_started/
- /influxdb/v2/introduction/getting_started/
- /influxdb/latest/introduction/getting-started/
- /influxdb/v2/introduction/getting-started/
menu:
influxdb_v1:

View File

@ -7,7 +7,7 @@ menu:
weight: 70
identifier: InfluxQL
aliases:
- /influxdb/latest/query_language/_index
- /influxdb/v2/query_language/_index
---
This section introduces InfluxQL, the InfluxDB SQL-like query language for

View File

@ -12,7 +12,7 @@ aliases:
- /influxdb/v2/query_language/spec/
- /influxdb/v2/query_language/spec/
- /influxdb/v2/query_language/spec/
- /influxdb/latest/query_language/spec/
- /influxdb/v2/query_language/spec/
---
## Introduction
@ -234,12 +234,13 @@ regex_lit = "/" { unicode_char } "/" .
`=~` matches against
`!~` doesn't match against
> **Note:** InfluxQL supports using regular expressions when specifying:
> [!Note]
> InfluxQL supports using regular expressions when specifying:
>
* [field keys](/influxdb/v1/concepts/glossary/#field-key) and [tag keys](/influxdb/v1/concepts/glossary/#tag-key) in the [`SELECT` clause](/influxdb/v1/query_language/explore-data/#the-basic-select-statement)
* [measurements](/influxdb/v1/concepts/glossary/#measurement) in the [`FROM` clause](/influxdb/v1/query_language/explore-data/#the-basic-select-statement)
* [tag values](/influxdb/v1/concepts/glossary/#tag-value) and string [field values](/influxdb/v1/concepts/glossary/#field-value) in the [`WHERE` clause](/influxdb/v1/query_language/explore-data/#the-where-clause).
* [tag keys](/influxdb/v1/concepts/glossary/#tag-key) in the [`GROUP BY` clause](/influxdb/v1/query_language/explore-data/#group-by-tags)
> * [field keys](/influxdb/v1/concepts/glossary/#field-key) and [tag keys](/influxdb/v1/concepts/glossary/#tag-key) in the [`SELECT` clause](/influxdb/v1/query_language/explore-data/#the-basic-select-statement)
> * [measurements](/influxdb/v1/concepts/glossary/#measurement) in the [`FROM` clause](/influxdb/v1/query_language/explore-data/#the-basic-select-statement)
> * [tag values](/influxdb/v1/concepts/glossary/#tag-value) and string [field values](/influxdb/v1/concepts/glossary/#field-value) in the [`WHERE` clause](/influxdb/v1/query_language/explore-data/#the-where-clause).
> * [tag keys](/influxdb/v1/concepts/glossary/#tag-key) in the [`GROUP BY` clause](/influxdb/v1/query_language/explore-data/#group-by-tags)
>
>Currently, InfluxQL does not support using regular expressions to match
>non-string field values in the

View File

@ -150,7 +150,6 @@ The following precisions are available:
##### Query a non-default retention policy
<!--test:setup
```sh
service influxdb start && \
influx setup \
@ -160,7 +159,6 @@ influx setup \
--bucket BUCKET_NAME \
--force || true
```
-->
```sh

View File

@ -123,7 +123,6 @@ The following precisions are available:
##### Write data using basic authentication
<!--test:setup
```sh
service influxdb start && \
influx setup \
@ -133,7 +132,6 @@ influx setup \
--bucket BUCKET_NAME \
--force || true
```
-->
{{% oss-only %}}

View File

@ -66,7 +66,7 @@ To use the SHA checksum to verify the downloaded file, do the following:
the **SHA256:** checksum value.
2. Compute the SHA checksum of the downloaded file and compare it to the
published checksum--for example, enter the following command in your terminal.
published checksum--for example, enter the following command in your terminal:
<!--test:actual
```bash
@ -77,11 +77,11 @@ To use the SHA checksum to verify the downloaded file, do the following:
<!--pytest-codeblocks:cont-->
{{% code-placeholders "9cb54d3940c37a8c2a908458543e629412505cc71db55094147fd39088b99c6c" %}}
{{% code-placeholders "8d7872013cad3524fb728ca8483d0adc30125ad1af262ab826dcf5d1801159cf" %}}
```bash
# Use 2 spaces to separate the checksum from the filename
echo "9cb54d3940c37a8c2a908458543e629412505cc71db55094147fd39088b99c6c influxdb2-{{< latest-patch >}}_linux_amd64.tar.gz" \
echo "8d7872013cad3524fb728ca8483d0adc30125ad1af262ab826dcf5d1801159cf influxdb2-{{< latest-patch >}}_linux_amd64.tar.gz" \
| sha256sum --check -
```
@ -89,7 +89,7 @@ echo "9cb54d3940c37a8c2a908458543e629412505cc71db55094147fd39088b99c6c influxdb
Replace the following:
- {{% code-placeholder-key %}}`9cb54d3940c37a8c2a908458543e629412505cc71db55094147fd39088b99c6c`{{% /code-placeholder-key %}}:
- {{% code-placeholder-key %}}`8d7872013cad3524fb728ca8483d0adc30125ad1af262ab826dcf5d1801159cf`{{% /code-placeholder-key %}}:
the **SHA256:** checksum value that you copied from the downloads page
If the checksums match, the command outputs the following; otherwise, it outputs an error message.
@ -162,12 +162,10 @@ gpg: key 7C3D57159FC2F927: public key "InfluxData Package Signing Key <support@i
following in your terminal:
<!--test:setup
```sh
curl --silent --location --output-dir ~/Downloads -O \
"https://download.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}_darwin_amd64.tar.gz" \
```
-->
```sh
@ -252,11 +250,11 @@ brew install influxdb
2. {{< req text="Recommended:" color="magenta" >}}: Verify the integrity of the download--for example, enter the
following command in your terminal:
{{% code-placeholders "964e1de641a43a0e1743aa5ead243e935a05631ba0bc570fb8bff486542173c1" %}}
{{% code-placeholders "224926fd77736a364cf28128f18927dda00385f0b6872a108477246a1252ae1b" %}}
```sh
# Use 2 spaces to separate the checksum from the filename
echo "964e1de641a43a0e1743aa5ead243e935a05631ba0bc570fb8bff486542173c1 influxdb2-{{< latest-patch >}}_darwin_amd64.tar.gz" \
echo "224926fd77736a364cf28128f18927dda00385f0b6872a108477246a1252ae1b influxdb2-{{< latest-patch >}}_darwin_amd64.tar.gz" \
| shasum --algorithm 256 --quiet --check -
```
@ -264,7 +262,7 @@ echo "964e1de641a43a0e1743aa5ead243e935a05631ba0bc570fb8bff486542173c1 influxdb
Replace the following:
- {{% code-placeholder-key %}}`964e1de641a43a0e1743aa5ead243e935a05631ba0bc570fb8bff486542173c1`{{% /code-placeholder-key %}}: the SHA checksum from the [downloads page](https://www.influxdata.com/downloads/#telegraf)
- {{% code-placeholder-key %}}`224926fd77736a364cf28128f18927dda00385f0b6872a108477246a1252ae1b`{{% /code-placeholder-key %}}: the SHA checksum from the [downloads page](https://www.influxdata.com/downloads/#telegraf)
3. Unpackage the InfluxDB binary.
@ -357,9 +355,9 @@ To install {{% product-name %}} on Linux, do one of the following:
echo "943666881a1b8d9b849b74caebf02d3465d6beb716510d86a39f6c8e8dac7515 influxdata-archive.key" \
| sha256sum --check - && cat influxdata-archive.key \
| gpg --dearmor \
| tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null \
| sudo tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null \
&& echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' \
| tee /etc/apt/sources.list.d/influxdata.list
| sudo tee /etc/apt/sources.list.d/influxdata.list
# Install influxdb
sudo apt-get update && sudo apt-get install influxdb2
```

View File

@ -20,7 +20,7 @@ including buckets, organizations, users, tasks, etc.
{{% oss-only %}}
{{% note %}}
#### InfluxDB OSS and influx CLI versions
#### InfluxDB OSS v2 and influx CLI versions
Beginning with **InfluxDB 2.1**, the `influx` CLI is packaged and versioned separately
from InfluxDB.
InfluxDB and `influx` CLI versions may differ, but compatibility is noted for each command.
@ -69,6 +69,8 @@ Do one of the following:
- [Manually download and install](#manually-download-and-install)
### Use Homebrew
<!--pytest.mark.skip-->
```sh
brew install influxdb-cli
```
@ -80,6 +82,7 @@ If you used Homebrew to install **InfluxDB {{< current-version >}}**, the `influ
formula was downloaded as a dependency and should already be installed.
If installed, `influxdb-cli` will appear in the output of the following command:
<!--pytest.mark.skip-->
```sh
brew list | grep influxdb-cli
```
@ -93,6 +96,13 @@ brew list | grep influxdb-cli
<a class="btn download" href="https://dl.influxdata.com/influxdb/releases/influxdb2-client-{{< latest-patch cli=true >}}-darwin-amd64.tar.gz" download>influx CLI v{{< latest-patch cli=true >}} (macOS)</a>
<!--test:previous
```sh
curl -s -o ~/Downloads/influxdb2-client-{{< latest-patch cli=true >}}-darwin-amd64.tar.gz \
https://dl.influxdata.com/influxdb/releases/influxdb2-client-{{< latest-patch cli=true >}}-darwin-amd64.tar.gz
```
-->
2. **Unpackage the downloaded package.**
Do one of the following:
@ -101,6 +111,7 @@ brew list | grep influxdb-cli
- Run the following command in a macOS command prompt application such as
**Terminal** or **[iTerm2](https://www.iterm2.com/)**:
<!--pytest-codeblocks:cont-->
```sh
# Unpackage contents to the current working directory
tar zxvf ~/Downloads/influxdb2-client-{{< latest-patch cli=true >}}-darwin-amd64.tar.gz
@ -108,6 +119,7 @@ brew list | grep influxdb-cli
3. **(Optional) Place the binary in your `$PATH`.**
<!--pytest.mark.skip-->
```sh
# (Optional) Copy the influx binary to your $PATH
sudo cp ~/Downloads/influxdb2-client-{{< latest-patch cli=true >}}-darwin-amd64/influx /usr/local/bin/
@ -145,34 +157,40 @@ brew list | grep influxdb-cli
#### Download from the command line
```sh
```bash
# amd64
wget https://dl.influxdata.com/influxdb/releases/influxdb2-client-{{< latest-patch cli=true >}}-linux-amd64.tar.gz
# arm
wget https://dl.influxdata.com/influxdb/releases/influxdb2-client-{{< latest-patch cli=true >}}-linux-arm64.tar.gz
curl -O \
https://dl.influxdata.com/influxdb/releases/influxdb2-client-{{< latest-patch cli=true >}}-linux-amd64.tar.gz
```
4. **Unpackage the downloaded package.**
<!--pytest-codeblocks:cont-->
```bash
# arm
curl -O \
https://dl.influxdata.com/influxdb/releases/influxdb2-client-{{< latest-patch cli=true >}}-linux-arm64.tar.gz
```
2. **Unpackage the downloaded package.**
_**Note:** The following commands are examples. Adjust the filenames, paths, and utilities if necessary._
```sh
<!--pytest-codeblocks:cont-->
```bash
# amd64
tar xvzf path/to/influxdb2-client-{{< latest-patch cli=true >}}-linux-amd64.tar.gz
tar xvzf influxdb2-client-{{< latest-patch cli=true >}}-linux-amd64.tar.gz
```
<!--pytest-codeblocks:cont-->
```bash
# arm
tar xvzf path/to/influxdb2-client-{{< latest-patch cli=true >}}-linux-arm64.tar.gz
tar xvzf influxdb2-client-{{< latest-patch cli=true >}}-linux-arm64.tar.gz
```
3. **(Optional) Place the unpackaged `influx` executable in your system `$PATH`.**
```sh
# amd64
sudo cp influxdb2-client-{{< latest-patch cli=true >}}-linux-amd64/influx /usr/local/bin/
# arm
sudo cp influxdb2-client-{{< latest-patch cli=true >}}-linux-arm64/influx /usr/local/bin/
<!--pytest-codeblocks:cont-->
```bash
sudo cp ./influx /usr/local/bin/
```
If you do not move the `influx` binary into your `$PATH`, prefix the executable
@ -185,7 +203,9 @@ brew list | grep influxdb-cli
{{% tab-content %}}
{{% note %}}
We recommend running `influx` CLI commands in Powershell.
#### Use PowerShell for Windows
Use PowerShell to run `influx` CLI commands.
Command Prompt is not fully compatible.
{{% /note %}}
@ -229,10 +249,10 @@ Use the [`influx config create` command](/influxdb/v2/reference/cli/influx/confi
to create an `influx` CLI config and set it as active:
```sh
influx config create --config-name <config-name> \
influx config create --config-name CONFIG_NAME \
--host-url http://localhost:8086 \
--org <your-org> \
--token <your-auth-token> \
--org ORG_NAME \
--token API_TOKEN \
--active
```
@ -241,13 +261,15 @@ For more information about managing CLI configurations, see the
### Credential precedence
There are three ways to provide the necessary credentials to the `influx` CLI,
which uses the following precedence when retrieving credentials:
Provide credentials using any of the following supported methods.
The `influx` CLI uses the following precedence when retrieving credentials:
1. Command line flags (`--host`, `--org -o`, `--token -t`)
2. Environment variables (`INFLUX_HOST`, `INFLUX_ORG`, `INFLUX_TOKEN`)
3. CLI configuration file
Command line flags override environment variables, which override configuration file values.
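For example, the following sketch (with placeholder values) provides credentials
through environment variables and then overrides the token for a single command
with a flag:
```bash
# Provide credentials through environment variables
export INFLUX_HOST=http://localhost:8086
export INFLUX_ORG=ORG_NAME
export INFLUX_TOKEN=API_TOKEN

# A command line flag takes precedence over INFLUX_TOKEN for this invocation only
influx bucket list --token OTHER_API_TOKEN
```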
## Usage
```

View File

@ -12,7 +12,7 @@ updated_in: CLI 2.5.0
The `influx auth create` command creates an API token in InfluxDB.
{{% warn %}}
**Issue resolved**: Using influx CLI 2.4 prevented you from creating an **all-access** or **operator** token using the `influx auth create` command. This issue is resolved in the influx 2.5 CLI release. Please [upgrade to the latest version](/influxdb/latest/tools/influx-cli/) of the influx cli.
**Issue resolved**: Using influx CLI 2.4 prevented you from creating an **all-access** or **operator** token using the `influx auth create` command. This issue is resolved in the influx 2.5 CLI release. Please [upgrade to the latest version](/influxdb/v2/tools/influx-cli/) of the influx cli.
{{% /warn %}}
## Usage

View File

@ -35,7 +35,6 @@ Use the [`influx server-config` command](/influxdb/v2/reference/cli/influx/serve
to retrieve your runtime server configuration.
<!--test:setup
```sh
service influxdb start && \
influx setup \
@ -45,7 +44,6 @@ influx setup \
--bucket BUCKET_NAME \
--force || true
```
-->
```sh
@ -2748,7 +2746,7 @@ storage-wal-max-concurrent-writes = 0
### storage-wal-max-write-delay
Maximum amount of time a write request to the WAL directory will wait when the
the [maximum number of concurrent active writes to the WAL directory](#storage-wal-max-concurrent-writes)
[maximum number of concurrent active writes to the WAL directory](#storage-wal-max-concurrent-writes)
has been met. Set to `0` to disable the timeout.
**Default:** `10m`
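For example, the following sketch (the value is an assumption, not a recommendation)
sets the option through the equivalent `INFLUXD_` environment variable before
starting InfluxDB:
```bash
# Option names map to environment variables by uppercasing the name and
# replacing dashes with underscores
export INFLUXD_STORAGE_WAL_MAX_WRITE_DELAY=5m
influxd
```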

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,19 @@
---
title: SQL binary string functions
list_title: Binary string functions
description: >
Use binary string functions to encode and decode binary string values in
SQL queries.
menu:
influxdb3_cloud_dedicated:
name: Binary string
parent: sql-functions
weight: 308
source: /shared/sql-reference/functions/binary-string.md
---
<!--
The content of this page is at
// SOURCE /content/shared/sql-reference/functions/binary-string.md
-->

View File

@ -0,0 +1,19 @@
---
title: SQL hashing functions
list_title: Hashing functions
description: >
Use hashing functions to hash string values in SQL queries using established
hashing algorithms.
menu:
influxdb3_cloud_dedicated:
name: Hashing
parent: sql-functions
weight: 309
source: /shared/sql-reference/functions/hashing.md
---
<!--
The content for this page is at
// SOURCE /content/shared/sql-reference/functions/hashing.md
-->

View File

@ -0,0 +1,18 @@
---
title: SQL window functions
list_title: Window functions
description: >
SQL window functions perform an operation across a set of rows related to the
current row.
menu:
influxdb3_cloud_dedicated:
name: Window
parent: sql-functions
weight: 309
source: /shared/sql-reference/functions/window.md
---
<!--
The content for this page is at content/shared/sql-reference/functions/window.md
-->

View File

@ -11,7 +11,7 @@ menu:
weight: 302
list_code_example: |
| Operator | Meaning | Example |
| :------: | :------------------------------------------------------- | :---------------- |
| :--------------------: | :------------------------------------------------------- | :------------------------- |
| `=` | Equal to | `123 = 123` |
| `<>` | Not equal to | `123 <> 456` |
| `!=` | Not equal to | `123 != 456` |
@ -19,6 +19,8 @@ list_code_example: |
| `>=` | Greater than or equal to | `3 >= 2` |
| `<` | Less than | `1 < 2` |
| `<=` | Less than or equal to | `1 <= 2` |
| `IS DISTINCT FROM` | Is distinct from | `0 IS DISTINCT FROM 1` |
| `IS NOT DISTINCT FROM` | Is not distinct from | `0 IS NOT DISTINCT FROM 1` |
| `~` | Matches a regular expression | `'abc' ~ 'a.*'` |
| `~*` | Matches a regular expression _(case-insensitive)_ | `'Abc' ~* 'A.*'` |
| `!~` | Does not match a regular expression | `'abc' !~ 'd.*'` |

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,19 @@
---
title: SQL binary string functions
list_title: Binary string functions
description: >
Use binary string functions to encode and decode binary string values in
SQL queries.
menu:
influxdb3_cloud_serverless:
name: Binary string
parent: sql-functions
weight: 308
source: /shared/sql-reference/functions/binary-string.md
---
<!--
The content of this page is at
// SOURCE /content/shared/sql-reference/functions/binary-string.md
-->

View File

@ -0,0 +1,19 @@
---
title: SQL hashing functions
list_title: Hashing functions
description: >
Use hashing functions to hash string values in SQL queries using established
hashing algorithms.
menu:
influxdb3_cloud_serverless:
name: Hashing
parent: sql-functions
weight: 309
source: /shared/sql-reference/functions/hashing.md
---
<!--
The content for this page is at
// SOURCE /content/shared/sql-reference/functions/hashing.md
-->

View File

@ -0,0 +1,18 @@
---
title: SQL window functions
list_title: Window functions
description: >
SQL window functions perform an operation across a set of rows related to the
current row.
menu:
influxdb3_cloud_serverless:
name: Window
parent: sql-functions
weight: 309
source: /shared/sql-reference/functions/window.md
---
<!--
The content for this page is at content/shared/sql-reference/functions/window.md
-->

View File

@ -11,7 +11,7 @@ menu:
weight: 302
list_code_example: |
| Operator | Meaning | Example |
| :------: | :------------------------------------------------------- | :---------------- |
| :--------------------: | :------------------------------------------------------- | :------------------------- |
| `=` | Equal to | `123 = 123` |
| `<>` | Not equal to | `123 <> 456` |
| `!=` | Not equal to | `123 != 456` |
@ -19,6 +19,8 @@ list_code_example: |
| `>=` | Greater than or equal to | `3 >= 2` |
| `<` | Less than | `1 < 2` |
| `<=` | Less than or equal to | `1 <= 2` |
| `IS DISTINCT FROM` | Is distinct from | `0 IS DISTINCT FROM 1` |
| `IS NOT DISTINCT FROM` | Is not distinct from | `0 IS NOT DISTINCT FROM 1` |
| `~` | Matches a regular expression | `'abc' ~ 'a.*'` |
| `~*` | Matches a regular expression _(case-insensitive)_ | `'Abc' ~* 'A.*'` |
| `!~` | Does not match a regular expression | `'abc' !~ 'd.*'` |

View File

@ -0,0 +1,447 @@
---
title: Back up and restore your cluster
seotitle: Back up and restore your InfluxDB cluster
description: >
Use InfluxDB Clustered Catalog snapshots to keep necessary data in object
storage and restore to a recovery point in case of emergency.
menu:
influxdb3_clustered:
name: Backup and restore
parent: Administer InfluxDB Clustered
weight: 105
influxdb3/clustered/tags: [backup, restore]
---
InfluxDB Clustered can automatically store snapshots of the InfluxDB Catalog that
you can use to restore your cluster to a previous state.
Snapshotting is optional and is disabled by default.
Enable snapshots to ensure you can recover in case of emergency.
With InfluxDB Clustered snapshots enabled, each hour, InfluxDB uses the `pg_dump`
utility included with the InfluxDB Garbage Collector to export an SQL blob or
“snapshot” from the InfluxDB Catalog and store it in the object store.
The Catalog is a PostgreSQL-compatible relational database that stores metadata
for your time series data, such as schema data types, Parquet file locations, and more.
The Catalog snapshots act as recovery points for your InfluxDB cluster that
reference all Parquet files that existed in the object store at the time of the
snapshot. When a snapshot is restored to the Catalog, the Compactor
“[soft deletes](#soft-delete)” any Parquet files not listed in the snapshot.
> [!Note]
> InfluxDB won't [hard delete](#hard-delete) Parquet files listed in _any_ hourly or daily snapshot.
>
> For example, if you have Parquet files A, B, C, and D, and you restore to a
> snapshot that includes B and C, but not A and D, then A and D are soft-deleted, but remain in object
> storage until they are no longer referenced in any Catalog snapshot.
- [Soft delete](#soft-delete)
- [Hard delete](#hard-delete)
- [Recovery Point Objective (RPO)](#recovery-point-objective-rpo)
- [Recovery Time Objective (RTO)](#recovery-time-objective-rto)
- [Data written just before a snapshot may not be present after restoring](#data-written-just-before-a-snapshot-may-not-be-present-after-restoring)
- [Recommendations](#recommendations)
- [Automate object synchronization to an external S3-compatible bucket](#automate-object-synchronization-to-an-external-s3-compatible-bucket)
- [Enable short-term object versioning](#enable-short-term-object-versioning)
- [Configure snapshots](#configure-snapshots)
- [Environment Variables](#environment-variables)
- [Verify snapshots](#verify-snapshots)
- [Restore to a recovery point](#restore-to-a-recovery-point)
- [Resources](#resources)
- [prep_pg_dump.awk](#prep_pg_dumpawk)
## Soft delete
A _soft delete_ refers to when, on compaction, the Compactor sets a `deleted_at`
timestamp on the Parquet file entry in the Catalog.
The Parquet file is no
longer queryable, but remains intact in the object store.
## Hard delete
A _hard delete_ refers to when a Parquet file is actually deleted from object
storage and no longer exists.
## Recovery Point Objective (RPO)
RPO is the maximum amount of data loss (based on time) allowed after a disruptive event.
It indicates how much time can pass between data snapshots before data is considered lost if a disaster occurs.
The InfluxDB Clustered snapshot strategy RPO allows for the following maximum data loss:
- 1 hour for hourly snapshots _(up to the configured hourly snapshot expiration)_
- 1 day for daily snapshots _(up to the configured daily snapshot expiration)_
## Recovery Time Objective (RTO)
RTO is the maximum amount of downtime allowed for an InfluxDB cluster after a failure.
RTO varies depending on the size of your Catalog database, network speeds
between the client machine and the Catalog database, cluster load, the status
of your underlying hosting provider, and other factors.
## Data written just before a snapshot may not be present after restoring
Due to the variability of flushing data from Ingesters into Parquet files, data
written in the last few minutes before a snapshot may not be included.
This variability is typically less than 15 minutes, but is per table.
This means that one table may have data written up to the timestamp of the
snapshot, while another may not have data written in the 15 minutes prior to the
snapshot. All data written more than 15 minutes prior to a snapshot should be
present after restoring to that snapshot.
## Recommendations
### Automate object synchronization to an external S3-compatible bucket
Syncing objects to an external S3-compatible bucket ensures an up-to-date backup
in case your object store becomes unavailable. Recovery point snapshots only
back up the InfluxDB Catalog. If data referenced in a Catalog snapshot does not
exist in the object store, the recovery process does not restore the missing data.
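For example, a scheduled job like the following sketch (bucket names are placeholders;
use the client for your storage provider) keeps an external bucket in sync:
```bash
# Sync the cluster's object store to an external S3-compatible bucket.
# Run this on a schedule (for example, from cron) so the backup stays current.
aws s3 sync s3://CLUSTER_BUCKET s3://EXTERNAL_BACKUP_BUCKET
```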
### Enable short-term object versioning
If your object storage provider supports it, consider enabling short-term
object versioning on your object store--for example, for 1-2 days--to protect
against errant writes or deleted objects.
With object versioning enabled, as objects are updated, the object store
retains distinct versions of each update that can be used to “roll back” newly
written or updated Parquet files to previous versions.
Keep in mind that storing versioned objects adds to object storage costs.
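For example, on Amazon S3 you might enable versioning with the AWS CLI
(a sketch; the equivalent setting depends on your provider) and pair it with a
lifecycle rule that expires noncurrent versions after 1-2 days:
```bash
# Enable versioning on the bucket that backs the InfluxDB object store
aws s3api put-bucket-versioning \
  --bucket CLUSTER_BUCKET \
  --versioning-configuration Status=Enabled
```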
## Configure snapshots
Use the available environment variables to enable and configure hourly Catalog
snapshots in your InfluxDB cluster. Add these environment variables to the
Garbage Collector configuration in your `AppInstance` resource:
```yaml
apiVersion: kubecfg.dev/v1alpha1
kind: AppInstance
metadata:
  name: influxdb
  namespace: influxdb
spec:
  package:
    spec:
      components:
        garbage-collector:
          template:
            containers:
              iox:
                env:
                  INFLUXDB_IOX_CREATE_CATALOG_BACKUP_DATA_SNAPSHOT_FILES: 'true'
                  INFLUXDB_IOX_DELETE_USING_CATALOG_BACKUP_DATA_SNAPSHOT_FILES: 'true'
                  INFLUXDB_IOX_KEEP_HOURLY_CATALOG_BACKUP_FILE_LISTS: '30d'
                  INFLUXDB_IOX_KEEP_DAILY_CATALOG_BACKUP_FILE_LISTS: '90d'
                  INFLUXDB_IOX_GC_OBJECTSTORE_CUTOFF: '30d'
```
### Environment Variables
#### INFLUXDB_IOX_CREATE_CATALOG_BACKUP_DATA_SNAPSHOT_FILES
Enable hourly Catalog snapshotting. The default is `'false'`. Set to `'true'`:
```yaml
INFLUXDB_IOX_CREATE_CATALOG_BACKUP_DATA_SNAPSHOT_FILES: 'true'
```
#### INFLUXDB_IOX_DELETE_USING_CATALOG_BACKUP_DATA_SNAPSHOT_FILES
Enable a snapshot check when deleting files to ensure the Garbage Collector does
not remove Parquet files from the object store that are associated with existing
snapshots. The default is `'false'`. Set to `'true'`:
```yaml
INFLUXDB_IOX_DELETE_USING_CATALOG_BACKUP_DATA_SNAPSHOT_FILES: 'true'
```
> [!Caution]
> If set to `false` (the default) with snapshots enabled, the Garbage Collector does not check
> to see if a Parquet file is associated with existing snapshots before removing
> the Parquet file from the object store. This could result in deleting Parquet
> files needed to restore the cluster to a recovery point.
#### INFLUXDB_IOX_KEEP_HOURLY_CATALOG_BACKUP_FILE_LISTS
After this duration of time, the Garbage Collector deletes _hourly_ snapshots,
allowing the Garbage Collector to [hard-delete](#hard-delete) Parquet files from the object
store and the Catalog. The default is `30d`. The recommended range for snapshots is between
`1d` and `30d`:
```yaml
INFLUXDB_IOX_KEEP_HOURLY_CATALOG_BACKUP_FILE_LISTS: '30d'
```
#### INFLUXDB_IOX_KEEP_DAILY_CATALOG_BACKUP_FILE_LISTS
After this duration of time, the Garbage Collector deletes _daily_ snapshots,
allowing the Garbage Collector to [hard-delete](#hard-delete) Parquet files from the object
store and the Catalog. The default is `90d`. The recommended range is between
`3d` and `90d`.
> [!Important]
> #### Daily snapshots must expire after hourly backups
> Make sure to set `INFLUXDB_IOX_KEEP_DAILY_CATALOG_BACKUP_FILE_LISTS` to a value greater than
> `INFLUXDB_IOX_KEEP_HOURLY_CATALOG_BACKUP_FILE_LISTS`.
```yaml
INFLUXDB_IOX_KEEP_DAILY_CATALOG_BACKUP_FILE_LISTS: '90d'
```
#### INFLUXDB_IOX_GC_OBJECTSTORE_CUTOFF
The amount of time that must pass after a Parquet file is no longer referenced
in the Catalog or included in any snapshot before the Garbage Collector removes
the Parquet file from the object store. The default is `30d`:
```yaml
INFLUXDB_IOX_GC_OBJECTSTORE_CUTOFF: '30d'
```
For an in-depth explanation of the recommended value, see the
[data lifecycle garbage tuning best practices](/influxdb3/clustered/write-data/best-practices/data-lifecycle/#tune-garbage-collection)
and [use case examples](/influxdb3/clustered/write-data/best-practices/data-lifecycle/#use-case-examples).
## Verify snapshots
InfluxDB Clustered stores hourly and daily snapshots in the
`/catalog_backup_file_lists` path in object storage. After enabling snapshots,
use clients provided by
your object storage provider to ensure that snapshots are written to the object store.
Hourly snapshots are taken at _approximately_ the beginning of each hour
(≈1:00, ≈2:00, ≈3:00, etc.). After you enable snapshotting, the first snapshot is
written on or around the beginning of the next hour.
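For example, with an S3-compatible object store and the AWS CLI (a sketch;
adjust the bucket name and client for your provider), list the snapshot objects:
```bash
# List hourly and daily Catalog snapshots written by the Garbage Collector
aws s3 ls s3://CLUSTER_BUCKET/catalog_backup_file_lists/ --recursive
```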
## Restore to a recovery point
Use the following process to restore your InfluxDB cluster to a recovery point
using Catalog snapshots:
1. **Install prerequisites:**
- `kubectl` CLI for managing your Kubernetes deployment.
- `psql` CLI to interact with the PostgreSQL-compatible Catalog database with
the appropriate Data Source Name (DSN) and connection credentials.
- A client to interact with your InfluxDB clusters object store.
Supported clients depend on your object storage provider.
2. **Retrieve the recovery point snapshot from your object store.**
InfluxDB Clustered stores hourly and daily snapshots in the
`/catalog_backup_file_lists` path in object storage. Download the snapshot
that you would like to use as the recovery point. If your primary object
store is unavailable, download the snapshot from your replicated object store.
> [!Important]
> When creating and storing a snapshot, the last artifact created is the
> snapshot's bloom filter. To ensure the snapshot is complete, make sure that
> the bloom filter file (`bloom.bin.gz`) exists before downloading the snapshot.
3. **Prepare your snapshot file for the restore process.**
InfluxDB Clustered snapshot `pg_dump` files are compressed text files
containing SQL that restore the contents of the Catalog. Because your Catalog
has existing data, you need to update the snapshot so that each `CREATE`
statement is preceded by a `DROP` statement. The result is a slightly modified `pg_dump`
SQL file that you can use to restore your non-empty Catalog.
> [!Note]
> If restoring to a new cluster, you do not need to update the `pg_dump`
> snapshot file.
Use the `prep_pg_dump.awk` script provided [below](#prep_pg_dumpawk) to
process your `pg_dump` file. For example:
<!-- pytest.mark.skip -->
```bash
gunzip pg_dump.gz
cat pg_dump | prep_pg_dump.awk > snapshot.sql
```
4. **Pause the kubit operator**
The `kubit` operator validates cluster sizing and prevents you from disabling
InfluxDB Clustered components. By pausing the `kubit` operator, you can
disable InfluxDB components and safely perform the restore operation.
1. In your `AppInstance` resource, set `pause` to `true`.
```yml
apiVersion: kubecfg.dev/v1alpha1
kind: AppInstance
metadata:
name: influxdb
namespace: influxdb
pause: true
# ...
```
2. Apply the change to your cluster:
<!-- pytest.mark.skip -->
```bash
kubectl apply --filename myinfluxdb.yml --namespace influxdb
```
5. **Disable InfluxDB Clustered components**
Use the `kubectl scale` command to scale InfluxDB Clustered components down
to zero replicas:
<!-- pytest.mark.skip -->
```bash
kubectl scale --namespace influxdb --replicas=0 deployment/global-gc
kubectl scale --namespace influxdb --replicas=0 deployment/global-router
kubectl scale --namespace influxdb --replicas=0 deployment/iox-shared-querier
kubectl scale --namespace influxdb --replicas=0 statefulset/iox-shared-compactor
kubectl scale --namespace influxdb --replicas=0 statefulset/iox-shared-ingester
kubectl scale --namespace influxdb --replicas=0 statefulset/iox-shared-catalog
```
> [!Note]
> If the cluster is under load, some pods may take longer to shut down.
> For example, Ingester pods must flush their Write-Ahead Logs (WAL) before
> shutting down.
Verify that pods have been removed from your cluster.
6. **Restore the SQL snapshot to the Catalog**
Use `psql` to restore the recovery point snapshot to your InfluxDB Catalog. For example:
<!-- pytest.mark.skip -->
```bash
psql CATALOG_DSN < snapshot.sql
```
The exact `psql` command depends on your PostgreSQL-compatible database
provider, its authentication requirements, and the database's DSN.
7. **Restart InfluxDB Clustered components**
1. In your `AppInstance` resource, set `pause` to `false` or remove the `pause` field:
```yaml
apiVersion: kubecfg.dev/v1alpha1
kind: AppInstance
metadata:
name: influxdb
namespace: influxdb
pause: false
# ...
```
2. Apply the change to resume the `kubit` operator and scale InfluxDB
Clustered components to the number of replicas defined for each in your
`AppInstance` resource:
```bash
kubectl apply --filename myinfluxdb.yml --namespace influxdb
```
3. Verify that InfluxDB Clustered pods start running again.
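A quick check such as the following (assuming the `influxdb` namespace) confirms
that the pods are back:
```bash
# All InfluxDB Clustered pods should return to a Running state
kubectl get pods --namespace influxdb
```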
Your InfluxDB cluster is now restored to the recovery point.
When the Garbage Collector runs, it identifies what Parquet files are not
associated with the recovery point and [soft deletes](#soft-delete) them.
## Resources
### prep\_pg\_dump.awk
{{% truncate %}}
<!-- pytest.mark.skip -->
```awk
#!/usr/bin/env awk -f
# Data Snapshots in IOx use pg_dump in text output format, which is simply sql. We can apply the
# pg_dump using our standard permissions, without the need for special database create permission.
# Even a new cluster which you think is empty likely has some tables populated. For ease of
# restoring the pg_dump, this script inserts DROP statements before each CREATE statement to handle
# restoring to a non-empty catalog.
#
# The intended use of this script is to modify the pg_dump output with drop statements so it can
# be applied to a non-empty catalog.
#
# WARNING: The resulting sql is destructive. Prior catalog contents are removed and replaced with
# what's in the pg_dump.
#
# Example use:
# gunzip pg_dump.gz
# cat pg_dump | prep_pg_dump.awk > clean_and_restore.sql
# psql CATALOG_DSN < clean_and_restore.sql
BEGIN {
print "-- Modified pg_dump text output with DROP statements"
}
# Function to clean up names (dropping trailing semicolon so CASCADE is included in the DROP command)
function clean_name(name) {
gsub(/[";]/, "", name)
return name
}
# Match CREATE TABLE statements and insert DROP TABLE
/^[[:space:]]*CREATE[[:space:]]+TABLE[[:space:]]+/ {
table_name = clean_name($3)
print "DROP TABLE IF EXISTS " table_name " CASCADE;"
print
next
}
# Match CREATE SCHEMA statements and insert DROP SCHEMA
/^[[:space:]]*CREATE[[:space:]]+SCHEMA[[:space:]]+/ {
schema_name = clean_name($3)
print "DROP SCHEMA IF EXISTS " schema_name " CASCADE;"
print
next
}
# Match CREATE SEQUENCE statements and insert DROP SEQUENCE
/^[[:space:]]*CREATE[[:space:]]+SEQUENCE[[:space:]]+/ {
sequence_name = clean_name($3)
print "DROP SEQUENCE IF EXISTS " sequence_name " CASCADE;"
print
next
}
# Match CREATE VIEW statements and insert DROP VIEW
/^[[:space:]]*CREATE[[:space:]]+VIEW[[:space:]]+/ {
view_name = clean_name($3)
print "DROP VIEW IF EXISTS " view_name " CASCADE;"
print
next
}
# Match CREATE FUNCTION statements and insert DROP FUNCTION
/^[[:space:]]*CREATE[[:space:]]+FUNCTION[[:space:]]+/ {
function_name = clean_name($3)
print "DROP FUNCTION IF EXISTS " function_name " CASCADE;"
print
next
}
# Match CREATE INDEX statements and insert DROP INDEX
/^[[:space:]]*CREATE[[:space:]]+INDEX[[:space:]]+/ {
index_name = clean_name($3)
print "DROP INDEX IF EXISTS " index_name " CASCADE;"
print
next
}
# Pass through all other lines
{
print
}
```
{{% /truncate %}}

Some files were not shown because too many files have changed in this diff