diff --git a/api-docs/.config.yml b/api-docs/.config.yml index eea6b95a9..e337b7689 100644 --- a/api-docs/.config.yml +++ b/api-docs/.config.yml @@ -1,5 +1,5 @@ plugins: - - './openapi/plugins/docs-plugin.js' + - './../openapi/plugins/docs-plugin.js' extends: - recommended - docs/all diff --git a/api-docs/cloud-dedicated/management/content/info.yml b/api-docs/cloud-dedicated/management/content/info.yml deleted file mode 100644 index 814a15f69..000000000 --- a/api-docs/cloud-dedicated/management/content/info.yml +++ /dev/null @@ -1,12 +0,0 @@ -title: InfluxDB Cloud Dedicated Management API -x-influxdata-short-title: Management API -summary: | - The Management API for InfluxDB Cloud Dedicated provides a programmatic interface for managing an InfluxDB Cloud Dedicated instance. -description: | - The InfluxDB v3 Management API lets you manage an InfluxDB Cloud Dedicated instance and integrate functions such as creating and managing databases, permissions, and tokens into your workflow or application. - - This documentation is generated from the - InfluxDB OpenAPI specification. -license: - name: MIT - url: 'https://opensource.org/licenses/MIT' diff --git a/api-docs/generate-api-docs.sh b/api-docs/generate-api-docs.sh index 22d87c1e8..07d42eda1 100755 --- a/api-docs/generate-api-docs.sh +++ b/api-docs/generate-api-docs.sh @@ -36,27 +36,30 @@ while getopts "hc" opt; do done function generateHtml { - specPath="$1" - product="$2" - productName="$3" - api="$4" - configPath="$5" - isDefault=$6 + local specPath="$1" + local productVersion="$2" + local productName="$3" + local api="$4" + local configPath="$5" + local isDefault=$6 # Use the product name to define the menu for the Hugo template - menu="influxdb_$(echo $product | sed 's/\./_/g;s/-/_/g;')" + local menu="$(echo $productVersion | sed 's/\./_/g;s/-/_/g;s/\//_/g;')" + # Short version name (for old aliases) + # Everything after the last slash + local versionDir=$(echo $productVersion | sed 's/.*\///g;') # Extract the API name--for example, "management" from "management@v2". - apiName=$(echo $api | sed 's/@.*//g;') + local apiName=$(echo $api | sed 's/@.*//g;') # Extract the API version--for example, "v0" from "management@v0". - version=$(echo $api | sed 's/.*@//g;') + local apiVersion=$(echo $api | sed 's/.*@//g;') # Use the title and summary defined in the product API's info.yml file. - title=$(yq '.title' $product/$apiName/content/info.yml) - menuTitle=$(yq '.x-influxdata-short-title' $product/$apiName/content/info.yml) - description=$(yq '.summary' $product/$apiName/content/info.yml) + local title=$(yq '.title' $productVersion/$apiName/content/info.yml) + local menuTitle=$(yq '.x-influxdata-short-title' $productVersion/$apiName/content/info.yml) + local description=$(yq '.summary' $productVersion/$apiName/content/info.yml) # Define the file name for the Redoc HTML output. - specbundle=redoc-static_index.html + local specbundle=redoc-static_index.html # Define the temporary file for the Hugo template and Redoc HTML. 
- tmpfile="${product}-${api}_index.tmp" + local tmpfile="${productVersion}-${api}_index.tmp" echo "Bundling $specPath" @@ -75,7 +78,7 @@ function generateHtml { --options.noAutoAuth \ --output=$specbundle \ --templateOptions.description=$description \ - --templateOptions.product="$product" \ + --templateOptions.product="$productVersion" \ --templateOptions.productName="$productName" if [[ $apiName == "v1-compatibility" ]]; then @@ -90,10 +93,10 @@ menu: identifier: api-reference-$apiName weight: 304 aliases: - - /influxdb/$product/api/v1/ + - /influxdb/$versionDir/api/v1/ --- " - elif [[ $version == "0" ]]; then + elif [[ $apiVersion == "0" ]]; then echo $productName $apiName frontmatter="--- title: $title @@ -119,7 +122,7 @@ menu: identifier: api-reference-$apiName weight: 102 aliases: - - /influxdb/$product/api/ + - /influxdb/$versionDir/api/ --- " else @@ -152,68 +155,74 @@ weight: 102 rm -f $specbundle # Create the directory and move the file. if [ ! -z "$apiName" ]; then - mkdir -p ../content/influxdb/$product/api/$apiName - mv $tmpfile ../content/influxdb/$product/api/$apiName/_index.html + mkdir -p ../content/$productVersion/api/$apiName + mv $tmpfile ../content/$productVersion/api/$apiName/_index.html else - mkdir -p ../content/influxdb/$product/api - mv $tmpfile ../content/influxdb/$product/api/_index.html + mkdir -p ../content/$productVersion/api + mv $tmpfile ../content/$productVersion/api/_index.html fi } # Use a combination of directory names and configuration files to build the API documentation. # Each directory represents a product, and each product directory contains a configuration file that defines APIs and their spec file locations. function build { -# Get the list of products from directory names -products="$(ls -d -- */ | grep -v 'node_modules' | grep -v 'openapi')" - -for product in $products; do - #Trim the trailing slash off the directory name - product="${product%/}" - # Get the product API configuration file. - configPath="$product/.config.yml" - if [ ! -f $configPath ]; then - configPath=".config.yml" - fi - echo "Checking product config $configPath" - # Get the product name from the configuration. - productName=$(yq e '.x-influxdata-product-name' $configPath) - if [[ -z "$productName" ]]; then - productName=InfluxDB - fi - # Get an array of product API names (keys) from the configuration file - apis=$(yq e '.apis | keys | .[]' $configPath) - # Read each element of the apis array - while IFS= read -r api; do - # Get the spec file path from the configuration. - specRootPath=$(yq e ".apis | .$api | .root" $configPath) - # Check that the YAML spec file exists. - specPath="$product/$specRootPath" - echo "Checking for spec $specPath" - if [ -d "$specPath" ] || [ ! -f "$specPath" ]; then - echo "OpenAPI spec $specPath doesn't exist." + local versions + versions="$(ls -d -- */* | grep -v 'node_modules' | grep -v 'openapi')" + for version in $versions; do + # Trim the trailing slash off the directory name + local version="${version%/}" + # Get the version API configuration file. + local configPath="$version/.config.yml" + if [ ! -f "$configPath" ]; then + configPath=".config.yml" fi - # Get default status from the configuration. - isDefault=false - defaultStatus=$(yq e ".apis | .$api | .x-influxdata-default" $configPath) - if [[ $defaultStatus == "true" ]]; then - isDefault=true + echo "Using config $configPath" + # Get the product name from the configuration. 
+ local versionName + versionName=$(yq e '.x-influxdata-product-name' "$configPath") + if [[ -z "$versionName" ]]; then + versionName=InfluxDB fi - - # If the spec file differs from master, regenerate the HTML. - update=0 - if [[ $generate_changed == 0 ]]; then - diff=$(git diff --name-status master -- ${specPath}) - if [[ -z "$diff" ]]; then - update=1 + # Get an array of API names (keys) from the configuration file + local apis + apis=$(yq e '.apis | keys | .[]' "$configPath") + # Read each element of the apis array + while IFS= read -r api; do + echo "======Building $version $api======" + # Get the spec file path from the configuration. + local specRootPath + specRootPath=$(yq e ".apis | .$api | .root" "$configPath") + # Check that the YAML spec file exists. + local specPath + specPath="$version/$specRootPath" + if [ -d "$specPath" ] || [ ! -f "$specPath" ]; then + echo "OpenAPI spec $specPath doesn't exist." + fi + # Get default status from the configuration. + local isDefault=false + local defaultStatus + defaultStatus=$(yq e ".apis | .$api | .x-influxdata-default" "$configPath") + if [[ $defaultStatus == "true" ]]; then + isDefault=true fi - fi - if [[ $update -eq 0 ]]; then - echo "Regenerating $product $api" - generateHtml "$specPath" "$product" "$productName" "$api" "$configPath" $isDefault - fi - done <<< "$apis" -done + # If the spec file differs from master, regenerate the HTML. + local update=0 + if [[ $generate_changed == 0 ]]; then + local diff + diff=$(git diff --name-status master -- "${specPath}") + if [[ -z "$diff" ]]; then + update=1 + fi + fi + + if [[ $update -eq 0 ]]; then + echo "Regenerating $version $api" + generateHtml "$specPath" "$version" "$versionName" "$api" "$configPath" "$isDefault" + fi + echo "========Done with $version $api========" + done <<< "$apis" + done } build diff --git a/api-docs/getswagger.sh b/api-docs/getswagger.sh index 8d0fd3061..3b650fd5f 100755 --- a/api-docs/getswagger.sh +++ b/api-docs/getswagger.sh @@ -139,93 +139,98 @@ function postProcess() { } function updateCloudV2 { - outFile="cloud/v2/ref.yml" + outFile="influxdb/cloud/v2/ref.yml" if [[ -z "$baseUrl" ]]; then echo "Using existing $outFile" else curl $UPDATE_OPTIONS ${baseUrl}/contracts/ref/cloud.yml -o $outFile fi - postProcess $outFile 'cloud/.config.yml' v2@2 + postProcess $outFile 'influxdb/cloud/.config.yml' v2@2 } function updateCloudDedicatedManagement { - outFile="cloud-dedicated/management/openapi.yml" + outFile="influxdb3/cloud-dedicated/management/openapi.yml" if [[ -z "$baseUrl" ]]; then echo "Using existing $outFile" else - curl $UPDATE_OPTIONS https://raw.githubusercontent.com/influxdata/granite/ab7ee2aceacfae7f415d15ffbcf8c9d0f6f3e015/openapi.yaml -o $outFile + # Clone influxdata/granite and fetch the latest openapi.yaml file. 
+ echo "Fetching the latest openapi.yaml file from influxdata/granite" + tmp_dir=$(mktemp -d) + git clone --depth 1 --branch main https://github.com/influxdata/granite.git "$tmp_dir" + cp "$tmp_dir/openapi.yaml" "$outFile" + rm -rf "$tmp_dir" fi - postProcess $outFile 'cloud-dedicated/.config.yml' management@0 + postProcess $outFile 'influxdb3/cloud-dedicated/.config.yml' management@0 } function updateCloudDedicatedV2 { - outFile="cloud-dedicated/v2/ref.yml" + outFile="influxdb3/cloud-dedicated/v2/ref.yml" if [[ -z "$baseUrl" ]]; then echo "Using existing $outFile" else curl $UPDATE_OPTIONS ${baseUrl}/contracts/ref/cloud.yml -o $outFile fi - postProcess $outFile 'cloud-dedicated/.config.yml' v2@2 + postProcess $outFile 'influxdb3/cloud-dedicated/.config.yml' v2@2 } function updateClusteredV2 { - outFile="clustered/v2/ref.yml" + outFile="influxdb3/clustered/v2/ref.yml" if [[ -z "$baseUrl" ]]; then echo "Using existing $outFile" else curl $UPDATE_OPTIONS ${baseUrl}/contracts/ref/cloud.yml -o $outFile fi - postProcess $outFile 'clustered/.config.yml' v2@2 + postProcess $outFile 'influxdb3/clustered/.config.yml' v2@2 } function updateCloudServerlessV2 { - outFile="cloud-serverless/v2/ref.yml" + outFile="influxdb3/cloud-serverless/v2/ref.yml" if [[ -z "$baseUrl" ]]; then echo "Using existing $outFile" else curl $UPDATE_OPTIONS ${baseUrl}/contracts/ref/cloud.yml -o $outFile fi - postProcess $outFile 'cloud-serverless/.config.yml' v2@2 + postProcess $outFile 'influxdb3/cloud-serverless/.config.yml' v2@2 } function updateOSSV2 { - outFile="v2/ref.yml" + outFile="influxdb/v2/ref.yml" if [[ -z "$baseUrlOSS" ]]; then echo "Using existing $outFile" else curl $UPDATE_OPTIONS ${baseUrlOSS}/contracts/ref/oss.yml -o $outFile fi - postProcess $outFile 'v2/.config.yml' '@2' + postProcess $outFile 'influxdb/v2/.config.yml' '@2' } function updateV1Compat { - outFile="cloud/v1-compatibility/swaggerV1Compat.yml" + outFile="influxdb/cloud/v1-compatibility/swaggerV1Compat.yml" if [[ -z "$baseUrl" ]]; then echo "Using existing $outFile" else curl $UPDATE_OPTIONS ${baseUrl}/contracts/swaggerV1Compat.yml -o $outFile fi - postProcess $outFile 'cloud/.config.yml' 'v1-compatibility' + postProcess $outFile 'influxdb/cloud/.config.yml' 'v1-compatibility' - outFile="v2/v1-compatibility/swaggerV1Compat.yml" + outFile="influxdb/v2/v1-compatibility/swaggerV1Compat.yml" cp cloud/v1-compatibility/swaggerV1Compat.yml $outFile - postProcess $outFile 'v2/.config.yml' 'v1-compatibility' + postProcess $outFile 'influxdb/v2/.config.yml' 'v1-compatibility' - outFile="cloud-dedicated/v1-compatibility/swaggerV1Compat.yml" - postProcess $outFile 'cloud-dedicated/.config.yml' 'v1-compatibility' + outFile="influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml" + postProcess $outFile 'influxdb3/cloud-dedicated/.config.yml' 'v1-compatibility' - outFile="cloud-serverless/v1-compatibility/swaggerV1Compat.yml" - postProcess $outFile 'cloud-serverless/.config.yml' 'v1-compatibility' + outFile="influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml" + postProcess $outFile 'influxdb3/cloud-serverless/.config.yml' 'v1-compatibility' - outFile="clustered/v1-compatibility/swaggerV1Compat.yml" - postProcess $outFile 'clustered/.config.yml' 'v1-compatibility' + outFile="influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml" + postProcess $outFile 'influxdb3/clustered/.config.yml' 'v1-compatibility' } UPDATE_OPTIONS="--fail" diff --git a/api-docs/cloud/.config.yml b/api-docs/influxdb/cloud/.config.yml similarity index 84% rename 
from api-docs/cloud/.config.yml rename to api-docs/influxdb/cloud/.config.yml index 46849d74f..49632b887 100644 --- a/api-docs/cloud/.config.yml +++ b/api-docs/influxdb/cloud/.config.yml @@ -1,5 +1,5 @@ plugins: - - '../openapi/plugins/docs-plugin.js' + - '../../openapi/plugins/docs-plugin.js' extends: - recommended - docs/all diff --git a/api-docs/cloud/v1-compatibility/content/info.yml b/api-docs/influxdb/cloud/v1-compatibility/content/info.yml similarity index 100% rename from api-docs/cloud/v1-compatibility/content/info.yml rename to api-docs/influxdb/cloud/v1-compatibility/content/info.yml diff --git a/api-docs/cloud/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb/cloud/v1-compatibility/swaggerV1Compat.yml similarity index 100% rename from api-docs/cloud/v1-compatibility/swaggerV1Compat.yml rename to api-docs/influxdb/cloud/v1-compatibility/swaggerV1Compat.yml diff --git a/api-docs/cloud/v2/content/info.yml b/api-docs/influxdb/cloud/v2/content/info.yml similarity index 100% rename from api-docs/cloud/v2/content/info.yml rename to api-docs/influxdb/cloud/v2/content/info.yml diff --git a/api-docs/cloud/v2/content/servers.yml b/api-docs/influxdb/cloud/v2/content/servers.yml similarity index 100% rename from api-docs/cloud/v2/content/servers.yml rename to api-docs/influxdb/cloud/v2/content/servers.yml diff --git a/api-docs/cloud/v2/content/tag-groups.yml b/api-docs/influxdb/cloud/v2/content/tag-groups.yml similarity index 100% rename from api-docs/cloud/v2/content/tag-groups.yml rename to api-docs/influxdb/cloud/v2/content/tag-groups.yml diff --git a/api-docs/cloud/v2/ref.yml b/api-docs/influxdb/cloud/v2/ref.yml similarity index 100% rename from api-docs/cloud/v2/ref.yml rename to api-docs/influxdb/cloud/v2/ref.yml diff --git a/api-docs/v2/.config.yml b/api-docs/influxdb/v2/.config.yml similarity index 83% rename from api-docs/v2/.config.yml rename to api-docs/influxdb/v2/.config.yml index ffcc1e885..c7db819a6 100644 --- a/api-docs/v2/.config.yml +++ b/api-docs/influxdb/v2/.config.yml @@ -1,5 +1,5 @@ plugins: - - '../openapi/plugins/docs-plugin.js' + - '../../openapi/plugins/docs-plugin.js' extends: - recommended - docs/all diff --git a/api-docs/v2/content/info.yml b/api-docs/influxdb/v2/content/info.yml similarity index 100% rename from api-docs/v2/content/info.yml rename to api-docs/influxdb/v2/content/info.yml diff --git a/api-docs/v2/content/tag-groups.yml b/api-docs/influxdb/v2/content/tag-groups.yml similarity index 100% rename from api-docs/v2/content/tag-groups.yml rename to api-docs/influxdb/v2/content/tag-groups.yml diff --git a/api-docs/v2/ref.yml b/api-docs/influxdb/v2/ref.yml similarity index 100% rename from api-docs/v2/ref.yml rename to api-docs/influxdb/v2/ref.yml diff --git a/api-docs/v2/v1-compatibility/content/info.yml b/api-docs/influxdb/v2/v1-compatibility/content/info.yml similarity index 100% rename from api-docs/v2/v1-compatibility/content/info.yml rename to api-docs/influxdb/v2/v1-compatibility/content/info.yml diff --git a/api-docs/v2/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb/v2/v1-compatibility/swaggerV1Compat.yml similarity index 100% rename from api-docs/v2/v1-compatibility/swaggerV1Compat.yml rename to api-docs/influxdb/v2/v1-compatibility/swaggerV1Compat.yml diff --git a/api-docs/cloud-dedicated/.config.yml b/api-docs/influxdb3/cloud-dedicated/.config.yml similarity index 70% rename from api-docs/cloud-dedicated/.config.yml rename to api-docs/influxdb3/cloud-dedicated/.config.yml index 6d7fb6c01..2febde2ac 100644 --- 
a/api-docs/cloud-dedicated/.config.yml +++ b/api-docs/influxdb3/cloud-dedicated/.config.yml @@ -1,9 +1,9 @@ plugins: - - '../openapi/plugins/docs-plugin.js' + - '../../openapi/plugins/docs-plugin.js' extends: - recommended - docs/all -x-influxdata-product-name: InfluxDB v3 Cloud Dedicated +x-influxdata-product-name: InfluxDB 3 Cloud Dedicated apis: management@0: diff --git a/api-docs/influxdb3/cloud-dedicated/management/content/info.yml b/api-docs/influxdb3/cloud-dedicated/management/content/info.yml new file mode 100644 index 000000000..5b533dd20 --- /dev/null +++ b/api-docs/influxdb3/cloud-dedicated/management/content/info.yml @@ -0,0 +1,12 @@ +title: InfluxDB 3 Cloud Dedicated Management API +x-influxdata-short-title: Management API +summary: | + The Management API for InfluxDB 3 Cloud Dedicated provides a programmatic interface for managing a Cloud Dedicated instance. +description: | + The Management API lets you manage an InfluxDB 3 Cloud Dedicated instance and integrate functions such as creating and managing databases, permissions, and tokens into your workflow or application. + + This documentation is generated from the + InfluxDB OpenAPI specification. +license: + name: MIT + url: 'https://opensource.org/licenses/MIT' diff --git a/api-docs/cloud-dedicated/management/content/servers.yml b/api-docs/influxdb3/cloud-dedicated/management/content/servers.yml similarity index 55% rename from api-docs/cloud-dedicated/management/content/servers.yml rename to api-docs/influxdb3/cloud-dedicated/management/content/servers.yml index 340d273d8..6e97ac280 100644 --- a/api-docs/cloud-dedicated/management/content/servers.yml +++ b/api-docs/influxdb3/cloud-dedicated/management/content/servers.yml @@ -1,8 +1,8 @@ - url: 'https://{baseurl}/api/v0' - description: InfluxDB Cloud Dedicated Management API URL + description: InfluxDB 3 Cloud Dedicated Management API URL variables: baseurl: enum: - 'console.influxdata.com' default: 'console.influxdata.com' - description: InfluxDB Cloud Dedicated Console URL + description: InfluxDB 3 Cloud Dedicated Console URL diff --git a/api-docs/cloud-dedicated/management/content/tag-groups.yml b/api-docs/influxdb3/cloud-dedicated/management/content/tag-groups.yml similarity index 100% rename from api-docs/cloud-dedicated/management/content/tag-groups.yml rename to api-docs/influxdb3/cloud-dedicated/management/content/tag-groups.yml diff --git a/api-docs/cloud-dedicated/management/openapi.yml b/api-docs/influxdb3/cloud-dedicated/management/openapi.yml similarity index 90% rename from api-docs/cloud-dedicated/management/openapi.yml rename to api-docs/influxdb3/cloud-dedicated/management/openapi.yml index 6aa404111..edadccd7f 100644 --- a/api-docs/cloud-dedicated/management/openapi.yml +++ b/api-docs/influxdb3/cloud-dedicated/management/openapi.yml @@ -1,26 +1,26 @@ openapi: 3.1.0 info: - title: InfluxDB Cloud Dedicated Management API + title: InfluxDB 3 Cloud Dedicated Management API description: | - The InfluxDB v3 Management API lets you manage an InfluxDB Cloud Dedicated instance and integrate functions such as creating and managing databases, permissions, and tokens into your workflow or application. + The Management API lets you manage an InfluxDB 3 Cloud Dedicated instance and integrate functions such as creating and managing databases, permissions, and tokens into your workflow or application. This documentation is generated from the InfluxDB OpenAPI specification. 
summary: | - The Management API for InfluxDB Cloud Dedicated provides a programmatic interface for managing an InfluxDB Cloud Dedicated instance. + The Management API for InfluxDB 3 Cloud Dedicated provides a programmatic interface for managing a Cloud Dedicated instance. license: name: MIT url: https://opensource.org/licenses/MIT version: '' servers: - url: https://{baseurl}/api/v0 - description: InfluxDB Cloud Dedicated Management API URL + description: InfluxDB 3 Cloud Dedicated Management API URL variables: baseurl: enum: - console.influxdata.com default: console.influxdata.com - description: InfluxDB Cloud Dedicated Console URL + description: InfluxDB 3 Cloud Dedicated Console URL security: - bearerAuthManagementToken: [] bearerAuthJwt: [] @@ -30,13 +30,13 @@ tags: description: | The InfluxDB Management API endpoints require the following credentials: - - `ACCOUNT_ID`: The ID of the [account](/influxdb/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the cluster belongs to. To view account ID and cluster ID, [list cluster details](/influxdb/cloud-dedicated/admin/clusters/list/#detailed-output-in-json). - - `CLUSTER_ID`: The ID of the [cluster](/influxdb/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that you want to manage. To view account ID and cluster ID, [list cluster details](/influxdb/cloud-dedicated/admin/clusters/list/#detailed-output-in-json). - - `Authorization MANAGEMENT_TOKEN`: the `Authorization` HTTP header with a [management token](/influxdb/cloud-dedicated/admin/tokens/management/). + - `ACCOUNT_ID`: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the cluster belongs to. To view account ID and cluster ID, [list cluster details](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json). + - `CLUSTER_ID`: The ID of the [cluster](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that you want to manage. To view account ID and cluster ID, [list cluster details](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json). + - `Authorization MANAGEMENT_TOKEN`: the `Authorization` HTTP header with a [management token](/influxdb3/cloud-dedicated/admin/tokens/management/). - See how to [create a management token](/influxdb/cloud-dedicated/admin/tokens/management/). + See how to [create a management token](/influxdb3/cloud-dedicated/admin/tokens/management/). - By default, management tokens in InfluxDB v3 are short-lived tokens issued by an OAuth2 identity provider that grant a specific user administrative access to your InfluxDB cluster. However, for automation purposes, you can manually create management tokens that authenticate directly with your InfluxDB cluster and do not require human interaction with your identity provider. + By default, management tokens in InfluxDB 3 are short-lived tokens issued by an OAuth2 identity provider that grant a specific user administrative access to your InfluxDB cluster. However, for automation purposes, you can manually create management tokens that authenticate directly with your InfluxDB cluster and do not require human interaction with your identity provider. 
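The authentication overview above maps to a simple request pattern. The following sketch is illustrative only: the server URL comes from the servers definition in this spec, while the databases path and the placeholder IDs are assumptions based on the accountId and clusterId path parameters that appear later in the spec.

```bash
# Illustrative Management API call, assuming a databases listing path under
# /accounts/{accountId}/clusters/{clusterId}; substitute real values for the placeholders.
ACCOUNT_ID="11111111-2222-3333-4444-555555555555"   # hypothetical account UUID
CLUSTER_ID="66666666-7777-8888-9999-000000000000"   # hypothetical cluster UUID
MANAGEMENT_TOKEN="MY_MANAGEMENT_TOKEN"              # placeholder token

curl --request GET \
  "https://console.influxdata.com/api/v0/accounts/${ACCOUNT_ID}/clusters/${CLUSTER_ID}/databases" \
  --header "Accept: application/json" \
  --header "Authorization: Bearer ${MANAGEMENT_TOKEN}"
```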
- name: Database tokens @@ -299,13 +299,13 @@ paths: parameters: - name: accountId in: path - description: The ID of the [account](/influxdb/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) to get the [databases](/influxdb/cloud-dedicated/admin/databases/) for + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) to get the [databases](/influxdb3/cloud-dedicated/admin/databases/) for required: true schema: $ref: '#/components/schemas/UuidV4' - name: clusterId in: path - description: The ID of the cluster to get the [databases](/influxdb/cloud-dedicated/admin/databases/) for + description: The ID of the cluster to get the [databases](/influxdb3/cloud-dedicated/admin/databases/) for required: true schema: $ref: '#/components/schemas/UuidV4' @@ -320,7 +320,7 @@ paths: type: object properties: accountId: - description: The ID of the [account](/influxdb/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the database belongs to + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the database belongs to $ref: '#/components/schemas/UuidV4' clusterId: description: The ID of the cluster that the database belongs to @@ -401,7 +401,7 @@ paths: parameters: - name: accountId in: path - description: The ID of the [account](/influxdb/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) to create the database for + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) to create the database for required: true schema: $ref: '#/components/schemas/UuidV4' @@ -462,7 +462,7 @@ paths: type: object properties: accountId: - description: The ID of the [account](/influxdb/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the database belongs to + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the database belongs to $ref: '#/components/schemas/UuidV4' clusterId: description: The ID of the cluster that the database belongs to @@ -571,7 +571,7 @@ paths: parameters: - name: accountId in: path - description: The ID of the [account](/influxdb/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the database belongs to + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the database belongs to required: true schema: $ref: '#/components/schemas/UuidV4' @@ -629,7 +629,7 @@ paths: type: object properties: accountId: - description: The ID of the [account](/influxdb/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the database belongs to + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the database belongs to $ref: '#/components/schemas/UuidV4' clusterId: description: The ID of the cluster that the database belongs to @@ -727,7 +727,7 @@ paths: parameters: - name: accountId in: path - description: The ID of the [account](/influxdb/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the database belongs to + description: The ID of the 
[account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the database belongs to required: true schema: $ref: '#/components/schemas/UuidV4' @@ -783,7 +783,7 @@ paths: parameters: - name: accountId in: path - description: The ID of the [account](/influxdb/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) to create the database table for + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) to create the database table for required: true schema: $ref: '#/components/schemas/UuidV4' @@ -841,7 +841,7 @@ paths: type: object properties: accountId: - description: The ID of the [account](/influxdb/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the database table belongs to + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the database table belongs to $ref: '#/components/schemas/UuidV4' clusterId: description: The ID of the cluster that the database table belongs to @@ -906,13 +906,13 @@ paths: parameters: - name: accountId in: path - description: The ID of the [account](/influxdb/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) to get the [database tokens](/influxdb/cloud-dedicated/admin/tokens/database/) for + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) to get the [database tokens](/influxdb3/cloud-dedicated/admin/tokens/database/) for required: true schema: $ref: '#/components/schemas/UuidV4' - name: clusterId in: path - description: The ID of the cluster to get the [database tokens](/influxdb/cloud-dedicated/admin/tokens/database/) for + description: The ID of the cluster to get the [database tokens](/influxdb3/cloud-dedicated/admin/tokens/database/) for required: true schema: $ref: '#/components/schemas/UuidV4' @@ -927,10 +927,10 @@ paths: type: object properties: accountId: - description: The ID of the [account](/influxdb/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) belongs to + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to $ref: '#/components/schemas/UuidV4' clusterId: - description: The ID of the cluster that the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) belongs to + description: The ID of the cluster that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to $ref: '#/components/schemas/UuidV4' id: description: The ID of the database token @@ -1004,7 +1004,7 @@ paths: tags: - Database tokens description: | - Create a [database token](/influxdb/cloud-dedicated/admin/tokens/database/) for a cluster. + Create a [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) for a cluster. The token returned on the `accessToken` property in the response can be used to authenticate query and write requests to the cluster. @@ -1019,18 +1019,18 @@ paths: We recommend storing database tokens in a **secure secret store**. For example, see how to [authenticate Telegraf using tokens in your OS secret store](https://github.com/influxdata/telegraf/tree/master/plugins/secretstores/os). 
- If you lose a token, [delete the token from InfluxDB](/influxdb/cloud-dedicated/admin/tokens/database/delete/) and create a new one. + If you lose a token, [delete the token from InfluxDB](/influxdb3/cloud-dedicated/admin/tokens/database/delete/) and create a new one. parameters: - name: accountId in: path - description: The ID of the [account](/influxdb/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) to create the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) for + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) to create the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) for required: true schema: $ref: '#/components/schemas/UuidV4' - name: clusterId in: path - description: The ID of the cluster to create the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) for + description: The ID of the cluster to create the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) for required: true schema: $ref: '#/components/schemas/UuidV4' @@ -1078,10 +1078,10 @@ paths: type: object properties: accountId: - description: The ID of the [account](/influxdb/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) belongs to + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to $ref: '#/components/schemas/UuidV4' clusterId: - description: The ID of the cluster that the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) belongs to + description: The ID of the cluster that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to $ref: '#/components/schemas/UuidV4' id: description: The ID of the database token @@ -1190,19 +1190,19 @@ paths: parameters: - name: accountId in: path - description: The ID of the [account](/influxdb/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) belongs to + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to required: true schema: $ref: '#/components/schemas/UuidV4' - name: clusterId in: path - description: The ID of the cluster that the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) belongs to + description: The ID of the cluster that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to required: true schema: $ref: '#/components/schemas/UuidV4' - name: tokenId in: path - description: The ID of the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) to get + description: The ID of the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) to get required: true schema: $ref: '#/components/schemas/UuidV4' @@ -1215,10 +1215,10 @@ paths: type: object properties: accountId: - description: The ID of the [account](/influxdb/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) belongs to + description: The ID of the 
[account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to $ref: '#/components/schemas/UuidV4' clusterId: - description: The ID of the cluster that the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) belongs to + description: The ID of the cluster that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to $ref: '#/components/schemas/UuidV4' id: description: The ID of the database token @@ -1304,19 +1304,19 @@ paths: parameters: - name: accountId in: path - description: The ID of the [account](/influxdb/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) belongs to + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to required: true schema: $ref: '#/components/schemas/UuidV4' - name: clusterId in: path - description: The ID of the cluster that the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) belongs to + description: The ID of the cluster that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to required: true schema: $ref: '#/components/schemas/UuidV4' - name: tokenId in: path - description: The ID of the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) to update + description: The ID of the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) to update required: true schema: $ref: '#/components/schemas/UuidV4' @@ -1371,10 +1371,10 @@ paths: type: object properties: accountId: - description: The ID of the [account](/influxdb/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) belongs to + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to $ref: '#/components/schemas/UuidV4' clusterId: - description: The ID of the cluster that the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) belongs to + description: The ID of the cluster that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to $ref: '#/components/schemas/UuidV4' id: description: The ID of the database token @@ -1500,19 +1500,19 @@ paths: parameters: - name: accountId in: path - description: The ID of the [account](/influxdb/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) belongs to + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to required: true schema: $ref: '#/components/schemas/UuidV4' - name: clusterId in: path - description: The ID of the cluster that the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) belongs to + description: The ID of the cluster that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to required: true schema: $ref: '#/components/schemas/UuidV4' - name: tokenId 
in: path - description: The ID of the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) to delete + description: The ID of the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) to delete required: true schema: $ref: '#/components/schemas/UuidV4' @@ -1593,7 +1593,7 @@ components: minLength: 1 ClusterDatabaseRetentionPeriod: description: | - The retention period of the [cluster database](/influxdb/cloud-dedicated/admin/databases/) in nanoseconds, if applicable + The retention period of the [cluster database](/influxdb3/cloud-dedicated/admin/databases/) in nanoseconds, if applicable If the retention period is not set or is set to 0, the database will have infinite retention type: integer @@ -1623,7 +1623,7 @@ components: minimum: 1 ClusterDatabasePartitionTemplate: description: | - A template for [partitioning](/influxdb/cloud-dedicated/admin/custom-partitions/) a cluster database. + A template for [partitioning](/influxdb3/cloud-dedicated/admin/custom-partitions/) a cluster database. Each template part is evaluated in sequence, concatinating the final partition key from the output of each part, delimited by the partition @@ -1667,7 +1667,7 @@ components: * `time=2023-01-01, a=` -> `2023|#|!|!` * `time=2023-01-01, c=` -> `2023|!|!|` - When using the default [partitioning](/influxdb/cloud-dedicated/admin/custom-partitions/) template (YYYY-MM-DD) there is no + When using the default [partitioning](/influxdb3/cloud-dedicated/admin/custom-partitions/) template (YYYY-MM-DD) there is no encoding necessary, as the derived partition key contains a single part, and no reserved characters. [`TemplatePart::Bucket`] parts by definition will always be within the part length limit and contain no restricted characters @@ -1769,7 +1769,7 @@ components: tagName: c numberOfBuckets: 10 ClusterDatabaseTableName: - description: The name of the [cluster database](/influxdb/cloud-dedicated/admin/databases/) table + description: The name of the [cluster database](/influxdb3/cloud-dedicated/admin/databases/) table type: string examples: - TableOne @@ -1782,15 +1782,15 @@ components: - Limited Access Token - Full Access Token DatabaseTokenResourceAllDatabases: - description: A resource value for a [database token](/influxdb/cloud-dedicated/admin/tokens/database/) permission that refers to all databases + description: A resource value for a [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) permission that refers to all databases type: string enum: - '*' DatabaseTokenPermissionAction: - description: The action the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) permission allows + description: The action the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) permission allows type: string DatabaseTokenPermissionResource: - description: The resource the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) permission applies to + description: The resource the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) permission applies to anyOf: - $ref: '#/components/schemas/ClusterDatabaseName' - $ref: '#/components/schemas/DatabaseTokenResourceAllDatabases' @@ -1814,7 +1814,7 @@ components: - action: write resource: '*' DatabaseTokenPermissions: - description: The list of permissions the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) allows + description: The list of permissions the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) allows type: array items: $ref: 
'#/components/schemas/DatabaseTokenPermission' @@ -1827,7 +1827,7 @@ components: resource: '*' DatabaseTokenCreatedAt: description: | - The date and time that the [database token](/influxdb/cloud-dedicated/admin/tokens/database/) was created + The date and time that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) was created Uses RFC3339 format $ref: '#/components/schemas/DateTimeRfc3339' diff --git a/api-docs/cloud-dedicated/v1-compatibility/content/info.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/content/info.yml similarity index 70% rename from api-docs/cloud-dedicated/v1-compatibility/content/info.yml rename to api-docs/influxdb3/cloud-dedicated/v1-compatibility/content/info.yml index caa9d1643..5a162fa57 100644 --- a/api-docs/cloud-dedicated/v1-compatibility/content/info.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/content/info.yml @@ -1,6 +1,6 @@ -title: InfluxDB v1 HTTP API for InfluxDB Cloud Dedicated +title: InfluxDB v1 HTTP API for InfluxDB 3 Cloud Dedicated x-influxdata-short-title: v1 Compatibility API -summary: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB v3 Cloud Dedicated database. +summary: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Cloud Dedicated database. description: | The InfluxDB 1.x `/write` and `/query` endpoints work with InfluxDB 1.x client libraries and third-party integrations like Grafana and others. @@ -9,7 +9,7 @@ description: | #### Related - [InfluxDB `/api/v2` API for InfluxDB Cloud Dedicated](/influxdb/cloud-dedicated/api/v2/) + [InfluxDB `/api/v2` API for InfluxDB 3 Cloud Dedicated](/influxdb3/cloud-dedicated/api/v2/) license: name: MIT url: 'https://opensource.org/licenses/MIT' diff --git a/api-docs/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml similarity index 96% rename from api-docs/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml rename to api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index e13b311fe..46de3a497 100644 --- a/api-docs/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -1,6 +1,6 @@ openapi: 3.0.0 info: - title: InfluxDB v1 HTTP API for InfluxDB Cloud Dedicated + title: InfluxDB v1 HTTP API for InfluxDB 3 Cloud Dedicated version: '' description: | The InfluxDB 1.x `/write` and `/query` endpoints work with InfluxDB 1.x client libraries and third-party integrations like Grafana and others. @@ -10,11 +10,11 @@ info: #### Related - [InfluxDB `/api/v2` API for InfluxDB Cloud Dedicated](/influxdb/cloud-dedicated/api/v2/) + [InfluxDB `/api/v2` API for InfluxDB 3 Cloud Dedicated](/influxdb3/cloud-dedicated/api/v2/) license: name: MIT url: https://opensource.org/licenses/MIT - summary: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB v3 Cloud Dedicated database. + summary: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Cloud Dedicated database. servers: - url: / security: @@ -247,9 +247,9 @@ paths: The response is a HTTP `204` status code to inform you the querier is available. - For InfluxDB Cloud Dedicated, this endpoint only checks the status of queriers; doesn't check the status of ingesters. 
+ For InfluxDB 3 Cloud Dedicated, this endpoint only checks the status of queriers; doesn't check the status of ingesters. - To check the health of ingesters before writing data, send a request to one of the [write endpoints](/influxdb/cloud-dedicated/api/v2/#tag/Write). + To check the health of ingesters before writing data, send a request to one of the [write endpoints](/influxdb3/cloud-dedicated/api/v2/#tag/Write). This endpoint doesn't require authentication. operationId: GetPing @@ -285,9 +285,9 @@ paths: The response is a HTTP `204` status code to inform you the querier is available. - For InfluxDB Cloud Dedicated, this endpoint only checks the status of queriers; doesn't check the status of ingesters. + For InfluxDB 3 Cloud Dedicated, this endpoint only checks the status of queriers; doesn't check the status of ingesters. - To check the health of ingesters before writing data, send a request to one of the [write endpoints](/influxdb/cloud-dedicated/api/v2/#tag/Write). + To check the health of ingesters before writing data, send a request to one of the [write endpoints](/influxdb3/cloud-dedicated/api/v2/#tag/Write). This endpoint doesn't require authentication. diff --git a/api-docs/cloud-dedicated/v2/content/info.yml b/api-docs/influxdb3/cloud-dedicated/v2/content/info.yml similarity index 60% rename from api-docs/cloud-dedicated/v2/content/info.yml rename to api-docs/influxdb3/cloud-dedicated/v2/content/info.yml index 3b927a971..e8b72ba41 100644 --- a/api-docs/cloud-dedicated/v2/content/info.yml +++ b/api-docs/influxdb3/cloud-dedicated/v2/content/info.yml @@ -1,9 +1,9 @@ -title: InfluxDB v2 HTTP API for InfluxDB Cloud Dedicated +title: InfluxDB v2 HTTP API for InfluxDB 3 Cloud Dedicated x-influxdata-short-title: v2 API -summary: The InfluxDB v2 HTTP API for InfluxDB Cloud Dedicated provides a v2-compatible programmatic interface for writing data stored in an InfluxDB Cloud Dedicated database. +summary: The InfluxDB v2 HTTP API for InfluxDB 3 Cloud Dedicated provides a v2-compatible programmatic interface for writing data stored in an InfluxDB 3 Cloud Dedicated database. description: | - The InfluxDB v2 HTTP API lets you use `/api/v2` endpoints for managing retention policy mappings and writing data stored in an InfluxDB v3 instance. + The InfluxDB v2 HTTP API lets you use `/api/v2` endpoints for managing retention policy mappings and writing data stored in an InfluxDB 3 instance. This documentation is generated from the [InfluxDB OpenAPI specification](https://raw.githubusercontent.com/influxdata/openapi/master/contracts/ref/cloud.yml). 
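For reference, the /ping behavior described in the hunks above can be exercised with a single request. This is a minimal sketch; cluster-id.a.influxdb.io is the placeholder host from the servers definitions in this PR, so substitute your cluster URL.

```bash
# Check querier availability via the /ping endpoint; no authentication is required.
# A 204 No Content response means the querier is available (ingester status is not checked).
curl --verbose --request GET "https://cluster-id.a.influxdb.io/ping"
```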
diff --git a/api-docs/cloud-dedicated/v2/content/servers.yml b/api-docs/influxdb3/cloud-dedicated/v2/content/servers.yml similarity index 58% rename from api-docs/cloud-dedicated/v2/content/servers.yml rename to api-docs/influxdb3/cloud-dedicated/v2/content/servers.yml index 4b4503143..6ec2da5f1 100644 --- a/api-docs/cloud-dedicated/v2/content/servers.yml +++ b/api-docs/influxdb3/cloud-dedicated/v2/content/servers.yml @@ -1,8 +1,8 @@ - url: https://{baseurl} - description: InfluxDB Cloud Dedicated API URL + description: InfluxDB 3 Cloud Dedicated API URL variables: baseurl: enum: - 'cluster-id.a.influxdb.io' default: 'cluster-id.a.influxdb.io' - description: InfluxDB Cloud Dedicated URL + description: InfluxDB 3 Cloud Dedicated URL diff --git a/api-docs/cloud-dedicated/v2/content/tag-groups.yml b/api-docs/influxdb3/cloud-dedicated/v2/content/tag-groups.yml similarity index 100% rename from api-docs/cloud-dedicated/v2/content/tag-groups.yml rename to api-docs/influxdb3/cloud-dedicated/v2/content/tag-groups.yml diff --git a/api-docs/cloud-dedicated/v2/ref.yml b/api-docs/influxdb3/cloud-dedicated/v2/ref.yml similarity index 88% rename from api-docs/cloud-dedicated/v2/ref.yml rename to api-docs/influxdb3/cloud-dedicated/v2/ref.yml index b223b244e..eec0b7d3a 100644 --- a/api-docs/cloud-dedicated/v2/ref.yml +++ b/api-docs/influxdb3/cloud-dedicated/v2/ref.yml @@ -1,25 +1,25 @@ openapi: 3.0.0 info: - title: InfluxDB v2 HTTP API for Cloud Dedicated + title: InfluxDB v2 HTTP API for InfluxDB 3 Cloud Dedicated description: | - The InfluxDB v2 HTTP API lets you use `/api/v2` endpoints for managing retention policy mappings and writing data stored in an InfluxDB v3 instance. + The InfluxDB v2 HTTP API lets you use `/api/v2` endpoints for managing retention policy mappings and writing data stored in an InfluxDB 3 instance. This documentation is generated from the [InfluxDB OpenAPI specification](https://raw.githubusercontent.com/influxdata/openapi/master/contracts/ref/cloud.yml). license: name: MIT url: https://opensource.org/licenses/MIT - summary: The InfluxDB v2 HTTP API for InfluxDB Cloud Dedicated provides a v2-compatible programmatic interface for writing data stored in an InfluxDB Cloud Dedicated database. + summary: The InfluxDB v2 HTTP API for InfluxDB 3 Cloud Dedicated provides a v2-compatible programmatic interface for writing data stored in an InfluxDB 3 Cloud Dedicated database. version: '' servers: - url: https://{baseurl} - description: InfluxDB Cloud Dedicated API URL + description: InfluxDB 3 Cloud Dedicated API URL variables: baseurl: enum: - cluster-id.a.influxdb.io default: cluster-id.a.influxdb.io - description: InfluxDB Cloud Dedicated URL + description: InfluxDB 3 Cloud Dedicated URL security: - BearerAuthentication: [] - TokenAuthentication: [] @@ -29,32 +29,32 @@ tags: - description: | ### Write data - InfluxDB Cloud Dedicated provides the following HTTP API endpoints for writing data: + InfluxDB 3 Cloud Dedicated provides the following HTTP API endpoints for writing data: - - **Recommended**: [`/api/v2/write` endpoint](#operation/PostWrite) for new write workloads or for bringing existing InfluxDB v2 write workloads to v3. - - [`/write` endpoint](#operation/PostLegacyWrite) for bringing existing InfluxDB v1 write workloads to v3. + - **Recommended**: [`/api/v2/write` endpoint](#operation/PostWrite) for new write workloads or for bringing existing InfluxDB v2 write workloads to InfluxDB 3. 
+ - [`/write` endpoint](#operation/PostLegacyWrite) for bringing existing InfluxDB v1 write workloads to InfluxDB 3. Both endpoints accept the same line protocol format and process data in the same way. ### Query data - InfluxDB Cloud Dedicated provides the following protocols for executing a query: + InfluxDB 3 Cloud Dedicated provides the following protocols for executing a query: - - **Recommended**: _Flight+gRPC_ request that contains an SQL or InfluxQL query. See how to [get started querying InfluxDB using Flight and SQL](/influxdb/cloud-dedicated/get-started/query/). + - **Recommended**: _Flight+gRPC_ request that contains an SQL or InfluxQL query. See how to [get started querying InfluxDB using Flight and SQL](/influxdb3/cloud-dedicated/get-started/query/). - HTTP API [`/query` request](#operation/GetLegacyQuery) that contains an InfluxQL query. - Use this protocol when bringing existing InfluxDB v1 query workloads to v3. + Use this protocol when bringing existing InfluxDB v1 query workloads to InfluxDB 3. ### InfluxDB v2 compatibility - The HTTP API [`/api/v2/write` endpoint](#operation/PostWrite) works with the [`Bearer`](#section/Authentication/BearerAuthentication) and [`Token`](#section/Authentication/TokenAuthentication) authentication schemes and existing InfluxDB 2.x tools and code for [writing data](/influxdb/cloud-dedicated/write-data/). + The HTTP API [`/api/v2/write` endpoint](#operation/PostWrite) works with the [`Bearer`](#section/Authentication/BearerAuthentication) and [`Token`](#section/Authentication/TokenAuthentication) authentication schemes and existing InfluxDB 2.x tools and code for [writing data](/influxdb3/cloud-dedicated/write-data/). - See how to [use the InfluxDB v2 HTTP API with InfluxDB Cloud Dedicated](/influxdb/cloud-dedicated/guides/api-compatibility/v2/). + See how to [use the InfluxDB v2 HTTP API with InfluxDB 3 Cloud Dedicated](/influxdb3/cloud-dedicated/guides/api-compatibility/v2/). ### InfluxDB v1 compatibility The HTTP API [`/write` endpoint](#operation/PostLegacyWrite) and [`/query` endpoint](#operation/GetLegacyQuery) work with InfluxDB 1.x username/password [authentication schemes](#section/Authentication/) and existing InfluxDB 1.x tools and code. - See how to [use the InfluxDB v1 HTTP API with InfluxDB Cloud Dedicated](/influxdb/cloud-dedicated/guides/api-compatibility/v1/). + See how to [use the InfluxDB v1 HTTP API with InfluxDB 3 Cloud Dedicated](/influxdb3/cloud-dedicated/guides/api-compatibility/v1/). name: API compatibility x-traitTag: true - description: | @@ -102,19 +102,19 @@ tags: - HTTP clients can query the v1 [`/query` endpoint](#operation/GetLegacyQuery) using **InfluxQL** and retrieve data in **CSV** or **JSON** format. - - The `/api/v2/query` endpoint can't query InfluxDB Cloud Dedicated. + - The `/api/v2/query` endpoint can't query InfluxDB 3 Cloud Dedicated. - _Flight + gRPC_ clients can query using **SQL** or **InfluxQL** and retrieve data in **Arrow** format. 
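To make the v1 query path above concrete, here is a hedged sketch of an InfluxQL request against the /query endpoint. The db parameter and the application/csv Accept header appear later in this spec; the Authorization header form and the measurement name are assumptions for illustration.

```bash
# Hypothetical InfluxQL query via the v1-compatible /query endpoint.
DATABASE_TOKEN="MY_DATABASE_TOKEN"   # placeholder; see the spec's Authentication section for supported schemes
curl --get "https://cluster-id.a.influxdb.io/query" \
  --header "Accept: application/csv" \
  --header "Authorization: Token ${DATABASE_TOKEN}" \
  --data-urlencode "db=DATABASE_NAME" \
  --data-urlencode "q=SELECT * FROM home WHERE time > now() - 1h"
```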
#### Related guides - - [Get started querying InfluxDB](/influxdb/cloud-dedicated/get-started/query/) - - [Execute queries](/influxdb/cloud-dedicated/query-data/execute-queries/) + - [Get started querying InfluxDB](/influxdb3/cloud-dedicated/get-started/query/) + - [Execute queries](/influxdb3/cloud-dedicated/query-data/execute-queries/) name: Query - description: | - See the [**Get Started**](/influxdb/cloud-dedicated/get-started/) tutorial + See the [**Get Started**](/influxdb3/cloud-dedicated/get-started/) tutorial to get up and running authenticating with tokens, writing to databases, and querying data. - [**InfluxDB API client libraries and Flight clients**](/influxdb/cloud-dedicated/reference/client-libraries/) + [**InfluxDB API client libraries and Flight clients**](/influxdb3/cloud-dedicated/reference/client-libraries/) are available to integrate InfluxDB APIs with your application. name: Quick start x-traitTag: true @@ -131,8 +131,8 @@ tags: | `200` | Success | | | `201` | Created | One or more resources are created. The response body contains details about the resource. | | `204` | No content | The request is successful and no data is returned. For example, The [`/write` and `/api/v2/write` endpoints](#tag/Write) return this status code if all data in the batch is written and queryable. | - | `400` | Bad request | InfluxDB can't parse the request due to an incorrect parameter or bad syntax. For _writes_, the error may indicate one of the following problems:
  • [Rejected points](/influxdb/cloud-dedicated/write-data/troubleshoot/#troubleshoot-rejected-points)
  • `Authorization` header is missing or malformed or the API token doesn't have permission for the operation.
| - | `401` | Unauthorized | May indicate one of the following:
  • `Authorization: Token` header is missing or malformed
  • API token value is missing from the header
  • API token doesn't have permission. For more information about token types and permissions, see [Manage tokens](/influxdb/cloud-dedicated/admin/tokens/)
| + | `400` | Bad request | InfluxDB can't parse the request due to an incorrect parameter or bad syntax. For _writes_, the error may indicate one of the following problems:
  • [Rejected points](/influxdb3/cloud-dedicated/write-data/troubleshoot/#troubleshoot-rejected-points)
  • `Authorization` header is missing or malformed, or the API token doesn't have permission for the operation.&#13;
| + | `401` | Unauthorized | May indicate one of the following:
  • `Authorization: Token` header is missing or malformed
  • API token value is missing from the header
  • API token doesn't have permission. For more information about token types and permissions, see [Manage tokens](/influxdb3/cloud-dedicated/admin/tokens/)
| | `404` | Not found | Requested resource was not found. `message` in the response body provides details about the requested resource. | | `405` | Method not allowed | The API path doesn't support the HTTP method used in the request--for example, you send a `POST` request to an endpoint that only allows `GET`. | | `422` | Unprocessable entity | Request data is invalid. `code` and `message` in the response body provide details about the problem. | @@ -143,7 +143,7 @@ tags: - name: System information endpoints - name: Usage - description: | - Write time series data to [databases](/influxdb/cloud-dedicated/admin/databases/) using InfluxDB v1 or v2 endpoints. + Write time series data to [databases](/influxdb3/cloud-dedicated/admin/databases/) using InfluxDB v1 or v2 endpoints. name: Write paths: /ping: @@ -153,9 +153,9 @@ paths: The response is a HTTP `204` status code to inform you the querier is available. - For InfluxDB Cloud Dedicated, this endpoint only checks the status of queriers; doesn't check the status of ingesters. + For InfluxDB 3 Cloud Dedicated, this endpoint only checks the status of queriers; doesn't check the status of ingesters. - To check the health of ingesters before writing data, send a request to one of the [write endpoints](/influxdb/cloud-dedicated/api/v2/#tag/Write). + To check the health of ingesters before writing data, send a request to one of the [write endpoints](/influxdb3/cloud-dedicated/api/v2/#tag/Write). This endpoint doesn't require authentication. operationId: GetPing @@ -191,9 +191,9 @@ paths: The response is a HTTP `204` status code to inform you the querier is available. - For InfluxDB Cloud Dedicated, this endpoint only checks the status of queriers; doesn't check the status of ingesters. + For InfluxDB 3 Cloud Dedicated, this endpoint only checks the status of queriers; doesn't check the status of ingesters. - To check the health of ingesters before writing data, send a request to one of the [write endpoints](/influxdb/cloud-dedicated/api/v2/#tag/Write). + To check the health of ingesters before writing data, send a request to one of the [write endpoints](/influxdb3/cloud-dedicated/api/v2/#tag/Write). This endpoint doesn't require authentication. operationId: HeadPing @@ -227,18 +227,18 @@ paths: description: | Writes data to a database. - Use this endpoint to send data in [line protocol](/influxdb/cloud-dedicated/reference/syntax/line-protocol/) format to InfluxDB. + Use this endpoint to send data in [line protocol](/influxdb3/cloud-dedicated/reference/syntax/line-protocol/) format to InfluxDB. - InfluxDB Cloud Dedicated does the following when you send a write request: + InfluxDB 3 Cloud Dedicated does the following when you send a write request: 1. Validates the request. - 2. If successful, attempts to [ingest data](/influxdb/cloud-dedicated/reference/internals/durability/#data-ingest) from the request body; otherwise, responds with an [error status](/influxdb/cloud-dedicated/write-data/troubleshoot/#review-http-status-codes). + 2. If successful, attempts to [ingest data](/influxdb3/cloud-dedicated/reference/internals/durability/#data-ingest) from the request body; otherwise, responds with an [error status](/influxdb3/cloud-dedicated/write-data/troubleshoot/#review-http-status-codes). 3. Ingests or rejects data in the batch and returns one of the following HTTP status codes: - `204 No Content`: All data in the batch is ingested. 
- `400 Bad Request`: Some (_when **partial writes** are configured for the cluster_) or all of the data has been rejected. Data that has not been rejected is ingested and queryable. - The response body contains error details about [rejected points](/influxdb/cloud-dedicated/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. + The response body contains error details about [rejected points](/influxdb3/cloud-dedicated/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. Writes are synchronous--the response status indicates the final status of the write and all ingested data is queryable. @@ -247,17 +247,17 @@ paths: #### Write endpoints - The [`/write`](#operation/PostLegacyWrite) and [`/api/v2/write`](#operation/PostWrite) endpoints are functionally equivalent for writing data to InfluxDB Cloud Dedicated. + The [`/write`](#operation/PostLegacyWrite) and [`/api/v2/write`](#operation/PostWrite) endpoints are functionally equivalent for writing data to InfluxDB 3 Cloud Dedicated. - - Use the [`/write` endpoint](#operation/PostLegacyWrite) for [InfluxDB v1 parameter compatibility](/influxdb/cloud-dedicated/guides/api-compatibility/v1/). - - Use the [`/api/v2/write` endpoint](#operation/PostWrite) for [InfluxDB v2 parameter compatibility](/influxdb/cloud-dedicated/guides/api-compatibility/v2/). + - Use the [`/write` endpoint](#operation/PostLegacyWrite) for [InfluxDB v1 parameter compatibility](/influxdb3/cloud-dedicated/guides/api-compatibility/v1/). + - Use the [`/api/v2/write` endpoint](#operation/PostWrite) for [InfluxDB v2 parameter compatibility](/influxdb3/cloud-dedicated/guides/api-compatibility/v2/). #### Related guides - - [Get started writing data](/influxdb/cloud-dedicated/get-started/write/) - - [Write data](/influxdb/cloud-dedicated/write-data/) - - [Best practices for writing data](/influxdb/cloud-dedicated/write-data/best-practices/) - - [Troubleshoot issues writing data](/influxdb/cloud-dedicated/write-data/troubleshoot/) + - [Get started writing data](/influxdb3/cloud-dedicated/get-started/write/) + - [Write data](/influxdb3/cloud-dedicated/write-data/) + - [Best practices for writing data](/influxdb3/cloud-dedicated/write-data/best-practices/) + - [Troubleshoot issues writing data](/influxdb3/cloud-dedicated/write-data/troubleshoot/) operationId: PostWrite parameters: - $ref: '#/components/parameters/TraceSpan' @@ -309,7 +309,7 @@ paths: #### Related guides - - [Troubleshoot issues writing data](/influxdb/cloud-dedicated/write-data/troubleshoot/) + - [Troubleshoot issues writing data](/influxdb3/cloud-dedicated/write-data/troubleshoot/) in: header name: Accept schema: @@ -364,7 +364,7 @@ paths: format: byte type: string description: | - In the request body, provide data in [line protocol format](/influxdb/cloud-dedicated/reference/syntax/line-protocol/). + In the request body, provide data in [line protocol format](/influxdb3/cloud-dedicated/reference/syntax/line-protocol/). To send compressed data, do the following: @@ -374,12 +374,12 @@ paths: #### Related guides - - [Best practices for optimizing writes](/influxdb/cloud-dedicated/write-data/best-practices/optimize-writes/) + - [Best practices for optimizing writes](/influxdb3/cloud-dedicated/write-data/best-practices/optimize-writes/) required: true responses: '201': description: | - Success ("Created"). Some points in the batch are written and queryable, and some points are rejected. 
The response body contains details about the [rejected points](/influxdb/cloud-dedicated/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. + Success ("Created"). Some points in the batch are written and queryable, and some points are rejected. The response body contains details about the [rejected points](/influxdb3/cloud-dedicated/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. content: application/json: examples: @@ -444,7 +444,7 @@ paths: description: | Media type that the client can understand. - **Note**: With `application/csv`, query results include [**unix timestamps**](/influxdb/cloud-dedicated/reference/glossary/#unix-timestamp) instead of [RFC3339 timestamps](/influxdb/cloud-dedicated/reference/glossary/#rfc3339-timestamp). + **Note**: With `application/csv`, query results include [**unix timestamps**](/influxdb3/cloud-dedicated/reference/glossary/#unix-timestamp) instead of [RFC3339 timestamps](/influxdb3/cloud-dedicated/reference/glossary/#rfc3339-timestamp). enum: - application/json - application/csv @@ -478,7 +478,7 @@ paths: schema: type: string - description: | - The [database](/influxdb/cloud-dedicated/admin/databases/) to query data from. + The [database](/influxdb3/cloud-dedicated/admin/databases/) to query data from. in: query name: db required: true @@ -486,7 +486,7 @@ paths: type: string - description: | The retention policy to query data from. - For more information, see [InfluxQL DBRP naming convention](/influxdb/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention). + For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention). in: query name: rp schema: @@ -499,8 +499,8 @@ paths: type: string - description: | A unix timestamp precision. - Formats timestamps as [unix (epoch) timestamps](/influxdb/cloud-dedicated/reference/glossary/#unix-timestamp) the specified precision - instead of [RFC3339 timestamps](/influxdb/cloud-dedicated/reference/glossary/#rfc3339-timestamp) with nanosecond precision. + Formats timestamps as [unix (epoch) timestamps](/influxdb3/cloud-dedicated/reference/glossary/#unix-timestamp) the specified precision + instead of [RFC3339 timestamps](/influxdb3/cloud-dedicated/reference/glossary/#rfc3339-timestamp) with nanosecond precision. in: query name: epoch schema: @@ -557,9 +557,9 @@ paths: description: | #### InfluxDB Cloud: - returns this error if a **read** or **write** request exceeds your - plan's [adjustable service quotas](/influxdb/cloud-dedicated/account-management/limits/#adjustable-service-quotas) + plan's [adjustable service quotas](/influxdb3/cloud-dedicated/account-management/limits/#adjustable-service-quotas) or if a **delete** request exceeds the maximum - [global limit](/influxdb/cloud-dedicated/account-management/limits/#global-limits) + [global limit](/influxdb3/cloud-dedicated/account-management/limits/#global-limits) - returns `Retry-After` header that describes when to try the write again. headers: Retry-After: @@ -583,19 +583,19 @@ paths: description: | Writes data to a database. - Use this endpoint for [InfluxDB v1 parameter compatibility](/influxdb/cloud-dedicated/guides/api-compatibility/v1/) when sending data in [line protocol](/influxdb/cloud-dedicated/reference/syntax/line-protocol/) format to InfluxDB. 
+ Use this endpoint for [InfluxDB v1 parameter compatibility](/influxdb3/cloud-dedicated/guides/api-compatibility/v1/) when sending data in [line protocol](/influxdb3/cloud-dedicated/reference/syntax/line-protocol/) format to InfluxDB. - InfluxDB Cloud Dedicated does the following when you send a write request: + InfluxDB 3 Cloud Dedicated does the following when you send a write request: 1. Validates the request. - 2. If successful, attempts to [ingest data](/influxdb/cloud-dedicated/reference/internals/durability/#data-ingest) from the request body; otherwise, responds with an [error status](/influxdb/cloud-dedicated/write-data/troubleshoot/#review-http-status-codes). + 2. If successful, attempts to [ingest data](/influxdb3/cloud-dedicated/reference/internals/durability/#data-ingest) from the request body; otherwise, responds with an [error status](/influxdb3/cloud-dedicated/write-data/troubleshoot/#review-http-status-codes). 3. Ingests or rejects data in the batch and returns one of the following HTTP status codes: - `204 No Content`: all data in the batch is ingested - `201 Created` (_If the cluster is configured to allow **partial writes**_): some points in the batch are ingested and queryable, and some points are rejected - `400 Bad Request`: all data is rejected - The response body contains error details about [rejected points](/influxdb/cloud-dedicated/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. + The response body contains error details about [rejected points](/influxdb3/cloud-dedicated/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. Writes are synchronous--the response status indicates the final status of the write and all ingested data is queryable. @@ -604,17 +604,17 @@ paths: #### Write endpoints - The [`/write`](#operation/PostLegacyWrite) and [`/api/v2/write`](#operation/PostWrite) endpoints are functionally equivalent for writing data to InfluxDB Cloud Dedicated. + The [`/write`](#operation/PostLegacyWrite) and [`/api/v2/write`](#operation/PostWrite) endpoints are functionally equivalent for writing data to InfluxDB 3 Cloud Dedicated. - - Use the [`/write` endpoint](#operation/PostLegacyWrite) for [InfluxDB v1 parameter compatibility](/influxdb/cloud-dedicated/guides/api-compatibility/v1/). - - Use the [`/api/v2/write` endpoint](#operation/PostWrite) for [InfluxDB v2 parameter compatibility](/influxdb/cloud-dedicated/guides/api-compatibility/v2/). + - Use the [`/write` endpoint](#operation/PostLegacyWrite) for [InfluxDB v1 parameter compatibility](/influxdb3/cloud-dedicated/guides/api-compatibility/v1/). + - Use the [`/api/v2/write` endpoint](#operation/PostWrite) for [InfluxDB v2 parameter compatibility](/influxdb3/cloud-dedicated/guides/api-compatibility/v2/). 
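As an illustration of the v1-compatible write request described in this spec, the following sketch posts a single line protocol point to the `/write` endpoint. The cluster host, `DATABASE_NAME`, and `DATABASE_TOKEN` are placeholders, and the `db` and `precision` query parameters follow the usual InfluxDB v1 convention; treat this as a sketch rather than a canonical example.

```sh
# Sketch: write one line protocol point to InfluxDB 3 Cloud Dedicated
# through the v1-compatible /write endpoint.
# Placeholders: cluster-id.a.influxdb.io, DATABASE_NAME, DATABASE_TOKEN.
curl --request POST "https://cluster-id.a.influxdb.io/write?db=DATABASE_NAME&precision=s" \
  --header "Authorization: Token DATABASE_TOKEN" \
  --data-binary 'home,room=kitchen temp=21.5 1700000000'
```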
#### Related guides - - [Get started writing data](/influxdb/cloud-dedicated/get-started/write/) - - [Write data](/influxdb/cloud-dedicated/write-data/) - - [Best practices for writing data](/influxdb/cloud-dedicated/write-data/best-practices/) - - [Troubleshoot issues writing data](/influxdb/cloud-dedicated/write-data/troubleshoot/) + - [Get started writing data](/influxdb3/cloud-dedicated/get-started/write/) + - [Write data](/influxdb3/cloud-dedicated/write-data/) + - [Best practices for writing data](/influxdb3/cloud-dedicated/write-data/best-practices/) + - [Troubleshoot issues writing data](/influxdb3/cloud-dedicated/write-data/troubleshoot/) parameters: - $ref: '#/components/parameters/TraceSpan' - description: The InfluxDB 1.x username to authenticate the request. @@ -668,7 +668,7 @@ paths: Data from the batch was rejected and not written. The response body indicates if a partial write occurred or all data was rejected. If a partial write occurred, then some points from the batch are written and queryable. - The response body contains details about the [rejected points](/influxdb/cloud-dedicated/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. + The response body contains details about the [rejected points](/influxdb3/cloud-dedicated/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. content: application/json: examples: @@ -710,7 +710,7 @@ paths: The request contained data outside the database’s retention period. InfluxDB rejected the batch and wrote no data. - The response body contains details about the [rejected points](/influxdb/cloud-dedicated/write-data/troubleshoot/#troubleshoot-rejected-points). + The response body contains details about the [rejected points](/influxdb3/cloud-dedicated/write-data/troubleshoot/#troubleshoot-rejected-points). '429': description: Token is temporarily over quota. The Retry-After header describes when to try the write again. headers: @@ -928,7 +928,7 @@ components: orgID: description: | An organization ID. - Identifies the [organization](/influxdb/cloud-dedicated/reference/glossary/#organization) that owns the mapping. + Identifies the [organization](/influxdb3/cloud-dedicated/reference/glossary/#organization) that owns the mapping. type: string retention_policy: description: | @@ -966,12 +966,12 @@ components: org: description: | An organization name. - Identifies the [organization](/influxdb/cloud-dedicated/reference/glossary/#organization) that owns the mapping. + Identifies the [organization](/influxdb3/cloud-dedicated/reference/glossary/#organization) that owns the mapping. type: string orgID: description: | An organization ID. - Identifies the [organization](/influxdb/cloud-dedicated/reference/glossary/#organization) that owns the mapping. + Identifies the [organization](/influxdb3/cloud-dedicated/reference/glossary/#organization) that owns the mapping. type: string retention_policy: description: | @@ -1009,7 +1009,7 @@ components: $ref: '#/components/schemas/DBRP' type: array DateTimeLiteral: - description: Represents an instant in time with nanosecond precision in [RFC3339Nano date/time format](/influxdb/cloud-dedicated/reference/glossary/#rfc3339nano-timestamp). + description: Represents an instant in time with nanosecond precision in [RFC3339Nano date/time format](/influxdb3/cloud-dedicated/reference/glossary/#rfc3339nano-timestamp). 
properties: type: $ref: '#/components/schemas/NodeType' @@ -1033,18 +1033,18 @@ components: properties: predicate: description: | - An expression in [delete predicate syntax](/influxdb/cloud-dedicated/reference/syntax/delete-predicate/). + An expression in [delete predicate syntax](/influxdb3/cloud-dedicated/reference/syntax/delete-predicate/). example: tag1="value1" and (tag2="value2" and tag3!="value3") type: string start: description: | - A timestamp ([RFC3339 date/time format](/influxdb/cloud-dedicated/reference/glossary/#rfc3339-timestamp)). + A timestamp ([RFC3339 date/time format](/influxdb3/cloud-dedicated/reference/glossary/#rfc3339-timestamp)). The earliest time to delete from. format: date-time type: string stop: description: | - A timestamp ([RFC3339 date/time format](/influxdb/cloud-dedicated/reference/glossary/#rfc3339-timestamp)). + A timestamp ([RFC3339 date/time format](/influxdb3/cloud-dedicated/reference/glossary/#rfc3339-timestamp)). The latest time to delete from. format: date-time type: string @@ -1055,7 +1055,7 @@ components: Dialect: description: | Options for tabular data output. - Default output is [annotated CSV](/influxdb/cloud-dedicated/reference/syntax/annotated-csv/#csv-response-format) with headers. + Default output is [annotated CSV](/influxdb3/cloud-dedicated/reference/syntax/annotated-csv/#csv-response-format) with headers. For more information about tabular data **dialect**, see [W3 metadata vocabulary for tabular data](https://www.w3.org/TR/2015/REC-tabular-metadata-20151217/#dialect-descriptions). @@ -1067,7 +1067,7 @@ components: #### Related guides - - See [Annotated CSV annotations](/influxdb/cloud-dedicated/reference/syntax/annotated-csv/#annotations) for examples and more information. + - See [Annotated CSV annotations](/influxdb3/cloud-dedicated/reference/syntax/annotated-csv/#annotations) for examples and more information. For more information about **annotations** in tabular data, see [W3 metadata vocabulary for tabular data](https://www.w3.org/TR/2015/REC-tabular-data-model-20151217/#columns). @@ -1089,7 +1089,7 @@ components: default: RFC3339 description: | The format for timestamps in results. - Default is [`RFC3339` date/time format](/influxdb/cloud-dedicated/reference/glossary/#rfc3339-timestamp). + Default is [`RFC3339` date/time format](/influxdb3/cloud-dedicated/reference/glossary/#rfc3339-timestamp). To include nanoseconds in timestamps, use `RFC3339Nano`. #### Example formatted date/time values @@ -1629,7 +1629,7 @@ components: readOnly: true type: string time: - description: The time ([RFC3339Nano date/time format](/influxdb/cloud-dedicated/reference/glossary/#rfc3339nano-timestamp)) that the event occurred. + description: The time ([RFC3339Nano date/time format](/influxdb3/cloud-dedicated/reference/glossary/#rfc3339nano-timestamp)) that the event occurred. example: 2006-01-02T15:04:05.999999999Z07:00 format: date-time readOnly: true @@ -1824,7 +1824,7 @@ components: type: integer shardGroupDurationSeconds: description: | - The [shard group duration](/influxdb/cloud-dedicated/reference/glossary/#shard). + The [shard group duration](/influxdb3/cloud-dedicated/reference/glossary/#shard). The number of seconds that each shard group covers. 
#### InfluxDB Cloud @@ -1833,7 +1833,7 @@ components: #### Related guides - - InfluxDB [shards and shard groups](/influxdb/cloud-dedicated/reference/internals/shards/) + - InfluxDB [shards and shard groups](/influxdb3/cloud-dedicated/reference/internals/shards/) format: int64 type: integer type: @@ -1910,7 +1910,7 @@ components: RetentionRules: description: | Retention rules to expire or retain data. - The InfluxDB `/api/v2` API uses `RetentionRules` to configure the [retention period](/influxdb/cloud-dedicated/reference/glossary/#retention-period). + The InfluxDB `/api/v2` API uses `RetentionRules` to configure the [retention period](/influxdb3/cloud-dedicated/reference/glossary/#retention-period). #### InfluxDB Cloud @@ -1977,8 +1977,8 @@ components: ### Basic authentication scheme Use the `Authorization` header with the `Basic` scheme to authenticate v1 API `/write` and `/query` requests. - When authenticating requests, InfluxDB Cloud Dedicated checks that the `password` part of the decoded credential is an authorized [database token](/influxdb/cloud-dedicated/admin/tokens/). - InfluxDB Cloud Dedicated ignores the `username` part of the decoded credential. + When authenticating requests, InfluxDB 3 Cloud Dedicated checks that the `password` part of the decoded credential is an authorized [database token](/influxdb3/cloud-dedicated/admin/tokens/). + InfluxDB 3 Cloud Dedicated ignores the `username` part of the decoded credential. ### Syntax @@ -1988,13 +1988,13 @@ components: Replace the following: - - **`[USERNAME]`**: an optional string value (ignored by InfluxDB Cloud Dedicated). - - **`DATABASE_TOKEN`**: a [database token](/influxdb/cloud-dedicated/admin/tokens/). + - **`[USERNAME]`**: an optional string value (ignored by InfluxDB 3 Cloud Dedicated). + - **`DATABASE_TOKEN`**: a [database token](/influxdb3/cloud-dedicated/admin/tokens/). - Encode the `[USERNAME]:DATABASE_TOKEN` credential using base64 encoding, and then append the encoded string to the `Authorization: Basic` header. ### Example - The following example shows how to use cURL with the `Basic` authentication scheme and a [database token](/influxdb/cloud-dedicated/admin/tokens/): + The following example shows how to use cURL with the `Basic` authentication scheme and a [database token](/influxdb3/cloud-dedicated/admin/tokens/): ```sh ####################################### @@ -2012,8 +2012,8 @@ components: Replace the following: - - **`DATABASE_NAME`**: your InfluxDB Cloud Dedicated database - - **`DATABASE_TOKEN`**: a [database token](/influxdb/cloud-dedicated/admin/tokens/) with sufficient permissions to the database + - **`DATABASE_NAME`**: your InfluxDB 3 Cloud Dedicated database + - **`DATABASE_TOKEN`**: a [database token](/influxdb3/cloud-dedicated/admin/tokens/) with sufficient permissions to the database scheme: basic type: http QuerystringAuthentication: @@ -2027,7 +2027,7 @@ components: ### Query string authentication In the URL, pass the `p` query parameter to authenticate `/write` and `/query` requests. - When authenticating requests, InfluxDB Cloud Dedicated checks that `p` (_password_) is an authorized database token and ignores the `u` (_username_) parameter. + When authenticating requests, InfluxDB 3 Cloud Dedicated checks that `p` (_password_) is an authorized database token and ignores the `u` (_username_) parameter. 
### Syntax @@ -2038,7 +2038,7 @@ components: ### Example - The following example shows how to use cURL with query string authentication and a [database token](/influxdb/cloud-dedicated/admin/tokens/). + The following example shows how to use cURL with query string authentication and a [database token](/influxdb3/cloud-dedicated/admin/tokens/). ```sh ####################################### @@ -2057,8 +2057,8 @@ components: Replace the following: - - **`DATABASE_NAME`**: your InfluxDB Cloud Dedicated database - - **`DATABASE_TOKEN`**: a [database token](/influxdb/cloud-dedicated/admin/tokens/) with sufficient permissions to the database + - **`DATABASE_NAME`**: your InfluxDB 3 Cloud Dedicated database + - **`DATABASE_TOKEN`**: a [database token](/influxdb3/cloud-dedicated/admin/tokens/) with sufficient permissions to the database BearerAuthentication: type: http scheme: bearer @@ -2092,8 +2092,8 @@ components: ``` For examples and more information, see the following: - - [Authenticate API requests](/influxdb/cloud-dedicated/primers/api/v2/#authenticate-api-requests) - - [Manage tokens](/influxdb/cloud-dedicated/admin/tokens/) + - [Authenticate API requests](/influxdb3/cloud-dedicated/primers/api/v2/#authenticate-api-requests) + - [Manage tokens](/influxdb3/cloud-dedicated/admin/tokens/) TokenAuthentication: description: | Use the Token authentication @@ -2124,8 +2124,8 @@ components: ### Related guides - - [Authenticate API requests](/influxdb/cloud-dedicated/primers/api/v2/#authenticate-api-requests) - - [Manage tokens](/influxdb/cloud-dedicated/admin/tokens/) + - [Authenticate API requests](/influxdb3/cloud-dedicated/primers/api/v2/#authenticate-api-requests) + - [Manage tokens](/influxdb3/cloud-dedicated/admin/tokens/) in: header name: Authorization type: apiKey diff --git a/api-docs/clustered/.config.yml b/api-docs/influxdb3/cloud-serverless/.config.yml similarity index 66% rename from api-docs/clustered/.config.yml rename to api-docs/influxdb3/cloud-serverless/.config.yml index e1bf4a4af..1e02231fc 100644 --- a/api-docs/clustered/.config.yml +++ b/api-docs/influxdb3/cloud-serverless/.config.yml @@ -1,9 +1,9 @@ plugins: - - '../openapi/plugins/docs-plugin.js' + - '../../openapi/plugins/docs-plugin.js' extends: - recommended - docs/all -x-influxdata-product-name: InfluxDB v3 Clustered +x-influxdata-product-name: InfluxDB 3 Serverless apis: v2@2: diff --git a/api-docs/cloud-serverless/v1-compatibility/content/info.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/content/info.yml similarity index 70% rename from api-docs/cloud-serverless/v1-compatibility/content/info.yml rename to api-docs/influxdb3/cloud-serverless/v1-compatibility/content/info.yml index 1307b7422..1e2d82d4c 100644 --- a/api-docs/cloud-serverless/v1-compatibility/content/info.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/content/info.yml @@ -1,6 +1,6 @@ -title: InfluxDB v1 HTTP API for InfluxDB Cloud Serverless +title: InfluxDB v1 HTTP API for InfluxDB 3 Cloud Serverless x-influxdata-short-title: v1 Compatibility API -summary: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB v3 Cloud Serverless bucket. +summary: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Cloud Serverless bucket. description: | The InfluxDB 1.x `/write` and `/query` endpoints work with InfluxDB 1.x client libraries and third-party integrations like Grafana and others. 
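To make the v1 compatibility surface concrete, here is a minimal sketch of an InfluxQL query sent to the v1 `/query` endpoint this spec describes. The region URL matches the servers list later in this diff; `BUCKET_NAME`, the DBRP mapping it relies on, and `API_TOKEN` are placeholders.

```sh
# Sketch: run an InfluxQL query against the v1-compatible /query endpoint of
# InfluxDB 3 Cloud Serverless. Assumes a DBRP mapping exists for BUCKET_NAME.
curl --get "https://us-east-1-1.aws.cloud2.influxdata.com/query" \
  --header "Authorization: Token API_TOKEN" \
  --data-urlencode "db=BUCKET_NAME" \
  --data-urlencode "q=SELECT * FROM home WHERE time > now() - 1h"
```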
@@ -9,7 +9,7 @@ description: | #### Related - [InfluxDB `/api/v2` API for InfluxDB Cloud Serverless](/influxdb/cloud-serverless/api/v2/) + [InfluxDB `/api/v2` API for InfluxDB 3 Cloud Serverless](/influxdb3/cloud-serverless/api/v2/) license: name: MIT url: 'https://opensource.org/licenses/MIT' diff --git a/api-docs/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml similarity index 98% rename from api-docs/cloud-serverless/v1-compatibility/swaggerV1Compat.yml rename to api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index 44880f9f3..12332cb35 100644 --- a/api-docs/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -1,6 +1,6 @@ openapi: 3.0.0 info: - title: InfluxDB v1 HTTP API for InfluxDB Cloud Serverless + title: InfluxDB v1 HTTP API for InfluxDB 3 Cloud Serverless version: '' description: | The InfluxDB 1.x `/write` and `/query` endpoints work with InfluxDB 1.x client libraries and third-party integrations like Grafana and others. @@ -10,11 +10,11 @@ info: #### Related - [InfluxDB `/api/v2` API for InfluxDB Cloud Serverless](/influxdb/cloud-serverless/api/v2/) + [InfluxDB `/api/v2` API for InfluxDB 3 Cloud Serverless](/influxdb3/cloud-serverless/api/v2/) license: name: MIT url: https://opensource.org/licenses/MIT - summary: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB v3 Cloud Serverless bucket. + summary: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Cloud Serverless bucket. servers: - url: / security: diff --git a/api-docs/cloud-serverless/v2/content/info.yml b/api-docs/influxdb3/cloud-serverless/v2/content/info.yml similarity index 64% rename from api-docs/cloud-serverless/v2/content/info.yml rename to api-docs/influxdb3/cloud-serverless/v2/content/info.yml index 5aab1cdbb..f175cf3c7 100644 --- a/api-docs/cloud-serverless/v2/content/info.yml +++ b/api-docs/influxdb3/cloud-serverless/v2/content/info.yml @@ -1,9 +1,9 @@ -title: InfluxDB Cloud Serverless API Service +title: InfluxDB 3 Cloud Serverless API Service x-influxdata-short-title: v2 API summary: | - The InfluxDB v2 HTTP API for InfluxDB Cloud Serverless provides a programmatic interface for writing data stored in an InfluxDB Cloud Serverless bucket. + The InfluxDB v2 HTTP API for InfluxDB 3 Cloud Serverless provides a programmatic interface for writing data stored in an InfluxDB 3 Cloud Serverless bucket. description: | - The InfluxDB v2 HTTP API lets you use `/api/v2` endpoints for managing retention policy mappings and writing data stored in an InfluxDB v3 instance. + The InfluxDB v2 HTTP API lets you use `/api/v2` endpoints for managing retention policy mappings and writing data stored in an InfluxDB 3 instance. This documentation is generated from the [InfluxDB OpenAPI specification](https://raw.githubusercontent.com/influxdata/openapi/master/contracts/ref/cloud.yml). 
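As a companion to the summary above, the following sketch writes line protocol to a bucket through the `/api/v2/write` endpoint. The organization name, bucket name, and token are placeholders; `precision` is the usual v2 write option.

```sh
# Sketch: write line protocol to an InfluxDB 3 Cloud Serverless bucket
# through the v2 /api/v2/write endpoint.
# Placeholders: ORG_NAME, BUCKET_NAME, API_TOKEN.
curl --request POST \
  "https://us-east-1-1.aws.cloud2.influxdata.com/api/v2/write?org=ORG_NAME&bucket=BUCKET_NAME&precision=s" \
  --header "Authorization: Token API_TOKEN" \
  --header "Content-Type: text/plain; charset=utf-8" \
  --data-binary 'home,room=kitchen temp=21.5 1700000000'
```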
diff --git a/api-docs/cloud-serverless/v2/content/servers.yml b/api-docs/influxdb3/cloud-serverless/v2/content/servers.yml similarity index 62% rename from api-docs/cloud-serverless/v2/content/servers.yml rename to api-docs/influxdb3/cloud-serverless/v2/content/servers.yml index 1d0433fb4..35fb9f892 100644 --- a/api-docs/cloud-serverless/v2/content/servers.yml +++ b/api-docs/influxdb3/cloud-serverless/v2/content/servers.yml @@ -1,8 +1,8 @@ - url: https://{baseurl} - description: InfluxDB Cloud Serverless API URL + description: InfluxDB 3 Cloud Serverless API URL variables: baseurl: enum: - 'us-east-1-1.aws.cloud2.influxdata.com' default: 'us-east-1-1.aws.cloud2.influxdata.com' - description: InfluxDB Cloud Serverless URL + description: InfluxDB 3 Cloud Serverless URL diff --git a/api-docs/cloud-serverless/v2/content/tag-groups.yml b/api-docs/influxdb3/cloud-serverless/v2/content/tag-groups.yml similarity index 100% rename from api-docs/cloud-serverless/v2/content/tag-groups.yml rename to api-docs/influxdb3/cloud-serverless/v2/content/tag-groups.yml diff --git a/api-docs/cloud-serverless/v2/ref.yml b/api-docs/influxdb3/cloud-serverless/v2/ref.yml similarity index 95% rename from api-docs/cloud-serverless/v2/ref.yml rename to api-docs/influxdb3/cloud-serverless/v2/ref.yml index 4c6c01654..530c06856 100644 --- a/api-docs/cloud-serverless/v2/ref.yml +++ b/api-docs/influxdb3/cloud-serverless/v2/ref.yml @@ -1,8 +1,8 @@ openapi: 3.0.0 info: - title: InfluxDB Cloud Serverless API Service + title: InfluxDB 3 Cloud Serverless API Service description: | - The InfluxDB v2 HTTP API lets you use `/api/v2` endpoints for managing retention policy mappings and writing data stored in an InfluxDB v3 instance. + The InfluxDB v2 HTTP API lets you use `/api/v2` endpoints for managing retention policy mappings and writing data stored in an InfluxDB 3 instance. This documentation is generated from the [InfluxDB OpenAPI specification](https://raw.githubusercontent.com/influxdata/openapi/master/contracts/ref/cloud.yml). @@ -10,50 +10,50 @@ info: name: MIT url: https://opensource.org/licenses/MIT summary: | - The InfluxDB v2 HTTP API for InfluxDB Cloud Serverless provides a programmatic interface for writing data stored in an InfluxDB Cloud Serverless bucket. + The InfluxDB v2 HTTP API for InfluxDB 3 Cloud Serverless provides a programmatic interface for writing data stored in an InfluxDB 3 Cloud Serverless bucket. version: '' servers: - url: https://{baseurl} - description: InfluxDB Cloud Serverless API URL + description: InfluxDB 3 Cloud Serverless API URL variables: baseurl: enum: - us-east-1-1.aws.cloud2.influxdata.com default: us-east-1-1.aws.cloud2.influxdata.com - description: InfluxDB Cloud Serverless URL + description: InfluxDB 3 Cloud Serverless URL security: - TokenAuthentication: [] tags: - description: | ### Write data - InfluxDB Cloud Serverless provides the following HTTP API endpoints for writing data: + InfluxDB 3 Cloud Serverless provides the following HTTP API endpoints for writing data: - **Recommended**: [`/api/v2/write` endpoint](#operation/PostWrite) - for new write workloads or for bringing existing InfluxDB v2 write workloads to v3. - - [`/write` endpoint](#operation/PostLegacyWrite) for bringing existing InfluxDB v1 write workloads to v3. + for new write workloads or for bringing existing InfluxDB v2 write workloads to InfluxDB 3. + - [`/write` endpoint](#operation/PostLegacyWrite) for bringing existing InfluxDB v1 write workloads to InfluxDB 3. 
Both endpoints accept the same line protocol format and process data in the same way. ### Query data - InfluxDB Cloud Serverless provides the following protocols for executing a query: + InfluxDB 3 Cloud Serverless provides the following protocols for executing a query: - - **Recommended**: _Flight+gRPC_ request that contains an SQL or InfluxQL query. See how to [get started querying InfluxDB using Flight and SQL](/influxdb/cloud-serverless/get-started/query/). - - HTTP API [`/query` request](/influxdb/cloud-serverless/api/#operation/GetLegacyQuery) that contains an InfluxQL query. - Use this protocol when bringing existing InfluxDB v1 query workloads to v3. + - **Recommended**: _Flight+gRPC_ request that contains an SQL or InfluxQL query. See how to [get started querying InfluxDB using Flight and SQL](/influxdb3/cloud-serverless/get-started/query/). + - HTTP API [`/query` request](/influxdb3/cloud-serverless/api/#operation/GetLegacyQuery) that contains an InfluxQL query. + Use this protocol when bringing existing InfluxDB v1 query workloads to InfluxDB 3. ### InfluxDB v2 compatibility - The HTTP API [`/api/v2/write` endpoint](#operation/PostWrite) works with the [`Token` authentication scheme](#section/Authentication/TokenAuthentication) and existing InfluxDB 2.x tools and code for [writing data](/influxdb/cloud-serverless/write-data/). + The HTTP API [`/api/v2/write` endpoint](#operation/PostWrite) works with the [`Token` authentication scheme](#section/Authentication/TokenAuthentication) and existing InfluxDB 2.x tools and code for [writing data](/influxdb3/cloud-serverless/write-data/). - See how to [use the InfluxDB v2 HTTP API with InfluxDB Cloud Serverless](/influxdb/cloud-serverless/guides/api-compatibility/v2/). + See how to [use the InfluxDB v2 HTTP API with InfluxDB 3 Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v2/). ### InfluxDB v1 compatibility The HTTP API [`/write` endpoint](#operation/PostLegacyWrite) and [`/query` endpoint](#operation/GetLegacyQuery) work with InfluxDB 1.x username/password [authentication schemes](#section/Authentication/) and existing InfluxDB 1.x tools and code. - See how to [use the InfluxDB v1 HTTP API with InfluxDB Cloud Serverless](/influxdb/cloud-serverless/guides/api-compatibility/v1/). + See how to [use the InfluxDB v1 HTTP API with InfluxDB 3 Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v1/). name: API compatibility x-traitTag: true - description: | @@ -82,7 +82,7 @@ tags: Optionally, when creating an authorization, you can scope it to a specific user. If the user signs in with username and password, creating a _user session_, the session carries the permissions granted by all the user's authorizations. - For more information, see [how to assign a token to a specific user](/influxdb/cloud-serverless/security/tokens/create-token/). + For more information, see [how to assign a token to a specific user](/influxdb3/cloud-serverless/security/tokens/create-token/). To create a user session, use the [`POST /api/v2/signin` endpoint](#operation/PostSignin). 
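A minimal sketch of the user-session flow mentioned above, assuming a user with v1.x-style username and password credentials (both placeholders):

```sh
# Sketch: create a user session with POST /api/v2/signin and store the
# session cookie for later requests. Placeholders: USERNAME, PASSWORD.
curl --request POST "https://us-east-1-1.aws.cloud2.influxdata.com/api/v2/signin" \
  --user "USERNAME:PASSWORD" \
  --cookie-jar ./influxdb-session.txt
```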
### Related endpoints @@ -92,22 +92,22 @@ tags: ### Related guides - - [Authorize API requests](/influxdb/cloud-serverless/api-guide/api_intro/#authentication) - - [Manage API tokens](/influxdb/cloud-serverless/security/tokens/) - - [Assign a token to a specific user](/influxdb/cloud-serverless/security/tokens/create-token/) + - [Authorize API requests](/influxdb3/cloud-serverless/api-guide/api_intro/#authentication) + - [Manage API tokens](/influxdb3/cloud-serverless/security/tokens/) + - [Assign a token to a specific user](/influxdb3/cloud-serverless/security/tokens/create-token/) name: Authorizations (API tokens) - name: Bucket Schemas - description: | - Store your data in InfluxDB [buckets](/influxdb/cloud-serverless/reference/glossary/#bucket). + Store your data in InfluxDB [buckets](/influxdb3/cloud-serverless/reference/glossary/#bucket). A bucket is a named location where time series data is stored. All buckets - have a [retention period](/influxdb/cloud-serverless/reference/glossary/#retention-period), + have a [retention period](/influxdb3/cloud-serverless/reference/glossary/#retention-period), a duration of time that each data point persists. InfluxDB drops all points with timestamps older than the bucket’s retention period. A bucket belongs to an organization. ### Related guides - - [Manage buckets](/influxdb/cloud-serverless/admin/buckets/) + - [Manage buckets](/influxdb3/cloud-serverless/admin/buckets/) name: Buckets - name: Cells - name: Checks @@ -120,10 +120,10 @@ tags: | Query parameter | Value type | Description | |:------------------------ |:--------------------- |:-------------------------------------------| - | `bucket` | string | The bucket name or ID ([find your bucket](/influxdb/cloud-serverless/admin/buckets/view-buckets/). | - | `bucketID` | string | The bucket ID ([find your bucket](/influxdb/cloud-serverless/admin/buckets/view-buckets/). | - | `org` | string | The organization name or ID ([find your organization](/influxdb/cloud-serverless/organizations/view-orgs/). | - | `orgID` | 16-byte string | The organization ID ([find your organization](/influxdb/cloud-serverless/organizations/view-orgs/). | + | `bucket` | string | The bucket name or ID ([find your bucket](/influxdb3/cloud-serverless/admin/buckets/view-buckets/). | + | `bucketID` | string | The bucket ID ([find your bucket](/influxdb3/cloud-serverless/admin/buckets/view-buckets/). | + | `org` | string | The organization name or ID ([find your organization](/influxdb3/cloud-serverless/organizations/view-orgs/). | + | `orgID` | 16-byte string | The organization ID ([find your organization](/influxdb3/cloud-serverless/organizations/view-orgs/). | name: Common parameters x-traitTag: true - name: Config @@ -141,7 +141,7 @@ tags: ### Related guides - - [Database and retention policy mapping](/influxdb/cloud-serverless/reference/api/influxdb-1x/dbrp/) + - [Database and retention policy mapping](/influxdb3/cloud-serverless/reference/api/influxdb-1x/dbrp/) name: DBRPs - description: | Delete data from an InfluxDB bucket. @@ -185,7 +185,7 @@ tags: - name: NotificationEndpoints - name: NotificationRules - description: | - Manage your [organization](/influxdb/cloud-serverless/reference/glossary/#organization). + Manage your [organization](/influxdb3/cloud-serverless/reference/glossary/#organization). An organization is a workspace for a group of users. Organizations can be used to separate different environments, projects, teams or users within InfluxDB. 
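To ground the bucket descriptions above, here is a sketch that lists buckets owned by the organization associated with the API token; `API_TOKEN` is a placeholder and `limit` is the standard paging parameter on this endpoint.

```sh
# Sketch: list up to 20 buckets visible to the API token's organization
# on InfluxDB 3 Cloud Serverless. Placeholder: API_TOKEN.
curl --get "https://us-east-1-1.aws.cloud2.influxdata.com/api/v2/buckets" \
  --header "Authorization: Token API_TOKEN" \
  --data-urlencode "limit=20"
```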
@@ -233,20 +233,20 @@ tags: - description: | Query data stored in a bucket. - - HTTP clients can query the v1 [`/query` endpoint](/influxdb/cloud-serverless/api/#operation/GetLegacyQuery) + - HTTP clients can query the v1 [`/query` endpoint](/influxdb3/cloud-serverless/api/#operation/GetLegacyQuery) using **InfluxQL** and retrieve data in **CSV** or **JSON** format. - _Flight + gRPC_ clients can query using **SQL** or **InfluxQL** and retrieve data in **Arrow** format. #### Related guides - - [Get started querying InfluxDB](/influxdb/cloud-serverless/get-started/query/) - - [Execute queries](/influxdb/cloud-serverless/query-data/execute-queries/) + - [Get started querying InfluxDB](/influxdb3/cloud-serverless/get-started/query/) + - [Execute queries](/influxdb3/cloud-serverless/query-data/execute-queries/) name: Query - description: | - See the [**Get started**](/influxdb/cloud-serverless/get-started/) tutorial + See the [**Get started**](/influxdb3/cloud-serverless/get-started/) tutorial to get up and running authenticating with tokens, writing to buckets, and querying data. - [**InfluxDB API client libraries and Flight clients**](/influxdb/cloud-serverless/reference/client-libraries/) + [**InfluxDB API client libraries and Flight clients**](/influxdb3/cloud-serverless/reference/client-libraries/) are available to integrate InfluxDB with your application. name: Quick start x-traitTag: true @@ -265,7 +265,7 @@ tags: | `201` | Created | Successfully created a resource. The response body may contain details, for example [`/write`](#operation/PostLegacyWrite) and [`/api/v2/write`](#operation/PostWrite) response bodies contain details of partial write failures. | | `204` | No content | The request succeeded. | | `400` | Bad request | InfluxDB can't parse the request due to an incorrect parameter or bad syntax. For _writes_, the error may indicate one of the following problems:
  • Line protocol is malformed. The response body contains the first malformed line in the data and indicates what was expected.
  • The batch contains a point with the same series as other points, but one of the field values has a different data type.
  • `Authorization` header is missing or malformed or the API token doesn't have permission for the operation.
| - | `401` | Unauthorized | May indicate one of the following:
  • `Authorization: Token` header is missing or malformed
  • API token value is missing from the header
  • API token doesn't have permission. For more information about token types and permissions, see [Manage API tokens](/influxdb/cloud-serverless/security/tokens/)
| + | `401` | Unauthorized | May indicate one of the following:
  • `Authorization: Token` header is missing or malformed
  • API token value is missing from the header
  • API token doesn't have permission. For more information about token types and permissions, see [Manage API tokens](/influxdb3/cloud-serverless/security/tokens/)
| | `404` | Not found | Requested resource was not found. `message` in the response body provides details about the requested resource. | | `405` | Method not allowed | The API path doesn't support the HTTP method used in the request--for example, you send a `POST` request to an endpoint that only allows `GET`. | | `413` | Request entity too large | Request payload exceeds the size limit. | @@ -347,12 +347,12 @@ tags: ### Related guides - - [Manage users](/influxdb/cloud-serverless/organizations/users/) + - [Manage users](/influxdb3/cloud-serverless/organizations/users/) name: Users - name: Variables - name: Views - description: | - Write time series data to [buckets](/influxdb/cloud-serverless/reference/glossary/#bucket) using InfluxDB v1 or v2 endpoints. + Write time series data to [buckets](/influxdb3/cloud-serverless/reference/glossary/#bucket) using InfluxDB v1 or v2 endpoints. name: Write paths: /api/v2: @@ -390,7 +390,7 @@ paths: To limit which authorizations are returned, pass query parameters in your request. If no query parameters are passed, InfluxDB returns all authorizations. - InfluxDB Cloud Serverless doesn't expose [API token](/influxdb/cloud-serverless/reference/glossary/#token) + InfluxDB 3 Cloud Serverless doesn't expose [API token](/influxdb3/cloud-serverless/reference/glossary/#token) values in `GET /api/v2/authorizations` responses; returns `token: redacted` for all authorizations. @@ -404,38 +404,38 @@ paths: #### Related guides - - [View tokens](/influxdb/cloud-serverless/security/tokens/view-tokens/) + - [View tokens](/influxdb3/cloud-serverless/security/tokens/view-tokens/) operationId: GetAuthorizations parameters: - $ref: '#/components/parameters/TraceSpan' - description: | A user ID. - Only returns authorizations scoped to the specified [user](/influxdb/cloud-serverless/reference/glossary/#user). + Only returns authorizations scoped to the specified [user](/influxdb3/cloud-serverless/reference/glossary/#user). in: query name: userID schema: type: string - description: | A user name. - Only returns authorizations scoped to the specified [user](/influxdb/cloud-serverless/reference/glossary/#user). + Only returns authorizations scoped to the specified [user](/influxdb3/cloud-serverless/reference/glossary/#user). in: query name: user schema: type: string - - description: An organization ID. Only returns authorizations that belong to the specified [organization](/influxdb/cloud-serverless/reference/glossary/#organization). + - description: An organization ID. Only returns authorizations that belong to the specified [organization](/influxdb3/cloud-serverless/reference/glossary/#organization). in: query name: orgID schema: type: string - description: | An organization name. - Only returns authorizations that belong to the specified [organization](/influxdb/cloud-serverless/reference/glossary/#organization). + Only returns authorizations that belong to the specified [organization](/influxdb3/cloud-serverless/reference/glossary/#organization). in: query name: org schema: type: string - description: | - An API [token](/influxdb/cloud-serverless/reference/glossary/#token) value. + An API [token](/influxdb3/cloud-serverless/reference/glossary/#token) value. Specifies an authorization by its `token` property value and returns the authorization. @@ -469,7 +469,7 @@ paths: #### InfluxDB OSS - **Warning**: The response body contains authorizations with their - [API token](/influxdb/cloud-serverless/reference/glossary/#token) values in clear text. 
+ [API token](/influxdb3/cloud-serverless/reference/glossary/#token) values in clear text. - If the request uses an _[operator token](/influxdb/latest/security/tokens/#operator-token)_, InfluxDB OSS returns authorizations for all organizations in the instance. '400': @@ -489,7 +489,7 @@ paths: post: description: | Creates an authorization and returns the authorization with the - generated API [token](/influxdb/cloud-serverless/reference/glossary/#token). + generated API [token](/influxdb3/cloud-serverless/reference/glossary/#token). Use this endpoint to create an authorization, which generates an API token with permissions to `read` or `write` to a specific resource or `type` of resource. @@ -514,7 +514,7 @@ paths: #### Related guides - - [Create a token](/influxdb/cloud-serverless/security/tokens/create-token/) + - [Create a token](/influxdb3/cloud-serverless/security/tokens/create-token/) operationId: PostAuthorizations parameters: - $ref: '#/components/parameters/TraceSpan' @@ -625,13 +625,13 @@ paths: #### InfluxDB OSS - InfluxDB OSS returns - [API token](/influxdb/cloud-serverless/reference/glossary/#token) values in authorizations. + [API token](/influxdb3/cloud-serverless/reference/glossary/#token) values in authorizations. - If the request uses an _[operator token](/influxdb/latest/security/tokens/#operator-token)_, InfluxDB OSS returns authorizations for all organizations in the instance. #### Related guides - - [View tokens](/influxdb/cloud-serverless/security/tokens/view-tokens/) + - [View tokens](/influxdb3/cloud-serverless/security/tokens/view-tokens/) operationId: GetAuthorizationsID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -726,19 +726,19 @@ paths: /api/v2/buckets: get: description: | - Lists [buckets](/influxdb/cloud-serverless/reference/glossary/#bucket). + Lists [buckets](/influxdb3/cloud-serverless/reference/glossary/#bucket). InfluxDB retrieves buckets owned by the - [organization](/influxdb/cloud-serverless/reference/glossary/#organization) + [organization](/influxdb3/cloud-serverless/reference/glossary/#organization) associated with the authorization - ([API token](/influxdb/cloud-serverless/reference/glossary/#token)). + ([API token](/influxdb3/cloud-serverless/reference/glossary/#token)). To limit which buckets are returned, pass query parameters in your request. If no query parameters are passed, InfluxDB returns all buckets up to the default `limit`. #### InfluxDB OSS - - If you use an _[operator token](/influxdb/cloud-serverless/security/tokens/#operator-token)_ + - If you use an _[operator token](/influxdb3/cloud-serverless/security/tokens/#operator-token)_ to authenticate your request, InfluxDB retrieves resources for _all organizations_ in the instance. To retrieve resources for only a specific organization, use the @@ -749,11 +749,11 @@ paths: | Action | Permission required | |:--------------------------|:--------------------| | Retrieve _user buckets_ | `read-buckets` | - | Retrieve [_system buckets_](/influxdb/cloud-serverless/reference/internals/system-buckets/) | `read-orgs` | + | Retrieve [_system buckets_](/influxdb3/cloud-serverless/reference/internals/system-buckets/) | `read-orgs` | #### Related Guides - - [Manage buckets](/influxdb/cloud-serverless/admin/buckets/) + - [Manage buckets](/influxdb3/cloud-serverless/admin/buckets/) operationId: GetBuckets parameters: - $ref: '#/components/parameters/TraceSpan' @@ -763,7 +763,7 @@ paths: - description: | An organization name. 
- #### InfluxDB Cloud Serverless + #### InfluxDB 3 Cloud Serverless - Doesn't use the `org` parameter or `orgID` parameter. - Lists buckets for the organization associated with the authorization (API token). @@ -774,7 +774,7 @@ paths: - description: | An organization ID. - #### InfluxDB Cloud Serverless + #### InfluxDB 3 Cloud Serverless - Doesn't use the `org` parameter or `orgID` parameter. - Lists buckets for the organization associated with the authorization (API token). @@ -853,10 +853,10 @@ paths: --header "Content-Type: application/json" post: description: | - Creates a [bucket](/influxdb/cloud-serverless/reference/glossary/#bucket) + Creates a [bucket](/influxdb3/cloud-serverless/reference/glossary/#bucket) and returns the bucket resource. The default data - [retention period](/influxdb/cloud-serverless/reference/glossary/#retention-period) + [retention period](/influxdb3/cloud-serverless/reference/glossary/#retention-period) is 30 days. #### InfluxDB OSS @@ -875,8 +875,8 @@ paths: #### Related Guides - - [Create a bucket](/influxdb/cloud-serverless/admin/buckets/create-bucket/) - - [Create bucket CLI reference](/influxdb/cloud-serverless/reference/cli/influx/bucket/create) + - [Create a bucket](/influxdb3/cloud-serverless/admin/buckets/create-bucket/) + - [Create bucket CLI reference](/influxdb3/cloud-serverless/reference/cli/influx/bucket/create) operationId: PostBuckets parameters: - $ref: '#/components/parameters/TraceSpan' @@ -994,7 +994,7 @@ paths: description: | Deletes a bucket and all associated records. - #### InfluxDB Cloud Serverless + #### InfluxDB 3 Cloud Serverless - Does the following when you send a delete request: @@ -1008,7 +1008,7 @@ paths: #### Related Guides - - [Manage buckets](/influxdb/cloud-serverless/admin/buckets/) + - [Manage buckets](/influxdb3/cloud-serverless/admin/buckets/) operationId: DeleteBucketsID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -1025,7 +1025,7 @@ paths: description: | Success. - #### InfluxDB Cloud Serverless + #### InfluxDB 3 Cloud Serverless - The bucket is queued for deletion. '400': content: @@ -1157,14 +1157,14 @@ paths: Use this endpoint to update properties (`name`, `description`, and `retentionRules`) of a bucket. - #### InfluxDB Cloud Serverless + #### InfluxDB 3 Cloud Serverless - Requires the `retentionRules` property in the request body. If you don't provide `retentionRules`, InfluxDB responds with an HTTP `403` status code. #### Related Guides - - [Update a bucket](/influxdb/cloud-serverless/admin/buckets/update-bucket/) + - [Update a bucket](/influxdb3/cloud-serverless/admin/buckets/update-bucket/) operationId: PatchBucketsID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -1302,7 +1302,7 @@ paths: #### Related guides - Use the [`/api/v2/labels` InfluxDB API endpoint](#tag/Labels) to retrieve and manage labels. - - [Manage buckets](/influxdb/cloud-serverless/admin/buckets/) + - [Manage buckets](/influxdb3/cloud-serverless/admin/buckets/) operationId: GetBucketsIDLabels parameters: - $ref: '#/components/parameters/TraceSpan' @@ -1366,7 +1366,7 @@ paths: #### Related guides - Use the [`/api/v2/labels` InfluxDB API endpoint](#tag/Labels) to retrieve and manage labels. 
- - [Manage labels in the InfluxDB UI](/influxdb/cloud-serverless/visualize-data/labels/) + - [Manage labels in the InfluxDB UI](/influxdb3/cloud-serverless/visualize-data/labels/) operationId: PostBucketsIDLabels parameters: - $ref: '#/components/parameters/TraceSpan' @@ -1492,10 +1492,10 @@ paths: description: | Lists all users for a bucket. - InfluxDB [users](/influxdb/cloud-serverless/reference/glossary/#user) have + InfluxDB [users](/influxdb3/cloud-serverless/reference/glossary/#user) have permission to access InfluxDB. - [Members](/influxdb/cloud-serverless/reference/glossary/#member) are users in + [Members](/influxdb3/cloud-serverless/reference/glossary/#member) are users in an organization with access to the specified resource. Use this endpoint to retrieve all users with access to a bucket. @@ -1559,18 +1559,18 @@ paths: description: | Add a user to a bucket and return the new user information. - InfluxDB [users](/influxdb/cloud-serverless/reference/glossary/#user) have + InfluxDB [users](/influxdb3/cloud-serverless/reference/glossary/#user) have permission to access InfluxDB. - [Members](/influxdb/cloud-serverless/reference/glossary/#member) are users in + [Members](/influxdb3/cloud-serverless/reference/glossary/#member) are users in an organization. Use this endpoint to give a user member privileges to a bucket. #### Related guides - - [Manage users](/influxdb/cloud-serverless/users/) - - [Manage members](/influxdb/cloud-serverless/organizations/members/) + - [Manage users](/influxdb3/cloud-serverless/users/) + - [Manage members](/influxdb3/cloud-serverless/organizations/members/) operationId: PostBucketsIDMembers parameters: - $ref: '#/components/parameters/TraceSpan' @@ -1650,8 +1650,8 @@ paths: #### Related guides - - [Manage users](/influxdb/cloud-serverless/users/) - - [Manage members](/influxdb/cloud-serverless/organizations/members/) + - [Manage users](/influxdb3/cloud-serverless/users/) + - [Manage members](/influxdb3/cloud-serverless/organizations/members/) operationId: DeleteBucketsIDMembersID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -1692,13 +1692,13 @@ paths: /api/v2/buckets/{bucketID}/owners: get: description: | - Lists all [owners](/influxdb/cloud-serverless/reference/glossary/#owner) + Lists all [owners](/influxdb3/cloud-serverless/reference/glossary/#owner) of a bucket. Bucket owners have permission to delete buckets and remove user and member permissions from the bucket. - InfluxDB Cloud Serverless uses [`/api/v2/authorizations`](#tag/Authorizations-(API-tokens)) to assign resource permissions; doesn't use `owner` and `member` roles. + InfluxDB 3 Cloud Serverless uses [`/api/v2/authorizations`](#tag/Authorizations-(API-tokens)) to assign resource permissions; doesn't use `owner` and `member` roles. #### Limitations @@ -1766,14 +1766,14 @@ paths: - Buckets post: description: | - Adds an owner to a bucket and returns the [owners](/influxdb/cloud-serverless/reference/glossary/#owner) + Adds an owner to a bucket and returns the [owners](/influxdb3/cloud-serverless/reference/glossary/#owner) with role and user detail. Use this endpoint to create a _resource owner_ for the bucket. Bucket owners have permission to delete buckets and remove user and member permissions from the bucket. - InfluxDB Cloud Serverless uses [`/api/v2/authorizations`](#tag/Authorizations-(API-tokens)) to assign resource permissions; doesn't use `owner` and `member` roles. 
+ InfluxDB 3 Cloud Serverless uses [`/api/v2/authorizations`](#tag/Authorizations-(API-tokens)) to assign resource permissions; doesn't use `owner` and `member` roles. #### Limitations @@ -1792,7 +1792,7 @@ paths: #### Related guides - - [Manage users](/influxdb/cloud-serverless/users/) + - [Manage users](/influxdb3/cloud-serverless/users/) operationId: PostBucketsIDOwners parameters: - $ref: '#/components/parameters/TraceSpan' @@ -1871,7 +1871,7 @@ paths: Use this endpoint to remove a user's `owner` role for a bucket. - InfluxDB Cloud Serverless uses [`/api/v2/authorizations`](#tag/Authorizations-(API-tokens)) to assign resource permissions; doesn't use `owner` and `member` roles. + InfluxDB 3 Cloud Serverless uses [`/api/v2/authorizations`](#tag/Authorizations-(API-tokens)) to assign resource permissions; doesn't use `owner` and `member` roles. #### Limitations @@ -1891,7 +1891,7 @@ paths: #### Related guides - - [Manage users](/influxdb/cloud-serverless/users/) + - [Manage users](/influxdb3/cloud-serverless/users/) operationId: DeleteBucketsIDOwnersID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -1933,7 +1933,7 @@ paths: get: description: | Lists _explicit_ - [schemas](/influxdb/cloud-serverless/reference/glossary/#schema) + [schemas](/influxdb3/cloud-serverless/reference/glossary/#schema) (`"schemaType": "explicit"`) for a bucket. _Explicit_ schemas are used to enforce column names, tags, fields, and data @@ -2012,7 +2012,7 @@ paths: - Bucket Schemas post: description: | - Creates an _explicit_ measurement [schema](/influxdb/cloud-serverless/reference/glossary/#schema) + Creates an _explicit_ measurement [schema](/influxdb3/cloud-serverless/reference/glossary/#schema) for a bucket. _Explicit_ schemas are used to enforce column names, tags, fields, and data @@ -2148,7 +2148,7 @@ paths: /api/v2/buckets/{bucketID}/schema/measurements/{measurementID}: get: description: | - Retrieves an explicit measurement [schema](/influxdb/cloud-serverless/reference/glossary/#schema). + Retrieves an explicit measurement [schema](/influxdb3/cloud-serverless/reference/glossary/#schema). operationId: getMeasurementSchema parameters: - description: | @@ -2204,7 +2204,7 @@ paths: - Bucket Schemas patch: description: | - Updates a measurement [schema](/influxdb/cloud-serverless/reference/glossary/#schema). + Updates a measurement [schema](/influxdb3/cloud-serverless/reference/glossary/#schema). Use this endpoint to update the fields (`name`, `type`, and `dataType`) of a measurement schema. @@ -2215,7 +2215,7 @@ paths: #### Related guides - - [Manage bucket schemas](/influxdb/cloud-serverless/admin/buckets/bucket-schema/). + - [Manage bucket schemas](/influxdb3/cloud-serverless/admin/buckets/bucket-schema/). - [Using bucket schemas](https://www.influxdata.com/blog/new-bucket-schema-option-protect-from-unwanted-schema-changes/). 
operationId: updateMeasurementSchema parameters: @@ -2336,7 +2336,7 @@ paths: #### Related guide - - [Database and retention policy mapping](/influxdb/cloud-serverless/reference/api/influxdb-1x/dbrp/) + - [Database and retention policy mapping](/influxdb3/cloud-serverless/reference/api/influxdb-1x/dbrp/) operationId: GetDBRPs parameters: - $ref: '#/components/parameters/TraceSpan' @@ -2447,7 +2447,7 @@ paths: #### Related guide - - [Database and retention policy mapping](/influxdb/cloud-serverless/reference/api/influxdb-1x/dbrp/) + - [Database and retention policy mapping](/influxdb3/cloud-serverless/reference/api/influxdb-1x/dbrp/) operationId: PostDBRP parameters: - $ref: '#/components/parameters/TraceSpan' @@ -2462,7 +2462,7 @@ paths: Note that _`retention_policy`_ is a required parameter in the request body. The value of _`retention_policy`_ can be any arbitrary `string` name or value, with the default value commonly set as `autogen`. - The value of _`retention_policy`_ isn't a [retention_policy](/influxdb/cloud-serverless/reference/glossary/#retention-policy-rp) + The value of _`retention_policy`_ isn't a [retention_policy](/influxdb3/cloud-serverless/reference/glossary/#retention-policy-rp) required: true responses: '201': @@ -2532,7 +2532,7 @@ paths: #### Related guide - - [Database and retention policy mapping](/influxdb/cloud-serverless/reference/api/influxdb-1x/dbrp/) + - [Database and retention policy mapping](/influxdb3/cloud-serverless/reference/api/influxdb-1x/dbrp/) operationId: DeleteDBRPID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -2595,7 +2595,7 @@ paths: #### Related guide - - [Database and retention policy mapping](/influxdb/cloud-serverless/reference/api/influxdb-1x/dbrp/) + - [Database and retention policy mapping](/influxdb3/cloud-serverless/reference/api/influxdb-1x/dbrp/) operationId: GetDBRPsID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -2704,7 +2704,7 @@ paths: #### Related guide - - [Database and retention policy mapping](/influxdb/cloud-serverless/reference/api/influxdb-1x/dbrp/) + - [Database and retention policy mapping](/influxdb3/cloud-serverless/reference/api/influxdb-1x/dbrp/) required: true responses: '200': @@ -2770,19 +2770,19 @@ paths: description: | Deletes data from a bucket. - **NOTE**: This endpoint has been **disabled** for InfluxDB Cloud Serverless organizations. - See how to [**delete data**](/influxdb/cloud-serverless/write-data/delete-data/). + **NOTE**: This endpoint has been **disabled** for InfluxDB 3 Cloud Serverless organizations. + See how to [**delete data**](/influxdb3/cloud-serverless/write-data/delete-data/). #### Related guides - - [Delete data](/influxdb/cloud-serverless/write-data/delete-data/) + - [Delete data](/influxdb3/cloud-serverless/write-data/delete-data/) operationId: PostDelete parameters: - $ref: '#/components/parameters/TraceSpan' - description: | An organization name or ID. - #### InfluxDB Cloud Serverless + #### InfluxDB 3 Cloud Serverless - Doesn't use the `org` parameter or `orgID` parameter. in: query @@ -2802,7 +2802,7 @@ paths: - description: | An organization ID. - #### InfluxDB Cloud Serverless + #### InfluxDB 3 Cloud Serverless - Doesn't use the `org` parameter or `orgID` parameter. in: query @@ -2835,8 +2835,8 @@ paths: responses: '204': description: | - **NOTE**: This endpoint has been **disabled** for InfluxDB Cloud Serverless organizations. - See how to [**delete data**](/influxdb/cloud-serverless/write-data/delete-data/). 
+ **NOTE**: This endpoint has been **disabled** for InfluxDB 3 Cloud Serverless organizations. + See how to [**delete data**](/influxdb3/cloud-serverless/write-data/delete-data/). '400': content: application/json: @@ -2900,13 +2900,13 @@ paths: /api/v2/orgs: get: description: | - Lists [organizations](/influxdb/cloud-serverless/reference/glossary/#organization/). + Lists [organizations](/influxdb3/cloud-serverless/reference/glossary/#organization/). - InfluxDB Cloud Serverless only returns the organization that owns the token passed in the request. + InfluxDB 3 Cloud Serverless only returns the organization that owns the token passed in the request. #### Related guides - - [View organizations](/influxdb/cloud-serverless/organizations/view-orgs/) + - [View organizations](/influxdb3/cloud-serverless/organizations/view-orgs/) operationId: GetOrgs parameters: - $ref: '#/components/parameters/TraceSpan' @@ -2977,14 +2977,14 @@ paths: - Organizations post: description: | - Creates an [organization](/influxdb/cloud-serverless/reference/glossary/#organization) + Creates an [organization](/influxdb3/cloud-serverless/reference/glossary/#organization) and returns the newly created organization. - InfluxDB Cloud Serverless doesn't allow you to use this endpoint to create organizations. + InfluxDB 3 Cloud Serverless doesn't allow you to use this endpoint to create organizations. #### Related guides - - [Manage organizations](/influxdb/cloud-serverless/organizations) + - [Manage organizations](/influxdb3/cloud-serverless/organizations) operationId: PostOrgs parameters: - $ref: '#/components/parameters/TraceSpan' @@ -3082,7 +3082,7 @@ paths: #### Related guides - - [Delete organizations](/influxdb/cloud-serverless/organizations/delete-orgs/) + - [Delete organizations](/influxdb3/cloud-serverless/organizations/delete-orgs/) operationId: DeleteOrgsID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -3141,7 +3141,7 @@ paths: #### Related guides - - [View organizations](/influxdb/cloud-serverless/organizations/view-orgs/) + - [View organizations](/influxdb3/cloud-serverless/organizations/view-orgs/) operationId: GetOrgsID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -3210,7 +3210,7 @@ paths: #### Related Guides - - [Update an organization](/influxdb/cloud-serverless/organizations/update-org/) + - [Update an organization](/influxdb3/cloud-serverless/organizations/update-org/) operationId: PatchOrgsID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -3286,7 +3286,7 @@ paths: description: | Lists all users that belong to an organization. - InfluxDB Cloud Serverless doesn't use `owner` and `member` roles. + InfluxDB 3 Cloud Serverless doesn't use `owner` and `member` roles. Use [`/api/v2/authorizations`](#tag/Authorizations-(API-tokens)) to manage resource permissions. operationId: GetOrgsIDMembers parameters: @@ -3359,7 +3359,7 @@ paths: description: | Add a user to an organization. - InfluxDB Cloud Serverless doesn't use `owner` and `member` roles. + InfluxDB 3 Cloud Serverless doesn't use `owner` and `member` roles. Use [`/api/v2/authorizations`](#tag/Authorizations-(API-tokens)) to manage resource permissions. operationId: PostOrgsIDMembers parameters: @@ -3436,7 +3436,7 @@ paths: description: | Removes a member from an organization. - InfluxDB Cloud Serverless doesn't use `owner` and `member` roles. + InfluxDB 3 Cloud Serverless doesn't use `owner` and `member` roles. Use [`/api/v2/authorizations`](#tag/Authorizations-(API-tokens)) to manage resource permissions. 
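For the organizations hunks above, a small sketch of listing organizations; per the description, InfluxDB 3 Cloud Serverless returns only the organization that owns the token passed in the request.

```sh
# Sketch: list organizations visible to the API token.
curl --request GET "$INFLUX_URL/api/v2/orgs" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --header "Accept: application/json"
```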
operationId: DeleteOrgsIDMembersID parameters: @@ -3478,7 +3478,7 @@ paths: description: | Lists all owners of an organization. - InfluxDB Cloud Serverless doesn't use `owner` and `member` roles. + InfluxDB 3 Cloud Serverless doesn't use `owner` and `member` roles. Use [`/api/v2/authorizations`](#tag/Authorizations-(API-tokens)) to manage resource permissions. operationId: GetOrgsIDOwners parameters: @@ -3528,7 +3528,7 @@ paths: description: | Adds an owner to an organization. - InfluxDB Cloud Serverless doesn't use `owner` and `member` roles. + InfluxDB 3 Cloud Serverless doesn't use `owner` and `member` roles. Use [`/api/v2/authorizations`](#tag/Authorizations-(API-tokens)) to manage resource permissions. operationId: PostOrgsIDOwners parameters: @@ -3595,10 +3595,10 @@ paths: /api/v2/orgs/{orgID}/owners/{userID}: delete: description: | - Removes an [owner](/influxdb/cloud-serverless/reference/glossary/#owner) from + Removes an [owner](/influxdb3/cloud-serverless/reference/glossary/#owner) from the organization. - InfluxDB Cloud Serverless doesn't use `owner` and `member` roles. + InfluxDB 3 Cloud Serverless doesn't use `owner` and `member` roles. Use [`/api/v2/authorizations`](#tag/Authorizations-(API-tokens)) to manage resource permissions. operationId: DeleteOrgsIDOwnersID parameters: @@ -3760,7 +3760,7 @@ paths: schema: type: string - description: | - Earliest time ([unix timestamp format](/influxdb/cloud-serverless/reference/glossary/#unix-timestamp)) to include in results. + Earliest time ([unix timestamp format](/influxdb3/cloud-serverless/reference/glossary/#unix-timestamp)) to include in results. in: query name: start required: true @@ -3768,7 +3768,7 @@ paths: format: unix timestamp type: integer - description: | - Latest time ([unix timestamp format](/influxdb/cloud-serverless/reference/glossary/#unix-timestamp)) to include in results. + Latest time ([unix timestamp format](/influxdb3/cloud-serverless/reference/glossary/#unix-timestamp)) to include in results. in: query name: stop required: false @@ -3814,9 +3814,9 @@ paths: description: | Retrieves data from buckets. - This endpoint isn't supported in InfluxDB Cloud Serverless. + This endpoint isn't supported in InfluxDB 3 Cloud Serverless. - See how to [query data](/influxdb/cloud-serverless/query-data/). + See how to [query data](/influxdb3/cloud-serverless/query-data/). operationId: PostQuery parameters: - $ref: '#/components/parameters/TraceSpan' @@ -3840,7 +3840,7 @@ paths: - description: | An organization name or ID. - #### InfluxDB Cloud Serverless + #### InfluxDB 3 Cloud Serverless - Doesn't use the `org` parameter or `orgID` parameter. in: query @@ -3850,7 +3850,7 @@ paths: - description: | An organization ID. - #### InfluxDB Cloud Serverless + #### InfluxDB 3 Cloud Serverless - Doesn't use the `org` parameter or `orgID` parameter. in: query @@ -3873,8 +3873,8 @@ paths: schema: type: string description: | - This endpoint isn't supported in InfluxDB Cloud Serverless. - See how to [query data](/influxdb/cloud-serverless/query-data/). + This endpoint isn't supported in InfluxDB 3 Cloud Serverless. + See how to [query data](/influxdb3/cloud-serverless/query-data/). headers: Content-Encoding: description: Lists encodings (usually compression algorithms) that have been applied to the response payload. 
@@ -3915,11 +3915,11 @@ paths: $ref: '#/components/responses/ResourceNotFoundError' '429': description: | - #### InfluxDB Cloud Serverless: + #### InfluxDB 3 Cloud Serverless: - returns this error if a **read** or **write** request exceeds your - plan's [adjustable service quotas](/influxdb/cloud-serverless/account-management/limits/#adjustable-service-quotas) + plan's [adjustable service quotas](/influxdb3/cloud-serverless/account-management/limits/#adjustable-service-quotas) or if a **delete** request exceeds the maximum - [global limit](/influxdb/cloud-serverless/account-management/limits/#global-limits). + [global limit](/influxdb3/cloud-serverless/account-management/limits/#global-limits). headers: Retry-After: description: Non-negative decimal integer indicating seconds to wait before retrying the request. @@ -3937,8 +3937,8 @@ paths: post: deprecated: true description: | - This endpoint isn't supported in InfluxDB Cloud Serverless. - See how to [query data](/influxdb/cloud-serverless/query-data/). + This endpoint isn't supported in InfluxDB 3 Cloud Serverless. + See how to [query data](/influxdb3/cloud-serverless/query-data/). operationId: PostQueryAnalyze parameters: - $ref: '#/components/parameters/TraceSpan' @@ -3981,8 +3981,8 @@ paths: schema: $ref: '#/components/schemas/AnalyzeQueryResponse' description: | - This endpoint isn't supported in InfluxDB Cloud Serverless. - See how to [query data](/influxdb/cloud-serverless/query-data/). + This endpoint isn't supported in InfluxDB 3 Cloud Serverless. + See how to [query data](/influxdb3/cloud-serverless/query-data/). '400': content: application/json: @@ -4041,8 +4041,8 @@ paths: post: deprecated: true description: | - This endpoint isn't supported in InfluxDB Cloud Serverless. - See how to [query data](/influxdb/cloud-serverless/query-data/). + This endpoint isn't supported in InfluxDB 3 Cloud Serverless. + See how to [query data](/influxdb3/cloud-serverless/query-data/). operationId: PostQueryAst parameters: - $ref: '#/components/parameters/TraceSpan' @@ -4064,8 +4064,8 @@ paths: schema: $ref: '#/components/schemas/ASTResponse' description: | - This endpoint isn't supported in InfluxDB Cloud Serverless. - See how to [query data](/influxdb/cloud-serverless/query-data/). + This endpoint isn't supported in InfluxDB 3 Cloud Serverless. + See how to [query data](/influxdb3/cloud-serverless/query-data/). '400': content: application/json: @@ -4104,16 +4104,16 @@ paths: get: deprecated: true description: | - This endpoint isn't supported in InfluxDB Cloud Serverless. - See how to [query data](/influxdb/cloud-serverless/query-data/). + This endpoint isn't supported in InfluxDB 3 Cloud Serverless. + See how to [query data](/influxdb3/cloud-serverless/query-data/). operationId: GetQuerySuggestions parameters: - $ref: '#/components/parameters/TraceSpan' responses: '200': description: | - This endpoint isn't supported in InfluxDB Cloud Serverless. - See how to [query data](/influxdb/cloud-serverless/query-data/). + This endpoint isn't supported in InfluxDB 3 Cloud Serverless. + See how to [query data](/influxdb3/cloud-serverless/query-data/). '301': content: text/html: @@ -4154,8 +4154,8 @@ paths: get: deprecated: true description: | - This endpoint isn't supported in InfluxDB Cloud Serverless. - See how to [query data](/influxdb/cloud-serverless/query-data/). + This endpoint isn't supported in InfluxDB 3 Cloud Serverless. + See how to [query data](/influxdb3/cloud-serverless/query-data/). 
operationId: GetQuerySuggestionsName parameters: - $ref: '#/components/parameters/TraceSpan' @@ -4169,8 +4169,8 @@ paths: responses: '200': description: | - This endpoint isn't supported in InfluxDB Cloud Serverless. - See how to [query data](/influxdb/cloud-serverless/query-data/). + This endpoint isn't supported in InfluxDB 3 Cloud Serverless. + See how to [query data](/influxdb3/cloud-serverless/query-data/). '500': content: application/json: @@ -4226,7 +4226,7 @@ paths: The offset for pagination. The number of records to skip. - For more information about pagination parameters, see [Pagination](/influxdb/cloud-serverless/api/#tag/Pagination). + For more information about pagination parameters, see [Pagination](/influxdb3/cloud-serverless/api/#tag/Pagination). in: query name: offset required: false @@ -4814,7 +4814,7 @@ paths: parameters: - description: | An organization ID. - Only returns stacks owned by the specified [organization](/influxdb/cloud-serverless/reference/glossary/#organization). + Only returns stacks owned by the specified [organization](/influxdb3/cloud-serverless/reference/glossary/#organization). #### InfluxDB Cloud @@ -5119,7 +5119,7 @@ paths: /api/v2/tasks: get: description: | - Retrieves a list of [tasks](/influxdb/cloud-serverless/reference/glossary/#task). + Retrieves a list of [tasks](/influxdb3/cloud-serverless/reference/glossary/#task). To limit which tasks are returned, pass query parameters in your request. If no query parameters are passed, InfluxDB returns all tasks up to the default `limit`. @@ -5127,7 +5127,7 @@ paths: parameters: - $ref: '#/components/parameters/TraceSpan' - description: | - A [task](/influxdb/cloud-serverless/reference/glossary/#task) name. + A [task](/influxdb3/cloud-serverless/reference/glossary/#task) name. Only returns tasks with the specified name. Different tasks may have the same name. in: query @@ -5135,35 +5135,35 @@ paths: schema: type: string - description: | - A [task](/influxdb/cloud-serverless/reference/glossary/#task) ID. + A [task](/influxdb3/cloud-serverless/reference/glossary/#task) ID. Only returns tasks created after the specified task. in: query name: after schema: type: string - description: | - A [user](/influxdb/cloud-serverless/reference/glossary/#user) ID. + A [user](/influxdb3/cloud-serverless/reference/glossary/#user) ID. Only returns tasks owned by the specified user. in: query name: user schema: type: string - description: | - An [organization](/influxdb/cloud-serverless/reference/glossary/#organization) name. + An [organization](/influxdb3/cloud-serverless/reference/glossary/#organization) name. Only returns tasks owned by the specified organization. in: query name: org schema: type: string - description: | - An [organization](/influxdb/cloud-serverless/reference/glossary/#organization) ID. + An [organization](/influxdb3/cloud-serverless/reference/glossary/#organization) ID. Only returns tasks owned by the specified organization. in: query name: orgID schema: type: string - description: | - A [task](/influxdb/cloud-serverless/reference/glossary/#task) status. + A [task](/influxdb3/cloud-serverless/reference/glossary/#task) status. Only returns tasks that have the specified status (`active` or `inactive`). in: query name: status @@ -5173,7 +5173,7 @@ paths: - inactive type: string - description: | - The maximum number of [tasks](/influxdb/cloud-serverless/reference/glossary/#task) to return. + The maximum number of [tasks](/influxdb3/cloud-serverless/reference/glossary/#task) to return. Default is `100`. 
The minimum is `1` and the maximum is `500`. @@ -5212,7 +5212,7 @@ paths: - name type: string - description: | - A [task](/influxdb/cloud-serverless/reference/glossary/#task) type (`basic` or `system`). + A [task](/influxdb3/cloud-serverless/reference/glossary/#task) type (`basic` or `system`). Default is `system`. Specifies the level of detail for tasks in the response. The default (`system`) response contains all the metadata properties for tasks. @@ -5325,7 +5325,7 @@ paths: --header 'Authorization: Token INFLUX_API_TOKEN' post: description: | - Creates a [task](/influxdb/cloud-serverless/reference/glossary/#task) and returns the task. + Creates a [task](/influxdb3/cloud-serverless/reference/glossary/#task) and returns the task. Use this endpoint to create a scheduled task that runs a Flux script. @@ -5470,7 +5470,7 @@ paths: /api/v2/tasks/{taskID}: delete: description: | - Deletes a [task](/influxdb/cloud-serverless/reference/glossary/#task) and associated records. + Deletes a [task](/influxdb3/cloud-serverless/reference/glossary/#task) and associated records. Use this endpoint to delete a task and all associated records (task runs, logs, and labels). Once the task is deleted, InfluxDB cancels all scheduled runs of the task. @@ -5479,7 +5479,7 @@ paths: operationId: DeleteTasksID parameters: - $ref: '#/components/parameters/TraceSpan' - - description: A [task](/influxdb/cloud-serverless/reference/glossary/#task) ID. Specifies the task to delete. + - description: A [task](/influxdb3/cloud-serverless/reference/glossary/#task) ID. Specifies the task to delete. in: path name: taskID required: true @@ -5503,12 +5503,12 @@ paths: - Tasks get: description: | - Retrieves a [task](/influxdb/cloud-serverless/reference/glossary/#task). + Retrieves a [task](/influxdb3/cloud-serverless/reference/glossary/#task). operationId: GetTasksID parameters: - $ref: '#/components/parameters/TraceSpan' - description: | - A [task](/influxdb/cloud-serverless/reference/glossary/#task) ID. + A [task](/influxdb3/cloud-serverless/reference/glossary/#task) ID. Specifies the task to retrieve. in: path name: taskID @@ -5537,7 +5537,7 @@ paths: - Tasks patch: description: | - Updates a [task](/influxdb/cloud-serverless/reference/glossary/#task), + Updates a [task](/influxdb3/cloud-serverless/reference/glossary/#task), and then cancels all scheduled runs of the task. Use this endpoint to set, modify, or clear task properties--for example: `cron`, `name`, `flux`, `status`. @@ -5595,7 +5595,7 @@ paths: parameters: - $ref: '#/components/parameters/TraceSpan' - description: | - A [task](/influxdb/cloud-serverless/reference/glossary/#task) ID. + A [task](/influxdb3/cloud-serverless/reference/glossary/#task) ID. Specifies the task to update. in: path name: taskID @@ -5743,7 +5743,7 @@ paths: /api/v2/tasks/{taskID}/logs: get: description: | - Retrieves a list of all logs for a [task](/influxdb/cloud-serverless/reference/glossary/#task). + Retrieves a list of all logs for a [task](/influxdb3/cloud-serverless/reference/glossary/#task). When an InfluxDB task runs, a “run” record is created in the task’s history. Logs associated with each run provide relevant log messages, timestamps, and the exit status of the run attempt. @@ -5813,7 +5813,7 @@ paths: **Deprecated**: Tasks don't use `owner` and `member` roles. Use [`/api/v2/authorizations`](#tag/Authorizations-(API-tokens)) to assign user permissions. - Lists all users that have the `member` role for the specified [task](/influxdb/cloud-serverless/reference/glossary/#task). 
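The task hunks above document listing and creating tasks; the sketch below shows both, assuming an illustrative organization name, bucket names, and a simple downsampling Flux script. The filter values (`status=active`, `limit=50`) are also illustrative.

```sh
# Sketch: list active tasks, 50 at a time.
curl --get "$INFLUX_URL/api/v2/tasks" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --data-urlencode "status=active" \
  --data-urlencode "limit=50"

# Sketch: create a scheduled task from a Flux script (script and names are illustrative).
curl --request POST "$INFLUX_URL/api/v2/tasks" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{
    "org": "ORG_NAME",
    "status": "active",
    "flux": "option task = {name: \"downsample-example\", every: 1h}\nfrom(bucket: \"example-bucket\") |> range(start: -task.every) |> aggregateWindow(every: 10m, fn: mean) |> to(bucket: \"example-downsampled\")"
  }'
```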
+ Lists all users that have the `member` role for the specified [task](/influxdb3/cloud-serverless/reference/glossary/#task). operationId: GetTasksIDMembers parameters: - $ref: '#/components/parameters/TraceSpan' @@ -5887,7 +5887,7 @@ paths: **Deprecated**: Tasks don't use `owner` and `member` roles. Use [`/api/v2/authorizations`](#tag/Authorizations-(API-tokens)) to assign user permissions. - Removes a member from a [task](/influxdb/cloud-serverless/reference/glossary/#task). + Removes a member from a [task](/influxdb3/cloud-serverless/reference/glossary/#task). operationId: DeleteTasksIDMembersID parameters: - $ref: '#/components/parameters/TraceSpan' @@ -6107,7 +6107,7 @@ paths: minimum: 1 type: integer - description: | - A timestamp ([RFC3339 date/time format](/influxdb/cloud-serverless/reference/glossary/#rfc3339-timestamp)). + A timestamp ([RFC3339 date/time format](/influxdb3/cloud-serverless/reference/glossary/#rfc3339-timestamp)). Only returns runs scheduled after this time. in: query name: afterTime @@ -6115,7 +6115,7 @@ paths: format: date-time type: string - description: | - A timestamp ([RFC3339 date/time format](/influxdb/cloud-serverless/reference/glossary/#rfc3339-timestamp)). + A timestamp ([RFC3339 date/time format](/influxdb3/cloud-serverless/reference/glossary/#rfc3339-timestamp)). Only returns runs scheduled before this time. in: query name: beforeTime @@ -6181,7 +6181,7 @@ paths: /api/v2/tasks/{taskID}/runs/{runID}: delete: description: | - Cancels a running [task](/influxdb/cloud-serverless/reference/glossary/#task). + Cancels a running [task](/influxdb3/cloud-serverless/reference/glossary/#task). Use this endpoint with InfluxDB OSS to cancel a running task. @@ -6391,7 +6391,7 @@ paths: parameters: - $ref: '#/components/parameters/TraceSpan' - description: | - A [task](/influxdb/cloud-serverless/reference/glossary/#task) ID. + A [task](/influxdb3/cloud-serverless/reference/glossary/#task) ID. Specifies the task to retry. in: path name: taskID @@ -6399,7 +6399,7 @@ paths: schema: type: string - description: | - A [task](/influxdb/cloud-serverless/reference/glossary/#task) run ID. + A [task](/influxdb3/cloud-serverless/reference/glossary/#task) run ID. Specifies the task run to retry. To find a task run ID, use the @@ -6922,7 +6922,7 @@ paths: property in the request body. - Some templates may contain queries that use - [secrets](/influxdb/cloud-serverless/reference/glossary/#secret). + [secrets](/influxdb3/cloud-serverless/reference/glossary/#secret). To provide custom secret values, pass the _`secrets`_ property in the request body. Don't expose secret values in templates. @@ -6934,7 +6934,7 @@ paths: #### Rate limits (with InfluxDB Cloud) - Adjustable service quotas apply. - For more information, see [limits and adjustable quotas](/influxdb/cloud-serverless/account-management/limits/). + For more information, see [limits and adjustable quotas](/influxdb3/cloud-serverless/account-management/limits/). operationId: ApplyTemplate @@ -7483,18 +7483,18 @@ paths: description: | Writes data to a bucket. - Use this endpoint to send data in [line protocol](/influxdb/cloud-serverless/reference/syntax/line-protocol/) format to InfluxDB. + Use this endpoint to send data in [line protocol](/influxdb3/cloud-serverless/reference/syntax/line-protocol/) format to InfluxDB. - InfluxDB Cloud Serverless does the following when you send a write request: + InfluxDB 3 Cloud Serverless does the following when you send a write request: 1. Validates the request. - 2. 
If successful, attempts to [ingest data](/influxdb/cloud-serverless/reference/internals/durability/#data-ingest) from the request body; otherwise, responds with an [error status](/influxdb/cloud-serverless/write-data/troubleshoot/#review-http-status-codes). + 2. If successful, attempts to [ingest data](/influxdb3/cloud-serverless/reference/internals/durability/#data-ingest) from the request body; otherwise, responds with an [error status](/influxdb3/cloud-serverless/write-data/troubleshoot/#review-http-status-codes). 3. Ingests or rejects data in the batch and returns one of the following HTTP status codes: - `204 No Content`: All data in the batch is ingested. - `400 Bad Request`: Data from the batch was rejected and not written. The response body indicates if a partial write occurred. - The response body contains error details about [rejected points](/influxdb/cloud-serverless/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. + The response body contains error details about [rejected points](/influxdb3/cloud-serverless/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. Writes are synchronous--the response status indicates the final status of the write and all ingested data is queryable. @@ -7503,22 +7503,22 @@ paths: #### Write endpoints - The [`/write`](#operation/PostLegacyWrite) and [`/api/v2/write`](#operation/PostWrite) endpoints are functionally equivalent for writing data to InfluxDB Cloud Serverless. + The [`/write`](#operation/PostLegacyWrite) and [`/api/v2/write`](#operation/PostWrite) endpoints are functionally equivalent for writing data to InfluxDB 3 Cloud Serverless. - - Use the [`/write` endpoint](#operation/PostLegacyWrite) for [InfluxDB v1 parameter compatibility](/influxdb/cloud-serverless/guides/api-compatibility/v1/). - - Use [`/api/v2/write` endpoint](#operation/PostWrite) for [InfluxDB v2 parameter compatibility](/influxdb/cloud-serverless/guides/api-compatibility/v2/). + - Use the [`/write` endpoint](#operation/PostLegacyWrite) for [InfluxDB v1 parameter compatibility](/influxdb3/cloud-serverless/guides/api-compatibility/v1/). + - Use [`/api/v2/write` endpoint](#operation/PostWrite) for [InfluxDB v2 parameter compatibility](/influxdb3/cloud-serverless/guides/api-compatibility/v2/). #### Rate limits _Write_ rate limits apply. - For more information, see [limits and adjustable quotas](/influxdb/cloud-serverless/admin/billing/limits/). + For more information, see [limits and adjustable quotas](/influxdb3/cloud-serverless/admin/billing/limits/). #### Related guides - - [Get started writing data](/influxdb/cloud-serverless/get-started/write/) - - [Write data with the InfluxDB API](/influxdb/cloud-serverless/get-started/write/) - - [Best practices for writing data](/influxdb/cloud-serverless/write-data/best-practices/) - - [Troubleshoot issues writing data](/influxdb/cloud-serverless/write-data/troubleshoot/) + - [Get started writing data](/influxdb3/cloud-serverless/get-started/write/) + - [Write data with the InfluxDB API](/influxdb3/cloud-serverless/get-started/write/) + - [Best practices for writing data](/influxdb3/cloud-serverless/write-data/best-practices/) + - [Troubleshoot issues writing data](/influxdb3/cloud-serverless/write-data/troubleshoot/) operationId: PostWrite parameters: - $ref: '#/components/parameters/TraceSpan' @@ -7563,14 +7563,14 @@ paths: Writes only return a response body if they fail--for example, due to a formatting problem or quota limit. 
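Since the `/api/v2/write` description above walks through the write flow and status codes, a minimal write sketch follows. The bucket name and points are illustrative; per the description, Cloud Serverless ignores `org`/`orgID` and writes to the organization associated with the token.

```sh
# Sketch: write line protocol to the v2 write endpoint (precision=s for second timestamps).
curl --request POST "$INFLUX_URL/api/v2/write?bucket=example-bucket&precision=s" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --header "Content-Type: text/plain; charset=utf-8" \
  --header "Accept: application/json" \
  --data-binary 'home,room=Kitchen temp=22.5,hum=35.1 1704067200
home,room=Bedroom temp=21.8,hum=36.0 1704067200'
```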
- #### InfluxDB Cloud Serverless + #### InfluxDB 3 Cloud Serverless - Returns only `application/json` for format and limit errors. - Returns only `text/html` for some quota limit errors. #### Related guides - - [Troubleshoot issues writing data](/influxdb/cloud-serverless/write-data/troubleshoot/) + - [Troubleshoot issues writing data](/influxdb3/cloud-serverless/write-data/troubleshoot/) in: header name: Accept schema: @@ -7582,7 +7582,7 @@ paths: - description: | An organization name or ID. - InfluxDB Cloud Serverless writes data to the bucket in the organization associated with the authorization (API token); + InfluxDB 3 Cloud Serverless writes data to the bucket in the organization associated with the authorization (API token); doesn't use the `org` parameter or `orgID` parameter. in: query name: org @@ -7593,7 +7593,7 @@ paths: - description: | An organization ID. - InfluxDB Cloud Serverless writes data to the bucket in the organization associated with the authorization (API token); + InfluxDB 3 Cloud Serverless writes data to the bucket in the organization associated with the authorization (API token); doesn't use the `org` parameter or `orgID` parameter. in: query name: orgID @@ -7625,7 +7625,7 @@ paths: format: byte type: string description: | - In the request body, provide data in [line protocol format](/influxdb/cloud-serverless/reference/syntax/line-protocol/). + In the request body, provide data in [line protocol format](/influxdb3/cloud-serverless/reference/syntax/line-protocol/). To send compressed data, do the following: @@ -7635,7 +7635,7 @@ paths: #### Related guides - - [Best practices for optimizing writes](/influxdb/cloud-serverless/write-data/best-practices/optimize-writes/) + - [Best practices for optimizing writes](/influxdb3/cloud-serverless/write-data/best-practices/optimize-writes/) required: true responses: '204': @@ -7645,7 +7645,7 @@ paths: Data from the batch was rejected and not written. The response body indicates if a partial write occurred or all data was rejected. If a partial write occurred, then some points from the batch are written and queryable. - The response body contains details about the [rejected points](/influxdb/cloud-serverless/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. + The response body contains details about the [rejected points](/influxdb3/cloud-serverless/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. content: application/json: examples: @@ -7703,8 +7703,8 @@ paths: #### InfluxDB Cloud - - Returns this error if a **read** or **write** request exceeds your plan's [adjustable service quotas](/influxdb/cloud-serverless/account-management/limits/#adjustable-service-quotas) - or if a **delete** request exceeds the maximum [global limit](/influxdb/cloud-serverless/account-management/limits/#global-limits). + - Returns this error if a **read** or **write** request exceeds your plan's [adjustable service quotas](/influxdb3/cloud-serverless/account-management/limits/#adjustable-service-quotas) + or if a **delete** request exceeds the maximum [global limit](/influxdb3/cloud-serverless/account-management/limits/#global-limits). - For rate limits that reset automatically, returns a `Retry-After` header that describes when to try the write again. - For limits that can't reset (for example, **cardinality limit**), doesn't return a `Retry-After` header. 
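The request-body hunk above describes sending gzip-compressed line protocol with the `Content-Encoding: gzip` header; a sketch follows, assuming an illustrative `data.lp` file of line protocol.

```sh
# Sketch: compress line protocol, then tell InfluxDB the payload is gzipped.
gzip -c data.lp > data.lp.gz
curl --request POST "$INFLUX_URL/api/v2/write?bucket=example-bucket&precision=s" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --header "Content-Encoding: gzip" \
  --header "Content-Type: text/plain; charset=utf-8" \
  --data-binary @data.lp.gz
```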
@@ -7753,8 +7753,8 @@ paths: #### Related guides - - [Use the InfluxDB v1 HTTP API](/influxdb/cloud-serverless/guides/api-compatibility/v1/) - - [Query data](/influxdb/cloud-serverless/query-data/) + - [Use the InfluxDB v1 HTTP API](/influxdb3/cloud-serverless/guides/api-compatibility/v1/) + - [Query data](/influxdb3/cloud-serverless/query-data/) operationId: GetLegacyQuery parameters: - $ref: '#/components/parameters/TraceSpan' @@ -7765,7 +7765,7 @@ paths: description: | Media type that the client can understand. - **Note**: With `application/csv`, query results include [**unix timestamps**](/influxdb/cloud-serverless/reference/glossary/#unix-timestamp) instead of [RFC3339 timestamps](/influxdb/cloud-serverless/reference/glossary/#rfc3339-timestamp). + **Note**: With `application/csv`, query results include [**unix timestamps**](/influxdb3/cloud-serverless/reference/glossary/#unix-timestamp) instead of [RFC3339 timestamps](/influxdb3/cloud-serverless/reference/glossary/#rfc3339-timestamp). enum: - application/json - application/csv @@ -7800,7 +7800,7 @@ paths: type: string - description: | The database to query data from. - This is mapped to an InfluxDB [bucket](/influxdb/cloud-serverless/reference/glossary/#bucket). + This is mapped to an InfluxDB [bucket](/influxdb3/cloud-serverless/reference/glossary/#bucket). For more information, see [Database and retention policy mapping](/influxdb/cloud/reference/api/influxdb-1x/dbrp/). in: query name: db @@ -7809,7 +7809,7 @@ paths: type: string - description: | The retention policy to query data from. - This is mapped to an InfluxDB [bucket](/influxdb/cloud-serverless/reference/glossary/#bucket). + This is mapped to an InfluxDB [bucket](/influxdb3/cloud-serverless/reference/glossary/#bucket). For more information, see [Database and retention policy mapping](/influxdb/cloud/reference/api/influxdb-1x/dbrp/). in: query name: rp @@ -7823,8 +7823,8 @@ paths: type: string - description: | A unix timestamp precision. - Formats timestamps as [unix (epoch) timestamps](/influxdb/cloud-serverless/reference/glossary/#unix-timestamp) the specified precision - instead of [RFC3339 timestamps](/influxdb/cloud-serverless/reference/glossary/#rfc3339-timestamp) with nanosecond precision. + Formats timestamps as [unix (epoch) timestamps](/influxdb3/cloud-serverless/reference/glossary/#unix-timestamp) the specified precision + instead of [RFC3339 timestamps](/influxdb3/cloud-serverless/reference/glossary/#rfc3339-timestamp) with nanosecond precision. in: query name: epoch schema: @@ -7884,9 +7884,9 @@ paths: description: | #### InfluxDB Cloud: - returns this error if a **read** or **write** request exceeds your - plan's [adjustable service quotas](/influxdb/cloud-serverless/account-management/limits/#adjustable-service-quotas) + plan's [adjustable service quotas](/influxdb3/cloud-serverless/account-management/limits/#adjustable-service-quotas) or if a **delete** request exceeds the maximum - [global limit](/influxdb/cloud-serverless/account-management/limits/#global-limits) + [global limit](/influxdb3/cloud-serverless/account-management/limits/#global-limits) - returns `Retry-After` header that describes when to try the write again. headers: Retry-After: @@ -7910,19 +7910,19 @@ paths: description: | Writes data to a bucket. - Use this endpoint for [InfluxDB v1 parameter compatibility](/influxdb/cloud-serverless/guides/api-compatibility/v1/) when sending data in [line protocol](/influxdb/cloud-serverless/reference/syntax/line-protocol/) format to InfluxDB. 
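For the v1-compatible `/query` hunks above, a hedged sketch of an InfluxQL query. The database, retention policy, and query text are illustrative; `Accept: application/csv` and `epoch` follow the parameter descriptions in the hunk (CSV results use unix timestamps).

```sh
# Sketch: run an InfluxQL query against the v1-compatible /query endpoint.
curl --get "$INFLUX_URL/query" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --header "Accept: application/csv" \
  --data-urlencode "db=example-db" \
  --data-urlencode "rp=autogen" \
  --data-urlencode "epoch=s" \
  --data-urlencode "q=SELECT temp FROM home WHERE time > now() - 1h"
```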
+ Use this endpoint for [InfluxDB v1 parameter compatibility](/influxdb3/cloud-serverless/guides/api-compatibility/v1/) when sending data in [line protocol](/influxdb3/cloud-serverless/reference/syntax/line-protocol/) format to InfluxDB. - InfluxDB Cloud Serverless does the following when you send a write request: + InfluxDB 3 Cloud Serverless does the following when you send a write request: 1. Validates the request. - 2. If successful, attempts to [ingest data](/influxdb/cloud-serverless/reference/internals/durability/#data-ingest) from the request body; otherwise, responds with an [error status](/influxdb/cloud-serverless/write-data/troubleshoot/#review-http-status-codes). + 2. If successful, attempts to [ingest data](/influxdb3/cloud-serverless/reference/internals/durability/#data-ingest) from the request body; otherwise, responds with an [error status](/influxdb3/cloud-serverless/write-data/troubleshoot/#review-http-status-codes). 3. Ingests or rejects data in the batch and returns one of the following HTTP status codes: - `204 No Content`: all data in the batch is ingested - `201 Created`: some points in the batch are ingested and queryable, and some points are rejected - `400 Bad Request`: all data is rejected - The response body contains error details about [rejected points](/influxdb/cloud-serverless/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. + The response body contains error details about [rejected points](/influxdb3/cloud-serverless/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. Writes are synchronous--the response status indicates the final status of the write and all ingested data is queryable. @@ -7931,21 +7931,21 @@ paths: #### Write endpoints - The [`/write`](#operation/PostLegacyWrite) and [`/api/v2/write`](#operation/PostWrite) endpoints are functionally equivalent for writing data to InfluxDB Cloud Serverless. + The [`/write`](#operation/PostLegacyWrite) and [`/api/v2/write`](#operation/PostWrite) endpoints are functionally equivalent for writing data to InfluxDB 3 Cloud Serverless. - - Use the [`/write` endpoint](#operation/PostLegacyWrite) for [InfluxDB v1 parameter compatibility](/influxdb/cloud-serverless/guides/api-compatibility/v1/). - - Use [`/api/v2/write` endpoint](#operation/PostWrite) for [InfluxDB v2 parameter compatibility](/influxdb/cloud-serverless/guides/api-compatibility/v2/). + - Use the [`/write` endpoint](#operation/PostLegacyWrite) for [InfluxDB v1 parameter compatibility](/influxdb3/cloud-serverless/guides/api-compatibility/v1/). + - Use [`/api/v2/write` endpoint](#operation/PostWrite) for [InfluxDB v2 parameter compatibility](/influxdb3/cloud-serverless/guides/api-compatibility/v2/). #### Rate limits _Write_ rate limits apply. - For more information, see [limits and adjustable quotas](/influxdb/cloud-serverless/admin/billing/limits/). + For more information, see [limits and adjustable quotas](/influxdb3/cloud-serverless/admin/billing/limits/). 
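The v1-compatible `/write` description above mirrors `/api/v2/write` with v1 parameters; a sketch follows, assuming an illustrative `db`/`rp` pair that resolves to a bucket through a DBRP mapping.

```sh
# Sketch: write line protocol through the v1-compatible /write endpoint.
curl --request POST "$INFLUX_URL/write?db=example-db&rp=autogen&precision=s" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --data-binary 'home,room=Kitchen temp=22.5 1704067200'
```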
#### Related guides - - [Write data with the InfluxDB API](/influxdb/cloud-serverless/get-started/write/) - - [Optimize writes to InfluxDB](/influxdb/cloud-serverless/write-data/best-practices/optimize-writes/) - - [Troubleshoot issues writing data](/influxdb/cloud-serverless/write-data/troubleshoot/) + - [Write data with the InfluxDB API](/influxdb3/cloud-serverless/get-started/write/) + - [Optimize writes to InfluxDB](/influxdb3/cloud-serverless/write-data/best-practices/optimize-writes/) + - [Troubleshoot issues writing data](/influxdb3/cloud-serverless/write-data/troubleshoot/) parameters: - $ref: '#/components/parameters/TraceSpan' - description: The InfluxDB 1.x username to authenticate the request. @@ -7994,7 +7994,7 @@ paths: responses: '201': description: | - Success ("Created"). Some points in the batch are written and queryable, and some points are rejected. The response body contains details about the [rejected points](/influxdb/cloud-serverless/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. + Success ("Created"). Some points in the batch are written and queryable, and some points are rejected. The response body contains details about the [rejected points](/influxdb3/cloud-serverless/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. content: application/json: examples: @@ -8012,7 +8012,7 @@ paths: description: | All data in the batch is rejected and not written. - The response body contains details about the [rejected points](/influxdb/cloud-serverless/write-data/troubleshoot/#troubleshoot-rejected-points). + The response body contains details about the [rejected points](/influxdb3/cloud-serverless/write-data/troubleshoot/#troubleshoot-rejected-points). content: application/json: examples: @@ -8132,7 +8132,7 @@ components: results don't include the specified record. Use `after` instead of the `offset` parameter. - For more information about pagination parameters, see [Pagination](/influxdb/cloud-serverless/api/#tag/Pagination). + For more information about pagination parameters, see [Pagination](/influxdb3/cloud-serverless/api/#tag/Pagination). in: query name: after required: false @@ -8161,7 +8161,7 @@ components: The offset for pagination. The number of records to skip. - For more information about pagination parameters, see [Pagination](/influxdb/cloud-serverless/api/#tag/Pagination). + For more information about pagination parameters, see [Pagination](/influxdb3/cloud-serverless/api/#tag/Pagination). in: query name: offset required: false @@ -8358,14 +8358,14 @@ components: org: description: | The organization name. - Specifies the [organization](/influxdb/cloud-serverless/reference/glossary/#organization) + Specifies the [organization](/influxdb3/cloud-serverless/reference/glossary/#organization) that the token is scoped to. readOnly: true type: string orgID: description: | The organization ID. - Specifies the [organization](/influxdb/cloud-serverless/reference/glossary/#organization) that the authorization is scoped to. + Specifies the [organization](/influxdb3/cloud-serverless/reference/glossary/#organization) that the authorization is scoped to. type: string permissions: description: | @@ -8379,7 +8379,7 @@ components: description: | The API token. The token value is unique to the authorization. 
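The pagination parameters above (`limit`, `offset`, `after`) apply to list endpoints; below is a sketch of cursor-style paging with `after`, using `/api/v2/buckets` as the illustration and `BUCKET_ID` as a placeholder for the last record ID from the previous page.

```sh
# Sketch: fetch the next page of buckets after a known bucket ID.
curl --get "$INFLUX_URL/api/v2/buckets" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --data-urlencode "limit=20" \
  --data-urlencode "after=BUCKET_ID"
```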
- [API tokens](/influxdb/cloud-serverless/reference/glossary/#token) are + [API tokens](/influxdb3/cloud-serverless/reference/glossary/#token) are used to authenticate and authorize InfluxDB API requests and `influx` CLI commands--after receiving the request, InfluxDB checks that the token is valid and that the `permissions` allow the requested action(s). @@ -8392,13 +8392,13 @@ components: user: description: | The user name. - Specifies the [user](/influxdb/cloud-serverless/reference/glossary/#user) that owns the authorization. + Specifies the [user](/influxdb3/cloud-serverless/reference/glossary/#user) that owns the authorization. If the authorization is _scoped_ to a user, the user; otherwise, the creator of the authorization. readOnly: true type: string userID: - description: The user ID. Specifies the [user](/influxdb/cloud-serverless/reference/glossary/#user) that owns the authorization. If _scoped_, the user that the authorization is scoped to; otherwise, the creator of the authorization. + description: The user ID. Specifies the [user](/influxdb3/cloud-serverless/reference/glossary/#user) that owns the authorization. If _scoped_, the user that the authorization is scoped to; otherwise, the creator of the authorization. readOnly: true type: string type: object @@ -8871,7 +8871,7 @@ components: readOnly: true type: string latestCompleted: - description: A timestamp ([RFC3339 date/time format](/influxdb/cloud-serverless/reference/glossary/#rfc3339-timestamp)) of the latest scheduled and completed run. + description: A timestamp ([RFC3339 date/time format](/influxdb3/cloud-serverless/reference/glossary/#rfc3339-timestamp)) of the latest scheduled and completed run. format: date-time readOnly: true type: string @@ -9124,7 +9124,7 @@ components: orgID: description: | An organization ID. - Identifies the [organization](/influxdb/cloud-serverless/reference/glossary/#organization) that owns the mapping. + Identifies the [organization](/influxdb3/cloud-serverless/reference/glossary/#organization) that owns the mapping. type: string retention_policy: description: | @@ -9162,12 +9162,12 @@ components: org: description: | An organization name. - Identifies the [organization](/influxdb/cloud-serverless/reference/glossary/#organization) that owns the mapping. + Identifies the [organization](/influxdb3/cloud-serverless/reference/glossary/#organization) that owns the mapping. type: string orgID: description: | An organization ID. - Identifies the [organization](/influxdb/cloud-serverless/reference/glossary/#organization) that owns the mapping. + Identifies the [organization](/influxdb3/cloud-serverless/reference/glossary/#organization) that owns the mapping. type: string retention_policy: description: | @@ -9349,7 +9349,7 @@ components: $ref: '#/components/schemas/Links' type: object DateTimeLiteral: - description: Represents an instant in time with nanosecond precision in [RFC3339Nano date/time format](/influxdb/cloud-serverless/reference/glossary/#rfc3339nano-timestamp). + description: Represents an instant in time with nanosecond precision in [RFC3339Nano date/time format](/influxdb3/cloud-serverless/reference/glossary/#rfc3339nano-timestamp). properties: type: $ref: '#/components/schemas/NodeType' @@ -9414,18 +9414,18 @@ components: properties: predicate: description: | - An expression in [delete predicate syntax](/influxdb/cloud-serverless/reference/syntax/delete-predicate/). + An expression in [delete predicate syntax](/influxdb3/cloud-serverless/reference/syntax/delete-predicate/). 
example: tag1="value1" and (tag2="value2" and tag3!="value3") type: string start: description: | - A timestamp ([RFC3339 date/time format](/influxdb/cloud-serverless/reference/glossary/#rfc3339-timestamp)). + A timestamp ([RFC3339 date/time format](/influxdb3/cloud-serverless/reference/glossary/#rfc3339-timestamp)). The earliest time to delete from. format: date-time type: string stop: description: | - A timestamp ([RFC3339 date/time format](/influxdb/cloud-serverless/reference/glossary/#rfc3339-timestamp)). + A timestamp ([RFC3339 date/time format](/influxdb3/cloud-serverless/reference/glossary/#rfc3339-timestamp)). The latest time to delete from. format: date-time type: string @@ -9436,7 +9436,7 @@ components: Dialect: description: | Options for tabular data output. - Default output is [annotated CSV](/influxdb/cloud-serverless/reference/syntax/annotated-csv/#csv-response-format) with headers. + Default output is [annotated CSV](/influxdb3/cloud-serverless/reference/syntax/annotated-csv/#csv-response-format) with headers. For more information about tabular data **dialect**, see [W3 metadata vocabulary for tabular data](https://www.w3.org/TR/2015/REC-tabular-metadata-20151217/#dialect-descriptions). @@ -9448,7 +9448,7 @@ components: #### Related guides - - See [Annotated CSV annotations](/influxdb/cloud-serverless/reference/syntax/annotated-csv/#annotations) for examples and more information. + - See [Annotated CSV annotations](/influxdb3/cloud-serverless/reference/syntax/annotated-csv/#annotations) for examples and more information. For more information about **annotations** in tabular data, see [W3 metadata vocabulary for tabular data](https://www.w3.org/TR/2015/REC-tabular-data-model-20151217/#columns). @@ -9470,7 +9470,7 @@ components: default: RFC3339 description: | The format for timestamps in results. - Default is [`RFC3339` date/time format](/influxdb/cloud-serverless/reference/glossary/#rfc3339-timestamp). + Default is [`RFC3339` date/time format](/influxdb3/cloud-serverless/reference/glossary/#rfc3339-timestamp). To include nanoseconds in timestamps, use `RFC3339Nano`. #### Example formatted date/time values @@ -10819,7 +10819,7 @@ components: readOnly: true type: string time: - description: The time ([RFC3339Nano date/time format](/influxdb/cloud-serverless/reference/glossary/#rfc3339nano-timestamp)) that the event occurred. + description: The time ([RFC3339Nano date/time format](/influxdb3/cloud-serverless/reference/glossary/#rfc3339nano-timestamp)) that the event occurred. example: 2006-01-02T15:04:05.999999999Z07:00 format: date-time readOnly: true @@ -10971,7 +10971,7 @@ components: type: array name: description: | - The [measurement](/influxdb/cloud-serverless/reference/glossary/#measurement) + The [measurement](/influxdb3/cloud-serverless/reference/glossary/#measurement) name. type: string required: @@ -11305,7 +11305,7 @@ components: readOnly: true type: string latestCompleted: - description: A timestamp ([RFC3339 date/time format](/influxdb/cloud-serverless/reference/glossary/#rfc3339-timestamp)) of the latest scheduled and completed run. + description: A timestamp ([RFC3339 date/time format](/influxdb3/cloud-serverless/reference/glossary/#rfc3339-timestamp)) of the latest scheduled and completed run. format: date-time readOnly: true type: string @@ -11670,20 +11670,20 @@ components: type: integer shardGroupDurationSeconds: description: | - The [shard group duration](/influxdb/cloud-serverless/reference/glossary/#shard). 
+ The [shard group duration](/influxdb3/cloud-serverless/reference/glossary/#shard). The number of seconds that each shard group covers. - #### InfluxDB Cloud Serverless + #### InfluxDB 3 Cloud Serverless - Doesn't use `shardGroupDurationsSeconds`. #### InfluxDB OSS - - Default value depends on the [bucket retention period](/influxdb/cloud-serverless/reference/internals/shards/#shard-group-duration). + - Default value depends on the [bucket retention period](/influxdb3/cloud-serverless/reference/internals/shards/#shard-group-duration). #### Related guides - - InfluxDB [shards and shard groups](/influxdb/cloud-serverless/reference/internals/shards/) + - InfluxDB [shards and shard groups](/influxdb3/cloud-serverless/reference/internals/shards/) format: int64 type: integer type: @@ -11813,7 +11813,7 @@ components: [Retention policy](/influxdb/v1.8/concepts/glossary/#retention-policy-rp) is an InfluxDB 1.x concept. The InfluxDB 2.x and Cloud equivalent is - [retention period](/influxdb/cloud-serverless/reference/glossary/#retention-period). + [retention period](/influxdb3/cloud-serverless/reference/glossary/#retention-period). The InfluxDB `/api/v2` API uses `RetentionRules` to configure the retention period. type: string schemaType: @@ -12107,7 +12107,7 @@ components: The shard group duration. The duration or interval (in seconds) that each shard group covers. - #### InfluxDB Cloud Serverless + #### InfluxDB 3 Cloud Serverless - Does not use `shardGroupDurationsSeconds`. format: int64 @@ -12123,9 +12123,9 @@ components: RetentionRules: description: | Retention rules to expire or retain data. - The InfluxDB `/api/v2` API uses `RetentionRules` to configure the [retention period](/influxdb/cloud-serverless/reference/glossary/#retention-period). + The InfluxDB `/api/v2` API uses `RetentionRules` to configure the [retention period](/influxdb3/cloud-serverless/reference/glossary/#retention-period). - #### InfluxDB Cloud Serverless + #### InfluxDB 3 Cloud Serverless - `retentionRules` is required. @@ -12272,13 +12272,13 @@ components: readOnly: true type: array requestedAt: - description: The time ([RFC3339Nano date/time format](/influxdb/cloud-serverless/reference/glossary/#rfc3339nano-timestamp)) the run was manually requested. + description: The time ([RFC3339Nano date/time format](/influxdb3/cloud-serverless/reference/glossary/#rfc3339nano-timestamp)) the run was manually requested. example: 2006-01-02T15:04:05.999999999Z07:00 format: date-time readOnly: true type: string scheduledFor: - description: The time [RFC3339 date/time format](/influxdb/cloud-serverless/reference/glossary/#rfc3339-timestamp) used for the run's `now` option. + description: The time [RFC3339 date/time format](/influxdb3/cloud-serverless/reference/glossary/#rfc3339-timestamp) used for the run's `now` option. format: date-time type: string startedAt: @@ -12303,7 +12303,7 @@ components: properties: scheduledFor: description: | - The time [RFC3339 date/time format](/influxdb/cloud-serverless/reference/glossary/#rfc3339-timestamp) + The time [RFC3339 date/time format](/influxdb3/cloud-serverless/reference/glossary/#rfc3339-timestamp) used for the run's `now` option. Default is the server _now_ time. format: date-time @@ -12892,7 +12892,7 @@ components: description: A description of the task. type: string every: - description: The interval ([duration literal](/influxdb/cloud-serverless/reference/glossary/#rfc3339-timestamp)) at which the task runs. 
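The `RetentionRules` hunk above notes that `retentionRules` is required in InfluxDB 3 Cloud Serverless; a sketch of creating a bucket with an explicit retention period follows. `ORG_ID`, the bucket name, and the 30-day period are illustrative.

```sh
# Sketch: create a bucket whose data expires after 30 days (2592000 seconds).
curl --request POST "$INFLUX_URL/api/v2/buckets" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{
    "orgID": "ORG_ID",
    "name": "example-bucket",
    "retentionRules": [{"type": "expire", "everySeconds": 2592000}]
  }'
```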
`every` also determines when the task first runs, depending on the specified time. + description: The interval ([duration literal](/influxdb3/cloud-serverless/reference/glossary/#rfc3339-timestamp)) at which the task runs. `every` also determines when the task first runs, depending on the specified time. format: duration type: string flux: @@ -12919,7 +12919,7 @@ components: readOnly: true type: string latestCompleted: - description: A timestamp ([RFC3339 date/time format](/influxdb/cloud-serverless/reference/glossary/#rfc3339-timestamp)) of the latest scheduled and completed run. + description: A timestamp ([RFC3339 date/time format](/influxdb3/cloud-serverless/reference/glossary/#rfc3339-timestamp)) of the latest scheduled and completed run. format: date-time readOnly: true type: string @@ -12955,17 +12955,17 @@ components: type: string org: description: | - An [organization](/influxdb/cloud-serverless/reference/glossary/#organization) name. + An [organization](/influxdb3/cloud-serverless/reference/glossary/#organization) name. Specifies the organization that owns the task. type: string orgID: description: | - An [organization](/influxdb/cloud-serverless/reference/glossary/#organization) ID. + An [organization](/influxdb3/cloud-serverless/reference/glossary/#organization) ID. Specifies the organization that owns the task. type: string ownerID: description: | - A [user](/influxdb/cloud-serverless/reference/glossary/#user) ID. + A [user](/influxdb3/cloud-serverless/reference/glossary/#user) ID. Specifies the owner of the task. To find a user ID, you can use the @@ -13396,7 +13396,7 @@ components: The organization owns all resources created by the template. To find your organization, see how to - [view organizations](/influxdb/cloud-serverless/organizations/view-orgs/). + [view organizations](/influxdb3/cloud-serverless/organizations/view-orgs/). 
type: string remotes: description: | @@ -14718,7 +14718,7 @@ components: In the examples, replace the following: - **`EMAIL_ADDRESS`**: InfluxDB Cloud username (the email address the user signed up with) - - **`PASSWORD`**: InfluxDB Cloud [API token](/influxdb/cloud-serverless/reference/glossary/#token) + - **`PASSWORD`**: InfluxDB Cloud [API token](/influxdb3/cloud-serverless/reference/glossary/#token) - **`INFLUX_URL`**: your InfluxDB Cloud URL #### Encode credentials with cURL @@ -14791,7 +14791,7 @@ components: Replace the following: - *`INFLUX_URL`*: your InfluxDB Cloud URL - - *`INFLUX_API_TOKEN`*: your [InfluxDB API token](/influxdb/cloud-serverless/reference/glossary/#token) + - *`INFLUX_API_TOKEN`*: your [InfluxDB API token](/influxdb3/cloud-serverless/reference/glossary/#token) ### Related endpoints @@ -14799,8 +14799,8 @@ components: ### Related guides - - [Authorize API requests](/influxdb/cloud-serverless/api-guide/api_intro/#authentication) - - [Manage API tokens](/influxdb/cloud-serverless/security/tokens/) + - [Authorize API requests](/influxdb3/cloud-serverless/api-guide/api_intro/#authentication) + - [Manage API tokens](/influxdb3/cloud-serverless/security/tokens/) in: header name: Authorization type: apiKey diff --git a/api-docs/cloud-serverless/.config.yml b/api-docs/influxdb3/clustered/.config.yml similarity index 66% rename from api-docs/cloud-serverless/.config.yml rename to api-docs/influxdb3/clustered/.config.yml index 16abffd30..53febaaa1 100644 --- a/api-docs/cloud-serverless/.config.yml +++ b/api-docs/influxdb3/clustered/.config.yml @@ -1,9 +1,9 @@ plugins: - - '../openapi/plugins/docs-plugin.js' + - '../../openapi/plugins/docs-plugin.js' extends: - recommended - docs/all -x-influxdata-product-name: InfluxDB v3 Serverless +x-influxdata-product-name: InfluxDB 3 Clustered apis: v2@2: diff --git a/api-docs/clustered/v1-compatibility/content/info.yml b/api-docs/influxdb3/clustered/v1-compatibility/content/info.yml similarity index 72% rename from api-docs/clustered/v1-compatibility/content/info.yml rename to api-docs/influxdb3/clustered/v1-compatibility/content/info.yml index 7fbb572ef..80ab8c17f 100644 --- a/api-docs/clustered/v1-compatibility/content/info.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/content/info.yml @@ -1,6 +1,6 @@ -title: InfluxDB v1 HTTP API for InfluxDB Clustered +title: InfluxDB v1 HTTP API for InfluxDB 3 Clustered x-influxdata-short-title: v1 Compatibility API -summary: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB v3 Clustered database. +summary: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Clustered database. description: | The InfluxDB 1.x `/write` and `/query` endpoints work with InfluxDB 1.x client libraries and third-party integrations like Grafana and others. 
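The authentication hunks above describe the Basic scheme (credentials encoded by cURL, with an API token used as the password) and the Token scheme; a hedged sketch of both follows, reusing the `EMAIL_ADDRESS`, `INFLUX_API_TOKEN`, and `INFLUX_URL` placeholders from the spec and an illustrative database name.

```sh
# Sketch: Basic scheme on a v1-compatible endpoint; cURL base64-encodes the credentials.
curl --get "$INFLUX_URL/query" \
  --user "EMAIL_ADDRESS:INFLUX_API_TOKEN" \
  --data-urlencode "db=example-db" \
  --data-urlencode "q=SHOW MEASUREMENTS"

# Sketch: Token scheme on a v2 endpoint.
curl "$INFLUX_URL/api/v2/buckets" \
  --header "Authorization: Token INFLUX_API_TOKEN"
```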
@@ -9,7 +9,7 @@ description: | #### Related - [InfluxDB `/api/v2` API for InfluxDB Clustered](/influxdb/clustered/api/v2/) + [InfluxDB `/api/v2` API for InfluxDB 3 Clustered](/influxdb3/clustered/api/v2/) license: name: MIT url: 'https://opensource.org/licenses/MIT' diff --git a/api-docs/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml similarity index 98% rename from api-docs/clustered/v1-compatibility/swaggerV1Compat.yml rename to api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index 8f3a1373a..7bfc57eda 100644 --- a/api-docs/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -1,6 +1,6 @@ openapi: 3.0.0 info: - title: InfluxDB v1 HTTP API for InfluxDB Clustered + title: InfluxDB v1 HTTP API for InfluxDB 3 Clustered version: '' description: | The InfluxDB 1.x `/write` and `/query` endpoints work with InfluxDB 1.x client libraries and third-party integrations like Grafana and others. @@ -10,11 +10,11 @@ info: #### Related - [InfluxDB `/api/v2` API for InfluxDB Clustered](/influxdb/clustered/api/v2/) + [InfluxDB `/api/v2` API for InfluxDB 3 Clustered](/influxdb3/clustered/api/v2/) license: name: MIT url: https://opensource.org/licenses/MIT - summary: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB v3 Clustered database. + summary: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Clustered database. servers: - url: / security: diff --git a/api-docs/clustered/v2/content/info.yml b/api-docs/influxdb3/clustered/v2/content/info.yml similarity index 63% rename from api-docs/clustered/v2/content/info.yml rename to api-docs/influxdb3/clustered/v2/content/info.yml index f1f4be6d3..bc01b768f 100644 --- a/api-docs/clustered/v2/content/info.yml +++ b/api-docs/influxdb3/clustered/v2/content/info.yml @@ -1,8 +1,8 @@ -title: InfluxDB Clustered API Service +title: InfluxDB 3 Clustered API Service x-influxdata-short-title: v2 API -summary: The InfluxDB v2 HTTP API for InfluxDB Clustered provides a v2-compatible programmatic interface for writing data stored in an InfluxDB Clustered database. +summary: The InfluxDB v2 HTTP API for InfluxDB 3 Clustered provides a v2-compatible programmatic interface for writing data stored in an InfluxDB 3 Clustered database. description: | - The InfluxDB v2 HTTP API lets you use `/api/v2` endpoints for managing retention policy mappings and writing data stored in an InfluxDB v3 instance. + The InfluxDB v2 HTTP API lets you use `/api/v2` endpoints for managing retention policy mappings and writing data stored in an InfluxDB 3 instance. This documentation is generated from the [InfluxDB OpenAPI specification](https://raw.githubusercontent.com/influxdata/openapi/master/contracts/ref/cloud.yml). 
diff --git a/api-docs/clustered/v2/content/servers.yml b/api-docs/influxdb3/clustered/v2/content/servers.yml similarity index 58% rename from api-docs/clustered/v2/content/servers.yml rename to api-docs/influxdb3/clustered/v2/content/servers.yml index 5d8667e0b..f3616d33a 100644 --- a/api-docs/clustered/v2/content/servers.yml +++ b/api-docs/influxdb3/clustered/v2/content/servers.yml @@ -1,8 +1,8 @@ - url: https://{baseurl} - description: InfluxDB Clustered API URL + description: InfluxDB 3 Clustered API URL variables: baseurl: enum: - 'cluster-host.com' default: 'cluster-host.com' - description: InfluxDB Clustered URL + description: InfluxDB 3 Clustered URL diff --git a/api-docs/clustered/v2/content/tag-groups.yml b/api-docs/influxdb3/clustered/v2/content/tag-groups.yml similarity index 100% rename from api-docs/clustered/v2/content/tag-groups.yml rename to api-docs/influxdb3/clustered/v2/content/tag-groups.yml diff --git a/api-docs/clustered/v2/ref.yml b/api-docs/influxdb3/clustered/v2/ref.yml similarity index 89% rename from api-docs/clustered/v2/ref.yml rename to api-docs/influxdb3/clustered/v2/ref.yml index b617a7103..71245c9c2 100644 --- a/api-docs/clustered/v2/ref.yml +++ b/api-docs/influxdb3/clustered/v2/ref.yml @@ -1,25 +1,25 @@ openapi: 3.0.0 info: - title: InfluxDB Clustered API Service + title: InfluxDB 3 Clustered API Service version: '' description: | - The InfluxDB v2 HTTP API lets you use `/api/v2` endpoints for managing retention policy mappings and writing data stored in an InfluxDB v3 instance. + The InfluxDB v2 HTTP API lets you use `/api/v2` endpoints for managing retention policy mappings and writing data stored in an InfluxDB 3 instance. This documentation is generated from the [InfluxDB OpenAPI specification](https://raw.githubusercontent.com/influxdata/openapi/master/contracts/ref/cloud.yml). license: name: MIT url: https://opensource.org/licenses/MIT - summary: The InfluxDB v2 HTTP API for InfluxDB Clustered provides a v2-compatible programmatic interface for writing data stored in an InfluxDB Clustered database. + summary: The InfluxDB v2 HTTP API for InfluxDB 3 Clustered provides a v2-compatible programmatic interface for writing data stored in an InfluxDB 3 Clustered database. servers: - url: https://{baseurl} - description: InfluxDB Clustered API URL + description: InfluxDB 3 Clustered API URL variables: baseurl: enum: - cluster-host.com default: cluster-host.com - description: InfluxDB Clustered URL + description: InfluxDB 3 Clustered URL security: - BearerAuthentication: [] - TokenAuthentication: [] @@ -29,32 +29,32 @@ tags: - description: | ### Write data - InfluxDB Clustered provides the following HTTP API endpoints for writing data: + InfluxDB 3 Clustered provides the following HTTP API endpoints for writing data: - - **Recommended**: [`/api/v2/write` endpoint](/influxdb/clustered/api/#operation/PostWrite) for new write workloads or for bringing existing InfluxDB v2 write workloads to v3. - - [`/write` endpoint](/influxdb/clustered/api/#operation/PostLegacyWrite) for bringing existing InfluxDB v1 write workloads to v3. + - **Recommended**: [`/api/v2/write` endpoint](/influxdb3/clustered/api/#operation/PostWrite) for new write workloads or for bringing existing InfluxDB v2 write workloads to InfluxDB 3. + - [`/write` endpoint](/influxdb3/clustered/api/#operation/PostLegacyWrite) for bringing existing InfluxDB v1 write workloads to InfluxDB 3. Both endpoints accept the same line protocol format and process data in the same way. 
### Query data - InfluxDB Clustered provides the following protocols for executing a query: + InfluxDB 3 Clustered provides the following protocols for executing a query: - - **Recommended**: _Flight+gRPC_ request that contains an SQL or InfluxQL query. See how to [get started querying InfluxDB using Flight and SQL](/influxdb/clustered/get-started/query/). - - HTTP API [`/query` request](/influxdb/clustered/api/#operation/GetLegacyQuery) that contains an InfluxQL query. - Use this protocol when bringing existing InfluxDB v1 query workloads to v3. + - **Recommended**: _Flight+gRPC_ request that contains an SQL or InfluxQL query. See how to [get started querying InfluxDB using Flight and SQL](/influxdb3/clustered/get-started/query/). + - HTTP API [`/query` request](/influxdb3/clustered/api/#operation/GetLegacyQuery) that contains an InfluxQL query. + Use this protocol when bringing existing InfluxDB v1 query workloads to InfluxDB 3. ### InfluxDB v2 compatibility - The HTTP API [`/api/v2/write` endpoint](/influxdb/clustered/api/#operation/PostWrite) works with the [`Bearer`](#section/Authentication/BearerAuthentication) and [`Token`](#section/Authentication/TokenAuthentication) authentication schemes and existing InfluxDB 2.x tools and code for [writing data](/influxdb/clustered/write-data/). + The HTTP API [`/api/v2/write` endpoint](/influxdb3/clustered/api/#operation/PostWrite) works with the [`Bearer`](#section/Authentication/BearerAuthentication) and [`Token`](#section/Authentication/TokenAuthentication) authentication schemes and existing InfluxDB 2.x tools and code for [writing data](/influxdb3/clustered/write-data/). - See how to [use the InfluxDB v2 HTTP API with InfluxDB Clustered ](/influxdb/clustered/guides/api-compatibility/v2/). + See how to [use the InfluxDB v2 HTTP API with InfluxDB 3 Clustered ](/influxdb3/clustered/guides/api-compatibility/v2/). ### InfluxDB v1 compatibility - The HTTP API [`/write` endpoint](/influxdb/clustered/api/#operation/PostLegacyWrite) and [`/query` endpoint](/influxdb/clustered/api/#operation/GetLegacyQuery) work with InfluxDB 1.x username/password [authentication schemes](#section/Authentication/) and existing InfluxDB 1.x tools and code. + The HTTP API [`/write` endpoint](/influxdb3/clustered/api/#operation/PostLegacyWrite) and [`/query` endpoint](/influxdb3/clustered/api/#operation/GetLegacyQuery) work with InfluxDB 1.x username/password [authentication schemes](#section/Authentication/) and existing InfluxDB 1.x tools and code. - See how to [use the InfluxDB v1 HTTP API with InfluxDB Clustered ](/influxdb/clustered/guides/api-compatibility/v1/). + See how to [use the InfluxDB v1 HTTP API with InfluxDB 3 Clustered ](/influxdb3/clustered/guides/api-compatibility/v1/). name: API compatibility x-traitTag: true - description: | @@ -100,21 +100,21 @@ tags: - description: | Query data stored in a database. - - HTTP clients can query the v1 [`/query` endpoint](/influxdb/clustered/api/#operation/GetLegacyQuery) + - HTTP clients can query the v1 [`/query` endpoint](/influxdb3/clustered/api/#operation/GetLegacyQuery) using **InfluxQL** and retrieve data in **CSV** or **JSON** format. - - The `/api/v2/query` endpoint can't query InfluxDB Clustered. + - The `/api/v2/query` endpoint can't query InfluxDB 3 Clustered. - _Flight + gRPC_ clients can query using **SQL** or **InfluxQL** and retrieve data in **Arrow** format. 
#### Related guides - - [Get started querying InfluxDB](/influxdb/clustered/get-started/query/) - - [Execute queries](/influxdb/clustered/query-data/execute-queries/) + - [Get started querying InfluxDB](/influxdb3/clustered/get-started/query/) + - [Execute queries](/influxdb3/clustered/query-data/execute-queries/) name: Query - description: | - See the [**Get Started**](/influxdb/clustered/get-started/) tutorial + See the [**Get Started**](/influxdb3/clustered/get-started/) tutorial to get up and running authenticating with tokens, writing to databases, and querying data. - [**InfluxDB API client libraries and Flight clients**](/influxdb/clustered/reference/client-libraries/) + [**InfluxDB API client libraries and Flight clients**](/influxdb3/clustered/reference/client-libraries/) are available to integrate InfluxDB APIs with your application. name: Quick start x-traitTag: true @@ -131,7 +131,7 @@ tags: | `200` | Success | | | `204` | Success. No content | InfluxDB doesn't return data for the request. For example, a successful write request returns `204` status code, acknowledging that data is written and queryable. | | `400` | Bad request | InfluxDB can't parse the request due to an incorrect parameter or bad syntax. If line protocol in the request body is malformed. The response body contains the first malformed line and indicates what was expected. For partial writes, the number of points written and the number of points rejected are also included. | - | `401` | Unauthorized | May indicate one of the following:
  • `Authorization: Token` header is missing or malformed
  • API token value is missing from the header
  • API token doesn't have permission. For more information about token types and permissions, see [Manage tokens](/influxdb/clustered/admin/tokens/)
| + | `401` | Unauthorized | May indicate one of the following:
  • `Authorization: Token` header is missing or malformed
  • API token value is missing from the header
  • API token doesn't have permission. For more information about token types and permissions, see [Manage tokens](/influxdb3/clustered/admin/tokens/)
| | `404` | Not found | Requested resource was not found. `message` in the response body provides details about the requested resource. | | `405` | Method not allowed | The API path doesn't support the HTTP method used in the request--for example, you send a `POST` request to an endpoint that only allows `GET`. | | `413` | Request entity too large | Request payload exceeds the size limit. | @@ -144,7 +144,7 @@ tags: - name: System information endpoints - name: Usage - description: | - Write time series data to [databases](/influxdb/clustered/admin/databases/) using InfluxDB v1 or v2 endpoints. + Write time series data to [databases](/influxdb3/clustered/admin/databases/) using InfluxDB v1 or v2 endpoints. name: Write paths: /ping: @@ -222,12 +222,12 @@ paths: description: | Writes data to a database. - Use this endpoint to send data in [line protocol](/influxdb/clustered/reference/syntax/line-protocol/) format to InfluxDB. + Use this endpoint to send data in [line protocol](/influxdb3/clustered/reference/syntax/line-protocol/) format to InfluxDB. InfluxDB does the following when you send a write request: 1. Validates the request - 2. If successful, attempts to [ingest the data](/influxdb/clustered/reference/internals/durability/#data-ingest); _error_ otherwise. + 2. If successful, attempts to [ingest the data](/influxdb3/clustered/reference/internals/durability/#data-ingest); _error_ otherwise. 3. If successful, responds with _success_ (HTTP `204` status code), acknowledging that the data is written and queryable; _error_ otherwise. To ensure that InfluxDB Cloud handles writes in the order you request them, @@ -235,10 +235,10 @@ paths: #### Related guides - - [Get started writing data](/influxdb/clustered/get-started/write/) - - [Write data](/influxdb/clustered/write-data/) - - [Best practices for writing data](/influxdb/clustered/write-data/best-practices/) - - [Troubleshoot issues writing data](/influxdb/clustered/write-data/troubleshoot/) + - [Get started writing data](/influxdb3/clustered/get-started/write/) + - [Write data](/influxdb3/clustered/write-data/) + - [Best practices for writing data](/influxdb3/clustered/write-data/best-practices/) + - [Troubleshoot issues writing data](/influxdb3/clustered/write-data/troubleshoot/) operationId: PostWrite parameters: - $ref: '#/components/parameters/TraceSpan' @@ -288,7 +288,7 @@ paths: #### Related guides - - [Troubleshoot issues writing data](/influxdb/clustered/write-data/troubleshoot/) + - [Troubleshoot issues writing data](/influxdb3/clustered/write-data/troubleshoot/) in: header name: Accept schema: @@ -343,7 +343,7 @@ paths: format: byte type: string description: | - In the request body, provide data in [line protocol format](/influxdb/clustered/reference/syntax/line-protocol/). + In the request body, provide data in [line protocol format](/influxdb3/clustered/reference/syntax/line-protocol/). 
To send compressed data, do the following: @@ -353,7 +353,7 @@ paths: #### Related guides - - [Best practices for optimizing writes](/influxdb/clustered/write-data/best-practices/optimize-writes/) + - [Best practices for optimizing writes](/influxdb3/clustered/write-data/best-practices/optimize-writes/) required: true responses: '204': @@ -415,8 +415,8 @@ paths: #### InfluxDB Cloud - - Returns this error if a **read** or **write** request exceeds your plan's [adjustable service quotas](/influxdb/clustered/account-management/limits/#adjustable-service-quotas) - or if a **delete** request exceeds the maximum [global limit](/influxdb/clustered/account-management/limits/#global-limits). + - Returns this error if a **read** or **write** request exceeds your plan's [adjustable service quotas](/influxdb3/clustered/account-management/limits/#adjustable-service-quotas) + or if a **delete** request exceeds the maximum [global limit](/influxdb3/clustered/account-management/limits/#global-limits). - For rate limits that reset automatically, returns a `Retry-After` header that describes when to try the write again. - For limits that can't reset (for example, **cardinality limit**), doesn't return a `Retry-After` header. @@ -462,7 +462,7 @@ paths: description: | Media type that the client can understand. - **Note**: With `application/csv`, query results include [**unix timestamps**](/influxdb/clustered/reference/glossary/#unix-timestamp) instead of [RFC3339 timestamps](/influxdb/clustered/reference/glossary/#rfc3339-timestamp). + **Note**: With `application/csv`, query results include [**unix timestamps**](/influxdb3/clustered/reference/glossary/#unix-timestamp) instead of [RFC3339 timestamps](/influxdb3/clustered/reference/glossary/#rfc3339-timestamp). enum: - application/json - application/csv @@ -496,7 +496,7 @@ paths: schema: type: string - description: | - The [database](/influxdb/clustered/admin/databases/) to query data from. + The [database](/influxdb3/clustered/admin/databases/) to query data from. in: query name: db required: true @@ -504,7 +504,7 @@ paths: type: string - description: | The retention policy to query data from. - For more information, see [InfluxQL DBRP naming convention](/influxdb/clustered/admin/databases/create/#influxql-dbrp-naming-convention). + For more information, see [InfluxQL DBRP naming convention](/influxdb3/clustered/admin/databases/create/#influxql-dbrp-naming-convention). in: query name: rp schema: @@ -517,8 +517,8 @@ paths: type: string - description: | A unix timestamp precision. - Formats timestamps as [unix (epoch) timestamps](/influxdb/clustered/reference/glossary/#unix-timestamp) the specified precision - instead of [RFC3339 timestamps](/influxdb/clustered/reference/glossary/#rfc3339-timestamp) with nanosecond precision. + Formats timestamps as [unix (epoch) timestamps](/influxdb3/clustered/reference/glossary/#unix-timestamp) the specified precision + instead of [RFC3339 timestamps](/influxdb3/clustered/reference/glossary/#rfc3339-timestamp) with nanosecond precision. 
in: query name: epoch schema: @@ -575,9 +575,9 @@ paths: description: | #### InfluxDB Cloud: - returns this error if a **read** or **write** request exceeds your - plan's [adjustable service quotas](/influxdb/clustered/account-management/limits/#adjustable-service-quotas) + plan's [adjustable service quotas](/influxdb3/clustered/account-management/limits/#adjustable-service-quotas) or if a **delete** request exceeds the maximum - [global limit](/influxdb/clustered/account-management/limits/#global-limits) + [global limit](/influxdb3/clustered/account-management/limits/#global-limits) - returns `Retry-After` header that describes when to try the write again. headers: Retry-After: @@ -649,7 +649,7 @@ paths: description: | Data from the batch was rejected and not written. The response body indicates if a partial write occurred or all data was rejected. If a partial write occurred, then some points from the batch are written and queryable. - The response body contains details about the [rejected points](/influxdb/clustered/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. + The response body contains details about the [rejected points](/influxdb3/clustered/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. content: application/json: examples: @@ -710,12 +710,12 @@ paths: description: | Writes data to a database. - Use this InfluxDB v1-compatible endpoint to send data in [line protocol](/influxdb/clustered/reference/syntax/line-protocol/) format to InfluxDB using v1 API parameters and authorization. + Use this InfluxDB v1-compatible endpoint to send data in [line protocol](/influxdb3/clustered/reference/syntax/line-protocol/) format to InfluxDB using v1 API parameters and authorization. InfluxDB does the following when you send a write request: 1. Validates the request - 2. If successful, attempts to [ingest the data](/influxdb/clustered/reference/internals/durability/#data-ingest); _error_ otherwise. + 2. If successful, attempts to [ingest the data](/influxdb3/clustered/reference/internals/durability/#data-ingest); _error_ otherwise. 3. If successful, responds with _success_ (HTTP `204` status code), acknowledging that the data is written and queryable; _error_ otherwise. To ensure that InfluxDB handles writes in the order you request them, @@ -723,9 +723,9 @@ paths: #### Related guides - - [Write data with the InfluxDB API](/influxdb/clustered/get-started/write/) - - [Optimize writes to InfluxDB](/influxdb/clustered/write-data/best-practices/optimize-writes/) - - [Troubleshoot issues writing data](/influxdb/clustered/write-data/troubleshoot/) + - [Write data with the InfluxDB API](/influxdb3/clustered/get-started/write/) + - [Optimize writes to InfluxDB](/influxdb3/clustered/write-data/best-practices/optimize-writes/) + - [Troubleshoot issues writing data](/influxdb3/clustered/write-data/troubleshoot/) summary: Write data using the InfluxDB v1 HTTP API tags: - Write @@ -920,7 +920,7 @@ components: orgID: description: | An organization ID. - Identifies the [organization](/influxdb/clustered/reference/glossary/#organization) that owns the mapping. + Identifies the [organization](/influxdb3/clustered/reference/glossary/#organization) that owns the mapping. type: string retention_policy: description: | @@ -958,12 +958,12 @@ components: org: description: | An organization name. - Identifies the [organization](/influxdb/clustered/reference/glossary/#organization) that owns the mapping. 
+ Identifies the [organization](/influxdb3/clustered/reference/glossary/#organization) that owns the mapping. type: string orgID: description: | An organization ID. - Identifies the [organization](/influxdb/clustered/reference/glossary/#organization) that owns the mapping. + Identifies the [organization](/influxdb3/clustered/reference/glossary/#organization) that owns the mapping. type: string retention_policy: description: | @@ -1001,7 +1001,7 @@ components: $ref: '#/components/schemas/DBRP' type: array DateTimeLiteral: - description: Represents an instant in time with nanosecond precision in [RFC3339Nano date/time format](/influxdb/clustered/reference/glossary/#rfc3339nano-timestamp). + description: Represents an instant in time with nanosecond precision in [RFC3339Nano date/time format](/influxdb3/clustered/reference/glossary/#rfc3339nano-timestamp). properties: type: $ref: '#/components/schemas/NodeType' @@ -1025,18 +1025,18 @@ components: properties: predicate: description: | - An expression in [delete predicate syntax](/influxdb/clustered/reference/syntax/delete-predicate/). + An expression in [delete predicate syntax](/influxdb3/clustered/reference/syntax/delete-predicate/). example: tag1="value1" and (tag2="value2" and tag3!="value3") type: string start: description: | - A timestamp ([RFC3339 date/time format](/influxdb/clustered/reference/glossary/#rfc3339-timestamp)). + A timestamp ([RFC3339 date/time format](/influxdb3/clustered/reference/glossary/#rfc3339-timestamp)). The earliest time to delete from. format: date-time type: string stop: description: | - A timestamp ([RFC3339 date/time format](/influxdb/clustered/reference/glossary/#rfc3339-timestamp)). + A timestamp ([RFC3339 date/time format](/influxdb3/clustered/reference/glossary/#rfc3339-timestamp)). The latest time to delete from. format: date-time type: string @@ -1047,7 +1047,7 @@ components: Dialect: description: | Options for tabular data output. - Default output is [annotated CSV](/influxdb/clustered/reference/syntax/annotated-csv/#csv-response-format) with headers. + Default output is [annotated CSV](/influxdb3/clustered/reference/syntax/annotated-csv/#csv-response-format) with headers. For more information about tabular data **dialect**, see [W3 metadata vocabulary for tabular data](https://www.w3.org/TR/2015/REC-tabular-metadata-20151217/#dialect-descriptions). @@ -1059,7 +1059,7 @@ components: #### Related guides - - See [Annotated CSV annotations](/influxdb/clustered/reference/syntax/annotated-csv/#annotations) for examples and more information. + - See [Annotated CSV annotations](/influxdb3/clustered/reference/syntax/annotated-csv/#annotations) for examples and more information. For more information about **annotations** in tabular data, see [W3 metadata vocabulary for tabular data](https://www.w3.org/TR/2015/REC-tabular-data-model-20151217/#columns). @@ -1081,7 +1081,7 @@ components: default: RFC3339 description: | The format for timestamps in results. - Default is [`RFC3339` date/time format](/influxdb/clustered/reference/glossary/#rfc3339-timestamp). + Default is [`RFC3339` date/time format](/influxdb3/clustered/reference/glossary/#rfc3339-timestamp). To include nanoseconds in timestamps, use `RFC3339Nano`. #### Example formatted date/time values @@ -1621,7 +1621,7 @@ components: readOnly: true type: string time: - description: The time ([RFC3339Nano date/time format](/influxdb/clustered/reference/glossary/#rfc3339nano-timestamp)) that the event occurred. 
+ description: The time ([RFC3339Nano date/time format](/influxdb3/clustered/reference/glossary/#rfc3339nano-timestamp)) that the event occurred. example: 2006-01-02T15:04:05.999999999Z07:00 format: date-time readOnly: true @@ -1816,7 +1816,7 @@ components: type: integer shardGroupDurationSeconds: description: | - The [shard group duration](/influxdb/clustered/reference/glossary/#shard). + The [shard group duration](/influxdb3/clustered/reference/glossary/#shard). The number of seconds that each shard group covers. #### InfluxDB Cloud @@ -1825,7 +1825,7 @@ components: #### Related guides - - InfluxDB [shards and shard groups](/influxdb/clustered/reference/internals/shards/) + - InfluxDB [shards and shard groups](/influxdb3/clustered/reference/internals/shards/) format: int64 type: integer type: @@ -1902,7 +1902,7 @@ components: RetentionRules: description: | Retention rules to expire or retain data. - The InfluxDB `/api/v2` API uses `RetentionRules` to configure the [retention period](/influxdb/clustered/reference/glossary/#retention-period). + The InfluxDB `/api/v2` API uses `RetentionRules` to configure the [retention period](/influxdb3/clustered/reference/glossary/#retention-period). #### InfluxDB Cloud @@ -1969,8 +1969,8 @@ components: ### Basic authentication scheme Use the `Authorization` header with the `Basic` scheme to authenticate v1 API `/write` and `/query` requests. - When authenticating requests, InfluxDB Clustered checks that the `password` part of the decoded credential is an authorized [database token](/influxdb/clustered/admin/tokens/#database-tokens). - InfluxDB Clustered ignores the `username` part of the decoded credential. + When authenticating requests, InfluxDB 3 Clustered checks that the `password` part of the decoded credential is an authorized [database token](/influxdb3/clustered/admin/tokens/#database-tokens). + InfluxDB 3 Clustered ignores the `username` part of the decoded credential. ### Syntax @@ -1980,13 +1980,13 @@ components: Replace the following: - - **`[USERNAME]`**: an optional string value (ignored by InfluxDB Clustered). - - **`DATABASE_TOKEN`**: a [database token](/influxdb/clustered/admin/tokens/#database-tokens). + - **`[USERNAME]`**: an optional string value (ignored by InfluxDB 3 Clustered). + - **`DATABASE_TOKEN`**: a [database token](/influxdb3/clustered/admin/tokens/#database-tokens). - Encode the `[USERNAME]:DATABASE_TOKEN` credential using base64 encoding, and then append the encoded string to the `Authorization: Basic` header. 
### Example - The following example shows how to use cURL with the `Basic` authentication scheme and a [database token](/influxdb/clustered/admin/tokens/#database-tokens): + The following example shows how to use cURL with the `Basic` authentication scheme and a [database token](/influxdb3/clustered/admin/tokens/#database-tokens): ```sh ####################################### @@ -2004,8 +2004,8 @@ components: Replace the following: - - **`DATABASE_NAME`**: your InfluxDB Clustered database - - **`DATABASE_TOKEN`**: a [database token](/influxdb/clustered/admin/tokens/#database-tokens) with sufficient permissions to the database + - **`DATABASE_NAME`**: your InfluxDB 3 Clustered database + - **`DATABASE_TOKEN`**: a [database token](/influxdb3/clustered/admin/tokens/#database-tokens) with sufficient permissions to the database scheme: basic type: http QuerystringAuthentication: @@ -2019,7 +2019,7 @@ components: ### Query string authentication In the URL, pass the `p` query parameter to authenticate `/write` and `/query` requests. - When authenticating requests, InfluxDB Clustered checks that `p` (_password_) is an authorized database token and ignores the `u` (_username_) parameter. + When authenticating requests, InfluxDB 3 Clustered checks that `p` (_password_) is an authorized database token and ignores the `u` (_username_) parameter. ### Syntax @@ -2030,7 +2030,7 @@ components: ### Example - The following example shows how to use cURL with query string authentication and a [database token](/influxdb/clustered/admin/tokens/#database-tokens). + The following example shows how to use cURL with query string authentication and a [database token](/influxdb3/clustered/admin/tokens/#database-tokens). ```sh ####################################### @@ -2049,8 +2049,8 @@ components: Replace the following: - - **`DATABASE_NAME`**: your InfluxDB Clustered database - - **`DATABASE_TOKEN`**: a [database token](/influxdb/clustered/admin/tokens/#database-tokens) with sufficient permissions to the database + - **`DATABASE_NAME`**: your InfluxDB 3 Clustered database + - **`DATABASE_TOKEN`**: a [database token](/influxdb3/clustered/admin/tokens/#database-tokens) with sufficient permissions to the database BearerAuthentication: type: http scheme: bearer @@ -2084,8 +2084,8 @@ components: ``` For examples and more information, see the following: - - [Authenticate API requests](/influxdb/clustered/primers/api/v2/#authenticate-api-requests) - - [Manage tokens](/influxdb/clustered/admin/tokens/) + - [Authenticate API requests](/influxdb3/clustered/primers/api/v2/#authenticate-api-requests) + - [Manage tokens](/influxdb3/clustered/admin/tokens/) TokenAuthentication: description: | Use the Token authentication @@ -2116,8 +2116,8 @@ components: ### Related guides - - [Authenticate API requests](/influxdb/clustered/primers/api/v2/#authenticate-api-requests) - - [Manage tokens](/influxdb/clustered/admin/tokens/) + - [Authenticate API requests](/influxdb3/clustered/primers/api/v2/#authenticate-api-requests) + - [Manage tokens](/influxdb3/clustered/admin/tokens/) in: header name: Authorization type: apiKey diff --git a/assets/js/custom-timestamps.js b/assets/js/custom-timestamps.js index 2442931f8..9f8cb6d4c 100644 --- a/assets/js/custom-timestamps.js +++ b/assets/js/custom-timestamps.js @@ -131,7 +131,7 @@ function updateTimestamps (newStartDate) { oldDatePart = datePart(x.rfc3339.replace(/T.*$/, '')); newDatePart = datePart(x.rfc3339_new.replace(/T.*$/, '')); rfc3339Regex = new RegExp( - 
`${oldDatePart.year}(.*)${oldDatePart.month}(.*)${oldDatePart.day}`, + `${oldDatePart.year}(.*?)${oldDatePart.month}(.*?)${oldDatePart.day}`, 'g' ); rfc3339Repl = `${newDatePart.year}$1${newDatePart.month}$2${newDatePart.day}`; diff --git a/assets/styles/layouts/article/_buttons.scss b/assets/styles/layouts/article/_buttons.scss index d05d41265..abd4394c7 100644 --- a/assets/styles/layouts/article/_buttons.scss +++ b/assets/styles/layouts/article/_buttons.scss @@ -56,6 +56,10 @@ a.btn { margin-right: -.65rem; } + &.small { + padding: .4rem 1rem; + } + &.small-plus { padding: .25em; line-height: .65rem; diff --git a/content/influxdb/v1/tools/api.md b/content/influxdb/v1/tools/api.md index 237bfb732..6dc56de32 100644 --- a/content/influxdb/v1/tools/api.md +++ b/content/influxdb/v1/tools/api.md @@ -141,6 +141,7 @@ The `/api/v2/write` endpoint maps the supplied version 1.x database and retentio to access the platform and all its capabilities. InfluxDB v1.x uses a username and password combination when accessing the HTTP APIs. Use the Token schema to provide your InfluxDB 1.x username and password separated by a colon (`:`). + The `Token` scheme is the word `Token`, a space, and your credentials (all case-sensitive). For example: `Authorization: Token username:password`. {{< code-tabs-wrapper >}} diff --git a/content/influxdb/v2/api-guide/influxdb-1x/_index.md b/content/influxdb/v2/api-guide/influxdb-1x/_index.md index 9aa3df46a..ac40b92ba 100644 --- a/content/influxdb/v2/api-guide/influxdb-1x/_index.md +++ b/content/influxdb/v2/api-guide/influxdb-1x/_index.md @@ -36,6 +36,7 @@ Token authentication requires the following credential: - **token**: InfluxDB [API token](/influxdb/v2/admin/tokens/) Use the `Authorization` header with the `Token` scheme to provide your token to InfluxDB. +The `Token` scheme is the word `Token`, a space, and your token (all case-sensitive). #### Syntax diff --git a/content/influxdb3/cloud-dedicated/api/management/_index.html b/content/influxdb3/cloud-dedicated/api/management/_index.html new file mode 100644 index 000000000..66324e6c2 --- /dev/null +++ b/content/influxdb3/cloud-dedicated/api/management/_index.html @@ -0,0 +1,848 @@ +--- +title: InfluxDB 3 Cloud Dedicated Management API +description: The Management API for InfluxDB 3 Cloud Dedicated provides a programmatic interface for managing a Cloud Dedicated instance. +layout: api +weight: 102 +menu: + influxdb3_cloud_dedicated: + parent: InfluxDB HTTP API + name: Management API + identifier: api-reference-management +--- + + + + + + + + + + + InfluxDB + + + + + + + + + + + + + + + + +
+
+
+ + +

InfluxDB 3 Cloud Dedicated Management API

License: MIT

The Management API for InfluxDB 3 Cloud Dedicated provides a programmatic interface for managing a Cloud Dedicated instance.

+

The Management API lets you manage an InfluxDB 3 Cloud Dedicated instance and integrate functions such as creating and managing databases, permissions, and tokens into your workflow or application.

+

This documentation is generated from the +InfluxDB OpenAPI specification.

+

Authentication

The InfluxDB Management API endpoints require the following credentials:

+
    +
  • ACCOUNT_ID: The ID of the account that the cluster belongs to. To view account ID and cluster ID, list cluster details.

    +
  • +
  • CLUSTER_ID: The ID of the cluster that you want to manage. To view account ID and cluster ID, list cluster details.

    +
  • +
  • Authorization MANAGEMENT_TOKEN: the Authorization HTTP header with a management token.

    +

    See how to create a management token.

    +

    By default, management tokens in InfluxDB 3 are short-lived tokens issued by an OAuth2 identity provider that grant a specific user administrative access to your InfluxDB cluster. However, for automation purposes, you can manually create management tokens that authenticate directly with your InfluxDB cluster and do not require human interaction with your identity provider.

    +
  • +
+
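For orientation, here is a minimal sketch of an authenticated Management API request. It follows the same pattern as the request samples later on this page; the placeholder values (and the choice of the databases endpoint) are illustrative assumptions, not prescriptions.

# Assumed placeholders--replace with your own values
ACCOUNT_ID="<your account ID>"
CLUSTER_ID="<your cluster ID>"
MANAGEMENT_TOKEN="<your management token>"

# List databases in the cluster, authenticating with the management token
curl --location "https://console.influxdata.com/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/databases" \
  --header "Accept: application/json" \
  --header "Authorization: Bearer $MANAGEMENT_TOKEN"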

Database tokens

Manage database read/write tokens for a cluster

+

Get all database tokens for a cluster

path Parameters
accountId
required
string <uuid> (UuidV4)

The ID of the account to get the database tokens for

+
clusterId
required
string <uuid> (UuidV4)

The ID of the cluster to get the database tokens for

+

Responses

Request samples

HOST="https://console.influxdata.com"
+
+list_tokens () {
+  local response=$( \
+    curl \
+      --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/tokens" \
+      --header "Accept: application/json" \
+      --header "Authorization: Bearer $MANAGEMENT_TOKEN" \
+  )
+  echo "$response"
+}
+

Response samples

Content type
application/json
[
  • {
    },
  • {
    },
  • {
    }
]

Create a database token

Create a database token for a cluster.

+

The token returned on the accessToken property in the response can be used to authenticate query and write requests to the cluster.

+

Notable behaviors

+
    +
  • InfluxDB might take some time--from a few seconds to a few minutes--to activate and synchronize new tokens. If a new database token doesn't immediately work (you receive a 401 Unauthorized error) for querying or writing, wait and then try your request again.

    +
  • +
  • Token strings are viewable only on token creation and aren't stored by InfluxDB; you can't recover a lost token.

    +
  • +
+

Store secure tokens in a secret store

+

We recommend storing database tokens in a secure secret store. +For example, see how to authenticate Telegraf using tokens in your OS secret store.

+

If you lose a token, delete the token from InfluxDB and create a new one.

+
path Parameters
accountId
required
string <uuid> (UuidV4)

The ID of the account to create the database token for

+
clusterId
required
string <uuid> (UuidV4)

The ID of the cluster to create the database token for

+
Request Body schema: application/json
description
required
string (schemas)

The description of the database token

+
Array of objects (schemas)

The list of permissions the database token allows

+

Responses

Request samples

Content type
application/json
Example
{
  • "description": "Limited Access Token",
  • "permissions": [
    ]
}
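A hedged curl sketch for sending this request body. It assumes the create operation is a POST to the same /tokens path used by the list-tokens sample above, with the same HOST, ACCOUNT_ID, CLUSTER_ID, and MANAGEMENT_TOKEN variables.

HOST="https://console.influxdata.com"

create_token () {
  # The permissions array is collapsed in the sample above; supply your own entries.
  curl \
    --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/tokens" \
    --request POST \
    --header "Accept: application/json" \
    --header "Content-Type: application/json" \
    --header "Authorization: Bearer $MANAGEMENT_TOKEN" \
    --data '{"description": "Limited Access Token", "permissions": []}'
}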

Response samples

Content type
application/json
Example
{
  • "accountId": "11111111-1111-4111-8111-111111111111",
  • "clusterId": "33333333-3333-4333-8333-333333333333",
  • "id": "55555555-5555-4555-8555-555555555555",
  • "description": "Limited Access Token",
  • "permissions": [
    ],
  • "createdAt": "2023-12-21T17:32:28.000Z",
  • "accessToken": "apiv1_5555555555555555555555555555555555555555555555555555555555555555"
}

Get a database token

path Parameters
accountId
required
string <uuid> (UuidV4)

The ID of the account that the database token belongs to

+
clusterId
required
string <uuid> (UuidV4)

The ID of the cluster that the database token belongs to

+
tokenId
required
string <uuid> (UuidV4)

The ID of the database token to get

+

Responses

Request samples

HOST="https://console.influxdata.com"
+
+get_token () {
+  local tokenId=$1
+  local response=$( \
+    curl \
+      --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/tokens/$tokenId" \
+      --header "Accept: application/json" \
+      --header "Authorization: Bearer $MANAGEMENT_TOKEN" \
+  )
+  echo "$response"
+}
+

Response samples

Content type
application/json
Example
{
  • "accountId": "11111111-1111-4111-8111-111111111111",
  • "clusterId": "33333333-3333-4333-8333-333333333333",
  • "id": "55555555-5555-4555-8555-555555555555",
  • "description": "Limited Access Token",
  • "permissions": [
    ],
  • "createdAt": "2023-12-21T17:32:28.000Z"
}

Update a database token

path Parameters
accountId
required
string <uuid> (UuidV4)

The ID of the account that the database token belongs to

+
clusterId
required
string <uuid> (UuidV4)

The ID of the cluster that the database token belongs to

+
tokenId
required
string <uuid> (UuidV4)

The ID of the database token to update

+
Request Body schema: application/json
description
string (schemas)

The description of the database token

+
Array of objects (schemas)

The list of permissions the database token allows

+

Responses

Request samples

Content type
application/json
Example
{
  • "description": "Updated Limited Access Token",
  • "permissions": [
    ]
}

Response samples

Content type
application/json
Example
{
  • "accountId": "11111111-1111-4111-8111-111111111111",
  • "clusterId": "33333333-3333-4333-8333-333333333333",
  • "id": "55555555-5555-4555-8555-555555555555",
  • "description": "Updated Limited Access Token",
  • "permissions": [
    ],
  • "createdAt": "2023-12-21T17:32:28.000Z"
}

Delete a database token

path Parameters
accountId
required
string <uuid> (UuidV4)

The ID of the account that the database token belongs to

+
clusterId
required
string <uuid> (UuidV4)

The ID of the cluster that the database token belongs to

+
tokenId
required
string <uuid> (UuidV4)

The ID of the database token to delete

+

Responses

Request samples

HOST="https://console.influxdata.com"
+
+delete_token () {
+  local tokenId=$1
+  local response=$( \
+    curl \
+      --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/tokens/$tokenId" \
+      --request DELETE \
+      --header "Accept: application/json" \
+      --header "Authorization: Bearer $MANAGEMENT_TOKEN" \
+  )
+  echo "$response"
+}
+

Response samples

Content type
application/json
{
  • "code": 400,
  • "message": "bad request"
}

Databases

Manage databases for a cluster

+

Get all databases for a cluster

path Parameters
accountId
required
string <uuid> (UuidV4)

The ID of the account to get the databases for

+
clusterId
required
string <uuid> (UuidV4)

The ID of the cluster to get the databases for

+

Responses

Request samples

HOST="https://console.influxdata.com"
+
+list_databases () {
+  local response=$( \
+    curl \
+      --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/databases" \
+      --header "Accept: application/json" \
+      --header "Authorization: Bearer $MANAGEMENT_TOKEN" \
+  )
+  echo "$response"
+}
+

Response samples

Content type
application/json
[
  • {
    },
  • {
    }
]

Create a database

path Parameters
accountId
required
string <uuid> (UuidV4)

The ID of the account to create the database for

+
clusterId
required
string <uuid> (UuidV4)

The ID of the cluster to create the database for

+
Request Body schema: application/json
maxColumnsPerTable
integer <int32> (schemas) >= 1
Default: 200

The maximum number of columns per table for the cluster database

+
maxTables
integer <int32> (schemas) >= 1
Default: 500

The maximum number of tables for the cluster database

+
name
required
string (schemas) [ 1 .. 64 ] characters

The name of the cluster database

+
Array of ClusterDatabasePartitionTemplatePartTagValue (object) or ClusterDatabasePartitionTemplatePartTimeFormat (object) or ClusterDatabasePartitionTemplatePartBucket (object) (schemas) [ 1 .. 8 ] items unique

A template for partitioning a cluster database.

+

Each template part is evaluated in sequence, concatenating the final partition key from the output of each part, delimited by the partition key delimiter |.

+

For example, using the partition template below:

+
[
+  {
+    "type": "time",
+    "value": "%Y"
+  },
+  {
+    "type": "tag",
+    "value": "bananas"
+  },
+  {
+    "type": "tag",
+    "value": "plátanos"
+  },
+  {
+    "type": "bucket",
+    "value": {
+      "tagName": "c",
+      "numberOfBuckets": 10
+    }
+  }
+]
+

The following partition keys are derived:

+
    +
  • time=2023-01-01, a=bananas, b=plátanos, c=ananas -> 2023|bananas|plátanos|5
  • +
  • time=2023-01-01, b=plátanos -> 2023|!|plátanos|!
  • +
  • time=2023-01-01, another=cat, b=plátanos -> 2023|!|plátanos|!
  • +
  • time=2023-01-01 -> 2023|!|!|!
  • +
  • time=2023-01-01, a=cat|dog, b=!, c=! -> 2023|cat%7Cdog|%21|8
  • +
  • time=2023-01-01, a=%50, c=%50 -> 2023|%2550|!|9
  • +
  • time=2023-01-01, a=, c= -> 2023|^|!|0
  • +
  • time=2023-01-01, a=<long string> -> 2023|<long string>#|!|!
  • +
  • time=2023-01-01, c=<long string> -> 2023|!|!|<bucket ID for untruncated long string>
  • +
+

When using the default partitioning template (YYYY-MM-DD), no encoding is necessary because the derived partition key contains a single part and no reserved characters. [TemplatePart::Bucket] parts are, by definition, always within the part length limit and contain no restricted characters, so they are not percent-encoded or truncated either.

+
retentionPeriod
integer <int64> (schemas) >= 0
Default: 0

The retention period of the cluster database in nanoseconds, if applicable

+

If the retention period is not set or is set to 0, the database will have infinite retention

+

Responses

Request samples

Content type
application/json
Example
{
  • "name": "DatabaseOne"
}
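A hedged curl sketch for sending this request body. It assumes the create operation is a POST to the same /databases path used by the list-databases sample above, with the usual HOST, ACCOUNT_ID, CLUSTER_ID, and MANAGEMENT_TOKEN variables.

HOST="https://console.influxdata.com"

create_database () {
  curl \
    --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/databases" \
    --request POST \
    --header "Accept: application/json" \
    --header "Content-Type: application/json" \
    --header "Authorization: Bearer $MANAGEMENT_TOKEN" \
    --data '{"name": "DatabaseOne"}'
}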

Response samples

Content type
application/json
Example
{
  • "accountId": "11111111-1111-4111-8111-111111111111",
  • "clusterId": "33333333-3333-4333-8333-333333333333",
  • "name": "DatabaseOne",
  • "maxTables": 500,
  • "maxColumnsPerTable": 200,
  • "retentionPeriod": 0
}

Update a database

path Parameters
accountId
required
string <uuid> (UuidV4)

The ID of the account that the database belongs to

+
clusterId
required
string <uuid> (UuidV4)

The ID of the cluster that the database belongs to

+
databaseName
required
string (ClusterDatabaseName) [ 1 .. 64 ] characters

The name of the database to update

+
Request Body schema: application/json
maxColumnsPerTable
integer <int32> (schemas) >= 1
Default: 200

The maximum number of columns per table for the cluster database

+
maxTables
integer <int32> (schemas) >= 1
Default: 500

The maximum number of tables for the cluster database

+
retentionPeriod
integer <int64> (schemas) >= 0
Default: 0

The retention period of the cluster database in nanoseconds, if applicable

+

If the retention period is not set or is set to 0, the database will have infinite retention

+

Responses

Request samples

Content type
application/json
Example
{
  • "maxTables": 300,
  • "maxColumnsPerTable": 150,
  • "retentionPeriod": 600000000000
}

Response samples

Content type
application/json
Example
{
  • "accountId": "11111111-1111-4111-8111-111111111111",
  • "clusterId": "33333333-3333-4333-8333-333333333333",
  • "name": "DatabaseOne",
  • "maxTables": 300,
  • "maxColumnsPerTable": 150,
  • "retentionPeriod": 600000000000
}

Delete a database

path Parameters
accountId
required
string <uuid> (UuidV4)

The ID of the account that the database belongs to

+
clusterId
required
string <uuid> (UuidV4)

The ID of the cluster that the database belongs to

+
databaseName
required
string (ClusterDatabaseName) [ 1 .. 64 ] characters

The name of the database to delete

+

Responses

Request samples

HOST="https://console.influxdata.com"
+
+delete_database () {
+  local databaseName=$1
+  local response=$( \
+    curl \
+      --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/databases/$databaseName" \
+      --request DELETE \
+      --header "Accept: application/json" \
+      --header "Authorization: Bearer $MANAGEMENT_TOKEN" \
+  )
+  echo "$response"
+}
+

Response samples

Content type
application/json
{
  • "code": 400,
  • "message": "bad request"
}

Tables

Manage tables in a database

+

Create a database table

path Parameters
accountId
required
string <uuid> (UuidV4)

The ID of the account to create the database table for

+
clusterId
required
string <uuid> (UuidV4)

The ID of the cluster to create the database table for

+
databaseName
required
string (ClusterDatabaseName) [ 1 .. 64 ] characters

The name of the database to create the database table for

+
Request Body schema: application/json
name
required
string (schemas) non-empty

The name of the cluster database table

+
Array of ClusterDatabasePartitionTemplatePartTagValue (object) or ClusterDatabasePartitionTemplatePartTimeFormat (object) or ClusterDatabasePartitionTemplatePartBucket (object) (schemas) [ 1 .. 8 ] items unique

A template for partitioning a cluster database.

+

Each template part is evaluated in sequence, concatenating the final partition key from the output of each part, delimited by the partition key delimiter |.

+

For example, using the partition template below:

+
[
+  {
+    "type": "time",
+    "value": "%Y"
+  },
+  {
+    "type": "tag",
+    "value": "bananas"
+  },
+  {
+    "type": "tag",
+    "value": "plátanos"
+  },
+  {
+    "type": "bucket",
+    "value": {
+      "tagName": "c",
+      "numberOfBuckets": 10
+    }
+  }
+]
+

The following partition keys are derived:

+
    +
  • time=2023-01-01, a=bananas, b=plátanos, c=ananas -> 2023|bananas|plátanos|5
  • +
  • time=2023-01-01, b=plátanos -> 2023|!|plátanos|!
  • +
  • time=2023-01-01, another=cat, b=plátanos -> 2023|!|plátanos|!
  • +
  • time=2023-01-01 -> 2023|!|!|!
  • +
  • time=2023-01-01, a=cat|dog, b=!, c=! -> 2023|cat%7Cdog|%21|8
  • +
  • time=2023-01-01, a=%50, c=%50 -> 2023|%2550|!|9
  • +
  • time=2023-01-01, a=, c= -> 2023|^|!|0
  • +
  • time=2023-01-01, a=<long string> -> 2023|<long string>#|!|!
  • +
  • time=2023-01-01, c=<long string> -> 2023|!|!|<bucket ID for untruncated long string>
  • +
+

When using the default partitioning template (YYYY-MM-DD), no encoding is necessary because the derived partition key contains a single part and no reserved characters. [TemplatePart::Bucket] parts are, by definition, always within the part length limit and contain no restricted characters, so they are not percent-encoded or truncated either.

+

Responses

Request samples

Content type
application/json
Example
{
  • "name": "TableOne"
}
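A hedged curl sketch for sending this request body. The /databases/$databaseName/tables path is an assumption inferred from the path parameters listed above; confirm it against the OpenAPI specification before relying on it.

HOST="https://console.influxdata.com"

create_table () {
  local databaseName=$1
  curl \
    --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/databases/$databaseName/tables" \
    --request POST \
    --header "Accept: application/json" \
    --header "Content-Type: application/json" \
    --header "Authorization: Bearer $MANAGEMENT_TOKEN" \
    --data '{"name": "TableOne"}'
}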

Response samples

Content type
application/json
Example
{
  • "accountId": "11111111-1111-4111-8111-111111111111",
  • "clusterId": "33333333-3333-4333-8333-333333333333",
  • "databaseName": "DatabaseOne",
  • "name": "TableOne"
}
+ + + + + diff --git a/content/influxdb3/cloud-dedicated/api/v1-compatibility/_index.html b/content/influxdb3/cloud-dedicated/api/v1-compatibility/_index.html new file mode 100644 index 000000000..cb1826f4e --- /dev/null +++ b/content/influxdb3/cloud-dedicated/api/v1-compatibility/_index.html @@ -0,0 +1,609 @@ +--- +title: InfluxDB v1 HTTP API for InfluxDB 3 Cloud Dedicated +description: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Cloud Dedicated database. +layout: api +menu: + influxdb3_cloud_dedicated: + parent: InfluxDB HTTP API + name: v1 Compatibility API + identifier: api-reference-v1-compatibility +weight: 304 +aliases: + - /influxdb/cloud-dedicated/api/v1/ +--- + + + + + + + + + + + InfluxDB + + + + + + + + + + + + + + + + +
+
+
+ + +

InfluxDB v1 HTTP API for InfluxDB 3 Cloud Dedicated

License: MIT

The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Cloud Dedicated database.

+

The InfluxDB 1.x /write and /query endpoints work with InfluxDB 1.x client libraries and third-party integrations like Grafana and others.

+

This documentation is generated from the +InfluxDB OpenAPI specification.

+ +

InfluxDB /api/v2 API for InfluxDB 3 Cloud Dedicated

+

Authentication

The InfluxDB 1.x API requires authentication for all requests. +InfluxDB Cloud uses InfluxDB API tokens to authenticate requests.

+

For more information, see the following:

+ +

TokenAuthentication

Use the Token authentication +scheme to authenticate to the InfluxDB API.

+

In your API requests, send an Authorization header. +For the header value, provide the word Token followed by a space and an InfluxDB API token. +The word Token is case-sensitive.

+

Syntax

+

Authorization: Token YOUR_INFLUX_TOKEN

+

For examples and more information, see the following:

+ +
Security Scheme Type API Key
Header parameter name: Authorization

BasicAuthentication

Use the HTTP Basic authentication +scheme with clients that support the InfluxDB 1.x convention of username and password (that don't support the Authorization: Token scheme):

+

For examples and more information, see how to authenticate with a username and password.

+
Security Scheme Type HTTP
HTTP Authorization Scheme basic

QuerystringAuthentication

Use the Querystring authentication +scheme with InfluxDB 1.x API parameters to provide credentials through the query string.

+

For examples and more information, see how to authenticate with a username and password.

+
Security Scheme Type API Key
Query parameter name: u=&p=

Query

Query using the InfluxDB v1 HTTP API

query Parameters
db
required
string

Bucket to query.

+
p
string

User token.

+
q
string

Defines the InfluxQL query to run.

+
rp
string

Retention policy name.

+
u
string

Username.

+
header Parameters
Accept
string
Default: application/json
Enum: "application/json" "application/csv" "text/csv" "application/x-msgpack"

Specifies how query results should be encoded in the response. Note: With application/csv, query results include epoch timestamps instead of RFC3339 timestamps.

+
Accept-Encoding
string
Default: identity
Enum: "gzip" "identity"

The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand.

+
Content-Type
string
Value: "application/vnd.influxql"
Zap-Trace-Span
string
Example: trace_id,1,span_id,1,baggage,[object Object]

OpenTracing span context

+
Request Body schema: text/plain

InfluxQL query to execute.

+
string
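A hedged example of calling this endpoint with curl. The host is a placeholder of the form used elsewhere in these docs, and DATABASE_NAME and DATABASE_TOKEN are assumed values; the parameters match those listed above.

curl --get "https://cluster-id.a.influxdb.io/query" \
  --header "Authorization: Token DATABASE_TOKEN" \
  --data-urlencode "db=DATABASE_NAME" \
  --data-urlencode "q=SELECT * FROM MEASUREMENT"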

Responses

Response samples

Content type
No sample

Write

Write time series data into InfluxDB in a V1-compatible format

query Parameters
db
required
string

Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy.

+
p
string

User token.

+
precision
string

Write precision.

+
rp
string

Retention policy name.

+
u
string

Username.

+
header Parameters
Content-Encoding
string
Default: identity
Enum: "gzip" "identity"

When present, its value indicates to the database that compression is applied to the line protocol body.

+
Zap-Trace-Span
string
Example: trace_id,1,span_id,1,baggage,[object Object]

OpenTracing span context

+
Request Body schema: text/plain

Line protocol body

+
string
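A hedged example of writing line protocol through this endpoint with curl. The host is a placeholder, and DATABASE_NAME and DATABASE_TOKEN are assumed values; db and precision behave as described above.

curl --request POST "https://cluster-id.a.influxdb.io/write?db=DATABASE_NAME&precision=s" \
  --header "Authorization: Token DATABASE_TOKEN" \
  --data-binary 'home,room=kitchen temp=72 1463683075'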

Responses

Response samples

Content type
application/json
{
  • "code": "internal error",
  • "message": "string",
  • "op": "string",
  • "err": "string",
  • "line": 0
}

Ping

Get the status of the instance

Reports the InfluxQL bridge querier health and the InfluxDB version of the instance.

+

The response is an HTTP 204 status code, indicating that the querier is available.

+

For InfluxDB 3 Cloud Dedicated, this endpoint checks only the status of queriers; it doesn't check the status of ingesters.

+

To check the health of ingesters before writing data, send a request to one of the write endpoints.

+

This endpoint doesn't require authentication.

+
Authorizations:
None

Responses
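A minimal sketch of checking instance status with curl (the host is a placeholder of the form used elsewhere in these docs):

curl --include "https://cluster-id.a.influxdb.io/ping"

# An HTTP 204 status code in the response indicates the querier is available.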

Get the status of the instance

Reports the InfluxQL bridge querier health and the InfluxDB version of the instance.

+

The response is an HTTP 204 status code, indicating that the querier is available.

+

For InfluxDB 3 Cloud Dedicated, this endpoint checks only the status of queriers; it doesn't check the status of ingesters.

+

To check the health of ingesters before writing data, send a request to one of the write endpoints.

+

This endpoint doesn't require authentication.

+
Authorizations:
None

Responses

+ + + + + diff --git a/content/influxdb3/cloud-dedicated/api/v2/_index.html b/content/influxdb3/cloud-dedicated/api/v2/_index.html new file mode 100644 index 000000000..af765539b --- /dev/null +++ b/content/influxdb3/cloud-dedicated/api/v2/_index.html @@ -0,0 +1,1002 @@ +--- +title: InfluxDB v2 HTTP API for InfluxDB 3 Cloud Dedicated +description: The InfluxDB v2 HTTP API for InfluxDB 3 Cloud Dedicated provides a v2-compatible programmatic interface for writing data stored in an InfluxDB 3 Cloud Dedicated database. +layout: api +menu: + influxdb3_cloud_dedicated: + parent: InfluxDB HTTP API + name: v2 API + identifier: api-reference-v2 +weight: 102 +aliases: + - /influxdb/cloud-dedicated/api/ +--- + + + + + + + + + + + InfluxDB + + + + + + + + + + + + + + + + +
+
+
+ + +

InfluxDB v2 HTTP API for InfluxDB 3 Cloud Dedicated

License: MIT

The InfluxDB v2 HTTP API for InfluxDB 3 Cloud Dedicated provides a v2-compatible programmatic interface for writing data stored in an InfluxDB 3 Cloud Dedicated database.

+

The InfluxDB v2 HTTP API lets you use /api/v2 endpoints for managing retention policy mappings and writing data stored in an InfluxDB 3 instance.

+

This documentation is generated from the +InfluxDB OpenAPI specification.

+

Quick start

See the Get Started tutorial +to get up and running authenticating with tokens, writing to databases, and querying data.

+

InfluxDB API client libraries and Flight clients +are available to integrate InfluxDB APIs with your application.

+

API compatibility

Write data

+

InfluxDB 3 Cloud Dedicated provides the following HTTP API endpoints for writing data:

+
    +
  • Recommended: /api/v2/write endpoint for new write workloads or for bringing existing InfluxDB v2 write workloads to InfluxDB 3.
  • +
  • /write endpoint for bringing existing InfluxDB v1 write workloads to InfluxDB 3.
  • +
+

Both endpoints accept the same line protocol format and process data in the same way.
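As a hedged illustration of that point, the same line protocol can be sent to either endpoint (the host, database, and token values are placeholders):

LP='home,room=kitchen temp=72 1463683075'

# v2-compatible write
curl --request POST "https://cluster-id.a.influxdb.io/api/v2/write?bucket=DATABASE_NAME&precision=s" \
  --header "Authorization: Token DATABASE_TOKEN" \
  --data-binary "$LP"

# v1-compatible write
curl --request POST "https://cluster-id.a.influxdb.io/write?db=DATABASE_NAME&precision=s" \
  --header "Authorization: Token DATABASE_TOKEN" \
  --data-binary "$LP"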

+

Query data

+

InfluxDB 3 Cloud Dedicated provides the following protocols for executing a query:

+ +

InfluxDB v2 compatibility

+

The HTTP API /api/v2/write endpoint works with the Bearer and Token authentication schemes and existing InfluxDB 2.x tools and code for writing data.

+

See how to use the InfluxDB v2 HTTP API with InfluxDB 3 Cloud Dedicated.

+

InfluxDB v1 compatibility

+

The HTTP API /write endpoint and /query endpoint work with InfluxDB 1.x username/password authentication schemes and existing InfluxDB 1.x tools and code.

+

See how to use the InfluxDB v1 HTTP API with InfluxDB 3 Cloud Dedicated.

+

Authentication

Use one of the following schemes to authenticate to the InfluxDB API:

+ +

BasicAuthentication

Basic authentication scheme

+

Use the Authorization header with the Basic scheme to authenticate v1 API /write and /query requests. +When authenticating requests, InfluxDB 3 Cloud Dedicated checks that the password part of the decoded credential is an authorized database token. +InfluxDB 3 Cloud Dedicated ignores the username part of the decoded credential.

+

Syntax

+
Authorization: Basic <base64-encoded [USERNAME]:DATABASE_TOKEN>
+

Replace the following:

+
    +
  • [USERNAME]: an optional string value (ignored by InfluxDB 3 Cloud Dedicated).
  • +
  • DATABASE_TOKEN: a database token.
  • +
  • Encode the [USERNAME]:DATABASE_TOKEN credential using base64 encoding, and then append the encoded string to the Authorization: Basic header.
  • +
+

Example

+

The following example shows how to use cURL with the Basic authentication scheme and a database token:

+
#######################################
+# Use Basic authentication with a database token
+# to query the InfluxDB v1 HTTP API
+#######################################
+# Use the --user option with `--user username:DATABASE_TOKEN` syntax
+#######################################
+
+curl --get "http://cluster-id.a.influxdb.io/query" \
+  --user "":"DATABASE_TOKEN" \
+  --data-urlencode "db=DATABASE_NAME" \
+  --data-urlencode "q=SELECT * FROM MEASUREMENT"
+

Replace the following:

+
    +
  • DATABASE_NAME: your InfluxDB 3 Cloud Dedicated database
  • +
  • DATABASE_TOKEN: a database token with sufficient permissions to the database
  • +
+
Security Scheme Type HTTP
HTTP Authorization Scheme basic

QuerystringAuthentication

Use the Querystring authentication +scheme with InfluxDB 1.x API parameters to provide credentials through the query string.

+

Query string authentication

+

In the URL, pass the p query parameter to authenticate /write and /query requests. +When authenticating requests, InfluxDB 3 Cloud Dedicated checks that p (password) is an authorized database token and ignores the u (username) parameter.

+

Syntax

+
https://cluster-id.a.influxdb.io/query/?[u=any]&p=DATABASE_TOKEN
+https://cluster-id.a.influxdb.io/write/?[u=any]&p=DATABASE_TOKEN
+

Example

+

The following example shows how to use cURL with query string authentication and a database token.

+
#######################################
+# Use an InfluxDB 1.x compatible username and password
+# to query the InfluxDB v1 HTTP API
+#######################################
+# Use authentication query parameters:
+#   ?p=DATABASE_TOKEN
+#######################################
+
+curl --get "https://cluster-id.a.influxdb.io/query" \
+  --data-urlencode "p=DATABASE_TOKEN" \
+  --data-urlencode "db=DATABASE_NAME" \
+  --data-urlencode "q=SELECT * FROM MEASUREMENT"
+

Replace the following:

+
    +
  • DATABASE_NAME: your InfluxDB 3 Cloud Dedicated database
  • +
  • DATABASE_TOKEN: a database token with sufficient permissions to the database
  • +
+
Security Scheme Type API Key
Query parameter name: u=&p=

BearerAuthentication

Use the OAuth Bearer authentication +scheme to authenticate to the InfluxDB API.

+

In your API requests, send an Authorization header. +For the header value, provide the word Bearer followed by a space and a database token.

+

Syntax

+
Authorization: Bearer INFLUX_TOKEN
+

Example

+
########################################################
+# Use the Bearer token authentication scheme with /api/v2/write
+# to write data.
+########################################################
+
+curl --request POST "https://cluster-id.a.influxdb.io/api/v2/write?bucket=DATABASE_NAME&precision=s" \
+  --header "Authorization: Bearer DATABASE_TOKEN" \
+  --data-binary 'home,room=kitchen temp=72 1463683075'
+

For examples and more information, see the following:

+ +
Security Scheme Type HTTP
HTTP Authorization Scheme bearer
Bearer format "JWT"

TokenAuthentication

Use the Token authentication +scheme to authenticate to the InfluxDB API.

+

In your API requests, send an Authorization header. +For the header value, provide the word Token followed by a space and a database token. +The word Token is case-sensitive.

+

Syntax

+
Authorization: Token INFLUX_API_TOKEN
+

Example

+
########################################################
+# Use the Token authentication scheme with /api/v2/write
+# to write data.
+########################################################
+
+curl --request POST "https://cluster-id.a.influxdb.io/api/v2/write?bucket=DATABASE_NAME&precision=s" \
+  --header "Authorization: Token DATABASE_TOKEN" \
+  --data-binary 'home,room=kitchen temp=72 1463683075'
+ + +
Security Scheme Type API Key
Header parameter name: Authorization

Headers

InfluxDB HTTP API endpoints use standard HTTP request and response headers. +The following table shows common headers used by many InfluxDB API endpoints. +Some endpoints may use other headers that perform functions more specific to those endpoints--for example, +the POST /api/v2/write endpoint accepts the Content-Encoding header to indicate the compression applied to line protocol in the request body.

| Header | Value type | Description |
|:-------|:-----------|:------------|
| `Accept` | string | The content type that the client can understand. |
| `Authorization` | string | The authorization scheme and credential. |
| `Content-Length` | integer | The size of the entity-body, in bytes, sent to the database. |
| `Content-Type` | string | The format of the data in the request body. |
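For example, here is a hedged sketch of sending gzip-compressed line protocol with the Content-Encoding header (the host, database, token, and data.lp file name are assumptions):

# Compress line protocol, then tell InfluxDB the body is gzipped
gzip -c data.lp > data.lp.gz

curl --request POST "https://cluster-id.a.influxdb.io/api/v2/write?bucket=DATABASE_NAME&precision=s" \
  --header "Authorization: Token DATABASE_TOKEN" \
  --header "Content-Encoding: gzip" \
  --data-binary @data.lp.gz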

Response codes

InfluxDB HTTP API endpoints use standard HTTP status codes for success and failure responses. +The response body may include additional details. +For details about a specific operation's response, +see Responses and Response Samples for that operation.

+

API operations may return the following HTTP status codes:

Code | Status                | Description
200  | Success               |
201  | Created               | One or more resources are created. The response body contains details about the resource.
204  | No content            | The request is successful and no data is returned. For example, the /write and /api/v2/write endpoints return this status code if all data in the batch is written and queryable.
400  | Bad request           | InfluxDB can't parse the request due to an incorrect parameter or bad syntax. For writes, the error may indicate one of the following problems: rejected points; or the Authorization header is missing or malformed, or the API token doesn't have permission for the operation.
401  | Unauthorized          | May indicate one of the following: the Authorization: Token header is missing or malformed; the API token value is missing from the header; or the API token doesn't have permission. For more information about token types and permissions, see Manage tokens.
404  | Not found             | Requested resource was not found. message in the response body provides details about the requested resource.
405  | Method not allowed    | The API path doesn't support the HTTP method used in the request--for example, you send a POST request to an endpoint that only allows GET.
422  | Unprocessable entity  | Request data is invalid. code and message in the response body provide details about the problem.
500  | Internal server error |
503  | Service unavailable   | Server is temporarily unavailable to process the request. The Retry-After header describes when to try the request again.
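A quick way to check which of these codes a request returns is curl's --write-out option--for example (a sketch reusing the placeholder cluster URL, database, and token from the earlier examples):

curl --silent --output /dev/null --write-out "%{http_code}\n" \
  --request POST "https://cluster-id.a.influxdb.io/api/v2/write?bucket=DATABASE_NAME&precision=s" \
  --header "Authorization: Bearer DATABASE_TOKEN" \
  --data-binary 'home,room=kitchen temp=72 1463683075'

This prints 204 if all data in the batch is written and queryable.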

System information endpoints

Ping

Get the status of the instance

Reports the InfluxQL bridge querier health and the InfluxDB version of the instance.

The response is an HTTP 204 status code to inform you the querier is available.

For InfluxDB 3 Cloud Dedicated, this endpoint only checks the status of queriers; it doesn't check the status of ingesters.

To check the health of ingesters before writing data, send a request to one of the write endpoints.

This endpoint doesn't require authentication.
Authorizations:
None
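For example (a sketch; replace cluster-id.a.influxdb.io with your cluster URL), the --include flag prints the response headers so you can see the 204 status:

curl --include --request GET "https://cluster-id.a.influxdb.io/ping"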

Responses

Get the status of the instance

Reports the InfluxQL bridge querier health and the InfluxDB version of the instance.

The response is an HTTP 204 status code to inform you the querier is available.

For InfluxDB 3 Cloud Dedicated, this endpoint only checks the status of queriers; it doesn't check the status of ingesters.

To check the health of ingesters before writing data, send a request to one of the write endpoints.

This endpoint doesn't require authentication.
Authorizations:
None

Responses

Query

Query data stored in a database.

  • HTTP clients can query the v1 /query endpoint using InfluxQL and retrieve data in CSV or JSON format.
  • The /api/v2/query endpoint can't query InfluxDB 3 Cloud Dedicated.
  • Flight + gRPC clients can query using SQL or InfluxQL and retrieve data in Arrow format.

Query using the InfluxDB v1 HTTP API

Queries InfluxDB using InfluxQL with InfluxDB v1 request and response formats.

+
query Parameters
db
required
string

The database to query data from.

+
epoch
string
Enum: "ns" "u" "µ" "ms" "s" "m" "h"

A unix timestamp precision. Formats timestamps as unix (epoch) timestamps in the specified precision instead of RFC3339 timestamps with nanosecond precision.

+
p
string

The InfluxDB 1.x password to authenticate the request.

+
q
required
string

The InfluxQL query to execute. To execute multiple queries, delimit queries with a semicolon (;).

+
rp
string

The retention policy to query data from. +For more information, see InfluxQL DBRP naming convention.

+
u
string

The InfluxDB 1.x username to authenticate the request.

+
header Parameters
Accept
string
Default: application/json
Enum: "application/json" "application/csv" "text/csv" "application/x-msgpack"

Media type that the client can understand.

+

Note: With application/csv, query results include unix timestamps instead of RFC3339 timestamps.

+
Accept-Encoding
string
Default: identity
Enum: "gzip" "identity"

The content encoding (usually a compression algorithm) that the client can understand.

+
Content-Type
string
Value: "application/json"
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
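The following request (a sketch; home is a hypothetical measurement, and the cluster URL, database, and token are placeholders) combines the db, epoch, and q parameters to return the last hour of data with second-precision epoch timestamps:

curl --get "https://cluster-id.a.influxdb.io/query" \
  --header "Authorization: Bearer DATABASE_TOKEN" \
  --header "Accept: application/json" \
  --data-urlencode "db=DATABASE_NAME" \
  --data-urlencode "epoch=s" \
  --data-urlencode "q=SELECT * FROM home WHERE time > now() - 1h"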

Responses

Response samples

Content type
No sample

Write

Write time series data to databases using InfluxDB v1 or v2 endpoints.

+

Write data

Writes data to a database.

+

Use this endpoint to send data in line protocol format to InfluxDB.

+

InfluxDB 3 Cloud Dedicated does the following when you send a write request:

  1. Validates the request.
  2. If successful, attempts to ingest data from the request body; otherwise, responds with an error status.
  3. Ingests or rejects data in the batch and returns one of the following HTTP status codes:
     • 204 No Content: All data in the batch is ingested.
     • 400 Bad Request: Some (when partial writes are configured for the cluster) or all of the data has been rejected. Data that has not been rejected is ingested and queryable.

The response body contains error details about rejected points, up to 100 points.

+

Writes are synchronous--the response status indicates the final status of the write and all ingested data is queryable.

+

To ensure that InfluxDB handles writes in the order you request them, +wait for the response before you send the next request.

+

Write endpoints

+

The /write and /api/v2/write endpoints are functionally equivalent for writing data to InfluxDB 3 Cloud Dedicated.

+ + + +
query Parameters
bucket
required
string

A database name or ID. +InfluxDB writes all points in the batch to the specified database.

+
org
required
string

Ignored. An organization name or ID.

+

InfluxDB ignores this parameter; authorizes the request using the specified database token +and writes data to the specified cluster database.

+
orgID
string

Ignored. An organization ID.

+

InfluxDB ignores this parameter; authorizes the request using the specified database token +and writes data to the specified cluster database.

+
precision
string (WritePrecision)
Enum: "ms" "s" "us" "ns"

The precision for unix timestamps in the line protocol batch.

+
header Parameters
Accept
string
Default: application/json
Value: "application/json"

The content type that the client can understand. +Writes only return a response body if they fail--for example, +due to a formatting problem or quota limit.

+

InfluxDB Cloud

  • Returns only application/json for format and limit errors.
  • Returns only text/html for some quota limit errors.
Content-Encoding
string
Default: identity
Enum: "gzip" "identity"

The compression applied to the line protocol in the request payload. +To send a gzip payload, pass Content-Encoding: gzip header.

+
Content-Length
integer

The size of the entity-body, in bytes, sent to InfluxDB. +If the length is greater than the max body configuration option, +the server responds with status code 413.

+
Content-Type
string
Default: text/plain; charset=utf-8
Enum: "text/plain" "text/plain; charset=utf-8"

The format of the data in the request body. +To send a line protocol payload, pass Content-Type: text/plain; charset=utf-8.

+
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: text/plain

In the request body, provide data in line protocol format.

+

To send compressed data, do the following:

  1. Use gzip to compress the line protocol data.
  2. In your request, send the compressed data and the Content-Encoding: gzip header, as shown in the example below.
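A minimal sketch, assuming the line protocol is saved in a local data.lp file and reusing the placeholder cluster URL, database, and token from the earlier examples:

# Compress the line protocol file
gzip -c data.lp > data.lp.gz

# Send the compressed payload with the Content-Encoding: gzip header
curl --request POST "https://cluster-id.a.influxdb.io/api/v2/write?bucket=DATABASE_NAME&precision=ns" \
  --header "Authorization: Bearer DATABASE_TOKEN" \
  --header "Content-Encoding: gzip" \
  --header "Content-Type: text/plain; charset=utf-8" \
  --data-binary @data.lp.gz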
string <byte>

Responses

Request samples

Content type
text/plain
airSensors,sensor_id=TLM0201 temperature=73.97038159354763,humidity=35.23103248356096,co=0.48445310567793615 1630424257000000000
airSensors,sensor_id=TLM0202 temperature=75.30007505999716,humidity=35.651929918691714,co=0.5141876544505826 1630424257000000000

Response samples

Content type
application/json
{
  • "code": "invalid",
  • "line": 2,
  • "message": "failed to parse line protocol: errors encountered on line(s): error message for first rejected point</n> error message for second rejected point</n> error message for Nth rejected point (up to 100 rejected points)"
}

Write data using the InfluxDB v1 HTTP API

Writes data to a database.

+

Use this endpoint for InfluxDB v1 parameter compatibility when sending data in line protocol format to InfluxDB.

+

InfluxDB 3 Cloud Dedicated does the following when you send a write request:

  1. Validates the request.
  2. If successful, attempts to ingest data from the request body; otherwise, responds with an error status.
  3. Ingests or rejects data in the batch and returns one of the following HTTP status codes:
     • 204 No Content: all data in the batch is ingested
     • 201 Created (if the cluster is configured to allow partial writes): some points in the batch are ingested and queryable, and some points are rejected
     • 400 Bad Request: all data is rejected

The response body contains error details about rejected points, up to 100 points.

+

Writes are synchronous--the response status indicates the final status of the write and all ingested data is queryable.

+

To ensure that InfluxDB handles writes in the order you request them, +wait for the response before you send the next request.

+

Write endpoints

+

The /write and /api/v2/write endpoints are functionally equivalent for writing data to InfluxDB 3 Cloud Dedicated.

+ + + +
query Parameters
db
required
string

The database to write to. If none exists, InfluxDB creates a database with a default 3-day retention policy.

+
p
string

The InfluxDB 1.x password to authenticate the request.

+
precision
string

Write precision.

+
rp
string

Retention policy name.

+
u
string

The InfluxDB 1.x username to authenticate the request.

+
header Parameters
Content-Encoding
string
Default: identity
Enum: "gzip" "identity"

When present, its value indicates to the database that compression is applied to the line protocol body.

+
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: text/plain

Line protocol body

+
string
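For example, the following request (a sketch reusing the placeholder cluster URL, database, and token from the earlier examples) writes a single point with second-precision timestamps using v1 parameters:

curl --request POST "https://cluster-id.a.influxdb.io/write?db=DATABASE_NAME&precision=s" \
  --header "Authorization: Token DATABASE_TOKEN" \
  --data-binary 'home,room=kitchen temp=72 1463683075'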

Responses

Response samples

Content type
application/json
Example
{
  • "code": "invalid",
  • "line": 2,
  • "message": "no data written, errors encountered on line(s): error message for first rejected point</n> error message for second rejected point</n> error message for Nth rejected point (up to 100 rejected points)"
}
diff --git a/content/influxdb3/cloud-serverless/api/v1-compatibility/_index.html b/content/influxdb3/cloud-serverless/api/v1-compatibility/_index.html
new file mode 100644
index 000000000..7274eb439
--- /dev/null
+++ b/content/influxdb3/cloud-serverless/api/v1-compatibility/_index.html
@@ -0,0 +1,579 @@
+---
+title: InfluxDB v1 HTTP API for InfluxDB 3 Cloud Serverless
+description: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Cloud Serverless bucket.
+layout: api
+menu:
+  influxdb3_cloud_serverless:
+    parent: InfluxDB HTTP API
+    name: v1 Compatibility API
+    identifier: api-reference-v1-compatibility
+weight: 304
+aliases:
+  - /influxdb/cloud-serverless/api/v1/
+---

InfluxDB v1 HTTP API for InfluxDB 3 Cloud Serverless

License: MIT

The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Cloud Serverless bucket.

+

The InfluxDB 1.x /write and /query endpoints work with InfluxDB 1.x client libraries and third-party integrations like Grafana and others.

+

This documentation is generated from the InfluxDB OpenAPI specification.

InfluxDB /api/v2 API for InfluxDB 3 Cloud Serverless

+

Authentication

The InfluxDB 1.x API requires authentication for all requests. InfluxDB Cloud uses InfluxDB API tokens to authenticate requests.

+

For more information, see the following:

+ +

TokenAuthentication

Use the Token authentication scheme to authenticate to the InfluxDB API.

In your API requests, send an Authorization header. For the header value, provide the word Token followed by a space and an InfluxDB API token. The word Token is case-sensitive.

Syntax

+

Authorization: Token YOUR_INFLUX_TOKEN

+
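For example, the following v1-compatible query request (a sketch; INFLUX_URL, BUCKET_NAME, MEASUREMENT, and INFLUX_API_TOKEN are placeholders) authenticates with the Token scheme:

curl --get "INFLUX_URL/query" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --data-urlencode "db=BUCKET_NAME" \
  --data-urlencode "q=SELECT * FROM MEASUREMENT"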

For examples and more information, see the following:

+ +
Security Scheme Type API Key
Header parameter name: Authorization

BasicAuthentication

Use the HTTP Basic authentication scheme with clients that support the InfluxDB 1.x convention of username and password (that don't support the Authorization: Token scheme):

+

For examples and more information, see how to authenticate with a username and password.

+
Security Scheme Type HTTP
HTTP Authorization Scheme basic

QuerystringAuthentication

Use the Querystring authentication scheme with InfluxDB 1.x API parameters to provide credentials through the query string.

+

For examples and more information, see how to authenticate with a username and password.

+
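A minimal sketch, assuming the v1 convention in which the p parameter carries the API token (USERNAME, INFLUX_URL, BUCKET_NAME, MEASUREMENT, and INFLUX_API_TOKEN are placeholders; see the username and password guidance linked above for what your deployment expects in u):

curl --get "INFLUX_URL/query" \
  --data-urlencode "u=USERNAME" \
  --data-urlencode "p=INFLUX_API_TOKEN" \
  --data-urlencode "db=BUCKET_NAME" \
  --data-urlencode "q=SELECT * FROM MEASUREMENT"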
Security Scheme Type API Key
Query parameter name: u=&p=

Query

Query using the InfluxDB v1 HTTP API

query Parameters
db
required
string

Bucket to query.

+
p
string

User token.

+
q
string

The InfluxQL query to run.

+
rp
string

Retention policy name.

+
u
string

Username.

+
header Parameters
Accept
string
Default: application/json
Enum: "application/json" "application/csv" "text/csv" "application/x-msgpack"

Specifies how query results should be encoded in the response. Note: With application/csv, query results include epoch timestamps instead of RFC3339 timestamps.

+
Accept-Encoding
string
Default: identity
Enum: "gzip" "identity"

The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand.

+
Content-Type
string
Value: "application/vnd.influxql"
Zap-Trace-Span
string
Example: trace_id,1,span_id,1,baggage,[object Object]

OpenTracing span context

+
Request Body schema: text/plain

InfluxQL query to execute.

+
string

Responses

Response samples

Content type
No sample

Write

Write time series data into InfluxDB in a V1-compatible format

query Parameters
db
required
string

Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy.

+
p
string

User token.

+
precision
string

Write precision.

+
rp
string

Retention policy name.

+
u
string

Username.

+
header Parameters
Content-Encoding
string
Default: identity
Enum: "gzip" "identity"

When present, its value indicates to the database that compression is applied to the line protocol body.

+
Zap-Trace-Span
string
Example: trace_id,1,span_id,1,baggage,[object Object]

OpenTracing span context

+
Request Body schema: text/plain

Line protocol body

+
string

Responses

Response samples

Content type
application/json
{
  • "code": "internal error",
  • "message": "string",
  • "op": "string",
  • "err": "string",
  • "line": 0
}
diff --git a/content/influxdb3/cloud-serverless/api/v2/_index.html b/content/influxdb3/cloud-serverless/api/v2/_index.html
new file mode 100644
index 000000000..bafe01ebc
--- /dev/null
+++ b/content/influxdb3/cloud-serverless/api/v2/_index.html
@@ -0,0 +1,4673 @@
+---
+title: InfluxDB 3 Cloud Serverless API Service
+description: The InfluxDB v2 HTTP API for InfluxDB 3 Cloud Serverless provides a programmatic interface for writing data stored in an InfluxDB 3 Cloud Serverless bucket.
+layout: api
+menu:
+  influxdb3_cloud_serverless:
+    parent: InfluxDB HTTP API
+    name: v2 API
+    identifier: api-reference-v2
+weight: 102
+aliases:
+  - /influxdb/cloud-serverless/api/
+---

InfluxDB 3 Cloud Serverless API Service

License: MIT

The InfluxDB v2 HTTP API for InfluxDB 3 Cloud Serverless provides a programmatic interface for writing data stored in an InfluxDB 3 Cloud Serverless bucket.

+

The InfluxDB v2 HTTP API lets you use /api/v2 endpoints for managing retention policy mappings and writing data stored in an InfluxDB 3 instance.

+

This documentation is generated from the InfluxDB OpenAPI specification.

+

Quick start

See the Get started tutorial to get up and running authenticating with tokens, writing to buckets, and querying data.

InfluxDB API client libraries and Flight clients are available to integrate InfluxDB with your application.

+

Authentication

Use one of the following schemes to authenticate to the InfluxDB API:

+ +

BasicAuthentication

Basic authentication scheme

+

Use the HTTP Basic authentication scheme for InfluxDB /api/v2 API operations that support it:

+

Syntax

+

Authorization: Basic BASE64_ENCODED_CREDENTIALS

+

To construct the BASE64_ENCODED_CREDENTIALS, combine the username and the password with a colon (USERNAME:PASSWORD), and then encode the resulting string in base64. Many HTTP clients encode the credentials for you before sending the request.

Warning: Base64-encoding can easily be reversed to obtain the original username and password. It is used to keep the data intact and does not provide security. You should always use HTTPS when authenticating or sending a request with sensitive information.

+

Examples

+

In the examples, replace the following:

+
  • EMAIL_ADDRESS: InfluxDB Cloud username (the email address the user signed up with)
  • PASSWORD: InfluxDB Cloud API token
  • INFLUX_URL: your InfluxDB Cloud URL

Encode credentials with cURL

+

The following example shows how to use cURL to send an API request that uses Basic authentication. With the --user option, cURL encodes the credentials and passes them in the Authorization: Basic header.

curl --get "INFLUX_URL/api/v2/signin" \
    --user "EMAIL_ADDRESS":"PASSWORD"

Encode credentials with Flux

+

The Flux http.basicAuth() function returns a Base64-encoded +basic authentication header using a specified username and password combination.

+

Encode credentials with JavaScript

+

The following example shows how to use the JavaScript btoa() function +to create a Base64-encoded string:

+
btoa('EMAIL_ADDRESS:PASSWORD')
+

The output is the following:

+
'VVNFUk5BTUU6UEFTU1dPUkQ='
+

Once you have the Base64-encoded credentials, you can pass them in the Authorization header--for example:

curl --get "INFLUX_URL/api/v2/signin" \
    --header "Authorization: Basic VVNFUk5BTUU6UEFTU1dPUkQ="

To learn more about HTTP authentication, see Mozilla Developer Network (MDN) Web Docs, HTTP authentication.

+
Security Scheme Type HTTP
HTTP Authorization Scheme basic

TokenAuthentication

Use the Token authentication scheme to authenticate to the InfluxDB API.

In your API requests, send an Authorization header. For the header value, provide the word Token followed by a space and an InfluxDB API token. The word Token is case-sensitive.

Syntax

+

Authorization: Token INFLUX_API_TOKEN

+

Example

+

Use Token authentication with cURL

+

The following example shows how to use cURL to send an API request that uses Token authentication:

+
curl --request GET "INFLUX_URL/api/v2/buckets" \
     --header "Authorization: Token INFLUX_API_TOKEN"

Replace the following:

  • INFLUX_URL: your InfluxDB Cloud Serverless region URL
  • INFLUX_API_TOKEN: an InfluxDB API token
Security Scheme Type API Key
Header parameter name: Authorization

Supported operations

The following table shows the most common operations that the InfluxDB /api/v2 API supports. Some resources may support other operations that perform functions more specific to those resources. For example, you can use the PATCH /api/v2/scripts endpoint to update properties of a script resource.

Operation | Description
Write     | Writes (POST) data to a bucket.
Run       | Executes (POST) a query or script and returns the result.
List      | Retrieves (GET) a list of zero or more resources.
Create    | Creates (POST) a new resource and returns the resource.
Update    | Modifies (PUT) an existing resource to reflect data in your request.
Delete    | Removes (DELETE) a specific resource.

Headers

InfluxDB HTTP API endpoints use standard HTTP request and response headers. The following table shows common headers used by many InfluxDB API endpoints. Some endpoints may use other headers that perform functions more specific to those endpoints--for example, the POST /api/v2/write endpoint accepts the Content-Encoding header to indicate the compression applied to line protocol in the request body.

Header         | Value type | Description
Accept         | string     | The content type that the client can understand.
Authorization  | string     | The authorization scheme and credential.
Content-Length | integer    | The size of the entity-body, in bytes.
Content-Type   | string     | The format of the data in the request body.

Pagination

Some InfluxDB API list operations may support the following query parameters for paginating results:

Query parameter | Value type           | Description
limit           | integer              | The maximum number of records to return (after other parameters are applied).
offset          | integer              | The number of records to skip (before limit, after other parameters are applied).
after           | string (resource ID) | Only returns resources created after the specified resource.
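For example, the following request (a sketch with placeholder values) pages through buckets by passing the id of the last bucket from the previous page as after:

curl --request GET "INFLUX_URL/api/v2/buckets?limit=20&after=BUCKET_ID" \
  --header "Authorization: Token INFLUX_API_TOKEN"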

Limitations

  • For specific endpoint parameters and examples, see the endpoint definition.
  • If you specify an offset parameter value greater than the total number of records, then InfluxDB returns an empty list in the response (given offset skips the specified number of records).

    The following example passes offset=50 to skip the first 50 results, but the user only has 10 buckets:

    curl --request GET "INFLUX_URL/api/v2/buckets?limit=1&offset=50" \
        --header "Authorization: Token INFLUX_API_TOKEN"

    The response contains the following:

    {
      "links": {
          "prev": "/api/v2/buckets?descending=false\u0026limit=1\u0026offset=49\u0026orgID=ORG_ID",
          "self": "/api/v2/buckets?descending=false\u0026limit=1\u0026offset=50\u0026orgID=ORG_ID"
      },
      "buckets": []
    }

Response codes

InfluxDB HTTP API endpoints use standard HTTP status codes for success and failure responses. The response body may include additional details. For details about a specific operation's response, see Responses and Response Samples for that operation.

+

API operations may return the following HTTP status codes:

Code | Status                   | Description
200  | Success                  |
201  | Created                  | Successfully created a resource. The response body may contain details; for example, /write and /api/v2/write response bodies contain details of partial write failures.
204  | No content               | The request succeeded.
400  | Bad request              | InfluxDB can't parse the request due to an incorrect parameter or bad syntax. For writes, the error may indicate one of the following problems: line protocol is malformed (the response body contains the first malformed line in the data and indicates what was expected); the batch contains a point with the same series as other points, but one of the field values has a different data type; or the Authorization header is missing or malformed, or the API token doesn't have permission for the operation.
401  | Unauthorized             | May indicate one of the following: the Authorization: Token header is missing or malformed; the API token value is missing from the header; or the API token doesn't have permission. For more information about token types and permissions, see Manage API tokens.
404  | Not found                | Requested resource was not found. message in the response body provides details about the requested resource.
405  | Method not allowed       | The API path doesn't support the HTTP method used in the request--for example, you send a POST request to an endpoint that only allows GET.
413  | Request entity too large | Request payload exceeds the size limit.
422  | Unprocessable entity     | Request data is invalid. code and message in the response body provide details about the problem.
429  | Too many requests        | API token is temporarily over the request quota. The Retry-After header describes when to try the request again.
500  | Internal server error    |
503  | Service unavailable      | Server is temporarily unavailable to process the request. The Retry-After header describes when to try the request again.

Data I/O endpoints

Write data

Writes data to a bucket.

+

Use this endpoint to send data in line protocol format to InfluxDB.

+

InfluxDB 3 Cloud Serverless does the following when you send a write request:

  1. Validates the request.
  2. If successful, attempts to ingest data from the request body; otherwise, responds with an error status.
  3. Ingests or rejects data in the batch and returns one of the following HTTP status codes:
     • 204 No Content: All data in the batch is ingested.
     • 400 Bad Request: Data from the batch was rejected and not written. The response body indicates if a partial write occurred.

The response body contains error details about rejected points, up to 100 points.

+

Writes are synchronous--the response status indicates the final status of the write and all ingested data is queryable.

+

To ensure that InfluxDB handles writes in the order you request them, +wait for the response before you send the next request.

+

Write endpoints

+

The /write and /api/v2/write endpoints are functionally equivalent for writing data to InfluxDB 3 Cloud Serverless.

+ +

Rate limits

+

Write rate limits apply. For more information, see limits and adjustable quotas.

+ + +
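For example, the following request (a sketch; INFLUX_URL, BUCKET_NAME, and INFLUX_API_TOKEN are placeholders, and the org parameter is omitted because, as noted above, InfluxDB 3 Cloud Serverless determines the organization from the token) writes a single point with second-precision timestamps:

curl --request POST "INFLUX_URL/api/v2/write?bucket=BUCKET_NAME&precision=s" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --header "Content-Type: text/plain; charset=utf-8" \
  --data-binary 'home,room=kitchen temp=72 1463683075'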
Authorizations:
query Parameters
bucket
required
string

A bucket name or ID. +InfluxDB writes all points in the batch to the specified bucket.

+
org
required
string

An organization name or ID.

+

InfluxDB 3 Cloud Serverless writes data to the bucket in the organization associated with the authorization (API token); +doesn't use the org parameter or orgID parameter.

+
orgID
string

An organization ID.

+

InfluxDB 3 Cloud Serverless writes data to the bucket in the organization associated with the authorization (API token); +doesn't use the org parameter or orgID parameter.

+
precision
string (WritePrecision)
Enum: "ms" "s" "us" "ns"

The precision for unix timestamps in the line protocol batch.

+
header Parameters
Accept
string
Default: application/json
Value: "application/json"

The content type that the client can understand. +Writes only return a response body if they fail--for example, +due to a formatting problem or quota limit.

+

InfluxDB 3 Cloud Serverless

  • Returns only application/json for format and limit errors.
  • Returns only text/html for some quota limit errors.
Content-Encoding
string
Default: identity
Enum: "gzip" "identity"

The compression applied to the line protocol in the request payload. +To send a gzip payload, pass Content-Encoding: gzip header.

+
Content-Length
integer

The size of the entity-body, in bytes, sent to InfluxDB. +If the length is greater than the max body configuration option, +the server responds with status code 413.

+
Content-Type
string
Default: text/plain; charset=utf-8
Enum: "text/plain" "text/plain; charset=utf-8"

The format of the data in the request body. +To send a line protocol payload, pass Content-Type: text/plain; charset=utf-8.

+
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: text/plain

In the request body, provide data in line protocol format.

+

To send compressed data, do the following:

  1. Use gzip to compress the line protocol data.
  2. In your request, send the compressed data and the Content-Encoding: gzip header.
string <byte>

Responses

Request samples

Content type
text/plain
airSensors,sensor_id=TLM0201 temperature=73.97038159354763,humidity=35.23103248356096,co=0.48445310567793615 1630424257000000000
airSensors,sensor_id=TLM0202 temperature=75.30007505999716,humidity=35.651929918691714,co=0.5141876544505826 1630424257000000000

Response samples

Content type
application/json
Example
{
  • "code": "invalid",
  • "line": 2,
  • "message": "no data written, errors encountered on line(s): error message for first rejected point</n> error message for second rejected point</n> error message for Nth rejected point (up to 100 rejected points)"
}

Query using the InfluxDB v1 HTTP API

Queries InfluxDB using InfluxQL with InfluxDB v1 request and response formats.

+ + +
Authorizations:
query Parameters
db
required
string

The database to query data from. +This is mapped to an InfluxDB bucket. +For more information, see Database and retention policy mapping.

+
epoch
string
Enum: "ns" "u" "µ" "ms" "s" "m" "h"

A unix timestamp precision. +Formats timestamps as unix (epoch) timestamps the specified precision +instead of RFC3339 timestamps with nanosecond precision.

+
p
string

The InfluxDB 1.x password to authenticate the request.

+
q
required
string

The InfluxQL query to execute. To execute multiple queries, delimit queries with a semicolon (;).

+
rp
string

The retention policy to query data from. +This is mapped to an InfluxDB bucket. +For more information, see Database and retention policy mapping.

+
u
string

The InfluxDB 1.x username to authenticate the request.

+
header Parameters
Accept
string
Default: application/json
Enum: "application/json" "application/csv" "text/csv" "application/x-msgpack"

Media type that the client can understand.

+

Note: With application/csv, query results include unix timestamps instead of RFC3339 timestamps.

+
Accept-Encoding
string
Default: identity
Enum: "gzip" "identity"

The content encoding (usually a compression algorithm) that the client can understand.

+
Content-Type
string
Value: "application/json"
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
No sample

Write data using the InfluxDB v1 HTTP API

Writes data to a bucket.

+

Use this endpoint for InfluxDB v1 parameter compatibility when sending data in line protocol format to InfluxDB.

+

InfluxDB 3 Cloud Serverless does the following when you send a write request:

  1. Validates the request.
  2. If successful, attempts to ingest data from the request body; otherwise, responds with an error status.
  3. Ingests or rejects data in the batch and returns one of the following HTTP status codes:
     • 204 No Content: all data in the batch is ingested
     • 201 Created: some points in the batch are ingested and queryable, and some points are rejected
     • 400 Bad Request: all data is rejected

The response body contains error details about rejected points, up to 100 points.

+

Writes are synchronous--the response status indicates the final status of the write and all ingested data is queryable.

+

To ensure that InfluxDB handles writes in the order you request them, +wait for the response before you send the next request.

+

Write endpoints

+

The /write and /api/v2/write endpoints are functionally equivalent for writing data to InfluxDB 3 Cloud Serverless.

+ +

Rate limits

+

Write rate limits apply. +For more information, see limits and adjustable quotas.

+ + +
Authorizations:
query Parameters
db
required
string

Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy.

+
p
string

The InfluxDB 1.x password to authenticate the request.

+
precision
string

Write precision.

+
rp
string

Retention policy name.

+
u
string

The InfluxDB 1.x username to authenticate the request.

+
header Parameters
Content-Encoding
string
Default: identity
Enum: "gzip" "identity"

When present, indicates that compression is applied to the line protocol body.

+
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: text/plain

Line protocol body

+
string

Responses

Response samples

Content type
application/json
{
  • "code": "invalid",
  • "line": 2,
  • "message": "failed to parse line protocol: errors encountered on line(s): error message for first rejected point</n> error message for second rejected point</n> error message for Nth rejected point (up to 100 rejected points)"
}

Security and access endpoints

List authorizations

Lists authorizations.

+

To limit which authorizations are returned, pass query parameters in your request. If no query parameters are passed, InfluxDB returns all authorizations.

InfluxDB 3 Cloud Serverless doesn't expose API token values in GET /api/v2/authorizations responses; it returns token: redacted for all authorizations.

+

Required permissions

+

To retrieve an authorization, the request must use an API token that has the following permissions:

  • read-authorizations
  • read-user for the user that the authorization is scoped to
Authorizations:
query Parameters
org
string

An organization name. +Only returns authorizations that belong to the specified organization.

+
orgID
string

An organization ID. Only returns authorizations that belong to the specified organization.

+
token
string

An API token value. +Specifies an authorization by its token property value +and returns the authorization.

+

InfluxDB OSS

  • Doesn't support this parameter. InfluxDB OSS ignores the token= parameter, applies other parameters, and then returns the result.

Limitations

  • The parameter is non-repeatable. If you specify more than one, only the first one is used. If a resource with the specified property value doesn't exist, then the response body contains an empty list.
user
string

A user name. +Only returns authorizations scoped to the specified user.

+
userID
string

A user ID. +Only returns authorizations scoped to the specified user.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "authorizations": [
    ],
  • "links": {}
}

Create an authorization

Creates an authorization and returns the authorization with the +generated API token.

+

Use this endpoint to create an authorization, which generates an API token +with permissions to read or write to a specific resource or type of resource. +The API token is the authorization's token property value.

+

To follow best practices for secure API token generation and retrieval, +InfluxDB enforces access restrictions on API tokens.

+
  • InfluxDB allows access to the API token value immediately after the authorization is created.
  • You can’t change access (read/write) permissions for an API token after it’s created.
  • Tokens stop working when the user who created the token is deleted.

We recommend the following for managing your tokens:

  • Create a generic user to create and manage tokens for writing data.
  • Store your tokens in a secure password vault for future access.

Required permissions

  • write-authorizations
  • write-user for the user that the authorization is scoped to
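For example, the following request (a sketch; INFLUX_URL, INFLUX_ORG_ID, and INFLUX_API_TOKEN are placeholders, and the permission object follows the Permission schema described in the request body section below) creates an authorization that can read all buckets in the organization:

curl --request POST "INFLUX_URL/api/v2/authorizations" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{
    "orgID": "INFLUX_ORG_ID",
    "description": "iot_users read buckets",
    "permissions": [
      { "action": "read", "resource": { "type": "buckets" } }
    ]
  }'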
Authorizations:
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

The authorization to create.

+
description
string

A description of the token.

+
orgID
required
string

An organization ID. +Specifies the organization that owns the authorization.

+
required
Array of objects (Permission) non-empty

A list of permissions for an authorization. +In the list, provide at least one permission object.

+

In a permission, the resource.type property grants access to all +resources of the specified type. +To grant access to only a specific resource, specify the +resource.id property.

+
status
string
Default: "active"
Enum: "active" "inactive"

Status of the token. If inactive, InfluxDB rejects requests that use the token.

+
userID
string

A user ID. +Specifies the user that the authorization is scoped to.

+

When a user authenticates with username and password, +InfluxDB generates a user session with all the permissions +specified by all the user's authorizations.

+

Responses

Request samples

Content type
application/json
Example

Creates an authorization.

+
{
  • "description": "iot_users read buckets",
  • "orgID": "INFLUX_ORG_ID",
  • "permissions": [
    ]
}

Response samples

Content type
application/json
{
  • "description": "string",
  • "status": "active",
  • "createdAt": "2019-08-24T14:15:22Z",
  • "id": "string",
  • "links": {
    },
  • "org": "string",
  • "orgID": "string",
  • "permissions": [
    ],
  • "token": "string",
  • "updatedAt": "2019-08-24T14:15:22Z",
  • "user": "string",
  • "userID": "string"
}

Delete an authorization

Deletes an authorization.

+

Use this endpoint to delete an API token.

If you want to disable an API token instead of deleting it, update the authorization's status to inactive.

+
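For example (a sketch; AUTH_ID is the placeholder ID of the authorization to delete):

curl --request DELETE "INFLUX_URL/api/v2/authorizations/AUTH_ID" \
  --header "Authorization: Token INFLUX_API_TOKEN"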
Authorizations:
path Parameters
authID
required
string

An authorization ID. Specifies the authorization to delete.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "code": "invalid",
  • "message": "id must have a length of 16 bytes"
}

Retrieve an authorization

Retrieves an authorization.

+

Use this endpoint to retrieve information about an API token, including +the token's permissions and the user that the token is scoped to.

+

InfluxDB OSS

+
    +
  • InfluxDB OSS returns +API token values in authorizations.
  • +
  • If the request uses an operator token, +InfluxDB OSS returns authorizations for all organizations in the instance.
  • +
+ + +
Authorizations:
path Parameters
authID
required
string

An authorization ID. Specifies the authorization to retrieve.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "description": "string",
  • "status": "active",
  • "createdAt": "2019-08-24T14:15:22Z",
  • "id": "string",
  • "links": {
    },
  • "org": "string",
  • "orgID": "string",
  • "permissions": [
    ],
  • "token": "string",
  • "updatedAt": "2019-08-24T14:15:22Z",
  • "user": "string",
  • "userID": "string"
}

Update an API token to be active or inactive

Updates an authorization.

+

Use this endpoint to set an API token's status to be active or inactive. InfluxDB rejects requests that use inactive API tokens.

+
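For example, the following request (a sketch with placeholder values, assuming the PATCH method for this operation) deactivates a token:

curl --request PATCH "INFLUX_URL/api/v2/authorizations/AUTH_ID" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{"status": "inactive"}'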
Authorizations:
path Parameters
authID
required
string

An authorization ID. Specifies the authorization to update.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

In the request body, provide the authorization properties to update.

+
description
string

A description of the token.

+
status
string
Default: "active"
Enum: "active" "inactive"

Status of the token. If inactive, InfluxDB rejects requests that use the token.

+

Responses

Request samples

Content type
application/json
{
  • "description": "string",
  • "status": "active"
}

Response samples

Content type
application/json
{
  • "description": "string",
  • "status": "active",
  • "createdAt": "2019-08-24T14:15:22Z",
  • "id": "string",
  • "links": {
    },
  • "org": "string",
  • "orgID": "string",
  • "permissions": [
    ],
  • "token": "string",
  • "updatedAt": "2019-08-24T14:15:22Z",
  • "user": "string",
  • "userID": "string"
}

System information endpoints

List all top level routes

Retrieves all the top level routes for the InfluxDB API.

+

Limitations

+
    +
  • Only returns top level routes--for example, the response contains +"tasks":"/api/v2/tasks", and doesn't contain resource-specific routes +for tasks (/api/v2/tasks/TASK_ID/...).
  • +
+
Authorizations:
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{}

List all known resources

Authorizations:
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
[
  • "string"
]

Authorizations (API tokens)

Create and manage authorizations (API tokens).

+

An authorization contains a list of read and write +permissions for organization resources and provides an API token for authentication. +An authorization belongs to an organization and only contains permissions for that organization.

+

We recommend the following for managing your tokens:

+
    +
  • Create a generic user to create and manage tokens for writing data.
  • +
  • Store your tokens in a secure password vault for future access.
  • +
+

User sessions with authorizations

+

Optionally, when creating an authorization, you can scope it to a specific user. +If the user signs in with username and password, creating a user session, +the session carries the permissions granted by all the user's authorizations. +For more information, see how to assign a token to a specific user. +To create a user session, use the POST /api/v2/signin endpoint.

+ + + + +

List authorizations

Lists authorizations.

+

To limit which authorizations are returned, pass query parameters in your request. +If no query parameters are passed, InfluxDB returns all authorizations.

+

InfluxDB 3 Cloud Serverless doesn't expose API token + values in GET /api/v2/authorizations responses; + returns token: redacted for all authorizations.

+

Required permissions

+

To retrieve an authorization, the request must use an API token that has the +following permissions:

+
    +
  • read-authorizations
  • +
  • read-user for the user that the authorization is scoped to
  • +
+ + +
Authorizations:
query Parameters
org
string

An organization name. +Only returns authorizations that belong to the specified organization.

+
orgID
string

An organization ID. Only returns authorizations that belong to the specified organization.

+
token
string

An API token value. +Specifies an authorization by its token property value +and returns the authorization.

+

InfluxDB OSS

+
    +
  • Doesn't support this parameter. InfluxDB OSS ignores the token= parameter, +applies other parameters, and then returns the result.
  • +
+

Limitations

+
    +
  • The parameter is non-repeatable. If you specify more than one, +only the first one is used. If a resource with the specified +property value doesn't exist, then the response body contains an empty list.
  • +
+
user
string

A user name. +Only returns authorizations scoped to the specified user.

+
userID
string

A user ID. +Only returns authorizations scoped to the specified user.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "authorizations": [
    ],
  • "links": {}
}

Create an authorization

Creates an authorization and returns the authorization with the +generated API token.

+

Use this endpoint to create an authorization, which generates an API token +with permissions to read or write to a specific resource or type of resource. +The API token is the authorization's token property value.

+

To follow best practices for secure API token generation and retrieval, +InfluxDB enforces access restrictions on API tokens.

+
    +
  • InfluxDB allows access to the API token value immediately after the authorization is created.
  • +
  • You can’t change access (read/write) permissions for an API token after it’s created.
  • +
  • Tokens stop working when the user who created the token is deleted.
  • +
+

We recommend the following for managing your tokens:

+
    +
  • Create a generic user to create and manage tokens for writing data.
  • +
  • Store your tokens in a secure password vault for future access.
  • +
+

Required permissions

+
    +
  • write-authorizations
  • +
  • write-user for the user that the authorization is scoped to
  • +
+ + +
Authorizations:
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

The authorization to create.

+
description
string

A description of the token.

+
orgID
required
string

An organization ID. +Specifies the organization that owns the authorization.

+
required
Array of objects (Permission) non-empty

A list of permissions for an authorization. +In the list, provide at least one permission object.

+

In a permission, the resource.type property grants access to all +resources of the specified type. +To grant access to only a specific resource, specify the +resource.id property.

+
status
string
Default: "active"
Enum: "active" "inactive"

Status of the token. If inactive, InfluxDB rejects requests that use the token.

+
userID
string

A user ID. +Specifies the user that the authorization is scoped to.

+

When a user authenticates with username and password, +InfluxDB generates a user session with all the permissions +specified by all the user's authorizations.

+

Responses

Request samples

Content type
application/json
Example

Creates an authorization.

+
{
  • "description": "iot_users read buckets",
  • "orgID": "INFLUX_ORG_ID",
  • "permissions": [
    ]
}

Response samples

Content type
application/json
{
  • "description": "string",
  • "status": "active",
  • "createdAt": "2019-08-24T14:15:22Z",
  • "id": "string",
  • "links": {
    },
  • "org": "string",
  • "orgID": "string",
  • "permissions": [
    ],
  • "token": "string",
  • "updatedAt": "2019-08-24T14:15:22Z",
  • "user": "string",
  • "userID": "string"
}

Delete an authorization

Deletes an authorization.

+

Use the endpoint to delete an API token.

+

If you want to disable an API token instead of delete it, +update the authorization's status to inactive.

+
Authorizations:
path Parameters
authID
required
string

An authorization ID. Specifies the authorization to delete.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "code": "invalid",
  • "message": "id must have a length of 16 bytes"
}

Retrieve an authorization

Retrieves an authorization.

+

Use this endpoint to retrieve information about an API token, including +the token's permissions and the user that the token is scoped to.

+

InfluxDB OSS

+
    +
  • InfluxDB OSS returns +API token values in authorizations.
  • +
  • If the request uses an operator token, +InfluxDB OSS returns authorizations for all organizations in the instance.
  • +
+ + +
Authorizations:
path Parameters
authID
required
string

An authorization ID. Specifies the authorization to retrieve.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "description": "string",
  • "status": "active",
  • "createdAt": "2019-08-24T14:15:22Z",
  • "id": "string",
  • "links": {
    },
  • "org": "string",
  • "orgID": "string",
  • "permissions": [
    ],
  • "token": "string",
  • "updatedAt": "2019-08-24T14:15:22Z",
  • "user": "string",
  • "userID": "string"
}

Update an API token to be active or inactive

Updates an authorization.

+

Use this endpoint to set an API token's status to be active or inactive. +InfluxDB rejects requests that use inactive API tokens.

+
Authorizations:
path Parameters
authID
required
string

An authorization ID. Specifies the authorization to update.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

In the request body, provide the authorization properties to update.

+
description
string

A description of the token.

+
status
string
Default: "active"
Enum: "active" "inactive"

Status of the token. If inactive, InfluxDB rejects requests that use the token.

+

Responses

Request samples

Content type
application/json
{
  • "description": "string",
  • "status": "active"
}

Response samples

Content type
application/json
{
  • "description": "string",
  • "status": "active",
  • "createdAt": "2019-08-24T14:15:22Z",
  • "id": "string",
  • "links": {
    },
  • "org": "string",
  • "orgID": "string",
  • "permissions": [
    ],
  • "token": "string",
  • "updatedAt": "2019-08-24T14:15:22Z",
  • "user": "string",
  • "userID": "string"
}

Bucket Schemas

List measurement schemas of a bucket

Lists explicit schemas ("schemaType": "explicit") for a bucket.

+

Explicit schemas are used to enforce column names, tags, fields, and data +types for your data.

+

By default, buckets have an implicit schema-type ("schemaType": "implicit") +that conforms to your data.

+ + +
Authorizations:
path Parameters
bucketID
required
string

A bucket ID. +Lists measurement schemas for the specified bucket.

+
query Parameters
name
string

A measurement name. +Only returns measurement schemas with the specified name.

+
org
string

An organization name. +Specifies the organization that owns the schema.

+
orgID
string

An organization ID. +Specifies the organization that owns the schema.

+

Responses

Response samples

Content type
application/json
{
  • "measurementSchemas": [
    ]
}

Create a measurement schema for a bucket

Creates an explicit measurement schema +for a bucket.

+

Explicit schemas are used to enforce column names, tags, fields, and data +types for your data.

+

By default, buckets have an implicit schema-type ("schemaType": "implicit") +that conforms to your data.

+

Use this endpoint to create schemas that prevent non-conforming write requests.

+

Limitations

  • Buckets must be created with the "explicit" schemaType in order to use schemas.
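A minimal sketch, assuming the /api/v2/buckets/BUCKET_ID/schema/measurements path for this operation and the column shape shown in the request sample below (all values are placeholders):

curl --request POST "INFLUX_URL/api/v2/buckets/BUCKET_ID/schema/measurements?orgID=INFLUX_ORG_ID" \
  --header "Authorization: Token INFLUX_API_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{
    "name": "cpu",
    "columns": [
      { "name": "time", "type": "timestamp" },
      { "name": "host", "type": "tag" },
      { "name": "usage_user", "type": "field", "dataType": "float" }
    ]
  }'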
Authorizations:
path Parameters
bucketID
required
string

A bucket ID. +Adds a schema for the specified bucket.

+
query Parameters
org
string

An organization name. +Specifies the organization that owns the schema.

+
orgID
string

An organization ID. +Specifies the organization that owns the schema.

+
Request Body schema: application/json
required
Array of objects (MeasurementSchemaColumn)

Ordered collection of column definitions.

+
name
required
string

The measurement +name.

+

Responses

Request samples

Content type
application/json
{
  • "columns": [
    ],
  • "name": "cpu"
}

Response samples

Content type
application/json
{
  • "bucketID": "ba3c5e7f9b0a0010",
  • "columns": [
    ],
  • "createdAt": "2021-01-21T00:48:40.993Z",
  • "id": "1a3c5e7f9b0a8642",
  • "name": "cpu",
  • "orgID": "0a3c5e7f9b0a0001",
  • "updatedAt": "2021-01-21T00:48:40.993Z"
}

Retrieve a measurement schema

Retrieves an explicit measurement schema.

+
Authorizations:
path Parameters
bucketID
required
string

A bucket ID. +Retrieves schemas for the specified bucket.

+
measurementID
required
string

The measurement schema ID. +Specifies the measurement schema to retrieve.

+
query Parameters
org
string

Organization name. +Specifies the organization that owns the schema.

+
orgID
string

Organization ID. +Specifies the organization that owns the schema.

+

Responses

Response samples

Content type
application/json
{
  • "bucketID": "ba3c5e7f9b0a0010",
  • "columns": [
    ],
  • "createdAt": "2021-01-21T00:48:40.993Z",
  • "id": "1a3c5e7f9b0a8642",
  • "name": "cpu",
  • "orgID": "0a3c5e7f9b0a0001",
  • "updatedAt": "2021-01-21T00:48:40.993Z"
}

Update a measurement schema

Updates a measurement schema.

+

Use this endpoint to update the fields (name, type, and dataType) of a +measurement schema.

+

Limitations

+
    +
  • You can't update the name of a measurement.
  • +
+ + +
Authorizations:
path Parameters
bucketID
required
string

A bucket ID. +Specifies the bucket to retrieve schemas for.

+
measurementID
required
string

A measurement schema ID. +Retrieves the specified measurement schema.

+
query Parameters
org
string

An organization name. +Specifies the organization that owns the schema.

+
orgID
string

An organization ID. +Specifies the organization that owns the schema.

+
Request Body schema: application/json
required
Array of objects (MeasurementSchemaColumn)

An ordered collection of column definitions

+

Responses

Request samples

Content type
application/json
{
  • "columns": [
    ]
}

Response samples

Content type
application/json
{
  • "bucketID": "ba3c5e7f9b0a0010",
  • "columns": [
    ],
  • "createdAt": "2021-01-21T00:48:40.993Z",
  • "id": "1a3c5e7f9b0a8642",
  • "name": "cpu",
  • "orgID": "0a3c5e7f9b0a0001",
  • "updatedAt": "2021-01-21T00:48:40.993Z"
}

Buckets

Store your data in InfluxDB buckets. A bucket is a named location where time series data is stored. All buckets have a retention period, a duration of time that each data point persists. InfluxDB drops all points with timestamps older than the bucket’s retention period. A bucket belongs to an organization.

+ + +

List buckets

Lists buckets.

+

InfluxDB retrieves buckets owned by the +organization +associated with the authorization +(API token). +To limit which buckets are returned, pass query parameters in your request. +If no query parameters are passed, InfluxDB returns all buckets up to the +default limit.

+

InfluxDB OSS

+
    +
  • If you use an operator token +to authenticate your request, InfluxDB retrieves resources for all +organizations in the instance. +To retrieve resources for only a specific organization, use the +org parameter or the orgID parameter to specify the organization.
  • +
+

Required permissions

Action                  | Permission required
Retrieve user buckets   | read-buckets
Retrieve system buckets | read-orgs
Authorizations:
query Parameters
after
string

A resource ID to seek from. +Returns records created after the specified record; +results don't include the specified record.

+

Use after instead of the offset parameter. +For more information about pagination parameters, see Pagination.

+
id
string

A bucket ID. +Only returns the bucket with the specified ID.

+
limit
integer [ 1 .. 100 ]
Default: 20

Limits the number of records returned. Default is 20.

+
name
string

A bucket name. +Only returns buckets with the specified name.

+
offset
integer >= 0

The offset for pagination. +The number of records to skip.

+

For more information about pagination parameters, see Pagination.

+
org
string

An organization name.

+

InfluxDB 3 Cloud Serverless

+
    +
  • Doesn't use the org parameter or orgID parameter.
  • +
  • Lists buckets for the organization associated with the authorization (API token).
  • +
+
orgID
string

An organization ID.

+

InfluxDB 3 Cloud Serverless

+
    +
  • Doesn't use the org parameter or orgID parameter.
  • +
  • Lists buckets for the organization associated with the authorization (API token).
  • +
+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Request samples

curl --request GET "http://localhost:8086/api/v2/buckets?name=_monitoring" \
  --header "Authorization: Token INFLUX_TOKEN" \
  --header "Accept: application/json" \
  --header "Content-Type: application/json"
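To page through results instead of filtering by name, a request can combine the limit and after parameters described above. The following is a sketch with placeholder host, token, and bucket ID.

# First page: return up to 50 buckets
curl --request GET "http://localhost:8086/api/v2/buckets?limit=50" \
  --header "Authorization: Token INFLUX_TOKEN" \
  --header "Accept: application/json"

# Next page: seek from the last bucket ID returned by the previous call
curl --request GET "http://localhost:8086/api/v2/buckets?limit=50&after=LAST_BUCKET_ID" \
  --header "Authorization: Token INFLUX_TOKEN" \
  --header "Accept: application/json"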

Response samples

Content type
application/json
{
  • "buckets": [
    ],
  • "links": {
    }
}

Create a bucket

Creates a bucket and returns the bucket resource. The default data retention period is 30 days.

InfluxDB OSS

  • A single InfluxDB OSS instance supports active writes or queries for approximately 20 buckets across all organizations at a given time. Reading or writing to more than 20 buckets at a time can adversely affect performance.

Limitations

  • InfluxDB Cloud Free Plan allows users to create up to two buckets. Exceeding the bucket quota will result in an HTTP 403 status code. For additional information regarding InfluxDB Cloud offerings, see InfluxDB Cloud Pricing.
Authorizations:
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

The bucket to create.

+
description
string

A description of the bucket.

+
name
required
string

The bucket name.

+
orgID
required
string

The organization ID. +Specifies the organization that owns the bucket.

+
Array of objects (RetentionRules)

Retention rules to expire or retain data. +The InfluxDB /api/v2 API uses RetentionRules to configure the retention period.

+

InfluxDB 3 Cloud Serverless

  • retentionRules is required.

InfluxDB OSS

  • retentionRules isn't required.
rp
string
Default: "0"

The retention policy for the bucket. +For InfluxDB 1.x, specifies the duration of time that each data point +in the retention policy persists.

+

If you need compatibility with InfluxDB 1.x, specify a value for the rp property; +otherwise, see the retentionRules property.

+

Retention policy +is an InfluxDB 1.x concept. +The InfluxDB 2.x and Cloud equivalent is +retention period. +The InfluxDB /api/v2 API uses RetentionRules to configure the retention period.

+
schemaType
string (SchemaType)
Enum: "implicit" "explicit"

Responses

Request samples

Content type
application/json
{
  • "description": "string",
  • "name": "string",
  • "orgID": "string",
  • "retentionRules": [
    ],
  • "rp": "0",
  • "schemaType": "implicit"
}
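A minimal sketch of a create request with curl. The host, token, and organization ID are placeholders, and the retention rule assumes the common expire type with a 30-day everySeconds value.

curl --request POST "http://localhost:8086/api/v2/buckets" \
  --header "Authorization: Token INFLUX_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{
    "name": "air_sensor",
    "orgID": "INFLUX_ORG_ID",
    "description": "A bucket holding air sensor data",
    "retentionRules": [
      {"type": "expire", "everySeconds": 2592000}
    ]
  }'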

Response samples

Content type
application/json
{
  • "createdAt": "2022-08-03T23:04:41.073704121Z",
  • "description": "A bucket holding air sensor data",
  • "id": "37407e232b3911d8",
  • "labels": [ ],
  • "links": {
    },
  • "name": "air_sensor",
  • "orgID": "INFLUX_ORG_ID",
  • "retentionRules": [
    ],
  • "schemaType": "implicit",
  • "type": "user",
  • "updatedAt": "2022-08-03T23:04:41.073704228Z"
}

Delete a bucket

Deletes a bucket and all associated records.

InfluxDB 3 Cloud Serverless

  • Does the following when you send a delete request:

    1. Validates the request and queues the delete.
    2. Returns an HTTP 204 status code if queued; error otherwise.
    3. Handles the delete asynchronously.

Limitations

  • Only one bucket can be deleted per request.
Authorizations:
path Parameters
bucketID
required
string

Bucket ID. +The ID of the bucket to delete.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Request samples

curl --request DELETE "http://localhost:8086/api/v2/buckets/BUCKET_ID" \
  --header "Authorization: Token INFLUX_TOKEN" \
  --header 'Accept: application/json'

Response samples

Content type
application/json
{
  • "code": "invalid",
  • "message": "id must have a length of 16 bytes"
}

Retrieve a bucket

Retrieves a bucket.

+

Use this endpoint to retrieve information for a specific bucket.

+
Authorizations:
path Parameters
bucketID
required
string

The ID of the bucket to retrieve.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "createdAt": "2022-08-03T23:04:41.073704121Z",
  • "description": "bucket for air sensor data",
  • "id": "37407e232b3911d8",
  • "labels": [ ],
  • "links": {
    },
  • "name": "air-sensor",
  • "orgID": "bea7ea952287f70d",
  • "retentionRules": [
    ],
  • "schemaType": "implicit",
  • "type": "user",
  • "updatedAt": "2022-08-03T23:04:41.073704228Z"
}

Update a bucket

Updates a bucket.

Use this endpoint to update properties (name, description, and retentionRules) of a bucket.

InfluxDB 3 Cloud Serverless

  • Requires the retentionRules property in the request body. If you don't provide retentionRules, InfluxDB responds with an HTTP 403 status code.
Authorizations:
path Parameters
bucketID
required
string

The bucket ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

The bucket update to apply.

+
description
string

A description of the bucket.

+
name
string

The name of the bucket.

+
Array of objects (PatchRetentionRules)

Updates to rules to expire or retain data. No rules means no updates.

+

Responses

Request samples

Content type
application/json
{
  • "description": "string",
  • "name": "string",
  • "retentionRules": [
    ]
}
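A minimal sketch of an update request with curl. The host, token, bucket ID, and the 90-day expire retention rule are placeholder values.

curl --request PATCH "http://localhost:8086/api/v2/buckets/BUCKET_ID" \
  --header "Authorization: Token INFLUX_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{
    "name": "air_sensor",
    "description": "Air sensor data, 90-day retention",
    "retentionRules": [
      {"type": "expire", "everySeconds": 7776000}
    ]
  }'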

Response samples

Content type
application/json
{
  • "createdAt": "2022-08-03T23:04:41.073704121Z",
  • "description": "bucket holding air sensor data",
  • "id": "37407e232b3911d8",
  • "labels": [ ],
  • "links": {
    },
  • "name": "air_sensor",
  • "orgID": "INFLUX_ORG_ID",
  • "retentionRules": [
    ],
  • "schemaType": "implicit",
  • "type": "user",
  • "updatedAt": "2022-08-07T22:49:49.422962913Z"
}

List all labels for a bucket

Lists all labels for a bucket.

Labels are objects that contain labelID, name, description, and color key-value pairs. They may be used for grouping and filtering InfluxDB resources. Labels are also capable of grouping across different resources--for example, you can apply a label named air_sensor to a bucket and a task to quickly organize resources.
Authorizations:
path Parameters
bucketID
required
string

The ID of the bucket to retrieve labels for.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "labels": [
    ],
  • "links": {
    }
}

Add a label to a bucket

Adds a label to a bucket and returns the new label information.

Labels are objects that contain labelID, name, description, and color key-value pairs. They may be used for grouping and filtering across one or more kinds of resources--for example, you can apply a label named air_sensor to a bucket and a task to quickly organize resources.

Limitations

  • Before adding a label to a bucket, you must create the label if you haven't already. To create a label with the InfluxDB API, send a POST request to the /api/v2/labels endpoint.
Authorizations:
path Parameters
bucketID
required
string

Bucket ID. +The ID of the bucket to label.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

An object that contains a labelID to add to the bucket.

+
labelID
required
string

A label ID. +Specifies the label to attach.

+

Responses

Request samples

Content type
application/json
{
  • "labelID": "string"
}
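A sketch of the two-step flow with curl--create the label, then attach it using the labelID returned by the first request. The host, token, and IDs are placeholders.

# 1. Create the label (if it doesn't exist yet)
curl --request POST "http://localhost:8086/api/v2/labels" \
  --header "Authorization: Token INFLUX_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{"orgID": "INFLUX_ORG_ID", "name": "air_sensor"}'

# 2. Attach the label to the bucket using the labelID from step 1
curl --request POST "http://localhost:8086/api/v2/buckets/BUCKET_ID/labels" \
  --header "Authorization: Token INFLUX_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{"labelID": "LABEL_ID"}'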

Response samples

Content type
application/json
{
  • "label": {
    },
  • "links": {
    }
}

Delete a label from a bucket

Authorizations:
path Parameters
bucketID
required
string

The bucket ID.

+
labelID
required
string

The ID of the label to delete.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "code": "internal error",
  • "err": "string",
  • "message": "string",
  • "op": "string"
}

List all users with member privileges for a bucket

Lists all users for a bucket.

+

InfluxDB users have +permission to access InfluxDB.

+

Members are users in +an organization with access to the specified resource.

+

Use this endpoint to retrieve all users with access to a bucket.

+ +
Authorizations:
path Parameters
bucketID
required
string

The ID of the bucket to retrieve users for.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "links": {
    },
  • "users": [
    ]
}

Add a member to a bucket

Adds a user to a bucket and returns the new user information.

InfluxDB users have permission to access InfluxDB.

Members are users in an organization.

Use this endpoint to give a user member privileges to a bucket.
Authorizations:
path Parameters
bucketID
required
string

The ID of the bucket to retrieve users for.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

A user to add as a member to the bucket.

+
id
required
string

The ID of the user to add to the resource.

+
name
string

The name of the user to add to the resource.

+

Responses

Request samples

Content type
application/json
{
  • "id": "string",
  • "name": "string"
}

Response samples

Content type
application/json
{
  • "id": "09cfb87051cbe000",
  • "links": {
    },
  • "name": "example_user_1",
  • "role": "member",
  • "status": "active"
}

Remove a member from a bucket

Removes a member from a bucket.

+

Use this endpoint to remove a user's member privileges from a bucket. This +removes the user's read and write permissions for the bucket.

+ + +
Authorizations:
path Parameters
bucketID
required
string

The ID of the bucket to remove a user from.

+
userID
required
string

The ID of the user to remove.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "code": "unauthorized",
  • "message": "unauthorized access"
}

List all owners of a bucket

Lists all owners +of a bucket.

Bucket owners have permission to delete buckets and remove user and member permissions from the bucket.

InfluxDB 3 Cloud Serverless uses /api/v2/authorizations to assign resource permissions; it doesn't use owner and member roles.

Limitations

  • Owner permissions are separate from API token permissions.
  • Owner permissions are used in the context of the InfluxDB UI.

Required permissions

  • read-orgs INFLUX_ORG_ID

INFLUX_ORG_ID is the ID of the organization that you want to retrieve a list of owners for.
Authorizations:
path Parameters
bucketID
required
string

The ID of the bucket to retrieve owners for.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "links": {
    },
  • "users": [
    ]
}

Add an owner to a bucket

Adds an owner to a bucket and returns the owners +with role and user detail.

Use this endpoint to create a resource owner for the bucket. Bucket owners have permission to delete buckets and remove user and member permissions from the bucket.

InfluxDB 3 Cloud Serverless uses /api/v2/authorizations to assign resource permissions; it doesn't use owner and member roles.

Limitations

  • Owner permissions are separate from API token permissions.
  • Owner permissions are used in the context of the InfluxDB UI.

Required permissions

  • write-orgs INFLUX_ORG_ID

INFLUX_ORG_ID is the ID of the organization that you want to add an owner for.
Authorizations:
path Parameters
bucketID
required
string

The ID of the bucket to add an owner for.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

A user to add as an owner for the bucket.

+
id
required
string

The ID of the user to add to the resource.

+
name
string

The name of the user to add to the resource.

+

Responses

Request samples

Content type
application/json
{
  • "id": "d88d182d91b0950f",
  • "links": {
    },
  • "name": "example-user",
  • "role": "owner",
  • "status": "active"
}

Response samples

Content type
application/json
{
  • "id": "string",
  • "links": {
    },
  • "name": "string",
  • "status": "active",
  • "role": "owner"
}

Remove an owner from a bucket

Removes an owner from a bucket.

Use this endpoint to remove a user's owner role for a bucket.

InfluxDB 3 Cloud Serverless uses /api/v2/authorizations to assign resource permissions; it doesn't use owner and member roles.

Limitations

  • Owner permissions are separate from API token permissions.
  • Owner permissions are used in the context of the InfluxDB UI.

Required permissions

  • write-orgs INFLUX_ORG_ID

INFLUX_ORG_ID is the ID of the organization that you want to remove an owner from.
Authorizations:
path Parameters
bucketID
required
string

The ID of the bucket to remove an owner from.

+
userID
required
string

The ID of the owner to remove.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "code": "unauthorized",
  • "message": "unauthorized access"
}

Delete

Delete data from an InfluxDB bucket.

+

Delete data

Deletes data from a bucket.

NOTE: This endpoint has been disabled for InfluxDB 3 Cloud Serverless organizations. See how to delete data.
Authorizations:
query Parameters
bucket
string

A bucket name or ID. +Specifies the bucket to delete data from. +If you pass both bucket and bucketID, bucketID takes precedence.

+
bucketID
string

A bucket ID. +Specifies the bucket to delete data from. +If you pass both bucket and bucketID, bucketID takes precedence.

+
org
string

An organization name or ID.

+

InfluxDB 3 Cloud Serverless

+
    +
  • Doesn't use the org parameter or orgID parameter.
  • +
+
orgID
string

An organization ID.

+

InfluxDB 3 Cloud Serverless

+
    +
  • Doesn't use the org parameter or orgID parameter.
  • +
+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

Time range parameters and an optional delete predicate expression.

+

To select points to delete within the specified time range, pass a delete predicate expression in the predicate property of the request body. If you don't pass a predicate, InfluxDB deletes all data with timestamps in the specified time range.

+
predicate
string

An expression in delete predicate syntax.

+
start
required
string <date-time>

A timestamp (RFC3339 date/time format). +The earliest time to delete from.

+
stop
required
string <date-time>

A timestamp (RFC3339 date/time format). +The latest time to delete from.

+

Responses

Request samples

Content type
application/json
{
  • "predicate": "tag1=\"value1\" and (tag2=\"value2\" and tag3!=\"value3\")",
  • "start": "2019-08-24T14:15:22Z",
  • "stop": "2019-08-24T14:15:22Z"
}
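A minimal sketch of the same request with curl. The host, token, organization, bucket, and predicate values are placeholders.

curl --request POST "http://localhost:8086/api/v2/delete?org=INFLUX_ORG&bucket=air_sensor" \
  --header "Authorization: Token INFLUX_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{
    "start": "2019-08-24T14:15:22Z",
    "stop": "2019-08-25T14:15:22Z",
    "predicate": "tag1=\"value1\""
  }'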

Response samples

Content type
application/json
{
  • "code": "invalid",
  • "message": "failed to decode request body: organization not found"
}

DBRPs

The InfluxDB 1.x data model includes databases and retention policies. InfluxDB 2.x replaces databases and retention policies with buckets. To support InfluxDB 1.x query and write patterns in InfluxDB 2.x, databases and retention policies are mapped to buckets using the database and retention policy (DBRP) mapping service. The DBRP mapping service uses the database and retention policy specified in 1.x compatibility API requests to route operations to a bucket.
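For example (a sketch with placeholder host, token, and data), once a DBRP mapping exists for database mydb and retention policy autogen, a 1.x-style write to the compatibility /write endpoint is routed to the mapped bucket:

# Placeholder values; the db and rp parameters select the DBRP mapping.
curl --request POST "http://localhost:8086/write?db=mydb&rp=autogen&precision=s" \
  --header "Authorization: Token INFLUX_TOKEN" \
  --data-binary 'cpu,host=host1 usage_user=23.2 1693526400'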

List database retention policy mappings

Lists database retention policy (DBRP) mappings.

+ + +
Authorizations:
query Parameters
bucketID
string

A bucket ID. +Only returns DBRP mappings that belong to the specified bucket.

+
db
string

A database. +Only returns DBRP mappings that belong to the 1.x database.

+
default
boolean

Specifies whether to filter on the default DBRP mapping for the database.

+
id
string

A DBRP mapping ID. Only returns the specified DBRP mapping.

+
org
string

An organization name. +Only returns DBRP mappings for the specified organization.

+
orgID
string

An organization ID. +Only returns DBRP mappings for the specified organization.

+
rp
string

A retention policy. +Specifies the 1.x retention policy to filter on.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "content": [
    ]
}

Add a database retention policy mapping

Creates a database retention policy (DBRP) mapping and returns the mapping.

+

Use this endpoint to add InfluxDB 1.x API compatibility to your InfluxDB Cloud or InfluxDB OSS 2.x buckets. Your buckets must contain a DBRP mapping in order to query and write using the InfluxDB 1.x API.
Authorizations:
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

The database retention policy mapping to add.

+

Note that retention_policy is a required parameter in the request body. The value of retention_policy can be any arbitrary string name or value, with the default value commonly set as autogen. The value of retention_policy isn't a retention policy.

+
bucketID
required
string

A bucket ID. +Identifies the bucket used as the target for the translation.

+
database
required
string

A database name. +Identifies the InfluxDB v1 database.

+
default
boolean

Set to true to use this DBRP mapping as the default retention policy +for the database (specified by the database property's value).

+
org
string

An organization name. +Identifies the organization that owns the mapping.

+
orgID
string

An organization ID. +Identifies the organization that owns the mapping.

+
retention_policy
required
string

A retention policy name. +Identifies the InfluxDB v1 retention policy mapping.

+

Responses

Request samples

Content type
application/json
{
  • "bucketID": "string",
  • "database": "string",
  • "default": true,
  • "org": "string",
  • "orgID": "string",
  • "retention_policy": "string"
}
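A minimal sketch of the request with curl. The host, token, and IDs are placeholders.

curl --request POST "http://localhost:8086/api/v2/dbrps" \
  --header "Authorization: Token INFLUX_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{
    "bucketID": "BUCKET_ID",
    "orgID": "INFLUX_ORG_ID",
    "database": "example_database",
    "retention_policy": "autogen",
    "default": true
  }'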

Response samples

Content type
application/json
{
  • "bucketID": "4d4d9d5b61dee751",
  • "database": "example_database",
  • "default": true,
  • "id": "0a3cbb5dd526a000",
  • "orgID": "bea7ea952287f70d",
  • "retention_policy": "autogen"
}

Delete a database retention policy

Deletes the specified database retention policy (DBRP) mapping.

+ + +
Authorizations:
path Parameters
dbrpID
required
string

A DBRP mapping ID. +Only returns the specified DBRP mapping.

+
query Parameters
org
string

An organization name. +Specifies the organization that owns the DBRP mapping.

+
orgID
string

An organization ID. +Specifies the organization that owns the DBRP mapping.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json

The query parameters contain invalid values.

+
{
  • "code": "invalid",
  • "message": "invalid ID"
}

Retrieve a database retention policy mapping

Retrieves the specified database retention policy (DBRP) mapping.

+ + +
Authorizations:
path Parameters
dbrpID
required
string

A DBRP mapping ID. +Specifies the DBRP mapping.

+
query Parameters
org
string

An organization name. +Specifies the organization that owns the DBRP mapping.

+
orgID
string

An organization ID. +Specifies the organization that owns the DBRP mapping.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "content": {
    }
}

Update a database retention policy mapping

Authorizations:
path Parameters
dbrpID
required
string

A DBRP mapping ID. +Specifies the DBRP mapping.

+
query Parameters
org
string

An organization name. +Specifies the organization that owns the DBRP mapping.

+
orgID
string

An organization ID. +Specifies the organization that owns the DBRP mapping.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

Updates the database retention policy (DBRP) mapping and returns the mapping.

+

Use this endpoint to modify the retention policy (retention_policy property) of a DBRP mapping.

+ + +
default
boolean

Set to true to use this DBRP mapping as the default retention policy +for the database (specified by the database property's value). +To remove the default mapping, set to false.

+
retention_policy
string

A retention policy name. +Identifies the InfluxDB v1 retention policy mapping.

+

Responses

Request samples

Content type
application/json
{
  • "default": true,
  • "retention_policy": "string"
}

Response samples

Content type
application/json
{
  • "content": {
    }
}

Invokable Scripts

Store, manage, and execute scripts in InfluxDB. A script stores your custom Flux script and provides an invokable endpoint that accepts runtime parameters. In a script, you can specify custom runtime parameters (params)--for example, params.myparameter. Once you create a script, InfluxDB generates an /api/v2/scripts/SCRIPT_ID/invoke endpoint for your organization. You can run the script from API requests and tasks, defining parameter values for each run. When the script runs, InfluxDB replaces params references in the script with the runtime parameter values you define.

Use the /api/v2/scripts endpoints to create and manage scripts. See related guides to learn how to define parameters and execute scripts.

List scripts

Lists scripts.

+ +
Authorizations:
query Parameters
limit
integer [ 0 .. 500 ]
Default: 100

The maximum number of scripts to return. Default is 100.

+
name
string

The script name. Lists scripts with the specified name.

+
offset
integer >= 0

The offset for pagination. +The number of records to skip.

+

For more information about pagination parameters, see Pagination.

+

Responses

Request samples

curl --request GET "INFLUX_URL/api/v2/scripts?limit=100&offset=0" \
+  --header "Authorization: Token INFLUX_API_TOKEN" \
+  --header "Accept: application/json" \
+  --header "Content-Type: application/json"
+

Response samples

Content type
application/json
{
  • "scripts": [
    ]
}

Create a script

Creates an invokable script +and returns the script.

+ +
Authorizations:
Request Body schema: application/json

The script to create.

+
description
required
string

A description of the script.

+
language
required
string (ScriptLanguage)
Enum: "flux" "sql"
name
required
string

Script name. The name must be unique within the organization.

+
script
required
string

The script to execute.

+

Responses

Request samples

Content type
application/json
{
  • "description": "string",
  • "language": "flux",
  • "name": "string",
  • "script": "string"
}

Response samples

Content type
application/json
{
  • "createdAt": "2022-07-17T23:43:26.660308Z",
  • "description": "getLastPoint finds the last point in a bucket",
  • "id": "09afa23ff13e4000",
  • "language": "flux",
  • "name": "getLastPoint",
  • "orgID": "bea7ea952287f70d",
  • "script": "from(bucket: params.mybucket) |> range(start: -7d) |> limit(n:1)",
  • "updatedAt": "2022-07-17T23:43:26.660308Z"
}

Delete a script

Deletes a script and all associated records.

+

Limitations

  • You can delete only one script per request.
  • If the script ID you provide doesn't exist for the organization, InfluxDB responds with an HTTP 204 status code.
Authorizations:
path Parameters
scriptID
required
string

A script ID. +Deletes the specified script.

+

Responses

Request samples

curl -X 'DELETE' \
  "https://cloud2.influxdata.com/api/v2/scripts/SCRIPT_ID" \
  --header "Authorization: Token INFLUX_TOKEN" \
  --header 'Accept: application/json'

Response samples

Content type
application/json
{
  • "code": "unauthorized",
  • "message": "unauthorized access"
}

Retrieve a script

Retrieves a script.

+ +
Authorizations:
path Parameters
scriptID
required
string

A script ID. +Retrieves the specified script.

+

Responses

Response samples

Content type
application/json
{
  • "createdAt": "2022-07-17T23:49:45.731237Z",
  • "description": "getLastPoint finds the last point in a bucket",
  • "id": "09afa3b220fe4000",
  • "language": "flux",
  • "name": "getLastPoint",
  • "orgID": "bea7ea952287f70d",
  • "script": "from(bucket: my-bucket) |> range(start: -7d) |> limit(n:1)",
  • "updatedAt": "2022-07-17T23:49:45.731237Z"
}

Update a script

Updates an invokable script.

+

Use this endpoint to modify values for script properties (description and script).

+

To update a script, pass an object that contains the updated key-value pairs.

+

Limitations

  • If you send an empty request body, the script will neither update nor store an empty script, but InfluxDB will respond with an HTTP 200 status code.
Authorizations:
path Parameters
scriptID
required
string

A script ID. +Updates the specified script.

+
Request Body schema: application/json

An object that contains the updated script properties to apply.

+
description
string

A description of the script.

+
script
string

The script to execute.

+

Responses

Request samples

Content type
application/json
{
  • "description": "string",
  • "script": "string"
}

Response samples

Content type
application/json
{
  • "createdAt": "2022-07-17T23:49:45.731237Z",
  • "description": "get last point from new bucket",
  • "id": "09afa3b220fe4000",
  • "language": "flux",
  • "name": "getLastPoint",
  • "orgID": "bea7ea952287f70d",
  • "script": "from(bucket: newBucket) |> range(start: -7d) |> limit(n:1)",
  • "updatedAt": "2022-07-19T22:27:23.185436Z"
}

Invoke a script

Runs a script and returns the result. When the script runs, InfluxDB replaces params keys referenced in the script with params key-values passed in the request body--for example:

The following sample script contains a mybucket parameter:

"script": "from(bucket: params.mybucket)
            |> range(start: -7d)
            |> limit(n:1)"

The following example POST /api/v2/scripts/SCRIPT_ID/invoke request body passes a value for the mybucket parameter:

{
  "params": {
    "mybucket": "air_sensor"
  }
}
Authorizations:
path Parameters
scriptID
required
string

A script ID. +Runs the specified script.

+
Request Body schema: application/json
object

The script parameters. +params contains key-value pairs that map values to the params.keys +in a script. +When you invoke a script with params, InfluxDB passes the values as +invocation parameters to the script.

+

Responses

Request samples

Content type
application/json
{
  • "params": { }
}
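A minimal sketch of the invoke request with curl, using the params body shown above. The host, token, and script ID are placeholders.

curl --request POST "https://cloud2.influxdata.com/api/v2/scripts/SCRIPT_ID/invoke" \
  --header "Authorization: Token INFLUX_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{"params": {"mybucket": "air_sensor"}}'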

Response samples

Content type
text/csv
,result,table,_start,_stop,_time,_value,_field,_measurement,host
,_result,0,2019-10-30T01:28:02.52716421Z,2022-07-26T01:28:02.52716421Z,2020-01-01T00:00:00Z,72.01,used_percent,mem,host2

Find script parameters

Analyzes a script and determines required parameters. Finds all params keys referenced in a script and returns a list of keys. If it is possible to determine the type of the value from the context, then the type is also returned--for example:

The following sample script contains a mybucket parameter:

"script": "from(bucket: params.mybucket)
            |> range(start: -7d)
            |> limit(n:1)"

Requesting the parameters using GET /api/v2/scripts/SCRIPT_ID/params returns the following:

{
  "params": {
    "mybucket": "string"
  }
}

The type name returned for a parameter will be one of:

  • any
  • bool
  • duration
  • float
  • int
  • string
  • time
  • uint

The type name any is used when the type of a parameter cannot be determined from the context, or the type is determined to be a structured type such as an array or record.
Authorizations:
path Parameters
scriptID
required
string

A script ID. +The script to analyze for params.

+

Responses

Request samples

curl --request GET "https://cloud2.influxdata.com/api/v2/scripts/SCRIPT_ID/params" \
+          --header "Authorization: Token INFLUX_TOKEN"
+

Response samples

Content type
application/json
{
  • "params": {
    }
}

Limits

Retrieve limits for an organization

Authorizations:
path Parameters
orgID
required
string

The ID of the organization.

+

Responses

Response samples

Content type
application/json
{
  • "limits": {
    },
  • "links": {}
}

Organizations

Manage your organization. An organization is a workspace for a group of users. Organizations can be used to separate different environments, projects, teams, or users within InfluxDB.

+

Use the /api/v2/orgs endpoints to view and manage organizations.

+

List organizations

Lists organizations.

+

InfluxDB 3 Cloud Serverless only returns the organization that owns the token passed in the request.

+ + +
Authorizations:
query Parameters
descending
boolean
Default: false
limit
integer [ 1 .. 100 ]
Default: 20

Limits the number of records returned. Default is 20.

+
offset
integer >= 0

The offset for pagination. +The number of records to skip.

+

For more information about pagination parameters, see Pagination.

+
org
string

An organization name. +Only returns the specified organization.

+
orgID
string

An organization ID. +Only returns the specified organization.

+
userID
string

A user ID. +Only returns organizations where the specified user is a member or owner.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "links": {
    },
  • "orgs": [
    ]
}

Create an organization

Creates an organization +and returns the newly created organization.

+

InfluxDB 3 Cloud Serverless doesn't allow you to use this endpoint to create organizations.

+ + +
Authorizations:
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

The organization to create.

+
description
string

The description of the organization.

+
name
required
string

The name of the organization.

+

Responses

Request samples

Content type
application/json
{
  • "description": "string",
  • "name": "string"
}

Response samples

Content type
application/json
{
  • "createdAt": "2022-08-24T23:05:52.881317Z",
  • "description": "",
  • "id": "INFLUX_ORG_ID",
  • "links": {
    },
  • "name": "INFLUX_ORG",
  • "updatedAt": "2022-08-24T23:05:52.881318Z"
}

Delete an organization

Deletes an organization.

Deleting an organization from InfluxDB Cloud can't be undone. Once deleted, all data associated with the organization is removed.

InfluxDB Cloud

  • Does the following when you send a delete request:

    1. Validates the request and queues the delete.
    2. Returns an HTTP 204 status code if queued; error otherwise.
    3. Handles the delete asynchronously.

InfluxDB OSS

  • Validates the request, handles the delete synchronously, and then responds with success or failure.

Limitations

  • Only one organization can be deleted per request.
Authorizations:
path Parameters
orgID
required
string

The ID of the organization to delete.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "code": "invalid",
  • "message": "failed to decode request body: organization not found"
}

Retrieve an organization

Retrieves an organization.

+

Use this endpoint to retrieve information for a specific organization.

+ + +
Authorizations:
path Parameters
orgID
required
string

The ID of the organization to retrieve.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "createdAt": "2019-08-24T14:15:22Z",
  • "defaultStorageType": "tsm",
  • "description": "string",
  • "id": "string",
  • "links": {
    },
  • "name": "string",
  • "status": "active",
  • "updatedAt": "2019-08-24T14:15:22Z"
}

Update an organization

Updates an organization.

Use this endpoint to update properties (name, description) of an organization.

Updating an organization’s name affects all resources that reference the organization by name, including the following:

  • Queries
  • Dashboards
  • Tasks
  • Telegraf configurations
  • Templates

If you change an organization name, be sure to update the organization name in these resources as well.
+ + +
Authorizations:
path Parameters
orgID
required
string

The ID of the organization to update.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

The organization update to apply.

+
description
string

The description of the organization.

+
name
string

The name of the organization.

+

Responses

Request samples

Content type
application/json
{
  • "description": "string",
  • "name": "string"
}

Response samples

Content type
application/json
{
  • "createdAt": "2019-08-24T14:15:22Z",
  • "defaultStorageType": "tsm",
  • "description": "string",
  • "id": "string",
  • "links": {
    },
  • "name": "string",
  • "status": "active",
  • "updatedAt": "2019-08-24T14:15:22Z"
}

List all members of an organization

Lists all users that belong to an organization.

+

InfluxDB 3 Cloud Serverless doesn't use owner and member roles. +Use /api/v2/authorizations to manage resource permissions.

+
Authorizations:
path Parameters
orgID
required
string

The ID of the organization to retrieve users for.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "links": {
    },
  • "users": [
    ]
}

Add a member to an organization

Adds a user to an organization.

+

InfluxDB 3 Cloud Serverless doesn't use owner and member roles. +Use /api/v2/authorizations to manage resource permissions.

+
Authorizations:
path Parameters
orgID
required
string

The ID of the organization.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

The user to add to the organization.

+
id
required
string

The ID of the user to add to the resource.

+
name
string

The name of the user to add to the resource.

+

Responses

Request samples

Content type
application/json
{
  • "id": "string",
  • "name": "string"
}

Response samples

Content type
application/json
{
  • "id": "09cfb87051cbe000",
  • "links": {
    },
  • "name": "example_user_1",
  • "role": "member",
  • "status": "active"
}

Remove a member from an organization

Removes a member from an organization.

+

InfluxDB 3 Cloud Serverless doesn't use owner and member roles. +Use /api/v2/authorizations to manage resource permissions.

+
Authorizations:
path Parameters
orgID
required
string

The ID of the organization to remove a user from.

+
userID
required
string

The ID of the user to remove.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "code": "unauthorized",
  • "message": "unauthorized access"
}

List all owners of an organization

Lists all owners of an organization.

+

InfluxDB 3 Cloud Serverless doesn't use owner and member roles. +Use /api/v2/authorizations to manage resource permissions.

+
Authorizations:
path Parameters
orgID
required
string

The ID of the organization to list owners for.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "links": {
    },
  • "users": [
    ]
}

Add an owner to an organization

Adds an owner to an organization.

+

InfluxDB 3 Cloud Serverless doesn't use owner and member roles. +Use /api/v2/authorizations to manage resource permissions.

+
Authorizations:
path Parameters
orgID
required
string

The ID of the organization that you want to add an owner for.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

The user to add as an owner of the organization.

+
id
required
string

The ID of the user to add to the resource.

+
name
string

The name of the user to add to the resource.

+

Responses

Request samples

Content type
application/json
{
  • "id": "09cfb87051cbe000",
  • "links": {
    },
  • "name": "example_user_1",
  • "role": "owner",
  • "status": "active"
}

Response samples

Content type
application/json
{
  • "id": "string",
  • "links": {
    },
  • "name": "string",
  • "status": "active",
  • "role": "owner"
}

Remove an owner from an organization

Removes an owner from +the organization.

+

InfluxDB 3 Cloud Serverless doesn't use owner and member roles. +Use /api/v2/authorizations to manage resource permissions.

+
Authorizations:
path Parameters
orgID
required
string

The ID of the organization to remove an owner from.

+
userID
required
string

The ID of the user to remove.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "code": "unauthorized",
  • "message": "unauthorized access"
}

Query

Query data stored in a bucket.

  • HTTP clients can query the v1 /query endpoint using InfluxQL and retrieve data in CSV or JSON format.
  • Flight + gRPC clients can query using SQL or InfluxQL and retrieve data in Arrow format.

Query data Deprecated

Retrieves data from buckets.

+

This endpoint isn't supported in InfluxDB 3 Cloud Serverless.

+

See how to query data.

+
Authorizations:
query Parameters
org
string

An organization name or ID.

+

InfluxDB 3 Cloud Serverless

+
    +
  • Doesn't use the org parameter or orgID parameter.
  • +
+
orgID
string

An organization ID.

+

InfluxDB 3 Cloud Serverless

+
    +
  • Doesn't use the org parameter or orgID parameter.
  • +
+
header Parameters
Accept-Encoding
string
Default: identity
Enum: "gzip" "identity"

The content encoding (usually a compression algorithm) that the client can understand.

+
Content-Type
string
Enum: "application/json" "application/vnd.flux"
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema:

Flux query or specification to execute

+
object (Dialect)

Options for tabular data output. +Default output is annotated CSV with headers.

+

For more information about tabular data dialect, +see W3 metadata vocabulary for tabular data.

+
object (File)

Represents a source from a single file

+
now
string <date-time>

Specifies the time that should be reported as now in the query. +Default is the server now time.

+
object

Key-value pairs passed as parameters during query execution.

+

To use parameters in your query, pass a query with params references (in dot notation)--for example:

  query: "from(bucket: params.mybucket)\
              |> range(start: params.rangeStart) |> limit(n:1)"

and pass params with the key-value pairs--for example:

  params: {
    "mybucket": "environment",
    "rangeStart": "-30d"
  }

During query execution, InfluxDB passes params to your script and substitutes the values.

Limitations

  • If you use params, you can't use extern.
query
required
string

The query script to execute.

+
type
string
Value: "flux"

The type of query. Must be "flux".

+

Responses

Request samples

Content type
{
  • "dialect": {
    },
  • "extern": {
    },
  • "now": "2019-08-24T14:15:22Z",
  • "params": { },
  • "query": "string",
  • "type": "flux"
}

Response samples

Content type
application/json
{
  • "code": "invalid",
  • "message": "failed to decode request body: organization not found"
}

Analyze a Flux query Deprecated

This endpoint isn't supported in InfluxDB 3 Cloud Serverless. +See how to query data.

+
Authorizations:
header Parameters
Content-Type
string
Value: "application/json"
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

Flux query to analyze

+
object (Dialect)

Options for tabular data output. +Default output is annotated CSV with headers.

+

For more information about tabular data dialect, +see W3 metadata vocabulary for tabular data.

+
object (File)

Represents a source from a single file

+
now
string <date-time>

Specifies the time that should be reported as now in the query. +Default is the server now time.

+
object

Key-value pairs passed as parameters during query execution.

+

To use parameters in your query, pass a query with params references (in dot notation)--for example:

  query: "from(bucket: params.mybucket)\
              |> range(start: params.rangeStart) |> limit(n:1)"

and pass params with the key-value pairs--for example:

  params: {
    "mybucket": "environment",
    "rangeStart": "-30d"
  }

During query execution, InfluxDB passes params to your script and substitutes the values.

Limitations

  • If you use params, you can't use extern.
query
required
string

The query script to execute.

+
type
string
Value: "flux"

The type of query. Must be "flux".

+

Responses

Request samples

Content type
application/json
{
  • "dialect": {
    },
  • "extern": {
    },
  • "now": "2019-08-24T14:15:22Z",
  • "params": { },
  • "query": "string",
  • "type": "flux"
}

Response samples

Content type
application/json

Returns an error object if the Flux query is missing a property key.

+

The following sample query is missing the bucket property key:

+
{
  "query": "from(: \"iot_center\")\
  ...
}
{
  • "errors": [
    ]
}

Generate a query Abstract Syntax Tree (AST) Deprecated

This endpoint isn't supported in InfluxDB 3 Cloud Serverless. +See how to query data.

+
Authorizations:
header Parameters
Content-Type
string
Value: "application/json"
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

The Flux query to analyze.

+
query
required
string

The Flux query script to be analyzed.

+

Responses

Request samples

Content type
application/json
{
  • "query": "string"
}

Response samples

Content type
application/json

If the request body contains a missing property key in from(), +returns invalid and problem detail.

+
{
  • "code": "invalid",
  • "message": "invalid AST: loc 1:6-1:19: missing property key"
}

List Flux query suggestions Deprecated

This endpoint isn't supported in InfluxDB 3 Cloud Serverless. +See how to query data.

+
Authorizations:
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Request samples

curl --request GET "INFLUX_URL/api/v2/query/suggestions" \
+  --header "Accept: application/json" \
+  --header "Authorization: Token INFLUX_API_TOKEN"
+

Response samples

Content type
text/html

The URL has been permanently moved. Use /api/v2/query/suggestions.

+
<a href="/api/v2/query/suggestions?orgID=INFLUX_ORG_ID">Moved Permanently</a>
+

Retrieve a query suggestion for a branching suggestion Deprecated

This endpoint isn't supported in InfluxDB 3 Cloud Serverless. +See how to query data.

+
Authorizations:
path Parameters
name
required
string

A Flux function name.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json

The requested function doesn't exist.

+
{
  • "code": "internal error",
  • "message": "An internal error has occurred"
}

Query using the InfluxDB v1 HTTP API

Queries InfluxDB using InfluxQL with InfluxDB v1 request and response formats.

+ + +
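A minimal sketch of a request with curl, using --data-urlencode with --get to build the query string. The host, token, database, retention policy, and InfluxQL statement are placeholders.

curl --get "http://localhost:8086/query" \
  --header "Authorization: Token INFLUX_TOKEN" \
  --header "Accept: application/json" \
  --data-urlencode "db=mydb" \
  --data-urlencode "rp=autogen" \
  --data-urlencode "q=SELECT * FROM cpu WHERE time > now() - 1h"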
Authorizations:
query Parameters
db
required
string

The database to query data from. +This is mapped to an InfluxDB bucket. +For more information, see Database and retention policy mapping.

+
epoch
string
Enum: "ns" "u" "µ" "ms" "s" "m" "h"

A unix timestamp precision. Formats timestamps as unix (epoch) timestamps with the specified precision instead of RFC3339 timestamps with nanosecond precision.

+
p
string

The InfluxDB 1.x password to authenticate the request.

+
q
required
string

The InfluxQL query to execute. To execute multiple queries, delimit queries with a semicolon (;).

+
rp
string

The retention policy to query data from. +This is mapped to an InfluxDB bucket. +For more information, see Database and retention policy mapping.

+
u
string

The InfluxDB 1.x username to authenticate the request.

+
header Parameters
Accept
string
Default: application/json
Enum: "application/json" "application/csv" "text/csv" "application/x-msgpack"

Media type that the client can understand.

+

Note: With application/csv, query results include unix timestamps instead of RFC3339 timestamps.

+
Accept-Encoding
string
Default: identity
Enum: "gzip" "identity"

The content encoding (usually a compression algorithm) that the client can understand.

+
Content-Type
string
Value: "application/json"
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
No sample

Resources

List all known resources

Authorizations:
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
[
  • "string"
]

Routes

List all top level routes

Retrieves all the top level routes for the InfluxDB API.

+

Limitations

  • Only returns top level routes--for example, the response contains "tasks":"/api/v2/tasks", and doesn't contain resource-specific routes for tasks (/api/v2/tasks/TASK_ID/...).
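A minimal sketch of the request with curl, assuming the route list is served at the /api/v2/ root. The host and token are placeholders.

curl --request GET "http://localhost:8086/api/v2/" \
  --header "Authorization: Token INFLUX_TOKEN" \
  --header "Accept: application/json"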
Authorizations:
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{}

Secrets

List all secret keys for an organization

Authorizations:
path Parameters
orgID
required
string

The organization ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "secrets": [
    ],
  • "links": {
    }
}

Update secrets in an organization

Authorizations:
path Parameters
orgID
required
string

The organization ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

Secret key value pairs to update/add

+
property name*
string

Responses

Request samples

Content type
application/json
{
  • "apikey": "abc123xyz"
}

Response samples

Content type
application/json
{
  • "code": "internal error",
  • "err": "string",
  • "message": "string",
  • "op": "string"
}

Delete a secret from an organization

Authorizations:
path Parameters
orgID
required
string

The organization ID.

+
secretID
required
string

The secret ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "code": "internal error",
  • "err": "string",
  • "message": "string",
  • "op": "string"
}

Delete secrets from an organization Deprecated

Authorizations:
path Parameters
orgID
required
string

The organization ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

Secret key to delete

+
secrets
Array of strings

Responses

Request samples

Content type
application/json
{
  • "secrets": [
    ]
}

Response samples

Content type
application/json
{
  • "code": "internal error",
  • "err": "string",
  • "message": "string",
  • "op": "string"
}

Tasks

Process and analyze your data with tasks in the InfluxDB task engine. Use the /api/v2/tasks endpoints to schedule and manage tasks, retry task runs, and retrieve run logs.

To configure a task, provide the script and the schedule to run the task. For examples, see how to create a task with the POST /api/v2/tasks endpoint.

Properties

+

A task object contains information about an InfluxDB task resource.

+

The following table defines the properties that appear in this object:

+
authorizationID
string

An authorization ID. +Specifies the authorization used when the task communicates with the query engine.

+

To find an authorization ID, use the +GET /api/v2/authorizations endpoint to +list authorizations.

+
createdAt
string <date-time>
cron
string

A Cron expression that defines the schedule on which the task runs. InfluxDB uses the system time when evaluating Cron expressions.

+
description
string

A description of the task.

+
every
string <duration>

The interval (duration literal) at which the task runs. every also determines when the task first runs, depending on the specified time.

+
flux
string <flux>

The Flux script that the task executes.

+

Limitations

  • If you use the flux property, you can't use the scriptID and scriptParameters properties.
id
required
string
Array of objects (Labels)
lastRunError
string
lastRunStatus
string
Enum: "failed" "success" "canceled"
latestCompleted
string <date-time>

A timestamp (RFC3339 date/time format) of the latest scheduled and completed run.

+
object
name
required
string

The name of the task.

+
offset
string <duration>

A duration to delay execution of the task after the scheduled time has elapsed. 0 removes the offset.

+
org
string

An organization name. +Specifies the organization that owns the task.

+
orgID
required
string

An organization ID. +Specifies the organization that owns the task.

+
ownerID
string

A user ID. +Specifies the owner of the task.

+

To find a user ID, you can use the +GET /api/v2/users endpoint to +list users.

+
scriptID
string

A script ID. +Specifies the invokable script that the task executes.

+

Limitations

  • If you use the scriptID property, you can't use the flux property.
scriptParameters
object

Key-value pairs for params in the script. +Defines the invocation parameter values passed to the script specified by scriptID. +When running the task, InfluxDB executes the script with the parameters +you provide.

+

Limitations

  • To use scriptParameters, you must provide a scriptID.
  • If you use the scriptID and scriptParameters properties, you can't use the flux property.
status
string (TaskStatusType)
Enum: "active" "inactive"

inactive cancels scheduled runs and prevents manual runs of the task.

+
updatedAt
string <date-time>
{
  • "authorizationID": "string",
  • "createdAt": "2019-08-24T14:15:22Z",
  • "cron": "string",
  • "description": "string",
  • "every": "string",
  • "flux": "string",
  • "id": "string",
  • "labels": [
    ],
  • "lastRunError": "string",
  • "lastRunStatus": "failed",
  • "latestCompleted": "2019-08-24T14:15:22Z",
  • "links": {
    },
  • "name": "string",
  • "offset": "string",
  • "org": "string",
  • "orgID": "string",
  • "ownerID": "string",
  • "scriptID": "string",
  • "scriptParameters": { },
  • "status": "active",
  • "updatedAt": "2019-08-24T14:15:22Z"
}
+

List all tasks

Retrieves a list of tasks.

+

To limit which tasks are returned, pass query parameters in your request. +If no query parameters are passed, InfluxDB returns all tasks up to the default limit.

+
Authorizations:
query Parameters
after
string

A task ID. +Only returns tasks created after the specified task.

+
limit
integer [ -1 .. 500 ]
Default: 100
Examples:
  • limit=-1 - Return all tasks, without pagination.
  • limit=50 - Return a maximum of 50 tasks.

The maximum number of tasks to return. Default is 100. The minimum is 1 and the maximum is 500. Pass -1 to return all tasks, without pagination.

+

To reduce the payload size, combine type=basic and limit (see Request samples). +For more information about the basic response, see the type parameter.

+
name
string

A task name. +Only returns tasks with the specified name. +Different tasks may have the same name.

+
offset
integer >= 0
Default: 0

The number of records to skip.

+
org
string

An organization name. +Only returns tasks owned by the specified organization.

+
orgID
string

An organization ID. +Only returns tasks owned by the specified organization.

+
scriptID
string

A script ID. +Only returns tasks that use the specified invokable script.

+
sortBy
string
Value: "name"

The sort field. Only name is supported. +Specifies the field used to sort records in the list.

+
status
string
Enum: "active" "inactive"

A task status. +Only returns tasks that have the specified status (active or inactive).

+
type
string
Default: ""
Enum: "basic" "system"

A task type (basic or system). +Default is system. +Specifies the level of detail for tasks in the response. +The default (system) response contains all the metadata properties for tasks. +To reduce the response size, pass basic to omit some task properties (flux, createdAt, updatedAt).

+
user
string

A user ID. +Only returns tasks owned by the specified user.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Request samples

curl --request GET "INFLUX_URL/api/v2/tasks?limit=-1&type=basic" \
  --header 'Content-Type: application/json' \
  --header 'Authorization: Token INFLUX_API_TOKEN'

Response samples

Content type
application/json
Example

A sample response body for the ?type=basic parameter. type=basic omits some task fields (createdAt and updatedAt) and field values (org, flux) in the response.

+
{
  • "links": {
    },
  • "tasks": [
    ]
}

Create a task

Creates a task and returns the task.

+

Use this endpoint to create a scheduled task that runs a Flux script.

+

InfluxDB Cloud

  • You can use either flux or scriptID to provide the task script.

    • flux: a string of "raw" Flux that contains task options and the script--for example:

      {
        "flux": "option task = {name: \"CPU Total 1 Hour New\", every: 1h}\
        from(bucket: \"telegraf\")
          |> range(start: -1h)
          |> filter(fn: (r) => (r._measurement == \"cpu\"))
          |> filter(fn: (r) =>\n\t\t(r._field == \"usage_system\"))
          |> filter(fn: (r) => (r.cpu == \"cpu-total\"))
          |> aggregateWindow(every: 1h, fn: max)
          |> to(bucket: \"cpu_usage_user_total_1h\", org: \"INFLUX_ORG\")",
        "status": "active",
        "description": "This task downsamples CPU data every hour"
      }

    • scriptID: the ID of an invokable script for the task to run.
      To pass task options when using scriptID, pass the options as
      properties in the request body--for example:

      {
        "name": "CPU Total 1 Hour New",
        "description": "This task downsamples CPU data every hour",
        "every": "1h",
        "scriptID": "SCRIPT_ID",
        "scriptParameters":
          {
            "rangeStart": "-1h",
            "bucket": "telegraf",
            "filterField": "cpu-total"
          }
      }

Limitations:

  • You can't use flux and scriptID for the same task.
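For illustration, a minimal curl sketch of a create request that provides a raw Flux script (INFLUX_URL, INFLUX_API_TOKEN, and INFLUX_ORG_ID are placeholders; the Flux shown is only an example):

curl --request POST "INFLUX_URL/api/v2/tasks" \
  --header 'Content-Type: application/json' \
  --header 'Authorization: Token INFLUX_API_TOKEN' \
  --data '{
    "orgID": "INFLUX_ORG_ID",
    "status": "active",
    "description": "Downsamples CPU data every hour",
    "flux": "option task = {name: \"CPU Total 1 Hour\", every: 1h} from(bucket: \"telegraf\") |> range(start: -1h) |> aggregateWindow(every: 1h, fn: max) |> to(bucket: \"cpu_usage_total_1h\")"
  }'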
Authorizations:
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

The task to create

+
cron
string

A Cron expression that defines the schedule on which the task runs. InfluxDB bases cron runs on the system time.

+
description
string

The description of the task.

+
every
string

The interval (duration literal) at which the task runs. every also determines when the task first runs, depending on the specified time.

+
flux
string

The Flux script that the task runs.

+

Limitations

+
  • If you use the flux property, you can't use the scriptID and scriptParameters properties.
name
string

The name of the task

+
offset
string <duration>

A duration to delay execution of the task after the scheduled time has elapsed. 0 removes the offset.

+
org
string

The name of the organization that owns the task.

+
orgID
string

The ID of the organization that owns the task.

+
scriptID
string

The ID of the script that the task runs.

+

Limitations

+
    +
  • If you use the scriptID property, you can't use the flux property.
  • +
+
scriptParameters
object

The parameter key-value pairs passed to the script (referenced by scriptID) during the task run.

+

Limitations

+
    +
  • scriptParameters requires scriptID.
  • +
  • If you use the scriptID and scriptParameters properties, you can't use the flux property.
  • +
+
status
string (TaskStatusType)
Enum: "active" "inactive"

inactive cancels scheduled runs and prevents manual runs of the task.

+

Responses

Request samples

Content type
application/json
{
  • "cron": "string",
  • "description": "string",
  • "every": "string",
  • "flux": "string",
  • "name": "string",
  • "offset": "string",
  • "org": "string",
  • "orgID": "string",
  • "scriptID": "string",
  • "scriptParameters": { },
  • "status": "active"
}

Response samples

Content type
application/json
{
  • "authorizationID": "string",
  • "createdAt": "2019-08-24T14:15:22Z",
  • "cron": "string",
  • "description": "string",
  • "every": "string",
  • "flux": "string",
  • "id": "string",
  • "labels": [
    ],
  • "lastRunError": "string",
  • "lastRunStatus": "failed",
  • "latestCompleted": "2019-08-24T14:15:22Z",
  • "links": {
    },
  • "name": "string",
  • "offset": "string",
  • "org": "string",
  • "orgID": "string",
  • "ownerID": "string",
  • "scriptID": "string",
  • "scriptParameters": { },
  • "status": "active",
  • "updatedAt": "2019-08-24T14:15:22Z"
}

Delete a task

Deletes a task and associated records.

+

Use this endpoint to delete a task and all associated records (task runs, logs, and labels). Once the task is deleted, InfluxDB cancels all scheduled runs of the task.

+

If you want to disable a task instead of delete it, update the task status to inactive.

+
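For example, a minimal curl sketch (TASK_ID, INFLUX_URL, and INFLUX_API_TOKEN are placeholders):

curl --request DELETE "INFLUX_URL/api/v2/tasks/TASK_ID" \
  --header 'Authorization: Token INFLUX_API_TOKEN'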
Authorizations:
path Parameters
taskID
required
string

A task ID. Specifies the task to delete.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "code": "invalid",
  • "message": "failed to decode request body: organization not found"
}

Retrieve a task

Retrieves a task.

+
Authorizations:
path Parameters
taskID
required
string

A task ID. +Specifies the task to retrieve.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "authorizationID": "string",
  • "createdAt": "2019-08-24T14:15:22Z",
  • "cron": "string",
  • "description": "string",
  • "every": "string",
  • "flux": "string",
  • "id": "string",
  • "labels": [
    ],
  • "lastRunError": "string",
  • "lastRunStatus": "failed",
  • "latestCompleted": "2019-08-24T14:15:22Z",
  • "links": {
    },
  • "name": "string",
  • "offset": "string",
  • "org": "string",
  • "orgID": "string",
  • "ownerID": "string",
  • "scriptID": "string",
  • "scriptParameters": { },
  • "status": "active",
  • "updatedAt": "2019-08-24T14:15:22Z"
}

Update a task

Updates a task, and then cancels all scheduled runs of the task.

+

Use this endpoint to set, modify, or clear task properties--for example: cron, name, flux, status. Once InfluxDB applies the update, it cancels all previously scheduled runs of the task.

To update a task, pass an object that contains the updated key-value pairs. To activate or inactivate a task, set the status property. "status": "inactive" cancels scheduled runs and prevents manual runs of the task.

+

InfluxDB Cloud

+
  • Use either flux or scriptID to provide the task script.

    • flux: a string of "raw" Flux that contains task options and the script--for example:

      {
        "flux": "option task = {name: \"CPU Total 1 Hour New\", every: 1h}\
        from(bucket: \"telegraf\")
          |> range(start: -1h)
          |> filter(fn: (r) => (r._measurement == \"cpu\"))
          |> filter(fn: (r) =>\n\t\t(r._field == \"usage_system\"))
          |> filter(fn: (r) => (r.cpu == \"cpu-total\"))
          |> aggregateWindow(every: 1h, fn: max)
          |> to(bucket: \"cpu_usage_user_total_1h\", org: \"INFLUX_ORG\")",
        "status": "active",
        "description": "This task downsamples CPU data every hour"
      }

    • scriptID: the ID of an invokable script for the task to run.
      To pass task options when using scriptID, pass the options as
      properties in the request body--for example:

      {
        "name": "CPU Total 1 Hour New",
        "description": "This task downsamples CPU data every hour",
        "every": "1h",
        "scriptID": "SCRIPT_ID",
        "scriptParameters":
          {
            "rangeStart": "-1h",
            "bucket": "telegraf",
            "filterField": "cpu-total"
          }
      }

Limitations:

  • You can't use flux and scriptID for the same task.
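For example, the following curl sketch deactivates a task by setting its status (TASK_ID and the other values are placeholders):

curl --request PATCH "INFLUX_URL/api/v2/tasks/TASK_ID" \
  --header 'Content-Type: application/json' \
  --header 'Authorization: Token INFLUX_API_TOKEN' \
  --data '{"status": "inactive"}'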
Authorizations:
path Parameters
taskID
required
string

A task ID. +Specifies the task to update.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

A task update to apply.

+
cron
string

Update the 'cron' option in the flux script.

+
description
string

Update the description of the task.

+
every
string

Update the 'every' option in the flux script.

+
flux
string

Update the Flux script that the task runs.

+
name
string

Update the 'name' option in the flux script.

+
offset
string

Update the 'offset' option in the flux script.

+
scriptID
string

Update the 'scriptID' of the task.

+
scriptParameters
object

Update the 'scriptParameters' of the task.

+
status
string (TaskStatusType)
Enum: "active" "inactive"

inactive cancels scheduled runs and prevents manual runs of the task.

+

Responses

Request samples

Content type
application/json
{
  • "cron": "string",
  • "description": "string",
  • "every": "string",
  • "flux": "string",
  • "name": "string",
  • "offset": "string",
  • "scriptID": "string",
  • "scriptParameters": { },
  • "status": "active"
}

Response samples

Content type
application/json
{
  • "authorizationID": "string",
  • "createdAt": "2019-08-24T14:15:22Z",
  • "cron": "string",
  • "description": "string",
  • "every": "string",
  • "flux": "string",
  • "id": "string",
  • "labels": [
    ],
  • "lastRunError": "string",
  • "lastRunStatus": "failed",
  • "latestCompleted": "2019-08-24T14:15:22Z",
  • "links": {
    },
  • "name": "string",
  • "offset": "string",
  • "org": "string",
  • "orgID": "string",
  • "ownerID": "string",
  • "scriptID": "string",
  • "scriptParameters": { },
  • "status": "active",
  • "updatedAt": "2019-08-24T14:15:22Z"
}

List labels for a task

Retrieves a list of all labels for a task.

+

Labels may be used for grouping and filtering tasks.

+
Authorizations:
path Parameters
taskID
required
string

The ID of the task to retrieve labels for.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{}

Add a label to a task

Adds a label to a task.

+

Use this endpoint to add a label that you can use to filter tasks in the InfluxDB UI.

+
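For example, a minimal curl sketch (TASK_ID and LABEL_ID are placeholders):

curl --request POST "INFLUX_URL/api/v2/tasks/TASK_ID/labels" \
  --header 'Content-Type: application/json' \
  --header 'Authorization: Token INFLUX_API_TOKEN' \
  --data '{"labelID": "LABEL_ID"}'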
Authorizations:
path Parameters
taskID
required
string

The ID of the task to label.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

An object that contains a labelID to add to the task.

+
labelID
required
string

A label ID. +Specifies the label to attach.

+

Responses

Request samples

Content type
application/json
{
  • "labelID": "string"
}

Response samples

Content type
application/json
{}

Delete a label from a task

Deletes a label from a task.

+
Authorizations:
path Parameters
labelID
required
string

The ID of the label to delete.

+
taskID
required
string

The ID of the task to delete the label from.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "code": "invalid",
  • "message": "failed to decode request body: organization not found"
}

Retrieve all logs for a task

Retrieves a list of all logs for a task.

+

When an InfluxDB task runs, a “run” record is created in the task’s history. Logs associated with each run provide relevant log messages, timestamps, and the exit status of the run attempt.

Use this endpoint to retrieve only the log events for a task, without additional task metadata.

+
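For example, a minimal curl sketch (TASK_ID, INFLUX_URL, and INFLUX_API_TOKEN are placeholders):

curl --request GET "INFLUX_URL/api/v2/tasks/TASK_ID/logs" \
  --header 'Authorization: Token INFLUX_API_TOKEN'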
Authorizations:
path Parameters
taskID
required
string

The task ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
Example
{
  • "events": [
    ]
}

List all task members Deprecated

Deprecated: Tasks don't use owner and member roles. Use /api/v2/authorizations to assign user permissions.

+

Lists all users that have the member role for the specified task.

+
Authorizations:
path Parameters
taskID
required
string

The task ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "links": {},
  • "users": [
    ]
}

Add a member to a task Deprecated

Deprecated: Tasks don't use owner and member roles. Use /api/v2/authorizations to assign user permissions.

+

Adds a user as a member of a task and returns the member.

+
Authorizations:
path Parameters
taskID
required
string

The task ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

A user to add as a member of the task.

+
id
required
string

The ID of the user to add to the resource.

+
name
string

The name of the user to add to the resource.

+

Responses

Request samples

Content type
application/json
{
  • "id": "string",
  • "name": "string"
}

Response samples

Content type
application/json
{
  • "id": "string",
  • "links": {
    },
  • "name": "string",
  • "status": "active",
  • "role": "member"
}

Remove a member from a task Deprecated

Deprecated: Tasks don't use owner and member roles. Use /api/v2/authorizations to assign user permissions.

+

Removes a member from a task.

+
Authorizations:
path Parameters
taskID
required
string

The task ID.

+
userID
required
string

The ID of the member to remove.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "code": "internal error",
  • "err": "string",
  • "message": "string",
  • "op": "string"
}

List all owners of a task Deprecated

Deprecated: Tasks don't use owner and member roles. Use /api/v2/authorizations to assign user permissions.

+

Retrieves all users that have owner permission for a task.

+
Authorizations:
path Parameters
taskID
required
string

The ID of the task to retrieve owners for.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "links": {},
  • "users": [
    ]
}

Add an owner for a task Deprecated

Deprecated: Tasks don't use owner and member roles. Use /api/v2/authorizations to assign user permissions.

+

Assigns a task owner role to a user.

+

Use this endpoint to create a resource owner for the task. +A resource owner is a user with role: owner for a specific resource.

+
Authorizations:
path Parameters
taskID
required
string

The task ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

A user to add as an owner of the task.

+
id
required
string

The ID of the user to add to the resource.

+
name
string

The name of the user to add to the resource.

+

Responses

Request samples

Content type
application/json
{
  • "id": "string",
  • "name": "string"
}

Response samples

Content type
application/json
{
  • "id": "0772396d1f411000",
  • "links": {
    },
  • "name": "USER_NAME",
  • "role": "owner",
  • "status": "active"
}

Remove an owner from a task Deprecated

Deprecated: Tasks don't use owner and member roles. Use /api/v2/authorizations to assign user permissions.

+
Authorizations:
path Parameters
taskID
required
string

The task ID.

+
userID
required
string

The ID of the owner to remove.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "code": "internal error",
  • "err": "string",
  • "message": "string",
  • "op": "string"
}

List runs for a task

Retrieves a list of runs for a task.

+

To limit which task runs are returned, pass query parameters in your request. If no query parameters are passed, InfluxDB returns all task runs up to the default limit.

+ +
Authorizations:
path Parameters
taskID
required
string

The ID of the task to get runs for. +Only returns runs for this task.

+
query Parameters
after
string

A task run ID. Only returns runs created after this run.

+
afterTime
string <date-time>

A timestamp (RFC3339 date/time format). +Only returns runs scheduled after this time.

+
beforeTime
string <date-time>

A timestamp (RFC3339 date/time format). +Only returns runs scheduled before this time.

+
limit
integer [ 1 .. 500 ]
Default: 100

Limits the number of task runs returned. Default is 100.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "links": {},
  • "runs": [
    ]
}

Start a task run, overriding the schedule

Schedules a task run to start immediately, ignoring scheduled runs.

+

Use this endpoint to manually start a task run. Scheduled runs will continue to run as scheduled. This may result in concurrently running tasks.

To retry a previous run (and avoid creating a new run), use the POST /api/v2/tasks/{taskID}/runs/{runID}/retry endpoint.

+ +
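For example, a minimal curl sketch that starts a run immediately (an empty JSON body uses the server's current time for scheduledFor; TASK_ID and the other values are placeholders):

curl --request POST "INFLUX_URL/api/v2/tasks/TASK_ID/runs" \
  --header 'Content-Type: application/json' \
  --header 'Authorization: Token INFLUX_API_TOKEN' \
  --data '{}'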
Authorizations:
path Parameters
taskID
required
string
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json
scheduledFor
string or null <date-time>

A timestamp (RFC3339 date/time format) used for the run's now option. Default is the server's current time.

+

Responses

Request samples

Content type
application/json
{
  • "scheduledFor": "2019-08-24T14:15:22Z"
}

Response samples

Content type
application/json
{
  • "finishedAt": "2006-01-02T15:04:05.999999999Z07:00",
  • "flux": "string",
  • "id": "string",
  • "links": {
    },
  • "log": [
    ],
  • "requestedAt": "2006-01-02T15:04:05.999999999Z07:00",
  • "scheduledFor": "2019-08-24T14:15:22Z",
  • "startedAt": "2006-01-02T15:04:05.999999999Z07:00",
  • "status": "scheduled",
  • "taskID": "string"
}

Cancel a running task

Cancels a running task.

+

Use this endpoint with InfluxDB OSS to cancel a running task.

+

InfluxDB Cloud

+
  • Doesn't support this operation.
Authorizations:
path Parameters
runID
required
string

The ID of the task run to cancel.

+
taskID
required
string

The ID of the task to cancel.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "code": "invalid",
  • "message": "failed to decode request body: organization not found"
}

Retrieve a run for a task.

Retrieves a specific run for a task.

+

Use this endpoint to retrieve detail and logs for a specific task run.

+ +
Authorizations:
path Parameters
runID
required
string

The ID of the run to retrieve.

+
taskID
required
string

The ID of the task to retrieve runs for.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "finishedAt": "2022-07-18T14:46:07.308254Z",
  • "id": "09b070dadaa7d000",
  • "links": {
    },
  • "log": [
    ],
  • "requestedAt": "2022-07-18T14:46:06Z",
  • "scheduledFor": "2022-07-18T14:46:06Z",
  • "startedAt": "2022-07-18T14:46:07.16222Z",
  • "status": "success",
  • "taskID": "0996e56b2f378000"
}

Retrieve all logs for a run

Retrieves all logs for a task run. A log is a list of run events with runID, time, and message properties.

+

Use this endpoint to help analyze task performance and troubleshoot failed task runs.

+ +
Authorizations:
path Parameters
runID
required
string

The ID of the run to get logs for.

+
taskID
required
string

The ID of the task to get logs for.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
Example
{
  • "events": [
    ]
}

Retry a task run

Queues a task run to retry and returns the scheduled run.

To manually start a new task run, use the POST /api/v2/tasks/{taskID}/runs endpoint.

+

Limitations

+
  • The task must be active (status: "active").
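For example, a minimal curl sketch (TASK_ID and RUN_ID are placeholders):

curl --request POST "INFLUX_URL/api/v2/tasks/TASK_ID/runs/RUN_ID/retry" \
  --header 'Content-Type: application/json; charset=utf-8' \
  --header 'Authorization: Token INFLUX_API_TOKEN' \
  --data '{}'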
Authorizations:
path Parameters
runID
required
string

A task run ID. Specifies the task run to retry.

To find a task run ID, use the GET /api/v2/tasks/{taskID}/runs endpoint to list task runs.

+
taskID
required
string

A task ID. +Specifies the task to retry.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json; charset=utf-8
object

Responses

Request samples

Content type
application/json; charset=utf-8
{ }

Response samples

Content type
application/json
{
  • "id": "09d60ffe08738000",
  • "links": {
    },
  • "requestedAt": "2022-08-16T20:05:11.84145Z",
  • "scheduledFor": "2022-08-15T00:00:00Z",
  • "status": "scheduled",
  • "taskID": "09a776832f381000"
}

Telegrafs

List all Telegraf configurations

Authorizations:
query Parameters
orgID
string

The organization ID the Telegraf config belongs to.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "configurations": [
    ]
}

Create a Telegraf configuration

Authorizations:
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

Telegraf configuration to create

+
config
string
description
string
object
name
string
orgID
string
Array of objects

Responses

Request samples

Content type
application/json
{
  • "config": "string",
  • "description": "string",
  • "metadata": {
    },
  • "name": "string",
  • "orgID": "string",
  • "plugins": [
    ]
}

Response samples

Content type
application/json
{
  • "config": "string",
  • "description": "string",
  • "metadata": {
    },
  • "name": "string",
  • "orgID": "string",
  • "id": "string",
  • "labels": [
    ],
  • "links": {
    }
}

Delete a Telegraf configuration

Authorizations:
path Parameters
telegrafID
required
string

The Telegraf configuration ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "code": "internal error",
  • "err": "string",
  • "message": "string",
  • "op": "string"
}

Retrieve a Telegraf configuration

Authorizations:
path Parameters
telegrafID
required
string

The Telegraf configuration ID.

+
header Parameters
Accept
string
Default: application/toml
Enum: "application/toml" "application/json" "application/octet-stream"
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
{
  • "config": "string",
  • "description": "string",
  • "metadata": {
    },
  • "name": "string",
  • "orgID": "string",
  • "id": "string",
  • "labels": [
    ],
  • "links": {
    }
}

Update a Telegraf configuration

Authorizations:
path Parameters
telegrafID
required
string

The Telegraf config ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

Telegraf configuration update to apply

+
config
string
description
string
object
name
string
orgID
string
Array of objects

Responses

Request samples

Content type
application/json
{
  • "config": "string",
  • "description": "string",
  • "metadata": {
    },
  • "name": "string",
  • "orgID": "string",
  • "plugins": [
    ]
}

Response samples

Content type
application/json
{
  • "config": "string",
  • "description": "string",
  • "metadata": {
    },
  • "name": "string",
  • "orgID": "string",
  • "id": "string",
  • "labels": [
    ],
  • "links": {
    }
}

List all labels for a Telegraf config

Authorizations:
path Parameters
telegrafID
required
string

The Telegraf config ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{}

Add a label to a Telegraf config

Authorizations:
path Parameters
telegrafID
required
string

The Telegraf config ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

Label to add

+
labelID
required
string

A label ID. +Specifies the label to attach.

+

Responses

Request samples

Content type
application/json
{
  • "labelID": "string"
}

Response samples

Content type
application/json
{}

Delete a label from a Telegraf config

Authorizations:
path Parameters
labelID
required
string

The label ID.

+
telegrafID
required
string

The Telegraf config ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "code": "internal error",
  • "err": "string",
  • "message": "string",
  • "op": "string"
}

List all users with member privileges for a Telegraf config

Authorizations:
path Parameters
telegrafID
required
string

The Telegraf config ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "links": {},
  • "users": [
    ]
}

Add a member to a Telegraf config

Authorizations:
path Parameters
telegrafID
required
string

The Telegraf config ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

User to add as member

+
id
required
string

The ID of the user to add to the resource.

+
name
string

The name of the user to add to the resource.

+

Responses

Request samples

Content type
application/json
{
  • "id": "string",
  • "name": "string"
}

Response samples

Content type
application/json
{
  • "id": "string",
  • "links": {
    },
  • "name": "string",
  • "status": "active",
  • "role": "member"
}

Remove a member from a Telegraf config

Authorizations:
path Parameters
telegrafID
required
string

The Telegraf config ID.

+
userID
required
string

The ID of the member to remove.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "code": "internal error",
  • "err": "string",
  • "message": "string",
  • "op": "string"
}

List all owners of a Telegraf configuration

Authorizations:
path Parameters
telegrafID
required
string

The Telegraf configuration ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "links": {},
  • "users": [
    ]
}

Add an owner to a Telegraf configuration

Authorizations:
path Parameters
telegrafID
required
string

The Telegraf configuration ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

User to add as owner

+
id
required
string

The ID of the user to add to the resource.

+
name
string

The name of the user to add to the resource.

+

Responses

Request samples

Content type
application/json
{
  • "id": "string",
  • "name": "string"
}

Response samples

Content type
application/json
{
  • "id": "string",
  • "links": {
    },
  • "name": "string",
  • "status": "active",
  • "role": "owner"
}

Remove an owner from a Telegraf config

Authorizations:
path Parameters
telegrafID
required
string

The Telegraf config ID.

+
userID
required
string

The ID of the owner to remove.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "code": "internal error",
  • "err": "string",
  • "message": "string",
  • "op": "string"
}

Templates

Export and apply InfluxDB templates. Manage stacks of templated InfluxDB resources.

InfluxDB templates are prepackaged configurations for resources. Use InfluxDB templates to configure a fresh instance of InfluxDB, back up your dashboard configuration, or share your configuration.

Use the /api/v2/templates endpoints to export templates and apply templates.

InfluxDB stacks are stateful InfluxDB templates that let you add, update, and remove installed template resources over time, avoid duplicating resources when applying the same or similar templates more than once, and apply changes to distributed instances of InfluxDB OSS or InfluxDB Cloud.

Use the /api/v2/stacks endpoints to manage installed template resources.

+ +

List installed stacks

Lists installed InfluxDB stacks.

+

To limit stacks in the response, pass query parameters in your request. If no query parameters are passed, InfluxDB returns all installed stacks for the organization.

+ +
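For example, a minimal curl sketch that filters by organization and stack name (values are placeholders):

curl --get "INFLUX_URL/api/v2/stacks" \
  --header 'Authorization: Token INFLUX_API_TOKEN' \
  --data-urlencode "orgID=INFLUX_ORG_ID" \
  --data-urlencode "name=project-stack-0"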
Authorizations:
query Parameters
name
string
Examples:
  • name=project-stack-0 - Find stacks with the event name

A stack name. Finds stack events with this name and returns the stacks.

Repeatable. To filter for more than one stack name, repeat this parameter with each name--for example:

  • INFLUX_URL/api/v2/stacks?&orgID=INFLUX_ORG_ID&name=project-stack-0&name=project-stack-1
orgID
required
string

An organization ID. Only returns stacks owned by the specified organization.

InfluxDB Cloud

  • Doesn't require this parameter; InfluxDB only returns resources allowed by the API token.
stackID
string
Examples:
  • stackID=09bd87cd33be3000 - Find a stack with the ID

A stack ID. Only returns the specified stack.

Repeatable. To filter for more than one stack ID, repeat this parameter with each ID--for example:

  • INFLUX_URL/api/v2/stacks?&orgID=INFLUX_ORG_ID&stackID=09bd87cd33be3000&stackID=09bef35081fe3000

Responses

Response samples

Content type
application/json
{
  • "stacks": [
    ]
}

Create a stack

Creates or initializes a stack.

+

Use this endpoint to manually initialize a new stack with the following optional information:

  • Stack name
  • Stack description
  • URLs for template manifest files

To automatically create a stack when applying templates, use the /api/v2/templates/apply endpoint.

+

Required permissions

+
  • write permission for the organization
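For example, a minimal curl sketch (the name, description, and template URL are placeholder values):

curl --request POST "INFLUX_URL/api/v2/stacks" \
  --header 'Content-Type: application/json' \
  --header 'Authorization: Token INFLUX_API_TOKEN' \
  --data '{
    "orgID": "INFLUX_ORG_ID",
    "name": "monitoring-stack",
    "description": "Monitoring resources for the example app",
    "urls": ["https://example.com/templates/monitoring.yml"]
  }'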
Authorizations:
Request Body schema: application/json

The stack to create.

+
description
string
name
string
orgID
string
urls
Array of strings

Responses

Request samples

Content type
application/json
{
  • "description": "string",
  • "name": "string",
  • "orgID": "string",
  • "urls": [
    ]
}

Response samples

Content type
application/json
{
  • "createdAt": "2019-08-24T14:15:22Z",
  • "events": [
    ],
  • "id": "string",
  • "orgID": "string"
}

Delete a stack and associated resources

Authorizations:
path Parameters
stack_id
required
string

The identifier of the stack.

+
query Parameters
orgID
required
string

The identifier of the organization.

+

Responses

Response samples

Content type
application/json
{
  • "code": "internal error",
  • "err": "string",
  • "message": "string",
  • "op": "string"
}

Retrieve a stack

Authorizations:
path Parameters
stack_id
required
string

The identifier of the stack.

+

Responses

Response samples

Content type
application/json
{
  • "createdAt": "2019-08-24T14:15:22Z",
  • "events": [
    ],
  • "id": "string",
  • "orgID": "string"
}

Update a stack

Authorizations:
path Parameters
stack_id
required
string

The identifier of the stack.

+
Request Body schema: application/json

The stack to update.

+
Array of objects
description
string or null
name
string or null
templateURLs
Array of strings or null

Responses

Request samples

Content type
application/json
{
  • "additionalResources": [
    ],
  • "description": "string",
  • "name": "string",
  • "templateURLs": [
    ]
}

Response samples

Content type
application/json
{
  • "createdAt": "2019-08-24T14:15:22Z",
  • "events": [
    ],
  • "id": "string",
  • "orgID": "string"
}

Uninstall a stack

Authorizations:
path Parameters
stack_id
required
string

The identifier of the stack.

+

Responses

Response samples

Content type
application/json
{
  • "createdAt": "2019-08-24T14:15:22Z",
  • "events": [
    ],
  • "id": "string",
  • "orgID": "string"
}

Apply or dry-run a template

Applies a template to create or update a stack of InfluxDB resources. The response contains the diff of changes and the stack ID.

Use this endpoint to install an InfluxDB template to an organization. Provide template URLs or template objects in your request. To customize which template resources are installed, use the actions parameter.

By default, when you apply a template, InfluxDB installs the template to create and update stack resources and then generates a diff of the changes. If you pass dryRun: true in the request body, InfluxDB validates the template and generates the resource diff, but doesn’t make any changes to your instance.

+

Custom values for templates

+
  • Some templates may contain environment references for custom metadata. To provide custom values for environment references, pass the envRefs property in the request body.

  • Some templates may contain queries that use secrets. To provide custom secret values, pass the secrets property in the request body. Don't expose secret values in templates.

Required permissions

+
  • write permissions for resource types in the template.
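For example, a minimal curl sketch of a dry run that applies a template from a URL (placeholder values; the shape of the remotes item shown here--an object with a url property--is an assumption based on the remotes parameter description below):

curl --request POST "INFLUX_URL/api/v2/templates/apply" \
  --header 'Content-Type: application/json' \
  --header 'Authorization: Token INFLUX_API_TOKEN' \
  --data '{
    "dryRun": true,
    "orgID": "INFLUX_ORG_ID",
    "remotes": [{"url": "https://example.com/templates/monitoring.yml"}]
  }'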

Rate limits (with InfluxDB Cloud)

+ + +
Authorizations:
Request Body schema:

Parameters for applying templates.

+
Array of objects or objects

A list of action objects. Actions let you customize how InfluxDB applies templates in the request.

You can use the following actions to prevent creating or updating resources:

  • A skipKind action skips template resources of a specified kind.
  • A skipResource action skips template resources with a specified metadata.name and kind.
dryRun
boolean

Only applies a dry run of the templates passed in the request.

  • Validates the template and generates a resource diff and summary.
  • Doesn't install templates or make changes to the InfluxDB instance.
object

An object with key-value pairs that map to environment references in templates.

Environment references in templates are envRef objects with an envRef.key property. To substitute a custom environment reference value when applying templates, pass envRefs with the envRef.key and the value.

When you apply a template, InfluxDB replaces envRef objects in the template with the values that you provide in the envRefs parameter.

The following template fields may use environment references:

  • metadata.name
  • spec.endpointName
  • spec.associations.name
orgID
string

Organization ID. InfluxDB applies templates to this organization. The organization owns all resources created by the template.

To find your organization, see how to view organizations.

+
Array of objects

A list of URLs for template files.

+

To apply a template manifest file located at a URL, pass remotes +with an array that contains the URL.

+
object

An object with key-value pairs that map to secrets in queries.

+

Queries may reference secrets stored in InfluxDB--for example, the following Flux script retrieves POSTGRES_USERNAME and POSTGRES_PASSWORD secrets and then uses them to connect to a PostgreSQL database:

import "sql"
import "influxdata/influxdb/secrets"

username = secrets.get(key: "POSTGRES_USERNAME")
password = secrets.get(key: "POSTGRES_PASSWORD")

sql.from(
  driverName: "postgres",
  dataSourceName: "postgresql://${username}:${password}@localhost:5432",
  query: "SELECT * FROM example_table",
)

To define secret values in your /api/v2/templates/apply request, pass the secrets parameter with key-value pairs--for example:

{
  ...
  "secrets": {
    "POSTGRES_USERNAME": "pguser",
    "POSTGRES_PASSWORD": "foo"
  }
  ...
}

InfluxDB stores the key-value pairs as secrets that you can access with secrets.get(). Once stored, you can't view secret values in InfluxDB.

+ +
stackID
string

ID of the stack to update.

+

To apply templates to an existing stack in the organization, use the stackID parameter. +If you apply templates without providing a stack ID, +InfluxDB initializes a new stack with all new resources.

+

To find a stack ID, use the InfluxDB /api/v2/stacks API endpoint to list stacks.

+ +
object

A template object to apply. +A template object has a contents property +with an array of InfluxDB resource configurations.

+

Pass template to apply only one template object. +If you use template, you can't use the templates parameter. +If you want to apply multiple template objects, use templates instead.

+
Array of objects

A list of template objects to apply. +A template object has a contents property +with an array of InfluxDB resource configurations.

+

Use the templates parameter to apply multiple template objects. +If you use templates, you can't use the template parameter.

+

Responses

Request samples

Content type
Example
{
  • "actions": [
    ],
  • "orgID": "INFLUX_ORG_ID",
  • "templates": [
    ]
}

Response samples

Content type
application/json
{
  • "diff": {
    },
  • "errors": [
    ],
  • "sources": [
    ],
  • "stackID": "string",
  • "summary": {
    }
}

Export a new template

Authorizations:
Request Body schema: application/json

Export resources as an InfluxDB template.

+
One of
Array of objects
Array of objects
stackID
string

Responses

Request samples

Content type
application/json
Example
{
  • "orgIDs": [
    ],
  • "resources": [
    ],
  • "stackID": "string"
}

Response samples

Content type
[
  • {
    }
]

Usage

Retrieve usage for an organization

Authorizations:
path Parameters
orgID
required
string

The ID of the organization.

+
query Parameters
raw
boolean
Default: false

return raw usage data

+
start
required
integer <unix timestamp>

Earliest time (unix timestamp format) to include in results.

+
stop
integer <unix timestamp>

Latest time (unix timestamp format) to include in results.

+

Responses

Response samples

Content type
application/json
{
  • "code": "internal error",
  • "err": "string",
  • "message": "string",
  • "op": "string"
}

Variables

List all variables

Authorizations:
query Parameters
org
string

The name of the organization.

+
orgID
string

The organization ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "variables": [
    ]
}

Create a variable

Authorizations:
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

Variable to create

+
required
QueryVariableProperties (object) or ConstantVariableProperties (object) or MapVariableProperties (object) (VariableProperties)
createdAt
string <date-time>
description
string
Array of objects (Labels)
name
required
string
orgID
required
string
selected
Array of strings
sort_order
integer
updatedAt
string <date-time>

Responses

Request samples

Content type
application/json
{
  • "arguments": {
    },
  • "createdAt": "2019-08-24T14:15:22Z",
  • "description": "string",
  • "labels": [
    ],
  • "name": "string",
  • "orgID": "string",
  • "selected": [
    ],
  • "sort_order": 0,
  • "updatedAt": "2019-08-24T14:15:22Z"
}

Response samples

Content type
application/json
{
  • "arguments": {
    },
  • "createdAt": "2019-08-24T14:15:22Z",
  • "description": "string",
  • "id": "string",
  • "labels": [
    ],
  • "links": {},
  • "name": "string",
  • "orgID": "string",
  • "selected": [
    ],
  • "sort_order": 0,
  • "updatedAt": "2019-08-24T14:15:22Z"
}

Delete a variable

Authorizations:
path Parameters
variableID
required
string

The variable ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "code": "internal error",
  • "err": "string",
  • "message": "string",
  • "op": "string"
}

Retrieve a variable

Authorizations:
path Parameters
variableID
required
string

The variable ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "arguments": {
    },
  • "createdAt": "2019-08-24T14:15:22Z",
  • "description": "string",
  • "id": "string",
  • "labels": [
    ],
  • "links": {},
  • "name": "string",
  • "orgID": "string",
  • "selected": [
    ],
  • "sort_order": 0,
  • "updatedAt": "2019-08-24T14:15:22Z"
}

Update a variable

Authorizations:
path Parameters
variableID
required
string

The variable ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

Variable update to apply

+
required
QueryVariableProperties (object) or ConstantVariableProperties (object) or MapVariableProperties (object) (VariableProperties)
createdAt
string <date-time>
description
string
Array of objects (Labels)
name
required
string
orgID
required
string
selected
Array of strings
sort_order
integer
updatedAt
string <date-time>

Responses

Request samples

Content type
application/json
{
  • "arguments": {
    },
  • "createdAt": "2019-08-24T14:15:22Z",
  • "description": "string",
  • "labels": [
    ],
  • "name": "string",
  • "orgID": "string",
  • "selected": [
    ],
  • "sort_order": 0,
  • "updatedAt": "2019-08-24T14:15:22Z"
}

Response samples

Content type
application/json
{
  • "arguments": {
    },
  • "createdAt": "2019-08-24T14:15:22Z",
  • "description": "string",
  • "id": "string",
  • "labels": [
    ],
  • "links": {},
  • "name": "string",
  • "orgID": "string",
  • "selected": [
    ],
  • "sort_order": 0,
  • "updatedAt": "2019-08-24T14:15:22Z"
}

Replace a variable

Authorizations:
path Parameters
variableID
required
string

The variable ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

Variable to replace

+
required
QueryVariableProperties (object) or ConstantVariableProperties (object) or MapVariableProperties (object) (VariableProperties)
createdAt
string <date-time>
description
string
Array of objects (Labels)
name
required
string
orgID
required
string
selected
Array of strings
sort_order
integer
updatedAt
string <date-time>

Responses

Request samples

Content type
application/json
{
  • "arguments": {
    },
  • "createdAt": "2019-08-24T14:15:22Z",
  • "description": "string",
  • "labels": [
    ],
  • "name": "string",
  • "orgID": "string",
  • "selected": [
    ],
  • "sort_order": 0,
  • "updatedAt": "2019-08-24T14:15:22Z"
}

Response samples

Content type
application/json
{
  • "arguments": {
    },
  • "createdAt": "2019-08-24T14:15:22Z",
  • "description": "string",
  • "id": "string",
  • "labels": [
    ],
  • "links": {},
  • "name": "string",
  • "orgID": "string",
  • "selected": [
    ],
  • "sort_order": 0,
  • "updatedAt": "2019-08-24T14:15:22Z"
}

List all labels for a variable

Authorizations:
path Parameters
variableID
required
string

The variable ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{}

Add a label to a variable

Authorizations:
path Parameters
variableID
required
string

The variable ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: application/json

Label to add

+
labelID
required
string

A label ID. +Specifies the label to attach.

+

Responses

Request samples

Content type
application/json
{
  • "labelID": "string"
}

Response samples

Content type
application/json
{}

Delete a label from a variable

Authorizations:
path Parameters
labelID
required
string

The label ID to delete.

+
variableID
required
string

The variable ID.

+
header Parameters
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
application/json
{
  • "code": "internal error",
  • "err": "string",
  • "message": "string",
  • "op": "string"
}

Write

Write time series data to buckets using InfluxDB v1 or v2 endpoints.

+

Write data

Writes data to a bucket.

+

Use this endpoint to send data in line protocol format to InfluxDB.

+

InfluxDB 3 Cloud Serverless does the following when you send a write request:

+
  1. Validates the request.

  2. If successful, attempts to ingest data from the request body; otherwise, responds with an error status.

  3. Ingests or rejects data in the batch and returns one of the following HTTP status codes:

    • 204 No Content: All data in the batch is ingested.
    • 400 Bad Request: Data from the batch was rejected and not written. The response body indicates if a partial write occurred.

The response body contains error details about rejected points, up to 100 points.

+

Writes are synchronous--the response status indicates the final status of the write and all ingested data is queryable.

+

To ensure that InfluxDB handles writes in the order you request them, wait for the response before you send the next request.

+

Write endpoints

+

The /write and /api/v2/write endpoints are functionally equivalent for writing data to InfluxDB 3 Cloud Serverless.

+ +

Rate limits

+

Write rate limits apply. For more information, see limits and adjustable quotas.

+ + +
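For example, a minimal curl sketch that writes one line protocol point (INFLUX_URL, INFLUX_ORG, BUCKET_NAME, and INFLUX_API_TOKEN are placeholders):

curl --request POST "INFLUX_URL/api/v2/write?org=INFLUX_ORG&bucket=BUCKET_NAME&precision=ns" \
  --header 'Authorization: Token INFLUX_API_TOKEN' \
  --header 'Content-Type: text/plain; charset=utf-8' \
  --header 'Accept: application/json' \
  --data-binary 'airSensors,sensor_id=TLM0201 temperature=73.97 1630424257000000000'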
Authorizations:
query Parameters
bucket
required
string

A bucket name or ID. +InfluxDB writes all points in the batch to the specified bucket.

+
org
required
string

An organization name or ID.

+

InfluxDB 3 Cloud Serverless writes data to the bucket in the organization associated with the authorization (API token); +doesn't use the org parameter or orgID parameter.

+
orgID
string

An organization ID.

+

InfluxDB 3 Cloud Serverless writes data to the bucket in the organization associated with the authorization (API token); +doesn't use the org parameter or orgID parameter.

+
precision
string (WritePrecision)
Enum: "ms" "s" "us" "ns"

The precision for unix timestamps in the line protocol batch.

+
header Parameters
Accept
string
Default: application/json
Value: "application/json"

The content type that the client can understand. Writes only return a response body if they fail--for example, due to a formatting problem or quota limit.

InfluxDB 3 Cloud Serverless

  • Returns only application/json for format and limit errors.
  • Returns only text/html for some quota limit errors.
Content-Encoding
string
Default: identity
Enum: "gzip" "identity"

The compression applied to the line protocol in the request payload. To send a gzip payload, pass the Content-Encoding: gzip header.

+
Content-Length
integer

The size of the entity-body, in bytes, sent to InfluxDB. If the length is greater than the max body configuration option, the server responds with status code 413.

+
Content-Type
string
Default: text/plain; charset=utf-8
Enum: "text/plain" "text/plain; charset=utf-8"

The format of the data in the request body. To send a line protocol payload, pass Content-Type: text/plain; charset=utf-8.

+
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: text/plain

In the request body, provide data in line protocol format.

+

To send compressed data, do the following:

+
  1. Use gzip to compress the line protocol data.
  2. In your request, send the compressed data and the Content-Encoding: gzip header.
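For example, a sketch that compresses a line protocol file and streams it to the write endpoint (data.lp is a hypothetical file; the other values are placeholders):

gzip -c data.lp | curl --request POST "INFLUX_URL/api/v2/write?org=INFLUX_ORG&bucket=BUCKET_NAME&precision=ns" \
  --header 'Authorization: Token INFLUX_API_TOKEN' \
  --header 'Content-Encoding: gzip' \
  --header 'Content-Type: text/plain; charset=utf-8' \
  --data-binary @-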
string <byte>

Responses

Request samples

Content type
text/plain
airSensors,sensor_id=TLM0201 temperature=73.97038159354763,humidity=35.23103248356096,co=0.48445310567793615 1630424257000000000
airSensors,sensor_id=TLM0202 temperature=75.30007505999716,humidity=35.651929918691714,co=0.5141876544505826 1630424257000000000

Response samples

Content type
application/json
Example
{
  • "code": "invalid",
  • "line": 2,
  • "message": "no data written, errors encountered on line(s): error message for first rejected point</n> error message for second rejected point</n> error message for Nth rejected point (up to 100 rejected points)"
}

Write data using the InfluxDB v1 HTTP API

Writes data to a bucket.

+

Use this endpoint for InfluxDB v1 parameter compatibility when sending data in line protocol format to InfluxDB.

+

InfluxDB 3 Cloud Serverless does the following when you send a write request:

+
  1. Validates the request.

  2. If successful, attempts to ingest data from the request body; otherwise, responds with an error status.

  3. Ingests or rejects data in the batch and returns one of the following HTTP status codes:

    • 204 No Content: all data in the batch is ingested
    • 201 Created: some points in the batch are ingested and queryable, and some points are rejected
    • 400 Bad Request: all data is rejected

The response body contains error details about rejected points, up to 100 points.

+

Writes are synchronous--the response status indicates the final status of the write and all ingested data is queryable.

+

To ensure that InfluxDB handles writes in the order you request them, wait for the response before you send the next request.

+

Write endpoints

+

The /write and /api/v2/write endpoints are functionally equivalent for writing data to InfluxDB 3 Cloud Serverless.

+ +

Rate limits

+

Write rate limits apply. For more information, see limits and adjustable quotas.

+ + +
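For example, a minimal curl sketch that writes one line protocol point using the v1-compatible parameters (placeholder values):

curl --request POST "INFLUX_URL/write?db=BUCKET_NAME&precision=ns" \
  --header 'Authorization: Token INFLUX_API_TOKEN' \
  --data-binary 'airSensors,sensor_id=TLM0201 temperature=73.97 1630424257000000000'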
Authorizations:
query Parameters
db
required
string

Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy.

+
p
string

The InfluxDB 1.x password to authenticate the request.

+
precision
string

Write precision.

+
rp
string

Retention policy name.

+
u
string

The InfluxDB 1.x username to authenticate the request.

+
header Parameters
Content-Encoding
string
Default: identity
Enum: "gzip" "identity"

When present, indicates that compression is applied to the line protocol body.

+
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: text/plain

Line protocol body

+
string

Responses

Response samples

Content type
application/json
{
  • "code": "invalid",
  • "line": 2,
  • "message": "failed to parse line protocol: errors encountered on line(s): error message for first rejected point</n> error message for second rejected point</n> error message for Nth rejected point (up to 100 rejected points)"
}
+ + + + + diff --git a/content/influxdb3/cloud-serverless/guides/api-compatibility/v1/_index.md b/content/influxdb3/cloud-serverless/guides/api-compatibility/v1/_index.md index 002c104d9..0449bab4f 100644 --- a/content/influxdb3/cloud-serverless/guides/api-compatibility/v1/_index.md +++ b/content/influxdb3/cloud-serverless/guides/api-compatibility/v1/_index.md @@ -54,7 +54,7 @@ With the InfluxDB v1 API, you can use API tokens in InfluxDB 1.x username and pa schemes or in the InfluxDB v2 `Authorization: Token` scheme. - [Authenticate with a username and password scheme](#authenticate-with-a-username-and-password-scheme) -- [Authenticate with a token scheme](#authenticate-with-a-token) +- [Authenticate with a token scheme](#authenticate-with-a-token-scheme) ### Authenticate with a username and password scheme @@ -151,6 +151,8 @@ Replace the following: Use the `Authorization: Token` scheme to pass a [token](/influxdb3/cloud-serverless/admin/tokens/) for authenticating v1 API `/write` and `/query` requests. +Include the word `Token`, a space, and your **token** value (all case-sensitive). + #### Syntax ```http diff --git a/content/influxdb3/cloud-serverless/write-data/best-practices/schema-design.md b/content/influxdb3/cloud-serverless/write-data/best-practices/schema-design.md index ed768fe24..4c8e1df87 100644 --- a/content/influxdb3/cloud-serverless/write-data/best-practices/schema-design.md +++ b/content/influxdb3/cloud-serverless/write-data/best-practices/schema-design.md @@ -4,7 +4,7 @@ seotitle: InfluxDB schema design recommendations and best practices description: > Design your schema for simpler and more performant queries. menu: - influxdb_cloud_serverless: + influxdb3_cloud_serverless: name: Schema design weight: 201 parent: write-best-practices diff --git a/content/influxdb3/clustered/admin/bypass-identity-provider.md b/content/influxdb3/clustered/admin/bypass-identity-provider.md index bb6d2c3f2..d8fd49bd4 100644 --- a/content/influxdb3/clustered/admin/bypass-identity-provider.md +++ b/content/influxdb3/clustered/admin/bypass-identity-provider.md @@ -69,7 +69,7 @@ The only way to revoke the token is to do the following: kubectl delete secret rsa-keys admin-token --namespace INFLUXDB_NAMESPACE ``` -2. Rerun the `key-gen` and `create-amin-token` jobs: +2. Rerun the `key-gen` and `create-admin-token` jobs: 1. List the jobs in your InfluxDB namespace to find the key-gen job pod: diff --git a/content/influxdb3/clustered/api/v1-compatibility/_index.html b/content/influxdb3/clustered/api/v1-compatibility/_index.html new file mode 100644 index 000000000..54fc87790 --- /dev/null +++ b/content/influxdb3/clustered/api/v1-compatibility/_index.html @@ -0,0 +1,579 @@ +--- +title: InfluxDB v1 HTTP API for InfluxDB 3 Clustered +description: The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Clustered database. +layout: api +menu: + influxdb3_clustered: + parent: InfluxDB HTTP API + name: v1 Compatibility API + identifier: api-reference-v1-compatibility +weight: 304 +aliases: + - /influxdb/clustered/api/v1/ +--- + + + + + + + + + + + InfluxDB + + + + + + + + + + + + + + + + +
+
+
+ + +

InfluxDB v1 HTTP API for InfluxDB 3 Clustered

License: MIT

The InfluxDB v1 HTTP API provides v1 compatibility for writing and querying data in an InfluxDB 3 Clustered database.

+

The InfluxDB 1.x /write and /query endpoints work with InfluxDB 1.x client libraries and third-party integrations like Grafana and others.

+

This documentation is generated from the InfluxDB OpenAPI specification.

+ +

InfluxDB /api/v2 API for InfluxDB 3 Clustered

+

Authentication

The InfluxDB 1.x API requires authentication for all requests. InfluxDB Cloud uses InfluxDB API tokens to authenticate requests.

+

For more information, see the following:

+ +

TokenAuthentication

Use the Token authentication scheme to authenticate to the InfluxDB API.

In your API requests, send an Authorization header. For the header value, provide the word Token followed by a space and an InfluxDB API token. The word Token is case-sensitive.

+

Syntax

+

Authorization: Token YOUR_INFLUX_TOKEN

+
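For example, a curl sketch that queries the v1 API with the Token scheme (the cluster host, DATABASE_TOKEN, DATABASE_NAME, and the measurement name home are placeholders):

curl --get "http://cluster-id.a.influxdb.io/query" \
  --header "Authorization: Token DATABASE_TOKEN" \
  --data-urlencode "db=DATABASE_NAME" \
  --data-urlencode "q=SELECT * FROM home"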

For examples and more information, see the following:

+ +
Security Scheme Type API Key
Header parameter name: Authorization

BasicAuthentication

Use the HTTP Basic authentication scheme with clients that support the InfluxDB 1.x convention of username and password (that don't support the Authorization: Token scheme).

+

For examples and more information, see how to authenticate with a username and password.

+
Security Scheme Type HTTP
HTTP Authorization Scheme basic

QuerystringAuthentication

Use the Querystring authentication scheme with InfluxDB 1.x API parameters to provide credentials through the query string.

+

For examples and more information, see how to authenticate with a username and password.

+
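For example, a curl sketch that passes credentials in the query string using the 1.x u and p parameters (all values are placeholders):

curl --get "http://cluster-id.a.influxdb.io/query" \
  --data-urlencode "u=any" \
  --data-urlencode "p=DATABASE_TOKEN" \
  --data-urlencode "db=DATABASE_NAME" \
  --data-urlencode "q=SELECT * FROM home"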
Security Scheme Type API Key
Query parameter name: u=&p=

Query

Query using the InfluxDB v1 HTTP API

query Parameters
db
required
string

Bucket to query.

+
p
string

User token.

+
q
string

Defines the InfluxQL query to run.

+
rp
string

Retention policy name.

+
u
string

Username.

+
header Parameters
Accept
string
Default: application/json
Enum: "application/json" "application/csv" "text/csv" "application/x-msgpack"

Specifies how query results should be encoded in the response. Note: With application/csv, query results include epoch timestamps instead of RFC3339 timestamps.

+
Accept-Encoding
string
Default: identity
Enum: "gzip" "identity"

The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand.

+
Content-Type
string
Value: "application/vnd.influxql"
Zap-Trace-Span
string
Example: trace_id,1,span_id,1,baggage,[object Object]

OpenTracing span context

+
Request Body schema: text/plain

InfluxQL query to execute.

+
string

Responses

Response samples

Content type
No sample
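The parameters and headers above combine roughly as follows; this is a sketch, with the host, database, retention policy, and token as placeholders. `--compressed` asks curl to request and decode a gzip-encoded response:

```sh
# Query the v1-compatible endpoint and request compressed CSV results.
curl --get "https://cluster-id.a.influxdb.io/query" \
  --header "Authorization: Token DATABASE_TOKEN" \
  --header "Accept: application/csv" \
  --compressed \
  --data-urlencode "db=DATABASE_NAME" \
  --data-urlencode "rp=RETENTION_POLICY" \
  --data-urlencode "q=SELECT * FROM home"
```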

Write

Write time series data into InfluxDB in a V1-compatible format

query Parameters
db
required
string

Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy.

+
p
string

User token.

+
precision
string

Write precision.

+
rp
string

Retention policy name.

+
u
string

Username.

+
header Parameters
Content-Encoding
string
Default: identity
Enum: "gzip" "identity"

When present, its value indicates to the database that compression is applied to the line protocol body.

+
Zap-Trace-Span
string
Example: trace_id,1,span_id,1,baggage,[object Object]

OpenTracing span context

+
Request Body schema: text/plain

Line protocol body

+
string

Responses

Response samples

Content type
application/json
{
  • "code": "internal error",
  • "message": "string",
  • "op": "string",
  • "err": "string",
  • "line": 0
}
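A sketch of a corresponding `/write` request that sets the `db`, `rp`, and `precision` parameters described above; the host, database, retention policy, and token are placeholders:

```sh
# Write a line protocol point with second-precision timestamps
# to a specific database and retention policy.
curl --request POST "https://cluster-id.a.influxdb.io/write?db=DATABASE_NAME&rp=RETENTION_POLICY&precision=s" \
  --header "Authorization: Token DATABASE_TOKEN" \
  --data-binary 'home,room=kitchen temp=72 1463683075'
```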
+ + + + + diff --git a/content/influxdb3/clustered/api/v2/_index.html b/content/influxdb3/clustered/api/v2/_index.html new file mode 100644 index 000000000..cd9eca651 --- /dev/null +++ b/content/influxdb3/clustered/api/v2/_index.html @@ -0,0 +1,966 @@ +--- +title: InfluxDB 3 Clustered API Service +description: The InfluxDB v2 HTTP API for InfluxDB 3 Clustered provides a v2-compatible programmatic interface for writing data stored in an InfluxDB 3 Clustered database. +layout: api +menu: + influxdb3_clustered: + parent: InfluxDB HTTP API + name: v2 API + identifier: api-reference-v2 +weight: 102 +aliases: + - /influxdb/clustered/api/ +--- + + + + + + + + + + + InfluxDB + + + + + + + + + + + + + + + + +
+
+
+ + +

InfluxDB 3 Clustered API Service

License: MIT

The InfluxDB v2 HTTP API for InfluxDB 3 Clustered provides a v2-compatible programmatic interface for writing data stored in an InfluxDB 3 Clustered database.

+

The InfluxDB v2 HTTP API lets you use /api/v2 endpoints for managing retention policy mappings and writing data stored in an InfluxDB 3 instance.

+

This documentation is generated from the +InfluxDB OpenAPI specification.

+

Quick start

See the Get Started tutorial +to get up and running: authenticating with tokens, writing to databases, and querying data.

+

InfluxDB API client libraries and Flight clients +are available to integrate InfluxDB APIs with your application.

+

Authentication

Use one of the following schemes to authenticate to the InfluxDB API:

+ +

BasicAuthentication

Basic authentication scheme

+

Use the Authorization header with the Basic scheme to authenticate v1 API /write and /query requests. +When authenticating requests, InfluxDB 3 Clustered checks that the password part of the decoded credential is an authorized database token. +InfluxDB 3 Clustered ignores the username part of the decoded credential.

+

Syntax

+
Authorization: Basic <base64-encoded [USERNAME]:DATABASE_TOKEN>
+

Replace the following:

+
    +
  • [USERNAME]: an optional string value (ignored by InfluxDB 3 Clustered).
  • +
  • DATABASE_TOKEN: a database token.
  • +
  • Encode the [USERNAME]:DATABASE_TOKEN credential using base64 encoding, and then append the encoded string to the Authorization: Basic header.
  • +
+

Example

+

The following example shows how to use cURL with the Basic authentication scheme and a database token:

+
#######################################
+# Use Basic authentication with a database token
+# to query the InfluxDB v1 HTTP API
+#######################################
+# Use the --user option with `--user username:DATABASE_TOKEN` syntax
+#######################################
+
+curl --get "http://cluster-id.a.influxdb.io/query" \
+  --user "":"DATABASE_TOKEN" \
+  --data-urlencode "db=DATABASE_NAME" \
+  --data-urlencode "q=SELECT * FROM MEASUREMENT"
+

Replace the following:

+
    +
  • DATABASE_NAME: your InfluxDB 3 Clustered database
  • +
  • DATABASE_TOKEN: a database token with sufficient permissions to the database
  • +
+
Security Scheme Type HTTP
HTTP Authorization Scheme basic

QuerystringAuthentication

Use the Querystring authentication +scheme with InfluxDB 1.x API parameters to provide credentials through the query string.

+

Query string authentication

+

In the URL, pass the p query parameter to authenticate /write and /query requests. +When authenticating requests, InfluxDB 3 Clustered checks that p (password) is an authorized database token and ignores the u (username) parameter.

+

Syntax

+
https://cluster-id.a.influxdb.io/query/?[u=any]&p=DATABASE_TOKEN
+https://cluster-id.a.influxdb.io/write/?[u=any]&p=DATABASE_TOKEN
+

Example

+

The following example shows how to use cURL with query string authentication and a database token.

+
#######################################
+# Use an InfluxDB 1.x compatible username and password
+# to query the InfluxDB v1 HTTP API
+#######################################
+# Use authentication query parameters:
+#   ?p=DATABASE_TOKEN
+#######################################
+
+curl --get "https://cluster-id.a.influxdb.io/query" \
+  --data-urlencode "p=DATABASE_TOKEN" \
+  --data-urlencode "db=DATABASE_NAME" \
+  --data-urlencode "q=SELECT * FROM MEASUREMENT"
+

Replace the following:

+
    +
  • DATABASE_NAME: your InfluxDB 3 Clustered database
  • +
  • DATABASE_TOKEN: a database token with sufficient permissions to the database
  • +
+
Security Scheme Type API Key
Query parameter name: u=&p=

BearerAuthentication

Use the OAuth Bearer authentication +scheme to authenticate to the InfluxDB API.

+

In your API requests, send an Authorization header. +For the header value, provide the word Bearer followed by a space and a database token.

+

Syntax

+
Authorization: Bearer INFLUX_TOKEN
+

Example

+
########################################################
+# Use the Bearer token authentication scheme with /api/v2/write
+# to write data.
+########################################################
+
+curl --request POST "https://cluster-id.a.influxdb.io/api/v2/write?bucket=DATABASE_NAME&precision=s" \
+  --header "Authorization: Bearer DATABASE_TOKEN" \
+  --data-binary 'home,room=kitchen temp=72 1463683075'
+

For examples and more information, see the following:

+ +
Security Scheme Type HTTP
HTTP Authorization Scheme bearer
Bearer format "JWT"

TokenAuthentication

Use the Token authentication +scheme to authenticate to the InfluxDB API.

+

In your API requests, send an Authorization header. +For the header value, provide the word Token followed by a space and a database token. +The word Token is case-sensitive.

+

Syntax

+
Authorization: Token INFLUX_API_TOKEN
+

Example

+
########################################################
+# Use the Token authentication scheme with /api/v2/write
+# to write data.
+########################################################
+
+curl --request POST "https://cluster-id.a.influxdb.io/api/v2/write?bucket=DATABASE_NAME&precision=s" \
+  --header "Authorization: Token DATABASE_TOKEN" \
+  --data-binary 'home,room=kitchen temp=72 1463683075'
+ + +
Security Scheme Type API Key
Header parameter name: Authorization

Headers

InfluxDB HTTP API endpoints use standard HTTP request and response headers. +The following table shows common headers used by many InfluxDB API endpoints. +Some endpoints may use other headers that perform functions more specific to those endpoints--for example, +the POST /api/v2/write endpoint accepts the Content-Encoding header to indicate the compression applied to line protocol in the request body.

| Header | Value type | Description |
| :--- | :--- | :--- |
| Accept | string | The content type that the client can understand. |
| Authorization | string | The authorization scheme and credential. |
| Content-Length | integer | The size of the entity-body, in bytes, sent to the database. |
| Content-Type | string | The format of the data in the request body. |
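A sketch of how these common headers appear together on a write request; the host and credentials are placeholders, and curl derives `Content-Length` from the request body automatically:

```sh
# Set the common request headers explicitly on a v2 write request.
curl --request POST "https://cluster-id.a.influxdb.io/api/v2/write?bucket=DATABASE_NAME&precision=s" \
  --header "Authorization: Token DATABASE_TOKEN" \
  --header "Content-Type: text/plain; charset=utf-8" \
  --header "Accept: application/json" \
  --data-binary 'home,room=kitchen temp=72 1463683075'
```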

Response codes

InfluxDB HTTP API endpoints use standard HTTP status codes for success and failure responses. +The response body may include additional details. +For details about a specific operation's response, +see Responses and Response Samples for that operation.

+

API operations may return the following HTTP status codes:

| Code | Status | Description |
| :--- | :--- | :--- |
| 200 | Success | |
| 204 | Success. No content | InfluxDB doesn't return data for the request. For example, a successful write request returns a 204 status code, acknowledging that data is written and queryable. |
| 400 | Bad request | InfluxDB can't parse the request due to an incorrect parameter or bad syntax. If line protocol in the request body is malformed, the response body contains the first malformed line and indicates what was expected. For partial writes, the number of points written and the number of points rejected are also included. |
| 401 | Unauthorized | May indicate one of the following: the Authorization: Token header is missing or malformed, the API token value is missing from the header, or the API token doesn't have permission. For more information about token types and permissions, see Manage tokens. |
| 404 | Not found | Requested resource was not found. The message in the response body provides details about the requested resource. |
| 405 | Method not allowed | The API path doesn't support the HTTP method used in the request--for example, you send a POST request to an endpoint that only allows GET. |
| 413 | Request entity too large | Request payload exceeds the size limit. |
| 422 | Unprocessable entity | Request data is invalid. The code and message in the response body provide details about the problem. |
| 429 | Too many requests | API token is temporarily over the request quota. The Retry-After header describes when to try the request again. |
| 500 | Internal server error | |
| 503 | Service unavailable | Server is temporarily unavailable to process the request. The Retry-After header describes when to try the request again. |

System information endpoints

Ping

Get the status of the instance

Retrieves the status and InfluxDB version of the instance.

+

Use this endpoint to monitor uptime for the InfluxDB instance. The response +returns an HTTP 204 status code to inform you that the instance is available.

+

This endpoint doesn't require authentication.

+
Authorizations:
None

Responses

Get the status of the instance

Returns the status and InfluxDB version of the instance.

+

Use this endpoint to monitor uptime for the InfluxDB instance. The response +returns an HTTP 204 status code to inform you that the instance is available.

+

This endpoint doesn't require authentication.

+
Authorizations:
None

Responses
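A sketch of a status check, assuming the standard `/ping` path for this operation and a placeholder cluster host; `--include` prints the response status line so the `204` is visible:

```sh
# Check whether the instance is available; expect a 204 response with no body.
curl --include "https://cluster-id.a.influxdb.io/ping"
```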

Query

Query data stored in a database.

+
    +
  • HTTP clients can query the v1 /query endpoint +using InfluxQL and retrieve data in CSV or JSON format.
  • +
  • The /api/v2/query endpoint can't query InfluxDB 3 Clustered.
  • +
  • Flight + gRPC clients can query using SQL or InfluxQL and retrieve data in Arrow format.
  • +
+ + +

Query using the InfluxDB v1 HTTP API

Queries InfluxDB using InfluxQL with InfluxDB v1 request and response formats.

+
query Parameters
db
required
string

The database to query data from.

+
epoch
string
Enum: "ns" "u" "µ" "ms" "s" "m" "h"

A unix timestamp precision. +Formats timestamps as unix (epoch) timestamps with the specified precision +instead of RFC3339 timestamps with nanosecond precision.

+
p
string

The InfluxDB 1.x password to authenticate the request.

+
q
required
string

The InfluxQL query to execute. To execute multiple queries, delimit queries with a semicolon (;).

+
rp
string

The retention policy to query data from. +For more information, see InfluxQL DBRP naming convention.

+
u
string

The InfluxDB 1.x username to authenticate the request.

+
header Parameters
Accept
string
Default: application/json
Enum: "application/json" "application/csv" "text/csv" "application/x-msgpack"

Media type that the client can understand.

+

Note: With application/csv, query results include unix timestamps instead of RFC3339 timestamps.

+
Accept-Encoding
string
Default: identity
Enum: "gzip" "identity"

The content encoding (usually a compression algorithm) that the client can understand.

+
Content-Type
string
Value: "application/json"
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+

Responses

Response samples

Content type
No sample
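A sketch that combines the `db`, `epoch`, and `q` parameters with the `Accept` header described above; the host, database, and token are placeholders:

```sh
# Query with InfluxQL and return CSV with millisecond epoch timestamps.
curl --get "https://cluster-id.a.influxdb.io/query" \
  --header "Authorization: Token DATABASE_TOKEN" \
  --header "Accept: application/csv" \
  --data-urlencode "db=DATABASE_NAME" \
  --data-urlencode "epoch=ms" \
  --data-urlencode "q=SELECT * FROM home WHERE time >= now() - 1d"
```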

Write

Write time series data to databases using InfluxDB v1 or v2 endpoints.

+

Write data

Writes data to a database.

+

Use this endpoint to send data in line protocol format to InfluxDB.

+

InfluxDB does the following when you send a write request:

+
    +
  1. Validates the request
  2. +
  3. If successful, attempts to ingest the data; error otherwise.
  4. +
  5. If successful, responds with success (HTTP 204 status code), acknowledging that the data is written and queryable; error otherwise.
  6. +
+

To ensure that InfluxDB handles writes in the order you request them, +wait for a success response (HTTP 2xx status code) before you send the next request.

+ + +
query Parameters
bucket
required
string

A database name or ID. +InfluxDB writes all points in the batch to the specified database.

+
org
required
string

Ignored. An organization name or ID.

+

InfluxDB ignores this parameter. It authorizes the request using the specified database token +and writes data to the specified cluster database.

+
orgID
string

Ignored. An organization ID.

+

InfluxDB ignores this parameter. It authorizes the request using the specified database token +and writes data to the specified cluster database.

+
precision
string (WritePrecision)
Enum: "ms" "s" "us" "ns"

The precision for unix timestamps in the line protocol batch.

+
header Parameters
Accept
string
Default: application/json
Value: "application/json"

The content type that the client can understand. +Writes only return a response body if they fail--for example, +due to a formatting problem or quota limit.

+
    +
  • Returns only application/json for format and limit errors.
  • +
  • Returns only text/html for some quota limit errors.
  • +
+ + +
Content-Encoding
string
Default: identity
Enum: "gzip" "identity"

The compression applied to the line protocol in the request payload. +To send a gzip payload, pass the Content-Encoding: gzip header.

+
Content-Length
integer

The size of the entity-body, in bytes, sent to InfluxDB. +If the length is greater than the max body configuration option, +the server responds with status code 413.

+
Content-Type
string
Default: text/plain; charset=utf-8
Enum: "text/plain" "text/plain; charset=utf-8"

The format of the data in the request body. +To send a line protocol payload, pass Content-Type: text/plain; charset=utf-8.

+
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: text/plain

In the request body, provide data in line protocol format.

+

To send compressed data, do the following:

+
    +
  1. Use gzip to compress the line protocol data.
  2. +
  3. In your request, send the compressed data and the +Content-Encoding: gzip header.
  4. +
+ + +
string <byte>

Responses

Request samples

Content type
text/plain
airSensors,sensor_id=TLM0201 temperature=73.97038159354763,humidity=35.23103248356096,co=0.48445310567793615 1630424257000000000
+airSensors,sensor_id=TLM0202 temperature=75.30007505999716,humidity=35.651929918691714,co=0.5141876544505826 1630424257000000000
+

Response samples

Content type
application/json
{
  • "code": "invalid",
  • "message": "failed to parse line protocol: error writing line 2: Unable to insert iox::column_type::field::integer type into column temp with type iox::column_type::field::string"
}
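A sketch of the compression steps listed above for `/api/v2/write`; the host, database, token, and file name are placeholders:

```sh
# 1. Compress line protocol with gzip.
echo 'home,room=kitchen temp=72 1463683075' | gzip > points.lp.gz

# 2. Send the compressed payload with the Content-Encoding: gzip header.
curl --request POST "https://cluster-id.a.influxdb.io/api/v2/write?bucket=DATABASE_NAME&precision=s" \
  --header "Authorization: Token DATABASE_TOKEN" \
  --header "Content-Encoding: gzip" \
  --header "Content-Type: text/plain; charset=utf-8" \
  --data-binary @points.lp.gz
```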

Write data using the InfluxDB v1 HTTP API

Writes data to a database.

+

Use this InfluxDB v1-compatible endpoint to send data in line protocol format to InfluxDB using v1 API parameters and authorization.

+

InfluxDB does the following when you send a write request:

+
    +
  1. Validates the request
  2. +
  3. If successful, attempts to ingest the data; error otherwise.
  4. +
  5. If successful, responds with success (HTTP 204 status code), acknowledging that the data is written and queryable; error otherwise.
  6. +
+

To ensure that InfluxDB handles writes in the order you request them, +wait for a success response (HTTP 2xx status code) before you send the next request.

+ + +
query Parameters
db
required
string

Database to write to. If none exists, InfluxDB creates a database with a default 3-day retention policy.

+
p
string

The InfluxDB 1.x password to authenticate the request.

+
precision
string

Write precision.

+
rp
string

Retention policy name.

+
u
string

The InfluxDB 1.x username to authenticate the request.

+
header Parameters
Content-Encoding
string
Default: identity
Enum: "gzip" "identity"

When present, its value indicates to the database that compression is applied to the line protocol body.

+
Zap-Trace-Span
string
Example: baggage,[object Object],span_id,1,trace_id,1

OpenTracing span context

+
Request Body schema: text/plain

Line protocol body

+
string

Responses

Response samples

Content type
application/json
Example
{
  • "code": "invalid",
  • "line": 2,
  • "message": "no data written, errors encountered on line(s): error message for first rejected point\n error message for second rejected point\n error message for Nth rejected point (up to 100 rejected points)"
}
+ + + + + diff --git a/content/influxdb3/clustered/get-started/write.md b/content/influxdb3/clustered/get-started/write.md index e659cc608..be1f74528 100644 --- a/content/influxdb3/clustered/get-started/write.md +++ b/content/influxdb3/clustered/get-started/write.md @@ -450,7 +450,9 @@ Include the following with your request: With the {{% product-name %}} [v1 API `/write` endpoint](/influxdb3/clustered/api/#operation/PostLegacyWrite), `Authorization: Bearer` and `Authorization: Token` are equivalent and you can -use either scheme to pass a database token in your request. For more information +use either scheme to pass a database token in your request. +Include the word `Bearer` or `Token`, a space, and your **token** value (all case-sensitive). +For more information about HTTP API token schemes, see how to [authenticate API requests](/influxdb3/clustered/guides/api-compatibility/v1/). {{% /note %}} diff --git a/content/influxdb3/clustered/guides/api-compatibility/v1/_index.md b/content/influxdb3/clustered/guides/api-compatibility/v1/_index.md index e4cfc421e..3b49c7609 100644 --- a/content/influxdb3/clustered/guides/api-compatibility/v1/_index.md +++ b/content/influxdb3/clustered/guides/api-compatibility/v1/_index.md @@ -67,7 +67,7 @@ With the InfluxDB v1 API, you can use database tokens in InfluxDB 1.x username a schemes, in the InfluxDB v2 `Authorization: Token` scheme, or in the OAuth `Authorization: Bearer` scheme. - [Authenticate with a username and password scheme](#authenticate-with-a-username-and-password-scheme) -- [Authenticate with a token scheme](#authenticate-with-a-token) +- [Authenticate with a token scheme](#authenticate-with-a-token-scheme) ### Authenticate with a username and password scheme @@ -155,6 +155,8 @@ The `Token` scheme is used in the InfluxDB 2.x API. `Bearer` is defined by the [OAuth 2.0 Framework](https://www.rfc-editor.org/rfc/rfc6750#page-14). Support for one or the other may vary across InfluxDB API clients. +Include the word `Bearer` or `Token`, a space, and your **token** value (all case-sensitive). + #### Syntax ```http diff --git a/content/influxdb3/clustered/write-data/best-practices/schema-design.md b/content/influxdb3/clustered/write-data/best-practices/schema-design.md index 38a34f926..c346b61e6 100644 --- a/content/influxdb3/clustered/write-data/best-practices/schema-design.md +++ b/content/influxdb3/clustered/write-data/best-practices/schema-design.md @@ -4,7 +4,7 @@ seotitle: InfluxDB schema design recommendations and best practices description: > Design your schema for simpler and more performant queries. menu: - influxdb_clustered: + influxdb3_clustered: name: Schema design weight: 201 parent: write-best-practices diff --git a/content/influxdb3/core/_index.md b/content/influxdb3/core/_index.md index a13673137..82dfea7d1 100644 --- a/content/influxdb3/core/_index.md +++ b/content/influxdb3/core/_index.md @@ -2,8 +2,9 @@ title: InfluxDB 3 Core documentation description: > InfluxDB 3 Core is an open source time series database designed and optimized - for real-time and recent data (last 72 hours). - Learn how to use and leverage InfluxDB 3 in use cases such as edge data collection, IoT data, and events. + for real-time and recent data. + Learn how to use and leverage InfluxDB 3 in use cases such as edge data + collection, IoT data, and events. 
menu: influxdb3_core: name: InfluxDB 3 Core diff --git a/content/influxdb3/core/get-started/_index.md b/content/influxdb3/core/get-started/_index.md index a4ba0e28c..4f8a41738 100644 --- a/content/influxdb3/core/get-started/_index.md +++ b/content/influxdb3/core/get-started/_index.md @@ -2,8 +2,9 @@ title: Get started with InfluxDB 3 Core description: > InfluxDB 3 Core is an open source time series database designed and optimized - for real-time and recent data (last 72 hours). - Learn how to use and leverage InfluxDB 3 in use cases such as edge data collection, IoT data, and events. + for real-time and recent data. + Learn how to use and leverage InfluxDB 3 in use cases such as edge data + collection, IoT data, and events. menu: influxdb3_core: name: Get started diff --git a/content/influxdb3/core/install.md b/content/influxdb3/core/install.md index 403c980d5..b2d497271 100644 --- a/content/influxdb3/core/install.md +++ b/content/influxdb3/core/install.md @@ -25,8 +25,9 @@ InfluxDB 3 Core runs on **Linux**, **macOS**, and **Windows**. A key feature of InfluxDB 3 is its use of object storage to store time series data in Apache Parquet format. You can choose to store these files on your local -file system, however, we recommend using an object store for the best overall -performance. {{< product-name >}} natively supports Amazon S3, +file system. Performance on your local filesystem will likely be better, but +object storage has the advantage of not running out of space and being accessible +by other systems over the network. {{< product-name >}} natively supports Amazon S3, Azure Blob Storage, and Google Cloud Storage. You can also use many local object storage implementations that provide an S3-compatible API, such as [Minio](https://min.io/). diff --git a/content/influxdb3/core/query-data/_index.md b/content/influxdb3/core/query-data/_index.md new file mode 100644 index 000000000..6dc888338 --- /dev/null +++ b/content/influxdb3/core/query-data/_index.md @@ -0,0 +1,15 @@ +--- +title: Query data in {{< product-name >}} +description: > + Learn to query data stored in InfluxDB using SQL and InfluxQL. +menu: + influxdb3_core: + name: Query data +weight: 4 +influxdb3/core/tags: [query] +source: /shared/influxdb3-query-guides/_index.md +--- + + diff --git a/content/influxdb3/core/query-data/execute-queries/_index.md b/content/influxdb3/core/query-data/execute-queries/_index.md new file mode 100644 index 000000000..cfd303dab --- /dev/null +++ b/content/influxdb3/core/query-data/execute-queries/_index.md @@ -0,0 +1,20 @@ +--- +title: Execute queries +description: > + Use tools and libraries to query data stored in {{< product-name >}}. 
+weight: 101 +menu: + influxdb3_core: + name: Execute queries + parent: Query data +influxdb3/core/tags: [query, sql, influxql] +aliases: + - /influxdb3/core/query-data/tools/ + - /influxdb3/core/query-data/sql/execute-queries/ + - /influxdb3/core/query-data/influxql/execute-queries/ +source: /shared/influxdb3-query-guides/execute-queries/_index.md +--- + + diff --git a/content/influxdb3/core/query-data/execute-queries/influxdb-v1-api.md b/content/influxdb3/core/query-data/execute-queries/influxdb-v1-api.md new file mode 100644 index 000000000..a5e9220ca --- /dev/null +++ b/content/influxdb3/core/query-data/execute-queries/influxdb-v1-api.md @@ -0,0 +1,31 @@ +--- +title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data +seotitle: Use InfluxQL and InfluxDB v1 HTTP query API +list_title: Use the v1 query API and InfluxQL +description: > + Use the InfluxDB v1 HTTP query API to query data in {{< product-name >}} + with InfluxQL. +weight: 302 +menu: + influxdb3_core: + parent: Execute queries + name: Use the v1 query API +influxdb3/core/tags: [query, influxql, python] +metadata: [InfluxQL] +related: + - /influxdb3/core/api-compatibility/v1/ +aliases: + - /influxdb3/core/query-data/influxql/execute-queries/influxdb-v1-api/ +list_code_example: | + ```sh + curl --get http://{{< influxdb/host >}}/query \ + --header "Authorization: Token DATABASE_TOKEN" \ + --data-urlencode "db=DATABASE_NAME" \ + --data-urlencode "q=SELECT * FROM home" + ``` +source: /shared/influxdb3-query-guides/execute-queries/influxdb-v1-api.md +--- + + diff --git a/content/influxdb3/core/query-data/execute-queries/influxdb3-cli.md b/content/influxdb3/core/query-data/execute-queries/influxdb3-cli.md new file mode 100644 index 000000000..43222ff08 --- /dev/null +++ b/content/influxdb3/core/query-data/execute-queries/influxdb3-cli.md @@ -0,0 +1,28 @@ +--- +title: Use the influxdb3 CLI to query data +list_title: Use the influxdb3 CLI +description: > + Use the `influxdb3 query` command to query data in {{< product-name >}} with SQL. +weight: 301 +menu: + influxdb3_core: + parent: Execute queries + name: Use the influxdb3 CLI +influxdb3/core/tags: [query, sql, influxql, influxdb3, CLI] +related: + - /influxdb3/core/reference/cli/influxdb3/query/ + - /influxdb3/core/reference/sql/ + - /influxdb3/core/reference/influxql/ + # - /influxdb3/core/get-started/query/#execute-an-sql-query, Get started querying data +list_code_example: | + ```sh + influxdb3 query \ + --database DATABASE_NAME \ + "SELECT * FROM home" + ``` +source: /shared/influxdb3-query-guides/execute-queries/influxdb3-cli.md +--- + + diff --git a/content/influxdb3/core/query-data/influxql/_index.md b/content/influxdb3/core/query-data/influxql/_index.md new file mode 100644 index 000000000..110f0365d --- /dev/null +++ b/content/influxdb3/core/query-data/influxql/_index.md @@ -0,0 +1,16 @@ +--- +title: Query data with InfluxQL +description: > + Learn to use InfluxQL to query data stored in {{< product-name >}}. 
+menu: + influxdb3_core: + name: Query with InfluxQL + parent: Query data +weight: 102 +influxdb3/core/tags: [query, influxql] +source: /shared/influxdb3-query-guides/influxql/_index.md +--- + + diff --git a/content/influxdb3/core/query-data/influxql/aggregate-select.md b/content/influxdb3/core/query-data/influxql/aggregate-select.md new file mode 100644 index 000000000..3e0f02a80 --- /dev/null +++ b/content/influxdb3/core/query-data/influxql/aggregate-select.md @@ -0,0 +1,41 @@ +--- +title: Aggregate data with InfluxQL +seotitle: Aggregate or apply selector functions to data with InfluxQL +description: > + Use InfluxQL aggregate and selector functions to perform aggregate operations + on your time series data. +menu: + influxdb3_core: + name: Aggregate data + parent: Query with InfluxQL + identifier: query-influxql-aggregate +weight: 203 +influxdb3/core/tags: [query, influxql] +related: + - /influxdb3/core/reference/influxql/functions/aggregates/ + - /influxdb3/core/reference/influxql/functions/selectors/ +list_code_example: | + ##### Aggregate fields by groups + ```sql + SELECT + MEAN(temp) AS mean, + FIRST(hum) as first, + FROM home + GROUP BY tag + ``` + + ##### Aggregate by time-based intervals + ```sql + SELECT + MEAN(temp), + sum(hum), + FROM home + WHERE time >= now() - 24h + GROUP BY time(1h),room + ``` +source: /shared/influxdb3-query-guides/influxql/aggregate-select.md +--- + + diff --git a/content/influxdb3/core/query-data/influxql/basic-query.md b/content/influxdb3/core/query-data/influxql/basic-query.md new file mode 100644 index 000000000..feaef1157 --- /dev/null +++ b/content/influxdb3/core/query-data/influxql/basic-query.md @@ -0,0 +1,23 @@ +--- +title: Perform a basic InfluxQL query +seotitle: Perform a basic InfluxQL query in {{< product-name >}} +description: > + A basic InfluxQL query that queries data from InfluxDB most commonly includes + `SELECT`, `FROM`, and `WHERE` clauses. +menu: + influxdb3_core: + name: Basic query + parent: Query with InfluxQL + identifier: query-influxql-basic +weight: 202 +influxdb3/core/tags: [query, influxql] +list_code_example: | + ```sql + SELECT temp, room FROM home WHERE time >= now() - 1d + ``` +source: /shared/influxdb3-query-guides/influxql/basic-query.md +--- + + diff --git a/content/influxdb3/core/query-data/influxql/explore-schema.md b/content/influxdb3/core/query-data/influxql/explore-schema.md new file mode 100644 index 000000000..985221604 --- /dev/null +++ b/content/influxdb3/core/query-data/influxql/explore-schema.md @@ -0,0 +1,39 @@ +--- +title: Explore your schema with InfluxQL +description: > + Use InfluxQL `SHOW` statements to return information about your data schema. 
+menu: + influxdb3_core: + name: Explore your schema + parent: Query with InfluxQL + identifier: query-influxql-schema +weight: 201 +influxdb3/core/tags: [query, influxql] +related: + - /influxdb3/core/reference/influxql/show/ +list_code_example: | + ##### List measurements + ```sql + SHOW MEASUREMENTS + ``` + + ##### List field keys in a measurement + ```sql + SHOW FIELD KEYS FROM "measurement" + ``` + + ##### List tag keys in a measurement + ```sql + SHOW TAG KEYS FROM "measurement" + ``` + + ##### List tag values for a specific tag key + ```sql + SHOW TAG VALUES FROM "measurement" WITH KEY = "tag-key" WHERE time > now() - 1d + ``` +source: /shared/influxdb3-query-guides/influxql/explore-schema.md +--- + + diff --git a/content/influxdb3/core/query-data/influxql/parameterized-queries.md b/content/influxdb3/core/query-data/influxql/parameterized-queries.md new file mode 100644 index 000000000..e6ef5e1f9 --- /dev/null +++ b/content/influxdb3/core/query-data/influxql/parameterized-queries.md @@ -0,0 +1,45 @@ +--- +title: Use parameterized queries with InfluxQL +description: > + Use parameterized queries to prevent injection attacks and make queries more reusable. +weight: 404 +menu: + influxdb3_core: + name: Parameterized queries + parent: Query with InfluxQL + identifier: parameterized-queries-influxql +influxdb3/core/tags: [query, security, influxql] +list_code_example: | + ##### Using Go and the influxdb3-go client + + ```go + // Use the $parameter syntax to reference parameters in a query. + // The following InfluxQL query contains $room and $min_time parameters. + query := ` + SELECT * FROM home + WHERE time >= $min_time + AND temp >= $min_temp + AND room = $room` + + // Assign parameter names to input values. + parameters := influxdb3.QueryParameters{ + "room": "Kitchen", + "min_temp": 20.0, + "min_time": "2024-03-18 00:00:00.00", + } + + // Call the client's function to query InfluxDB with parameters and the + // the InfluxQL QueryType. + iterator, err := client.QueryWithParameters(context.Background(), + query, + parameters, + influxdb3.WithQueryType(influxdb3.InfluxQL)) + ``` +# Leaving in draft until tested +draft: true +source: /shared/influxdb3-query-guides/influxql/parameterized-queries.md +--- + + diff --git a/content/influxdb3/core/query-data/influxql/troubleshoot.md b/content/influxdb3/core/query-data/influxql/troubleshoot.md new file mode 100644 index 000000000..9c9d0bef6 --- /dev/null +++ b/content/influxdb3/core/query-data/influxql/troubleshoot.md @@ -0,0 +1,15 @@ +--- +title: Troubleshoot InfluxQL errors +description: > + Learn how to troubleshoot and fix common InfluxQL errors. +menu: + influxdb3_core: + name: Troubleshoot errors + parent: Query with InfluxQL +weight: 230 +source: /shared/influxdb3-query-guides/influxql/troubleshoot.md +--- + + diff --git a/content/influxdb3/core/query-data/sql/_index.md b/content/influxdb3/core/query-data/sql/_index.md new file mode 100644 index 000000000..bb77b902f --- /dev/null +++ b/content/influxdb3/core/query-data/sql/_index.md @@ -0,0 +1,17 @@ +--- +title: Query data with SQL +seotitle: Query data with SQL +description: > + Learn to query data stored in {{< product-name >}} using SQL. 
+menu: + influxdb3_core: + name: Query with SQL + parent: Query data +weight: 101 +influxdb3/core/tags: [query, sql] +source: /shared/influxdb3-query-guides/sql/_index.md +--- + + diff --git a/content/influxdb3/core/query-data/sql/aggregate-select.md b/content/influxdb3/core/query-data/sql/aggregate-select.md new file mode 100644 index 000000000..b35b75396 --- /dev/null +++ b/content/influxdb3/core/query-data/sql/aggregate-select.md @@ -0,0 +1,43 @@ +--- +title: Aggregate data with SQL +description: > + Use aggregate and selector functions to perform aggregate operations on your + time series data. +menu: + influxdb3_core: + name: Aggregate data + parent: Query with SQL + identifier: query-sql-aggregate +weight: 203 +influxdb3/core/tags: [query, sql] +related: + - /influxdb3/core/reference/sql/functions/aggregate/ + - /influxdb3/core/reference/sql/functions/selector/ + - /influxdb3/core/reference/sql/group-by/ +list_code_example: | + ##### Aggregate fields by groups + ```sql + SELECT + mean(field1) AS mean, + selector_first(field2)['value'] as first, + tag1 + FROM home + GROUP BY tag + ``` + + ##### Aggregate by time-based intervals + ```sql + SELECT + DATE_BIN(INTERVAL '1 hour', time, '2022-01-01T00:00:00Z'::TIMESTAMP) AS time, + mean(field1), + sum(field2), + tag1 + FROM home + GROUP BY 1, tag1 + ``` +source: /shared/influxdb3-query-guides/sql/aggregate-select.md +--- + + diff --git a/content/influxdb3/core/query-data/sql/basic-query.md b/content/influxdb3/core/query-data/sql/basic-query.md new file mode 100644 index 000000000..9c61e63c9 --- /dev/null +++ b/content/influxdb3/core/query-data/sql/basic-query.md @@ -0,0 +1,23 @@ +--- +title: Perform a basic SQL query +seotitle: Perform a basic SQL query in InfluxDB 3 Core +description: > + A basic SQL query that queries data from {{< product-name >}} most commonly + includes `SELECT`, `FROM`, and `WHERE` clauses. +menu: + influxdb3_core: + name: Basic query + parent: Query with SQL + identifier: query-sql-basic +weight: 202 +influxdb3/core/tags: [query, sql] +list_code_example: | + ```sql + SELECT temp, room FROM home WHERE time >= now() - INTERVAL '1 day' + ``` +source: /shared/influxdb3-query-guides/sql/basic-query.md +--- + + diff --git a/content/influxdb3/core/query-data/sql/cast-types.md b/content/influxdb3/core/query-data/sql/cast-types.md new file mode 100644 index 000000000..6b56078b8 --- /dev/null +++ b/content/influxdb3/core/query-data/sql/cast-types.md @@ -0,0 +1,29 @@ +--- +title: Cast values to different types +seotitle: Cast values to different data types in SQL +description: > + Use the `CAST` function or double-colon `::` casting shorthand syntax to cast + a value to a specific type. +menu: + influxdb3_core: + name: Cast types + parent: Query with SQL + identifier: query-sql-cast-types +weight: 205 +influxdb3/core/tags: [query, sql] +related: + - /influxdb3/core/reference/sql/data-types/ +list_code_example: | + ```sql + -- CAST clause + SELECT CAST(1234.5 AS BIGINT) + + -- Double-colon casting shorthand + SELECT 1234.5::BIGINT + ``` +source: /shared/influxdb3-query-guides/sql/cast-types.md +--- + + diff --git a/content/influxdb3/core/query-data/sql/explore-schema.md b/content/influxdb3/core/query-data/sql/explore-schema.md new file mode 100644 index 000000000..31d234ff1 --- /dev/null +++ b/content/influxdb3/core/query-data/sql/explore-schema.md @@ -0,0 +1,27 @@ +--- +title: Explore your schema with SQL +description: > + Use SQL to explore your data schema in your {{< product-name >}} database. 
+menu: + influxdb3_core: + name: Explore your schema + parent: Query with SQL + identifier: query-sql-schema +weight: 201 +influxdb3/core/tags: [query, sql] +list_code_example: | + ##### List tables + ```sql + SHOW TABLES + ``` + + ##### List columns in a table + ```sql + SHOW COLUMNS IN table + ``` +source: /shared/influxdb3-query-guides/sql/explore-schema.md +--- + + diff --git a/content/influxdb3/core/query-data/sql/fill-gaps.md b/content/influxdb3/core/query-data/sql/fill-gaps.md new file mode 100644 index 000000000..4c9fd6dd5 --- /dev/null +++ b/content/influxdb3/core/query-data/sql/fill-gaps.md @@ -0,0 +1,30 @@ +--- +title: Fill gaps in data +seotitle: Fill gaps in data with SQL +description: > + Use [`date_bin_gapfill`](/influxdb3/core/reference/sql/functions/time-and-date/#date_bin_gapfill) + with [`interpolate`](/influxdb3/core/reference/sql/functions/misc/#interpolate) + or [`locf`](/influxdb3/core/reference/sql/functions/misc/#locf) to + fill gaps of time where no data is returned. +menu: + influxdb3_core: + parent: Query with SQL +weight: 206 +list_code_example: | + ```sql + SELECT + date_bin_gapfill(INTERVAL '30 minutes', time) as time, + room, + interpolate(avg(temp)) + FROM home + WHERE + time >= '2022-01-01T08:00:00Z' + AND time <= '2022-01-01T10:00:00Z' + GROUP BY 1, room + ``` +source: /shared/influxdb3-query-guides/sql/fill-gaps.md +--- + + diff --git a/content/influxdb3/core/query-data/sql/parameterized-queries.md b/content/influxdb3/core/query-data/sql/parameterized-queries.md new file mode 100644 index 000000000..5a4959ffa --- /dev/null +++ b/content/influxdb3/core/query-data/sql/parameterized-queries.md @@ -0,0 +1,42 @@ +--- +title: Use parameterized queries with SQL +description: > + Use parameterized queries to prevent injection attacks and make queries more reusable. +weight: 404 +menu: + influxdb3_core: + name: Parameterized queries + parent: Query with SQL + identifier: parameterized-queries-sql +influxdb3/core/tags: [query, security, sql] +list_code_example: | + ##### Using Go and the influxdb3-go client + + ```go + // Use the $parameter syntax to reference parameters in a query. + // The following SQL query contains $room and $min_temp placeholders. + query := ` + SELECT * FROM home + WHERE time >= $min_time + AND temp >= $min_temp + AND room = $room` + + // Assign parameter names to input values. + parameters := influxdb3.QueryParameters{ + "room": "Kitchen", + "min_temp": 20.0, + "min_time": "2024-03-18 00:00:00.00", + + } + + // Call the client's function to query InfluxDB with parameters. + iterator, err := client.QueryWithParameters(context.Background(), query, parameters) + ``` +# Leaving in draft until tested +draft: true +source: /shared/influxdb3-query-guides/sql/parameterized-queries.md +--- + + \ No newline at end of file diff --git a/content/influxdb3/core/reference/cli/influxdb3/serve.md b/content/influxdb3/core/reference/cli/influxdb3/serve.md index 4c2521950..1c426eaaa 100644 --- a/content/influxdb3/core/reference/cli/influxdb3/serve.md +++ b/content/influxdb3/core/reference/cli/influxdb3/serve.md @@ -7,6 +7,8 @@ menu: parent: influxdb3 name: influxdb3 serve weight: 300 +related: + - /influxdb3/core/reference/config-options/ --- The `influxdb3 serve` command starts the {{< product-name >}} server. @@ -16,7 +18,7 @@ The `influxdb3 serve` command starts the {{< product-name >}} server. 
```bash -influxdb3 serve [OPTIONS] --writer-id +influxdb3 serve [OPTIONS] --node-id ``` ## Options @@ -79,7 +81,7 @@ influxdb3 serve [OPTIONS] --writer-id | | `--snapshotted-wal-files-to-keep` | _See [configuration options](/influxdb3/core/reference/config-options/#snapshotted-wal-files-to-keep)_ | | | `--query-log-size` | _See [configuration options](/influxdb3/core/reference/config-options/#query-log-size)_ | | | `--buffer-mem-limit-mb` | _See [configuration options](/influxdb3/core/reference/config-options/#buffer-mem-limit-mb)_ | -| {{< req "\*" >}} | `--writer-id` | _See [configuration options](/influxdb3/core/reference/config-options/#writer-id)_ | +| {{< req "\*" >}} | `--node-id` | _See [configuration options](/influxdb3/core/reference/config-options/#node-id)_ | | | `--parquet-mem-cache-size-mb` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-size-mb)_ | | | `--parquet-mem-cache-prune-percentage` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-prune-percentage)_ | | | `--parquet-mem-cache-prune-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-prune-interval)_ | @@ -88,6 +90,7 @@ influxdb3 serve [OPTIONS] --writer-id | | `--distinct-cache-eviction-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#distinct-cache-eviction-interval)_ | | | `--plugin-dir` | _See [configuration options](/influxdb3/core/reference/config-options/#plugin-dir)_ | | | `--force-snapshot-mem-threshold` | _See [configuration options](/influxdb3/core/reference/config-options/#force-snapshot-mem-threshold)_ | +| | `--query-file-limit` | _See [configuration options](/influxdb3/core/reference/config-options/#query-file-limit)_ | {{< caption >}} {{< req text="\* Required options" >}} @@ -119,7 +122,7 @@ with a unique identifier for your {{< product-name >}} server. influxdb3 serve \ --object-store file \ --data-dir ~/.influxdb3 \ - --writer-id MY_HOST_ID + --node-id MY_HOST_ID ``` ### Run the InfluxDB 3 server with extra verbose logging @@ -131,7 +134,7 @@ influxdb3 serve \ --verbose \ --object-store file \ --data-dir ~/.influxdb3 \ - --writer-id MY_HOST_ID + --node-id MY_HOST_ID ``` ### Run InfluxDB 3 with debug logging using LOG_FILTER @@ -142,7 +145,7 @@ influxdb3 serve \ LOG_FILTER=debug influxdb3 serve \ --object-store file \ --data-dir ~/.influxdb3 \ - --writer-id MY_HOST_ID + --node-id MY_HOST_ID ``` {{% /code-placeholders %}} diff --git a/content/influxdb3/core/reference/cli/influxdb3/show/system/_index.md b/content/influxdb3/core/reference/cli/influxdb3/show/system/_index.md new file mode 100644 index 000000000..71793e77c --- /dev/null +++ b/content/influxdb3/core/reference/cli/influxdb3/show/system/_index.md @@ -0,0 +1,16 @@ +--- +title: influxdb3 show system +description: > + The `influxdb3 show system` command displays data from {{< product-name >}} + system tables. 
+menu: + influxdb3_core: + parent: influxdb3 show + name: influxdb3 show system +weight: 400 +source: /shared/influxdb3-cli/show/system/_index.md +--- + + diff --git a/content/influxdb3/core/reference/cli/influxdb3/show/system/summary.md b/content/influxdb3/core/reference/cli/influxdb3/show/system/summary.md new file mode 100644 index 000000000..470e91e39 --- /dev/null +++ b/content/influxdb3/core/reference/cli/influxdb3/show/system/summary.md @@ -0,0 +1,16 @@ +--- +title: influxdb3 show system summary +description: > + The `influxdb3 show system summary` command returns a summary various types of + system table data. +menu: + influxdb3_core: + parent: influxdb3 show system + name: influxdb3 show system summary +weight: 401 +source: /shared/influxdb3-cli/show/system/summary.md +--- + + diff --git a/content/influxdb3/core/reference/cli/influxdb3/show/system/table-list.md b/content/influxdb3/core/reference/cli/influxdb3/show/system/table-list.md new file mode 100644 index 000000000..dbfbc6c36 --- /dev/null +++ b/content/influxdb3/core/reference/cli/influxdb3/show/system/table-list.md @@ -0,0 +1,15 @@ +--- +title: influxdb3 show system table-list +description: > + The `influxdb3 show system table-list` command lists available system tables. +menu: + influxdb3_core: + parent: influxdb3 show system + name: influxdb3 show system table-list +weight: 401 +source: /shared/influxdb3-cli/show/system/table-list.md +--- + + diff --git a/content/influxdb3/core/reference/cli/influxdb3/show/system/table.md b/content/influxdb3/core/reference/cli/influxdb3/show/system/table.md new file mode 100644 index 000000000..4652500e8 --- /dev/null +++ b/content/influxdb3/core/reference/cli/influxdb3/show/system/table.md @@ -0,0 +1,15 @@ +--- +title: influxdb3 show system table +description: > + The `influxdb3 show system table` command queries data from a system table. +menu: + influxdb3_core: + parent: influxdb3 show system + name: influxdb3 show system table +weight: 401 +source: /shared/influxdb3-cli/show/system/table.md +--- + + diff --git a/content/influxdb3/core/reference/config-options.md b/content/influxdb3/core/reference/config-options.md index b2ac4d638..53b0f9726 100644 --- a/content/influxdb3/core/reference/config-options.md +++ b/content/influxdb3/core/reference/config-options.md @@ -1,5 +1,5 @@ --- -title: InfluxDB 3 Core configuration options +title: '{{< product-name >}} configuration options' description: > InfluxDB 3 Core lets you customize your server configuration by using `influxdb3 serve` command options or by setting environment variables. @@ -27,7 +27,7 @@ environment variables. influxdb3 serve \ --object-store file \ --data-dir ~/.influxdb3 \ - --writer-id my-host \ + --node-id my-host \ --log-filter info \ --max-http-request-size 20971520 \ --aws-allow-http @@ -53,7 +53,8 @@ influxdb3 serve - [General](#general) - [object-store](#object-store) - [data-dir](#data-dir) - - [writer-id](#writer-id) + - [node-id](#node-id) + - [query-file-limit](#query-file-limit) - [AWS](#aws) - [aws-access-key-id](#aws-access-key-id) - [aws-secret-access-key](#aws-secret-access-key) @@ -134,9 +135,9 @@ influxdb3 serve ### General - [object-store](#object-store) -- [bucket](#bucket) - [data-dir](#data-dir) -- [writer-id](#writer-id) +- [node-id](#node-id) +- [query-file-limit](#query-file-limit) #### object-store @@ -166,15 +167,49 @@ Defines the location {{< product-name >}} uses to store files locally. 
--- -#### writer-id +#### node-id -Specifies the writer identifier used as a prefix in all object store file paths. +Specifies the node identifier used as a prefix in all object store file paths. This should be unique for any hosts sharing the same object store configuration--for example, the same bucket. -| influxdb3 serve option | Environment variable | -| :--------------------- | :----------------------------------- | -| `--writer-id` | `INFLUXDB3_WRITER_IDENTIFIER_PREFIX` | +| influxdb3 serve option | Environment variable | +| :--------------------- | :--------------------------------- | +| `--node-id` | `INFLUXDB3_NODE_IDENTIFIER_PREFIX` | + +--- + +#### query-file-limit + +Limits the number of Parquet files a query can access. + +**Default:** `432` + +With the default `432` setting and the default [`gen1-duration`](#`gen1-duration`) +setting of 10 minutes, queries can access up to a 72 hours of data, but +potentially less depending on whether all data for a given 10 minute block of +time was ingested during the same period. + +You can increase this limit to allow more files to be queried, but be aware of +the following side-effects: + +- Degraded query performance for queries that read more Parquet files +- Increased memory usage +- Your system potentially killing the `influxdb3` process due to Out-of-Memory + (OOM) errors +- If using object storage to store data, many GET requests to access the data + (as many as 2 per file) + +> [!Note] +> We recommend keeping the default setting and querying smaller time ranges. +> If you need to query longer time ranges or faster query performance on any query +> that accesses an hour or more of data, [InfluxDB 3 Enterprise](/influxdb3/enterprise/) +> optimizes data storage by compacting and rearranging Parquet files to achieve +> faster query performance. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :--------------------------- | +| `--query-file-limit` | `INFLUXDB3_QUERY_FILE_LIMIT` | --- diff --git a/content/influxdb3/core/reference/sample-data.md b/content/influxdb3/core/reference/sample-data.md new file mode 100644 index 000000000..5fa671c08 --- /dev/null +++ b/content/influxdb3/core/reference/sample-data.md @@ -0,0 +1,17 @@ +--- +title: Sample data +description: > + Sample datasets are used throughout the the {{< product-name >}} documentation + to demonstrate functionality. + Use the following sample datasets to replicate provided examples. +menu: + influxdb3_core: + name: Sample data + parent: Reference +weight: 182 +source: /shared/influxdb3-sample-data/sample-data.md +--- + + \ No newline at end of file diff --git a/content/influxdb3/core/write-data/_index.md b/content/influxdb3/core/write-data/_index.md new file mode 100644 index 000000000..bd179e2b0 --- /dev/null +++ b/content/influxdb3/core/write-data/_index.md @@ -0,0 +1,16 @@ +--- +title: Write data to {{% product-name %}} +list_title: Write data +description: > + Collect and write time series data to {{% product-name %}}. 
+weight: 3 +menu: + influxdb3_core: + name: Write data +influxdb3/core/tags: [write, line protocol] +source: /shared/influxdb3-write-guides/_index.md +--- + + diff --git a/content/influxdb3/core/write-data/best-practices/_index.md b/content/influxdb3/core/write-data/best-practices/_index.md new file mode 100644 index 000000000..ddb74b2fa --- /dev/null +++ b/content/influxdb3/core/write-data/best-practices/_index.md @@ -0,0 +1,17 @@ +--- +title: Best practices for writing data +seotitle: Best practices for writing data to {{< product-name >}} +description: > + Learn about the recommendations and best practices for writing data to {{< product-name >}}. +weight: 105 +menu: + influxdb3_core: + name: Best practices + identifier: write-best-practices + parent: Write data +source: /shared/influxdb3-write-guides/best-practices/_index.md +--- + + diff --git a/content/influxdb3/core/write-data/best-practices/optimize-writes.md b/content/influxdb3/core/write-data/best-practices/optimize-writes.md new file mode 100644 index 000000000..fb4b43eb1 --- /dev/null +++ b/content/influxdb3/core/write-data/best-practices/optimize-writes.md @@ -0,0 +1,20 @@ +--- +title: Optimize writes to {{< product-name >}} +description: > + Tips and examples to optimize performance and system overhead when writing + data to {{< product-name >}}. +weight: 203 +menu: + influxdb3_core: + name: Optimize writes + parent: write-best-practices +influxdb/cloud/tags: [best practices, write] +related: + - /resources/videos/ingest-data/, How to Ingest Data in InfluxDB (Video) + - /influxdb3/core/write-data/use-telegraf/ +source: /shared/influxdb3-write-guides/best-practices/optimize-writes.md +--- + + diff --git a/content/influxdb3/core/write-data/best-practices/schema-design.md b/content/influxdb3/core/write-data/best-practices/schema-design.md new file mode 100644 index 000000000..f6b24a7e6 --- /dev/null +++ b/content/influxdb3/core/write-data/best-practices/schema-design.md @@ -0,0 +1,20 @@ +--- +title: InfluxDB schema design recommendations +seotitle: InfluxDB schema design recommendations and best practices +description: > + Design your schema for simpler and more performant queries. +menu: + influxdb3_core: + name: Schema design + weight: 201 + parent: write-best-practices +related: + - /influxdb3/core/admin/databases/ + - /influxdb3/core/reference/cli/influxdb3/ + - /influxdb3/core/query-data/troubleshoot-and-optimize/ +source: /shared/influxdb3-write-guides/best-practices/schema-design.md +--- + + diff --git a/content/influxdb3/core/write-data/client-libraries.md b/content/influxdb3/core/write-data/client-libraries.md new file mode 100644 index 000000000..874ecb657 --- /dev/null +++ b/content/influxdb3/core/write-data/client-libraries.md @@ -0,0 +1,20 @@ +--- +title: Use InfluxDB client libraries to write data +description: > + Use InfluxDB API clients to write points as line protocol data to InfluxDB + Clustered. 
+menu: + influxdb3_core: + name: Use client libraries + parent: Write data + identifier: write-client-libs +weight: 103 +related: + - /influxdb3/core/reference/syntax/line-protocol/ + - /influxdb3/core/get-started/write/ +source: /shared/influxdb3-write-guides/client-libraries.md +--- + + diff --git a/content/influxdb3/core/write-data/influxdb3-cli.md b/content/influxdb3/core/write-data/influxdb3-cli.md new file mode 100644 index 000000000..65fdd0b46 --- /dev/null +++ b/content/influxdb3/core/write-data/influxdb3-cli.md @@ -0,0 +1,23 @@ +--- +title: Use the influxdb3 CLI to write data +description: > + Use the [`influxdb3` CLI](/influxdb3/core/reference/cli/influxdb3/) + to write line protocol data to InfluxDB Clustered. +menu: + influxdb3_core: + name: Use the influxdb3 CLI + parent: Write data + identifier: write-influxdb3 +weight: 101 +related: + - /influxdb3/core/reference/cli/influxdb3/write/ + - /influxdb3/core/reference/syntax/line-protocol/ + - /influxdb3/core/get-started/write/ +alt_links: + cloud-serverless: /influxdb3/cloud-serverless/write-data/line-protocol/ +source: /shared/influxdb3-write-guides/influxdb3-cli.md +--- + + diff --git a/content/influxdb3/core/write-data/troubleshoot.md b/content/influxdb3/core/write-data/troubleshoot.md new file mode 100644 index 000000000..cbd26faeb --- /dev/null +++ b/content/influxdb3/core/write-data/troubleshoot.md @@ -0,0 +1,24 @@ +--- +title: Troubleshoot issues writing data +seotitle: Troubleshoot issues writing data to InfluxDB +weight: 106 +description: > + Troubleshoot issues writing data. + Find response codes for failed writes. + Discover how writes fail, from exceeding rate or payload limits, to syntax + errors and schema conflicts. +menu: + influxdb3_core: + name: Troubleshoot issues + parent: Write data +influxdb3/core/tags: [write, line protocol, errors] +related: + - /influxdb3/core/reference/syntax/line-protocol/ + - /influxdb3/core/write-data/best-practices/ + - /influxdb3/core/reference/internals/durability/ +source: /shared/influxdb3-write-guides/troubleshoot.md +--- + + diff --git a/content/influxdb3/core/write-data/use-telegraf/_index.md b/content/influxdb3/core/write-data/use-telegraf/_index.md new file mode 100644 index 000000000..69095ba84 --- /dev/null +++ b/content/influxdb3/core/write-data/use-telegraf/_index.md @@ -0,0 +1,22 @@ +--- +title: Use Telegraf to write data +seotitle: Use the Telegraf agent to collect and write data +weight: 102 +description: > + Use Telegraf to collect and write data to {{< product-name >}}. +aliases: + - /influxdb3/core/collect-data/advanced-telegraf + - /influxdb3/core/collect-data/use-telegraf + - /influxdb3/core/write-data/no-code/use-telegraf/ +menu: + influxdb3_core: + name: Use Telegraf + parent: Write data +alt_links: + cloud: /influxdb/cloud/write-data/no-code/use-telegraf/ +source: /shared/influxdb3-write-guides/use-telegraf/_index.md +--- + + diff --git a/content/influxdb3/core/write-data/use-telegraf/configure.md b/content/influxdb3/core/write-data/use-telegraf/configure.md new file mode 100644 index 000000000..9e7f21e3d --- /dev/null +++ b/content/influxdb3/core/write-data/use-telegraf/configure.md @@ -0,0 +1,23 @@ +--- +title: Configure Telegraf to write to {{< product-name >}} +seotitle: Configure Telegraf to write data to {{< product-name >}} +description: > + Update existing or create new Telegraf configurations to use the `influxdb_v2` + output plugin to write to {{< product-name >}}. + Start Telegraf using the custom configuration. 
+menu: + influxdb3_core: + name: Configure Telegraf + parent: Use Telegraf +weight: 101 +influxdb3/core/tags: [telegraf] +related: + - /telegraf/v1/plugins/, Telegraf plugins +alt_links: + cloud: /influxdb/cloud/write-data/no-code/use-telegraf/manual-config/ +source: /shared/influxdb3-write-guides/use-telegraf/configure.md +--- + + diff --git a/content/influxdb3/core/write-data/use-telegraf/csv.md b/content/influxdb3/core/write-data/use-telegraf/csv.md new file mode 100644 index 000000000..e26b591b0 --- /dev/null +++ b/content/influxdb3/core/write-data/use-telegraf/csv.md @@ -0,0 +1,21 @@ +--- +title: Use Telegraf to write CSV data +description: > + Use the Telegraf `file` input plugin to read and parse CSV data into + [line protocol](/influxdb3/core/reference/syntax/line-protocol/) + and write it to {{< product-name >}}. +menu: + influxdb3_core: + parent: Use Telegraf + name: Write CSV + identifier: write-csv-telegraf +weight: 203 +related: + - /telegraf/v1/data_formats/input/csv/ + - /influxdb3/core/write-data/use-telegraf/ +source: /shared/influxdb3-write-guides/use-telegraf/csv.md +--- + + diff --git a/content/influxdb3/core/write-data/use-telegraf/dual-write.md b/content/influxdb3/core/write-data/use-telegraf/dual-write.md new file mode 100644 index 000000000..35f87ab3b --- /dev/null +++ b/content/influxdb3/core/write-data/use-telegraf/dual-write.md @@ -0,0 +1,18 @@ +--- +title: Use Telegraf to dual write to InfluxDB +description: > + Configure Telegraf to write data to multiple InfluxDB instances or clusters + simultaneously. +menu: + influxdb3_core: + name: Dual write to InfluxDB + parent: Use Telegraf +weight: 203 +alt_links: + cloud: /influxdb/cloud/write-data/no-code/use-telegraf/dual-write/ +source: /shared/influxdb3-write-guides/use-telegraf/dual-write.md +--- + + diff --git a/content/influxdb3/enterprise/install.md b/content/influxdb3/enterprise/install.md index 5aaf676ed..e0302c9bd 100644 --- a/content/influxdb3/enterprise/install.md +++ b/content/influxdb3/enterprise/install.md @@ -25,8 +25,9 @@ InfluxDB 3 Enterprise runs on **Linux**, **macOS**, and **Windows**. A key feature of InfluxDB 3 is its use of object storage to store time series data in Apache Parquet format. You can choose to store these files on your local -file system, however, we recommend using an object store for the best overall -performance. {{< product-name >}} natively supports Amazon S3, +file system. Performance on your local filesystem will likely be better, but +object storage has the advantage of not running out of space and being accessible +by other systems over the network. {{< product-name >}} natively supports Amazon S3, Azure Blob Storage, and Google Cloud Storage. You can also use many local object storage implementations that provide an S3-compatible API, such as [Minio](https://min.io/). diff --git a/content/influxdb3/enterprise/query-data/_index.md b/content/influxdb3/enterprise/query-data/_index.md new file mode 100644 index 000000000..ffa486a04 --- /dev/null +++ b/content/influxdb3/enterprise/query-data/_index.md @@ -0,0 +1,15 @@ +--- +title: Query data in {{< product-name >}} +description: > + Learn to query data stored in InfluxDB using SQL and InfluxQL. 
+menu: + influxdb3_enterprise: + name: Query data +weight: 4 +influxdb3/enterprise/tags: [query] +source: /shared/influxdb3-query-guides/_index.md +--- + + diff --git a/content/influxdb3/enterprise/query-data/execute-queries/_index.md b/content/influxdb3/enterprise/query-data/execute-queries/_index.md new file mode 100644 index 000000000..6f3676f35 --- /dev/null +++ b/content/influxdb3/enterprise/query-data/execute-queries/_index.md @@ -0,0 +1,20 @@ +--- +title: Execute queries +description: > + Use tools and libraries to query data stored in {{< product-name >}}. +weight: 101 +menu: + influxdb3_enterprise: + name: Execute queries + parent: Query data +influxdb3/enterprise/tags: [query, sql, influxql] +aliases: + - /influxdb3/enterprise/query-data/tools/ + - /influxdb3/enterprise/query-data/sql/execute-queries/ + - /influxdb3/enterprise/query-data/influxql/execute-queries/ +source: /shared/influxdb3-query-guides/execute-queries/_index.md +--- + + diff --git a/content/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api.md b/content/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api.md new file mode 100644 index 000000000..0c40fafc5 --- /dev/null +++ b/content/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api.md @@ -0,0 +1,31 @@ +--- +title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data +seotitle: Use InfluxQL and InfluxDB v1 HTTP query API +list_title: Use the v1 query API and InfluxQL +description: > + Use the InfluxDB v1 HTTP query API to query data in {{< product-name >}} + with InfluxQL. +weight: 302 +menu: + influxdb3_enterprise: + parent: Execute queries + name: Use the v1 query API +influxdb3/enterprise/tags: [query, influxql, python] +metadata: [InfluxQL] +related: + - /influxdb3/enterprise/api-compatibility/v1/ +aliases: + - /influxdb3/enterprise/query-data/influxql/execute-queries/influxdb-v1-api/ +list_code_example: | + ```sh + curl --get http://{{< influxdb/host >}}/query \ + --header "Authorization: Token DATABASE_TOKEN" \ + --data-urlencode "db=DATABASE_NAME" \ + --data-urlencode "q=SELECT * FROM home" + ``` +source: /shared/influxdb3-query-guides/execute-queries/influxdb-v1-api.md +--- + + diff --git a/content/influxdb3/enterprise/query-data/execute-queries/influxdb3-cli.md b/content/influxdb3/enterprise/query-data/execute-queries/influxdb3-cli.md new file mode 100644 index 000000000..8b19fdc9c --- /dev/null +++ b/content/influxdb3/enterprise/query-data/execute-queries/influxdb3-cli.md @@ -0,0 +1,28 @@ +--- +title: Use the influxdb3 CLI to query data +list_title: Use the influxdb3 CLI +description: > + Use the `influxdb3 query` command to query data in {{< product-name >}} with SQL. 
+weight: 301 +menu: + influxdb3_enterprise: + parent: Execute queries + name: Use the influxdb3 CLI +influxdb3/enterprise/tags: [query, sql, influxql, influxdb3, CLI] +related: + - /influxdb3/enterprise/reference/cli/influxdb3/query/ + - /influxdb3/enterprise/reference/sql/ + - /influxdb3/enterprise/reference/influxql/ + # - /influxdb3/enterprise/get-started/query/#execute-an-sql-query, Get started querying data +list_code_example: | + ```sh + influxdb3 query \ + --database DATABASE_NAME \ + "SELECT * FROM home" + ``` +source: /shared/influxdb3-query-guides/execute-queries/influxdb3-cli.md +--- + + diff --git a/content/influxdb3/enterprise/query-data/influxql/_index.md b/content/influxdb3/enterprise/query-data/influxql/_index.md new file mode 100644 index 000000000..c8fccfa37 --- /dev/null +++ b/content/influxdb3/enterprise/query-data/influxql/_index.md @@ -0,0 +1,16 @@ +--- +title: Query data with InfluxQL +description: > + Learn to use InfluxQL to query data stored in {{< product-name >}}. +menu: + influxdb3_enterprise: + name: Query with InfluxQL + parent: Query data +weight: 102 +influxdb3/enterprise/tags: [query, influxql] +source: /shared/influxdb3-query-guides/influxql/_index.md +--- + + diff --git a/content/influxdb3/enterprise/query-data/influxql/aggregate-select.md b/content/influxdb3/enterprise/query-data/influxql/aggregate-select.md new file mode 100644 index 000000000..610c1d23c --- /dev/null +++ b/content/influxdb3/enterprise/query-data/influxql/aggregate-select.md @@ -0,0 +1,41 @@ +--- +title: Aggregate data with InfluxQL +seotitle: Aggregate or apply selector functions to data with InfluxQL +description: > + Use InfluxQL aggregate and selector functions to perform aggregate operations + on your time series data. +menu: + influxdb3_enterprise: + name: Aggregate data + parent: Query with InfluxQL + identifier: query-influxql-aggregate +weight: 203 +influxdb3/enterprise/tags: [query, influxql] +related: + - /influxdb3/enterprise/reference/influxql/functions/aggregates/ + - /influxdb3/enterprise/reference/influxql/functions/selectors/ +list_code_example: | + ##### Aggregate fields by groups + ```sql + SELECT + MEAN(temp) AS mean, + FIRST(hum) as first, + FROM home + GROUP BY tag + ``` + + ##### Aggregate by time-based intervals + ```sql + SELECT + MEAN(temp), + sum(hum), + FROM home + WHERE time >= now() - 24h + GROUP BY time(1h),room + ``` +source: /shared/influxdb3-query-guides/influxql/aggregate-select.md +--- + + diff --git a/content/influxdb3/enterprise/query-data/influxql/basic-query.md b/content/influxdb3/enterprise/query-data/influxql/basic-query.md new file mode 100644 index 000000000..ee43ba7b8 --- /dev/null +++ b/content/influxdb3/enterprise/query-data/influxql/basic-query.md @@ -0,0 +1,23 @@ +--- +title: Perform a basic InfluxQL query +seotitle: Perform a basic InfluxQL query in {{< product-name >}} +description: > + A basic InfluxQL query that queries data from InfluxDB most commonly includes + `SELECT`, `FROM`, and `WHERE` clauses. 
+menu: + influxdb3_enterprise: + name: Basic query + parent: Query with InfluxQL + identifier: query-influxql-basic +weight: 202 +influxdb3/enterprise/tags: [query, influxql] +list_code_example: | + ```sql + SELECT temp, room FROM home WHERE time >= now() - 1d + ``` +source: /shared/influxdb3-query-guides/influxql/basic-query.md +--- + + diff --git a/content/influxdb3/enterprise/query-data/influxql/explore-schema.md b/content/influxdb3/enterprise/query-data/influxql/explore-schema.md new file mode 100644 index 000000000..08d606e4d --- /dev/null +++ b/content/influxdb3/enterprise/query-data/influxql/explore-schema.md @@ -0,0 +1,39 @@ +--- +title: Explore your schema with InfluxQL +description: > + Use InfluxQL `SHOW` statements to return information about your data schema. +menu: + influxdb3_enterprise: + name: Explore your schema + parent: Query with InfluxQL + identifier: query-influxql-schema +weight: 201 +influxdb3/enterprise/tags: [query, influxql] +related: + - /influxdb3/enterprise/reference/influxql/show/ +list_code_example: | + ##### List measurements + ```sql + SHOW MEASUREMENTS + ``` + + ##### List field keys in a measurement + ```sql + SHOW FIELD KEYS FROM "measurement" + ``` + + ##### List tag keys in a measurement + ```sql + SHOW TAG KEYS FROM "measurement" + ``` + + ##### List tag values for a specific tag key + ```sql + SHOW TAG VALUES FROM "measurement" WITH KEY = "tag-key" WHERE time > now() - 1d + ``` +source: /shared/influxdb3-query-guides/influxql/explore-schema.md +--- + + diff --git a/content/influxdb3/enterprise/query-data/influxql/parameterized-queries.md b/content/influxdb3/enterprise/query-data/influxql/parameterized-queries.md new file mode 100644 index 000000000..c681656b7 --- /dev/null +++ b/content/influxdb3/enterprise/query-data/influxql/parameterized-queries.md @@ -0,0 +1,45 @@ +--- +title: Use parameterized queries with InfluxQL +description: > + Use parameterized queries to prevent injection attacks and make queries more reusable. +weight: 404 +menu: + influxdb3_enterprise: + name: Parameterized queries + parent: Query with InfluxQL + identifier: parameterized-queries-influxql +influxdb3/enterprise/tags: [query, security, influxql] +list_code_example: | + ##### Using Go and the influxdb3-go client + + ```go + // Use the $parameter syntax to reference parameters in a query. + // The following InfluxQL query contains $room and $min_time parameters. + query := ` + SELECT * FROM home + WHERE time >= $min_time + AND temp >= $min_temp + AND room = $room` + + // Assign parameter names to input values. + parameters := influxdb3.QueryParameters{ + "room": "Kitchen", + "min_temp": 20.0, + "min_time": "2024-03-18 00:00:00.00", + } + + // Call the client's function to query InfluxDB with parameters and the + // the InfluxQL QueryType. + iterator, err := client.QueryWithParameters(context.Background(), + query, + parameters, + influxdb3.WithQueryType(influxdb3.InfluxQL)) + ``` +# Leaving in draft until tested +draft: true +source: /shared/influxdb3-query-guides/influxql/parameterized-queries.md +--- + + diff --git a/content/influxdb3/enterprise/query-data/influxql/troubleshoot.md b/content/influxdb3/enterprise/query-data/influxql/troubleshoot.md new file mode 100644 index 000000000..32a6de79f --- /dev/null +++ b/content/influxdb3/enterprise/query-data/influxql/troubleshoot.md @@ -0,0 +1,15 @@ +--- +title: Troubleshoot InfluxQL errors +description: > + Learn how to troubleshoot and fix common InfluxQL errors. 
+menu: + influxdb3_enterprise: + name: Troubleshoot errors + parent: Query with InfluxQL +weight: 230 +source: /shared/influxdb3-query-guides/influxql/troubleshoot.md +--- + + diff --git a/content/influxdb3/enterprise/query-data/sql/_index.md b/content/influxdb3/enterprise/query-data/sql/_index.md new file mode 100644 index 000000000..6767cb686 --- /dev/null +++ b/content/influxdb3/enterprise/query-data/sql/_index.md @@ -0,0 +1,17 @@ +--- +title: Query data with SQL +seotitle: Query data with SQL +description: > + Learn to query data stored in {{< product-name >}} using SQL. +menu: + influxdb3_enterprise: + name: Query with SQL + parent: Query data +weight: 101 +influxdb3/enterprise/tags: [query, sql] +source: /shared/influxdb3-query-guides/sql/_index.md +--- + + diff --git a/content/influxdb3/enterprise/query-data/sql/aggregate-select.md b/content/influxdb3/enterprise/query-data/sql/aggregate-select.md new file mode 100644 index 000000000..e314ea263 --- /dev/null +++ b/content/influxdb3/enterprise/query-data/sql/aggregate-select.md @@ -0,0 +1,43 @@ +--- +title: Aggregate data with SQL +description: > + Use aggregate and selector functions to perform aggregate operations on your + time series data. +menu: + influxdb3_enterprise: + name: Aggregate data + parent: Query with SQL + identifier: query-sql-aggregate +weight: 203 +influxdb3/enterprise/tags: [query, sql] +related: + - /influxdb3/enterprise/reference/sql/functions/aggregate/ + - /influxdb3/enterprise/reference/sql/functions/selector/ + - /influxdb3/enterprise/reference/sql/group-by/ +list_code_example: | + ##### Aggregate fields by groups + ```sql + SELECT + mean(field1) AS mean, + selector_first(field2)['value'] as first, + tag1 + FROM home + GROUP BY tag + ``` + + ##### Aggregate by time-based intervals + ```sql + SELECT + DATE_BIN(INTERVAL '1 hour', time, '2022-01-01T00:00:00Z'::TIMESTAMP) AS time, + mean(field1), + sum(field2), + tag1 + FROM home + GROUP BY 1, tag1 + ``` +source: /shared/influxdb3-query-guides/sql/aggregate-select.md +--- + + diff --git a/content/influxdb3/enterprise/query-data/sql/basic-query.md b/content/influxdb3/enterprise/query-data/sql/basic-query.md new file mode 100644 index 000000000..8649b1e13 --- /dev/null +++ b/content/influxdb3/enterprise/query-data/sql/basic-query.md @@ -0,0 +1,23 @@ +--- +title: Perform a basic SQL query +seotitle: Perform a basic SQL query in {{< product-name >}} +description: > + A basic SQL query that queries data from {{< product-name >}} most commonly + includes `SELECT`, `FROM`, and `WHERE` clauses. +menu: + influxdb3_enterprise: + name: Basic query + parent: Query with SQL + identifier: query-sql-basic +weight: 202 +influxdb3/enterprise/tags: [query, sql] +list_code_example: | + ```sql + SELECT temp, room FROM home WHERE time >= now() - INTERVAL '1 day' + ``` +source: /shared/influxdb3-query-guides/sql/basic-query.md +--- + + diff --git a/content/influxdb3/enterprise/query-data/sql/cast-types.md b/content/influxdb3/enterprise/query-data/sql/cast-types.md new file mode 100644 index 000000000..b2732c98a --- /dev/null +++ b/content/influxdb3/enterprise/query-data/sql/cast-types.md @@ -0,0 +1,29 @@ +--- +title: Cast values to different types +seotitle: Cast values to different data types in SQL +description: > + Use the `CAST` function or double-colon `::` casting shorthand syntax to cast + a value to a specific type. 
+menu: + influxdb3_enterprise: + name: Cast types + parent: Query with SQL + identifier: query-sql-cast-types +weight: 205 +influxdb3/enterprise/tags: [query, sql] +related: + - /influxdb3/enterprise/reference/sql/data-types/ +list_code_example: | + ```sql + -- CAST clause + SELECT CAST(1234.5 AS BIGINT) + + -- Double-colon casting shorthand + SELECT 1234.5::BIGINT + ``` +source: /shared/influxdb3-query-guides/sql/cast-types.md +--- + + diff --git a/content/influxdb3/enterprise/query-data/sql/explore-schema.md b/content/influxdb3/enterprise/query-data/sql/explore-schema.md new file mode 100644 index 000000000..b513f62d3 --- /dev/null +++ b/content/influxdb3/enterprise/query-data/sql/explore-schema.md @@ -0,0 +1,27 @@ +--- +title: Explore your schema with SQL +description: > + Use SQL to explore your data schema in your {{< product-name >}} database. +menu: + influxdb3_enterprise: + name: Explore your schema + parent: Query with SQL + identifier: query-sql-schema +weight: 201 +influxdb3/enterprise/tags: [query, sql] +list_code_example: | + ##### List tables + ```sql + SHOW TABLES + ``` + + ##### List columns in a table + ```sql + SHOW COLUMNS IN table + ``` +source: /shared/influxdb3-query-guides/sql/explore-schema.md +--- + + diff --git a/content/influxdb3/enterprise/query-data/sql/fill-gaps.md b/content/influxdb3/enterprise/query-data/sql/fill-gaps.md new file mode 100644 index 000000000..7e6705731 --- /dev/null +++ b/content/influxdb3/enterprise/query-data/sql/fill-gaps.md @@ -0,0 +1,30 @@ +--- +title: Fill gaps in data +seotitle: Fill gaps in data with SQL +description: > + Use [`date_bin_gapfill`](/influxdb3/enterprise/reference/sql/functions/time-and-date/#date_bin_gapfill) + with [`interpolate`](/influxdb3/enterprise/reference/sql/functions/misc/#interpolate) + or [`locf`](/influxdb3/enterprise/reference/sql/functions/misc/#locf) to + fill gaps of time where no data is returned. +menu: + influxdb3_enterprise: + parent: Query with SQL +weight: 206 +list_code_example: | + ```sql + SELECT + date_bin_gapfill(INTERVAL '30 minutes', time) as time, + room, + interpolate(avg(temp)) + FROM home + WHERE + time >= '2022-01-01T08:00:00Z' + AND time <= '2022-01-01T10:00:00Z' + GROUP BY 1, room + ``` +source: /shared/influxdb3-query-guides/sql/fill-gaps.md +--- + + diff --git a/content/influxdb3/enterprise/query-data/sql/parameterized-queries.md b/content/influxdb3/enterprise/query-data/sql/parameterized-queries.md new file mode 100644 index 000000000..72bcf69a0 --- /dev/null +++ b/content/influxdb3/enterprise/query-data/sql/parameterized-queries.md @@ -0,0 +1,42 @@ +--- +title: Use parameterized queries with SQL +description: > + Use parameterized queries to prevent injection attacks and make queries more reusable. +weight: 404 +menu: + influxdb3_enterprise: + name: Parameterized queries + parent: Query with SQL + identifier: parameterized-queries-sql +influxdb3/enterprise/tags: [query, security, sql] +list_code_example: | + ##### Using Go and the influxdb3-go client + + ```go + // Use the $parameter syntax to reference parameters in a query. + // The following SQL query contains $room and $min_temp placeholders. + query := ` + SELECT * FROM home + WHERE time >= $min_time + AND temp >= $min_temp + AND room = $room` + + // Assign parameter names to input values. + parameters := influxdb3.QueryParameters{ + "room": "Kitchen", + "min_temp": 20.0, + "min_time": "2024-03-18 00:00:00.00", + + } + + // Call the client's function to query InfluxDB with parameters. 
+ iterator, err := client.QueryWithParameters(context.Background(), query, parameters) + ``` +# Leaving in draft until tested +draft: true +source: /shared/influxdb3-query-guides/sql/parameterized-queries.md +--- + + \ No newline at end of file diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md b/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md index cb534e2e4..a61bf6e0e 100644 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md @@ -16,7 +16,7 @@ The `influxdb3 serve` command starts the {{< product-name >}} server. ```bash -influxdb3 serve [OPTIONS] --writer-id +influxdb3 serve [OPTIONS] --node-id ``` ## Options @@ -78,12 +78,12 @@ influxdb3 serve [OPTIONS] --writer-id | | `--wal-max-write-buffer-size` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wal-max-write-buffer-size)_ | | | `--query-log-size` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#query-log-size)_ | | | `--buffer-mem-limit-mb` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#buffer-mem-limit-mb)_ | -| {{< req "\*" >}} | `--writer-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#writer-id)_ | +| {{< req "\*" >}} | `--node-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#node-id)_ | | | `--mode` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#mode)_ | -| | `--read-from-writer-ids` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#read-from-writer-ids)_ | +| | `--read-from-node-ids` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#read-from-node-ids)_ | | | `--replication-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#replication-interval)_ | | | `--compactor-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compactor-id)_ | -| | `--compact-from-writer-ids` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compact-from-writer-ids)_ | +| | `--compact-from-node-ids` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compact-from-node-ids)_ | | | `--run-compactions` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#run-compactions)_ | | | `--compaction-row-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-row-limit)_ | | | `--compaction-max-num-files-per-plan` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-max-num-files-per-plan)_ | @@ -128,7 +128,7 @@ with a unique identifier for your {{< product-name >}} server. 
 influxdb3 serve \
   --object-store file \
   --data-dir ~/.influxdb3 \
-  --writer-id MY_HOST_NAME
+  --node-id MY_HOST_NAME
 ```
 
 ### Run the InfluxDB 3 server with extra verbose logging
@@ -140,7 +140,7 @@
 influxdb3 serve \
   --verbose \
   --object-store file \
   --data-dir ~/.influxdb3 \
-  --writer-id MY_HOST_NAME
+  --node-id MY_HOST_NAME
 ```
 
 ### Run InfluxDB 3 with debug logging using LOG_FILTER
@@ -151,7 +151,7 @@
 LOG_FILTER=debug influxdb3 serve \
   --object-store file \
   --data-dir ~/.influxdb3 \
-  --writer-id MY_HOST_NAME
+  --node-id MY_HOST_NAME
 ```
 
 {{% /code-placeholders %}}
diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/show/system/_index.md b/content/influxdb3/enterprise/reference/cli/influxdb3/show/system/_index.md
new file mode 100644
index 000000000..f971b3f20
--- /dev/null
+++ b/content/influxdb3/enterprise/reference/cli/influxdb3/show/system/_index.md
@@ -0,0 +1,16 @@
+---
+title: influxdb3 show system
+description: >
+  The `influxdb3 show system` command displays data from {{< product-name >}}
+  system tables.
+menu:
+  influxdb3_enterprise:
+    parent: influxdb3 show
+    name: influxdb3 show system
+weight: 400
+source: /shared/influxdb3-cli/show/system/_index.md
+---
+
+
\ No newline at end of file
diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/show/system/summary.md b/content/influxdb3/enterprise/reference/cli/influxdb3/show/system/summary.md
new file mode 100644
index 000000000..a7b07a25a
--- /dev/null
+++ b/content/influxdb3/enterprise/reference/cli/influxdb3/show/system/summary.md
@@ -0,0 +1,16 @@
+---
+title: influxdb3 show system summary
+description: >
+  The `influxdb3 show system summary` command returns a summary of various types of
+  system table data.
+menu:
+  influxdb3_enterprise:
+    parent: influxdb3 show system
+    name: influxdb3 show system summary
+weight: 401
+source: /shared/influxdb3-cli/show/system/summary.md
+---
+
+
\ No newline at end of file
diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/show/system/table-list.md b/content/influxdb3/enterprise/reference/cli/influxdb3/show/system/table-list.md
new file mode 100644
index 000000000..70f6acdd5
--- /dev/null
+++ b/content/influxdb3/enterprise/reference/cli/influxdb3/show/system/table-list.md
@@ -0,0 +1,15 @@
+---
+title: influxdb3 show system table-list
+description: >
+  The `influxdb3 show system table-list` command lists available system tables.
+menu:
+  influxdb3_enterprise:
+    parent: influxdb3 show system
+    name: influxdb3 show system table-list
+weight: 401
+source: /shared/influxdb3-cli/show/system/table-list.md
+---
+
+
\ No newline at end of file
diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/show/system/table.md b/content/influxdb3/enterprise/reference/cli/influxdb3/show/system/table.md
new file mode 100644
index 000000000..c5aea344e
--- /dev/null
+++ b/content/influxdb3/enterprise/reference/cli/influxdb3/show/system/table.md
@@ -0,0 +1,15 @@
+---
+title: influxdb3 show system table
+description: >
+  The `influxdb3 show system table` command queries data from a system table.
+menu: + influxdb3_enterprise: + parent: influxdb3 show system + name: influxdb3 show system table +weight: 401 +source: /shared/influxdb3-cli/show/system/table.md +--- + + \ No newline at end of file diff --git a/content/influxdb3/enterprise/reference/config-options.md b/content/influxdb3/enterprise/reference/config-options.md index 4af00d3cd..c062e5eb6 100644 --- a/content/influxdb3/enterprise/reference/config-options.md +++ b/content/influxdb3/enterprise/reference/config-options.md @@ -27,7 +27,7 @@ environment variables. influxdb3 serve \ --object-store file \ --data-dir ~/.influxdb3 \ - --writer-id my-host \ + --node-id my-host \ --log-filter info \ --max-http-request-size 20971520 \ --aws-allow-http @@ -53,7 +53,7 @@ influxdb3 serve - [General](#general) - [object-store](#object-store) - [data-dir](#data-dir) - - [writer-id](#writer-id) + - [node-id](#node-id) - [mode](#mode) - [AWS](#aws) - [aws-access-key-id](#aws-access-key-id) @@ -118,11 +118,11 @@ influxdb3 serve - [wal-max-write-buffer-size](#wal-max-write-buffer-size) - [snapshotted-wal-files-to-keep](#snapshotted-wal-files-to-keep) - [Replication](#replication) - - [read-from-writer-ids](#read-from-writer-ids) + - [read-from-node-ids](#read-from-node-ids) - [replication-interval](#replication-interval) - [Compaction](#compaction) - [compactor-id](#compactor-id) - - [compact-from-writer-ids](#compact-from-writer-ids) + - [compact-from-node-ids](#compact-from-node-ids) - [run-compactions](#run-compactions) - [compaction-row-limit](#compaction-row-limit) - [compaction-max-num-files-per-plan](#compaction-max-num-files-per-plan) @@ -147,7 +147,7 @@ influxdb3 serve - [object-store](#object-store) - [bucket](#bucket) - [data-dir](#data-dir) -- [writer-id](#writer-id) +- [node-id](#node-id) - [mode](#mode) #### object-store @@ -178,15 +178,15 @@ Defines the location {{< product-name >}} uses to store files locally. --- -#### writer-id +#### node-id -Specifies the writer identifier used as a prefix in all object store file paths. +Specifies the node identifier used as a prefix in all object store file paths. This should be unique for any hosts sharing the same object store configuration--for example, the same bucket. -| influxdb3 serve option | Environment variable | -| :--------------------- | :----------------------------------- | -| `--writer-id` | `INFLUXDB3_WRITER_IDENTIFIER_PREFIX` | +| influxdb3 serve option | Environment variable | +| :--------------------- | :--------------------------------- | +| `--node-id` | `INFLUXDB3_NODE_IDENTIFIER_PREFIX` | --- @@ -902,24 +902,24 @@ they are deleted when the number of snapshotted WAL files exceeds this number. ### Replication -- [read-from-writer-ids](#read-from-writer-ids) +- [read-from-node-ids](#read-from-node-ids) - [replication-interval](#replication-interval) -#### read-from-writer-ids +#### read-from-node-ids -Specifies a comma-separated list of writer identifier prefixes (`writer-id`s) to +Specifies a comma-separated list of writer identifier prefixes (`node-id`s) to read WAL files from. [env: =] | influxdb3 serve option | Environment variable | | :--------------------- | :------------------------------ | -| `--read-from-writer-ids` | `INFLUXDB3_ENTERPRISE_READ_FROM_WRITER_IDS` | +| `--read-from-node-ids` | `INFLUXDB3_ENTERPRISE_READ_FROM_WRITER_IDS` | --- #### replication-interval Defines the interval at which each replica specified in the -`read-from-writer-ids` option is replicated. +`read-from-node-ids` option is replicated. 
**Default:** `250ms` @@ -932,7 +932,7 @@ Defines the interval at which each replica specified in the ### Compaction - [compactor-id](#compactor-id) -- [compact-from-writer-ids](#compact-from-writer-ids) +- [compact-from-node-ids](#compact-from-node-ids) - [run-compactions](#run-compactions) - [compaction-row-limit](#compaction-row-limit) - [compaction-max-num-files-per-plan](#compaction-max-num-files-per-plan) @@ -952,14 +952,14 @@ write buffer and any replicas it manages. --- -#### compact-from-writer-ids +#### compact-from-node-ids Defines a comma-separated list of writer identifier prefixes from which data is compacted. | influxdb3 serve option | Environment variable | | :-------------------------- | :--------------------------------------------- | -| `--compact-from-writer-ids` | `INFLUXDB3_ENTERPRISE_COMPACT_FROM_WRITER_IDS` | +| `--compact-from-node-ids` | `INFLUXDB3_ENTERPRISE_COMPACT_FROM_WRITER_IDS` | --- diff --git a/content/influxdb3/enterprise/reference/sample-data.md b/content/influxdb3/enterprise/reference/sample-data.md new file mode 100644 index 000000000..506d100f9 --- /dev/null +++ b/content/influxdb3/enterprise/reference/sample-data.md @@ -0,0 +1,17 @@ +--- +title: Sample data +description: > + Sample datasets are used throughout the the {{< product-name >}} documentation + to demonstrate functionality. + Use the following sample datasets to replicate provided examples. +menu: + influxdb3_enterprise: + name: Sample data + parent: Reference +weight: 182 +source: /shared/influxdb3-sample-data/sample-data.md +--- + + \ No newline at end of file diff --git a/content/influxdb3/enterprise/write-data/_index.md b/content/influxdb3/enterprise/write-data/_index.md new file mode 100644 index 000000000..892af242d --- /dev/null +++ b/content/influxdb3/enterprise/write-data/_index.md @@ -0,0 +1,16 @@ +--- +title: Write data to {{% product-name %}} +list_title: Write data +description: > + Collect and write time series data to {{% product-name %}}. +weight: 3 +menu: + influxdb3_enterprise: + name: Write data +influxdb3/enterprise/tags: [write, line protocol] +source: /shared/influxdb3-write-guides/_index.md +--- + + diff --git a/content/influxdb3/enterprise/write-data/best-practices/_index.md b/content/influxdb3/enterprise/write-data/best-practices/_index.md new file mode 100644 index 000000000..45f1d8cfb --- /dev/null +++ b/content/influxdb3/enterprise/write-data/best-practices/_index.md @@ -0,0 +1,17 @@ +--- +title: Best practices for writing data +seotitle: Best practices for writing data to {{< product-name >}} +description: > + Learn about the recommendations and best practices for writing data to {{< product-name >}}. +weight: 105 +menu: + influxdb3_enterprise: + name: Best practices + identifier: write-best-practices + parent: Write data +source: /shared/influxdb3-write-guides/best-practices/_index.md +--- + + diff --git a/content/influxdb3/enterprise/write-data/best-practices/optimize-writes.md b/content/influxdb3/enterprise/write-data/best-practices/optimize-writes.md new file mode 100644 index 000000000..42966bfa2 --- /dev/null +++ b/content/influxdb3/enterprise/write-data/best-practices/optimize-writes.md @@ -0,0 +1,20 @@ +--- +title: Optimize writes to {{< product-name >}} +description: > + Tips and examples to optimize performance and system overhead when writing + data to {{< product-name >}}. 
+weight: 203 +menu: + influxdb3_enterprise: + name: Optimize writes + parent: write-best-practices +influxdb/cloud/tags: [best practices, write] +related: + - /resources/videos/ingest-data/, How to Ingest Data in InfluxDB (Video) + - /influxdb3/enterprise/write-data/use-telegraf/ +source: /shared/influxdb3-write-guides/best-practices/optimize-writes.md +--- + + diff --git a/content/influxdb3/enterprise/write-data/best-practices/schema-design.md b/content/influxdb3/enterprise/write-data/best-practices/schema-design.md new file mode 100644 index 000000000..e9451b2f3 --- /dev/null +++ b/content/influxdb3/enterprise/write-data/best-practices/schema-design.md @@ -0,0 +1,20 @@ +--- +title: InfluxDB schema design recommendations +seotitle: InfluxDB schema design recommendations and best practices +description: > + Design your schema for simpler and more performant queries. +menu: + influxdb3_enterprise: + name: Schema design + weight: 201 + parent: write-best-practices +related: + - /influxdb3/enterprise/admin/databases/ + - /influxdb3/enterprise/reference/cli/influxdb3/ + - /influxdb3/enterprise/query-data/troubleshoot-and-optimize/ +source: /shared/influxdb3-write-guides/best-practices/schema-design.md +--- + + diff --git a/content/influxdb3/enterprise/write-data/client-libraries.md b/content/influxdb3/enterprise/write-data/client-libraries.md new file mode 100644 index 000000000..afb2b64ec --- /dev/null +++ b/content/influxdb3/enterprise/write-data/client-libraries.md @@ -0,0 +1,20 @@ +--- +title: Use InfluxDB client libraries to write data +description: > + Use InfluxDB API clients to write points as line protocol data to InfluxDB + Clustered. +menu: + influxdb3_enterprise: + name: Use client libraries + parent: Write data + identifier: write-client-libs +weight: 103 +related: + - /influxdb3/enterprise/reference/syntax/line-protocol/ + - /influxdb3/enterprise/get-started/write/ +source: /shared/influxdb3-write-guides/client-libraries.md +--- + + diff --git a/content/influxdb3/enterprise/write-data/influxdb3-cli.md b/content/influxdb3/enterprise/write-data/influxdb3-cli.md new file mode 100644 index 000000000..70526147b --- /dev/null +++ b/content/influxdb3/enterprise/write-data/influxdb3-cli.md @@ -0,0 +1,23 @@ +--- +title: Use the influxdb3 CLI to write data +description: > + Use the [`influxdb3` CLI](/influxdb3/enterprise/reference/cli/influxdb3/) + to write line protocol data to InfluxDB Clustered. +menu: + influxdb3_enterprise: + name: Use the influxdb3 CLI + parent: Write data + identifier: write-influxdb3 +weight: 101 +related: + - /influxdb3/enterprise/reference/cli/influxdb3/write/ + - /influxdb3/enterprise/reference/syntax/line-protocol/ + - /influxdb3/enterprise/get-started/write/ +alt_links: + cloud-serverless: /influxdb3/cloud-serverless/write-data/line-protocol/ +source: /shared/influxdb3-write-guides/influxdb3-cli.md +--- + + diff --git a/content/influxdb3/enterprise/write-data/troubleshoot.md b/content/influxdb3/enterprise/write-data/troubleshoot.md new file mode 100644 index 000000000..1c75ab0ef --- /dev/null +++ b/content/influxdb3/enterprise/write-data/troubleshoot.md @@ -0,0 +1,24 @@ +--- +title: Troubleshoot issues writing data +seotitle: Troubleshoot issues writing data to InfluxDB +weight: 106 +description: > + Troubleshoot issues writing data. + Find response codes for failed writes. + Discover how writes fail, from exceeding rate or payload limits, to syntax + errors and schema conflicts. 
+menu: + influxdb3_enterprise: + name: Troubleshoot issues + parent: Write data +influxdb3/enterprise/tags: [write, line protocol, errors] +related: + - /influxdb3/enterprise/reference/syntax/line-protocol/ + - /influxdb3/enterprise/write-data/best-practices/ + - /influxdb3/enterprise/reference/internals/durability/ +source: /shared/influxdb3-write-guides/troubleshoot.md +--- + + diff --git a/content/influxdb3/enterprise/write-data/use-telegraf/_index.md b/content/influxdb3/enterprise/write-data/use-telegraf/_index.md new file mode 100644 index 000000000..727327a79 --- /dev/null +++ b/content/influxdb3/enterprise/write-data/use-telegraf/_index.md @@ -0,0 +1,22 @@ +--- +title: Use Telegraf to write data +seotitle: Use the Telegraf agent to collect and write data +weight: 102 +description: > + Use Telegraf to collect and write data to {{< product-name >}}. +aliases: + - /influxdb3/enterprise/collect-data/advanced-telegraf + - /influxdb3/enterprise/collect-data/use-telegraf + - /influxdb3/enterprise/write-data/no-code/use-telegraf/ +menu: + influxdb3_enterprise: + name: Use Telegraf + parent: Write data +alt_links: + cloud: /influxdb/cloud/write-data/no-code/use-telegraf/ +source: /shared/influxdb3-write-guides/use-telegraf/_index.md +--- + + diff --git a/content/influxdb3/enterprise/write-data/use-telegraf/configure.md b/content/influxdb3/enterprise/write-data/use-telegraf/configure.md new file mode 100644 index 000000000..8f9c7cd08 --- /dev/null +++ b/content/influxdb3/enterprise/write-data/use-telegraf/configure.md @@ -0,0 +1,23 @@ +--- +title: Configure Telegraf to write to {{< product-name >}} +seotitle: Configure Telegraf to write data to {{< product-name >}} +description: > + Update existing or create new Telegraf configurations to use the `influxdb_v2` + output plugin to write to {{< product-name >}}. + Start Telegraf using the custom configuration. +menu: + influxdb3_enterprise: + name: Configure Telegraf + parent: Use Telegraf +weight: 101 +influxdb3/enterprise/tags: [telegraf] +related: + - /telegraf/v1/plugins/, Telegraf plugins +alt_links: + cloud: /influxdb/cloud/write-data/no-code/use-telegraf/manual-config/ +source: /shared/influxdb3-write-guides/use-telegraf/configure.md +--- + + diff --git a/content/influxdb3/enterprise/write-data/use-telegraf/csv.md b/content/influxdb3/enterprise/write-data/use-telegraf/csv.md new file mode 100644 index 000000000..d40dd25bf --- /dev/null +++ b/content/influxdb3/enterprise/write-data/use-telegraf/csv.md @@ -0,0 +1,21 @@ +--- +title: Use Telegraf to write CSV data +description: > + Use the Telegraf `file` input plugin to read and parse CSV data into + [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) + and write it to {{< product-name >}}. +menu: + influxdb3_enterprise: + parent: Use Telegraf + name: Write CSV + identifier: write-csv-telegraf +weight: 203 +related: + - /telegraf/v1/data_formats/input/csv/ + - /influxdb3/enterprise/write-data/use-telegraf/ +source: /shared/influxdb3-write-guides/use-telegraf/csv.md +--- + + diff --git a/content/influxdb3/enterprise/write-data/use-telegraf/dual-write.md b/content/influxdb3/enterprise/write-data/use-telegraf/dual-write.md new file mode 100644 index 000000000..04c5e6e0d --- /dev/null +++ b/content/influxdb3/enterprise/write-data/use-telegraf/dual-write.md @@ -0,0 +1,18 @@ +--- +title: Use Telegraf to dual write to InfluxDB +description: > + Configure Telegraf to write data to multiple InfluxDB instances or clusters + simultaneously. 
+menu: + influxdb3_enterprise: + name: Dual write to InfluxDB + parent: Use Telegraf +weight: 203 +alt_links: + cloud: /influxdb/cloud/write-data/no-code/use-telegraf/dual-write/ +source: /shared/influxdb3-write-guides/use-telegraf/dual-write.md +--- + + diff --git a/content/shared/influxdb3-cli/query.md b/content/shared/influxdb3-cli/query.md index 3fb14fcca..e0965e786 100644 --- a/content/shared/influxdb3-cli/query.md +++ b/content/shared/influxdb3-cli/query.md @@ -16,7 +16,11 @@ influxdb3 query [OPTIONS] --database [QUERY]... ## Arguments -- **QUERY**: The query string to execute. +- **QUERY**: The query to execute. Provide the query in one of the following ways: + + - a string + - a path to a file that contains the query using the `--file` option + - from stdin ## Options @@ -26,8 +30,9 @@ influxdb3 query [OPTIONS] --database [QUERY]... | `-d` | `--database` | _({{< req >}})_ Name of the database to operate on | | | `--token` | Authentication token | | `-l` | `--language` | Query language of the query string (`sql` _(default)_ or `influxql`) | -| | `--format` | Output format (`pretty` _(default)_, `json`, `json_lines`, `csv`, `parquet`) | +| | `--format` | Output format (`pretty` _(default)_, `json`, `jsonl`, `csv`, `parquet`) | | `-o` | `--output` | Output query results to the specified file | +| `-f` | `--file` | A file that contains the query to execute | | `-h` | `--help` | Print help information | ### Option environment variables @@ -55,14 +60,44 @@ with the name of the database to query. ### Query data using SQL +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[string](#) +[file](#) +[stdin](#) +{{% /code-tabs %}} +{{% code-tab-content %}} ```bash influxdb3 query --database DATABASE_NAME 'SELECT * FROM home' ``` +{{% /code-tab-content %}} +{{% code-tab-content %}} + + +```bash +influxdb3 query --database DATABASE_NAME --file ./query.sql +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} + + +```bash +cat ./query.sql | influxdb3 query --database DATABASE_NAME +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} ### Query data using InfluxQL +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[string](#) +[file](#) +[stdin](#) +{{% /code-tabs %}} +{{% code-tab-content %}} ```bash @@ -71,9 +106,37 @@ influxdb3 query \ --database DATABASE_NAME \ 'SELECT * FROM home' ``` +{{% /code-tab-content %}} +{{% code-tab-content %}} + + +```bash +influxdb3 query \ + --language influxql \ + --database DATABASE_NAME \ + --file ./query.influxql +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} + + +```bash +cat ./query.influxql | influxdb3 query \ + --language influxql \ + --database DATABASE_NAME +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} ### Query data and return JSON-formatted results +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[string](#) +[file](#) +[stdin](#) +{{% /code-tabs %}} +{{% code-tab-content %}} ```bash @@ -82,9 +145,37 @@ influxdb3 query \ --database DATABASE_NAME \ 'SELECT * FROM home' ``` +{{% /code-tab-content %}} +{{% code-tab-content %}} + + +```bash +influxdb3 query \ + --format json \ + --database DATABASE_NAME \ + --file ./query.sql +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} + + +```bash +cat ./query.sql | influxdb3 query \ + --format json \ + --database DATABASE_NAME +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} ### Query data and write results to a file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[string](#) +[file](#) +[stdin](#) +{{% /code-tabs %}} +{{% code-tab-content %}} ```bash @@ -93,5 +184,26 @@ 
influxdb3 query \ --database DATABASE_NAME \ 'SELECT * FROM home' ``` +{{% /code-tab-content %}} +{{% code-tab-content %}} + + +```bash +influxdb3 query \ + --output /path/to/results.txt \ + --database DATABASE_NAME \ + --file ./query.sql +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} + + +```bash +cat ./query.sql | influxdb3 query \ + --output /path/to/results.txt \ + --database DATABASE_NAME +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} {{% /code-placeholders %}} diff --git a/content/shared/influxdb3-cli/show/_index.md b/content/shared/influxdb3-cli/show/_index.md index dd803c71b..0ad4c6649 100644 --- a/content/shared/influxdb3-cli/show/_index.md +++ b/content/shared/influxdb3-cli/show/_index.md @@ -11,10 +11,11 @@ influxdb3 show ## Subcommands -| Subcommand | Description | -| :------------------------------------------------------------------------- | :--------------------------------------------- | +| Subcommand | Description | +| :---------------------------------------------------------------------- | :--------------------------------------------- | | [databases](/influxdb3/version/reference/cli/influxdb3/show/databases/) | List database | -| help | Print command help or the help of a subcommand | +| [system](/influxdb3/version/reference/cli/influxdb3/show/system/) | Display system table data | +| help | Print command help or the help of a subcommand | ## Options diff --git a/content/shared/influxdb3-cli/show/databases.md b/content/shared/influxdb3-cli/show/databases.md index 0012d5988..a784ac113 100644 --- a/content/shared/influxdb3-cli/show/databases.md +++ b/content/shared/influxdb3-cli/show/databases.md @@ -18,7 +18,7 @@ influxdb3 show databases [OPTIONS] | `-d` | `--database` | _({{< req >}})_ Name of the database to operate on | | | `--token` | Authentication token | | | `--show-deleted` | Include databases marked as deleted in the output | -| | `--format` | Output format (`pretty` _(default)_, `json`, `json_lines`, or `csv`) | +| | `--format` | Output format (`pretty` _(default)_, `json`, `jsonl`, `csv`, or `parquet`) | | `-h` | `--help` | Print help information | ### Option environment variables diff --git a/content/shared/influxdb3-cli/show/system/_index.md b/content/shared/influxdb3-cli/show/system/_index.md new file mode 100644 index 000000000..0ea9c9d95 --- /dev/null +++ b/content/shared/influxdb3-cli/show/system/_index.md @@ -0,0 +1,43 @@ + +The `influxdb3 show system` command displays data from {{< product-name >}} +system tables. 
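+
+For example, to list the system tables available in a database (this sketch
+assumes a database named `mydb` already exists), use the `table-list`
+subcommand described below:
+
+```bash
+influxdb3 show system --database mydb table-list
+```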
+ +## Usage + + + +```bash + influxdb3 show system [OPTIONS] --database +``` + +##### Aliases + +`system`, `s` + +## Subcommands + +| Subcommand | Description | +| :---------------------------------------------------------------------------- | :--------------------------------------------- | +| [summary](/influxdb3/version/reference/cli/influxdb3/show/system/summary) | Summarize system table data | +| [table](/influxdb3/version/reference/cli/influxdb3/show/system/table/) | Retrieve entries from a specific system table | +| [table-list](/influxdb3/version/reference/cli/influxdb3/show/system/table-list/) | List available system tables | +| help | Print command help or the help of a subcommand | + +## Options + +| Option | | Description | +| :----- | :--------------- | :--------------------------------------------------------------------------------------- | +| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) | +| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on | +| | `--token` | Authentication token | +| `-h` | `--help` | Print help information | + +### Option environment variables + +You can use the following environment variables to set command options: + +| Environment Variable | Option | +| :------------------------ | :----------- | +| `INFLUXDB3_HOST_URL` | `--host` | +| `INFLUXDB3_DATABASE_NAME` | `--database` | +| `INFLUXDB3_AUTH_TOKEN` | `--token` | diff --git a/content/shared/influxdb3-cli/show/system/summary.md b/content/shared/influxdb3-cli/show/system/summary.md new file mode 100644 index 000000000..ff0eb0ae5 --- /dev/null +++ b/content/shared/influxdb3-cli/show/system/summary.md @@ -0,0 +1,48 @@ + +The `influxdb3 show system summary` command returns a summary of various types of +system table data. + +## Usage + + + +```bash +influxdb3 show system --database summary [OPTIONS] +``` + +## Options + +| Option | | Description | +| :----- | :--------- | :--------------------------------------------------------------------------------------------- | +| `-l` | `--limit` | Maximum number of entries from each table to display (default is `10`, `0` indicates no limit) | +| | `--format` | Output format (`pretty` _(default)_, `json`, `jsonl`, `csv`, or `parquet`) | +| `-h` | `--help` | Print help information | + +## Examples + +- [Summarize system table data](#summarize-system-table-data) +- [Summarize system table data in JSON-formatted output](#summarize-system-table-data-in-json-formatted-output) + +In the examples below, replace +{{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}} with +the name of the database to operate on. + +{{% code-placeholders "DATABASE_NAME" %}} + +### Summarize system table data + + + +```bash +influxdb3 show system --database DATABASE_NAME summary +``` + +### Summarize system table data in JSON-formatted output + + + +```bash +influxdb3 show system --database DATABASE_NAME summary --format json +``` + +{{% /code-placeholders %}} diff --git a/content/shared/influxdb3-cli/show/system/table-list.md b/content/shared/influxdb3-cli/show/system/table-list.md new file mode 100644 index 000000000..47ae872fe --- /dev/null +++ b/content/shared/influxdb3-cli/show/system/table-list.md @@ -0,0 +1,46 @@ + +The `influxdb3 show system table-list` command lists available system tables. 
+
+## Usage
+
+
+```bash
+influxdb3 show system --database <DATABASE_NAME> table-list [OPTIONS]
+```
+
+## Options
+
+| Option |            | Description                                                                |
+| :----- | :--------- | :------------------------------------------------------------------------- |
+|        | `--format` | Output format (`pretty` _(default)_, `json`, `jsonl`, `csv`, or `parquet`) |
+| `-h`   | `--help`   | Print help information                                                     |
+
+## Examples
+
+- [List system tables](#list-system-tables)
+- [List system tables in JSON-formatted output](#list-system-tables-in-json-formatted-output)
+
+In the examples below, replace
+{{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}} with
+the name of the database to operate on.
+
+{{% code-placeholders "DATABASE_NAME" %}}
+
+### List system tables
+
+
+```bash
+influxdb3 show system --database DATABASE_NAME table-list
+```
+
+### List system tables in JSON-formatted output
+
+
+```bash
+influxdb3 show system --database DATABASE_NAME table-list --format json
+```
+
+{{% /code-placeholders %}}
diff --git a/content/shared/influxdb3-cli/show/system/table.md b/content/shared/influxdb3-cli/show/system/table.md
new file mode 100644
index 000000000..558131916
--- /dev/null
+++ b/content/shared/influxdb3-cli/show/system/table.md
@@ -0,0 +1,85 @@
+
+The `influxdb3 show system table` command queries data from a system table.
+
+## Usage
+
+
+```bash
+influxdb3 show system --database <DATABASE_NAME> table [OPTIONS] <SYSTEM_TABLE>
+```
+
+## Arguments
+
+- **SYSTEM_TABLE**: the system table to query
+
+## Options
+
+| Option |              | Description                                                                            |
+| :----- | :----------- | :-------------------------------------------------------------------------------------- |
+| `-l`   | `--limit`    | Maximum number of table entries to display (default is `10`, `0` indicates no limit)   |
+| `-o`   | `--order-by` | Order by the specified columns                                                         |
+| `-s`   | `--select`   | Select specific columns from the system table                                          |
+|        | `--format`   | Output format (`pretty` _(default)_, `json`, `jsonl`, `csv`, or `parquet`)             |
+| `-h`   | `--help`     | Print help information                                                                 |
+
+## Examples
+
+- [Query a system table](#query-a-system-table)
+- [Query specific columns from a system table](#query-specific-columns-from-a-system-table)
+- [Query a system table and order by a specific column](#query-a-system-table-and-order-by-a-specific-column)
+- [Query a system table and return JSON-formatted output](#query-a-system-table-and-return-json-formatted-output)
+
+In the examples below, replace
+{{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}} with
+the name of the database to operate on.
+ +{{% code-placeholders "DATABASE_NAME" %}} + +### Query a system table + + + +```bash +# Query the parquet_files system table +influxdb3 show system --database DATABASE_NAME table parquet_files +``` + +### Query specific columns from a system table + + + +```bash +# Select specific columns from the parquet_files system table +influxdb3 show system \ + --database DATABASE_NAME \ + table \ + --select table_name,size_bytes,row_count \ + parquet_files +``` + +### Query a system table and order by a specific column + + + +```bash +influxdb3 show system \ + --database DATABASE_NAME \ + table \ + --order-by size_bytes,row_count \ + parquet_files +``` + +### Query a system table and return JSON-formatted output + + + +```bash +influxdb3 show system \ + --database DATABASE_NAME \ + table \ + --format json \ + parquet_files +``` + +{{% /code-placeholders %}} diff --git a/content/shared/influxdb3-cli/write.md b/content/shared/influxdb3-cli/write.md index df8c31a40..c4acb2657 100644 --- a/content/shared/influxdb3-cli/write.md +++ b/content/shared/influxdb3-cli/write.md @@ -6,13 +6,22 @@ The `influxdb3 write` command writes data to your {{< product-name >}} server. ```bash -influxdb3 write [OPTIONS] --database --file +influxdb3 write [OPTIONS] --database [LINE_PROTOCOL]... ``` ##### Aliases `write`, `w` +## Arguments + +- **LINE_PROTOCOL**: The line protocol to write to {{< product-name >}}. + Provide the line protocol in one of the following ways: + + - a string + - a path to a file that contains the line protocol using the `--file` option + - from stdin + ## Options | Option | | Description | @@ -20,7 +29,7 @@ influxdb3 write [OPTIONS] --database --file | `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) | | `-d` | `--database` | _({{< req >}})_ Name of the database to operate on | | | `--token` | Authentication token | -| `-f` | `--file` | _({{< req >}})_ Line protocol file to use to write data | +| `-f` | `--file` | A file that contains line protocol to write | | | `--accept-partial` | Accept partial writes | | `-h` | `--help` | Print help information | @@ -47,21 +56,77 @@ with the name of the database to query. 
 ### Write line protocol to your InfluxDB 3 server
 
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[string](#)
+[file](#)
+[stdin](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+{{% influxdb/custom-timestamps %}}
 
 ```bash
-influxdb3 write --database DATABASE_NAME --file /path/to/data.lp
+influxdb3 write --database DATABASE_NAME \
+  'home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000'
 ```
+{{% /influxdb/custom-timestamps %}}
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+
+
+```bash
+influxdb3 write --database DATABASE_NAME --file ./data.lp
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+
+
+```bash
+cat ./data.lp | influxdb3 write --database DATABASE_NAME
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
 
 ### Write line protocol and accept partial writes
 
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[string](#)
+[file](#)
+[stdin](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+{{% influxdb/custom-timestamps %}}
 
 ```bash
 influxdb3 write \
   --accept-partial \
   --database DATABASE_NAME \
-  --file /path/to/data.lp
+  'home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000'
 ```
+{{% /influxdb/custom-timestamps %}}
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+
+
+```bash
+influxdb3 write \
+  --accept-partial \
+  --database DATABASE_NAME \
+  --file ./data.lp
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+
+
+```bash
+cat ./data.lp | influxdb3 write \
+  --accept-partial \
+  --database DATABASE_NAME
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
 
 {{% /code-placeholders %}}
diff --git a/content/shared/influxdb3-query-guides/_index.md b/content/shared/influxdb3-query-guides/_index.md
new file mode 100644
index 000000000..72fae2ee5
--- /dev/null
+++ b/content/shared/influxdb3-query-guides/_index.md
@@ -0,0 +1,4 @@
+
+Learn to query data stored in {{< product-name >}}.
+
+{{< children >}}
diff --git a/content/shared/influxdb3-query-guides/execute-queries/_index.md b/content/shared/influxdb3-query-guides/execute-queries/_index.md
new file mode 100644
index 000000000..d82abd6a2
--- /dev/null
+++ b/content/shared/influxdb3-query-guides/execute-queries/_index.md
@@ -0,0 +1,11 @@
+Use tools and libraries to query data stored in an {{< product-name >}} database.
+
+InfluxDB client libraries and Flight clients can use the Flight+gRPC protocol to
+query with SQL or InfluxQL and retrieve data in the
+[Arrow in-memory format](https://arrow.apache.org/docs/format/Columnar.html).
+HTTP clients can use the InfluxDB v1 `/query` REST API to query with InfluxQL
+and retrieve data in JSON format.
+
+Learn how to connect to InfluxDB and query your data using the following tools:
+
+{{< children readmore=true hr=true hlevel="h2" >}}
diff --git a/content/shared/influxdb3-query-guides/execute-queries/influxdb-v1-api.md b/content/shared/influxdb3-query-guides/execute-queries/influxdb-v1-api.md
new file mode 100644
index 000000000..bc04a8fef
--- /dev/null
+++ b/content/shared/influxdb3-query-guides/execute-queries/influxdb-v1-api.md
@@ -0,0 +1,66 @@
+
+Use the InfluxDB v1 HTTP query API to query data in {{< product-name >}}
+with InfluxQL.
+
+The examples below use **cURL** to send HTTP requests to the InfluxDB v1 HTTP API,
+but you can use any HTTP client.
+
+{{% warn %}}
+#### InfluxQL feature support
+
+InfluxQL is being rearchitected to work with the InfluxDB 3 storage engine.
+This process is ongoing and some InfluxQL features are still being implemented.
+For information about the current implementation status of InfluxQL features, +see [InfluxQL feature support](/influxdb3/version/reference/influxql/feature-support/). +{{% /warn %}} + +Use the v1 `/query` endpoint and the `GET` request method to query data with InfluxQL: + +{{< api-endpoint endpoint="http://{{< influxdb/host >}}/query" method="get" api-ref="/influxdb3/version/api/#tag/Query" >}} + +Provide the following with your request: + +- **Headers:** + - **Authorization:** `Bearer AUTH_TOKEN` + + > [!Note] + > While in alpha, {{< product-name >}} does not require an authorization + > token. You can either omit this header or include it with an arbitrary + > token string. + +- **Query parameters:** + - **db**: the database to query + - **rp**: Optional: the retention policy to query + - **q**: URL-encoded InfluxQL query + +{{% code-placeholders "(DATABASE|AUTH)_(NAME|TOKEN)" %}} +```sh +curl --get https://{{< influxdb/host >}}/query \ + --header "Authorization: Bearer AUTH_TOKEN" \ + --data-urlencode "db=DATABASE_NAME" \ + --data-urlencode "q=SELECT * FROM home" +``` +{{% /code-placeholders %}} + +Replace the following configuration values: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: + the name of the database to query +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: + your authorization token + +## Return results as JSON or CSV + +By default, the `/query` endpoint returns results in **JSON**, but it can also +return results in **CSV**. To return results as CSV, include the `Accept` header +with the `application/csv` or `text/csv` MIME type: + +{{% code-placeholders "(DATABASE|AUTH)_(NAME|TOKEN)" %}} +```sh +curl --get https://{{< influxdb/host >}}/query \ + --header "Authorization: BEARER AUTH_TOKEN" \ + --header "Accept: application/csv" \ + --data-urlencode "db=DATABASE_NAME" \ + --data-urlencode "q=SELECT * FROM home" +``` +{{% /code-placeholders %}} diff --git a/content/shared/influxdb3-query-guides/execute-queries/influxdb3-cli.md b/content/shared/influxdb3-query-guides/execute-queries/influxdb3-cli.md new file mode 100644 index 000000000..49468f277 --- /dev/null +++ b/content/shared/influxdb3-query-guides/execute-queries/influxdb3-cli.md @@ -0,0 +1,226 @@ + +Use the [`influxdb3 query` command](/influxdb3/version/reference/cli/influxdb3/query/) +to query data in {{< product-name >}} with SQL or InfluxQL. + +Provide the following with your command: + + + +- **Database name**: The name of the database to query. + Provide this using one of the following: + + - `-d`, `--database` command option + - `INFLUXDB3_DATABASE_NAME` environment variable + +- **Query language** (Optional): The query language of the query. + Use the `-l`, `--language` option to specify one of the following query languages: + + - `sql` _(default)_ + - `influxql` + +- **Query**: SQL or InfluxQL query to execute. 
Provide the query in one of the + following ways: + + - a string + - the `--file` option and the path to a file that contains the query + - from stdin + +{{% code-placeholders "(DATABASE|AUTH)_(TOKEN|NAME)" %}} + +{{< tabs-wrapper >}} +{{% tabs %}} +[SQL](#) +[InfluxQL](#) +{{% /tabs %}} + +{{% tab-content %}} + + + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[string](#) +[file](#) +[stdin](#) +{{% /code-tabs %}} +{{% code-tab-content %}} + + +```bash +influxdb3 query \ + --database DATABASE_NAME \ + "SELECT * FROM home" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} + + +```bash +influxdb3 query \ + --database DATABASE_NAME \ + --file ./query.sql +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} + + +```bash +cat ./query.sql | influxdb3 query --database DATABASE_NAME +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + + + +{{% /tab-content %}} + +{{% tab-content %}} + + + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[string](#) +[file](#) +[stdin](#) +{{% /code-tabs %}} +{{% code-tab-content %}} + + +```bash +influxdb3 query \ + --language influxql \ + --database DATABASE_NAME \ + "SELECT * FROM home" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} + + +```bash +influxdb3 query \ + --language influxql \ + --database DATABASE_NAME \ + --file ./query.influxql +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} + + +```bash +cat ./query.influxql | influxdb3 query \ + --language influxql \ + --database DATABASE_NAME +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + + + +{{% /tab-content %}} +{{< /tabs-wrapper >}} + +{{% /code-placeholders %}} + +In the examples above and below, replace the following: + + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: + Name of the database to query + +## Output format + +The `influxdb3 query` command supports the following output formats: + +- `pretty` _(default)_ +- `json` +- `jsonl` +- `csv` +- `parquet` _(must [output to a file](#output-query-results-to-a-parquet-file))_ + +Use the `--format` flag to specify the output format: + +{{% code-placeholders "(DATABASE|AUTH)_(TOKEN|NAME)" %}} +{{% influxdb/custom-timestamps %}} +```sh +influxdb3 query \ + --database DATABASE_NAME \ + --format json \ + "SELECT * FROM home WHERE time >= '2022-01-01T08:00:00Z' LIMIT 5" +``` +{{% /influxdb/custom-timestamps %}} +{{% /code-placeholders %}} + +{{< expand-wrapper >}} +{{% expand "View example pretty-formatted results" %}} +{{% influxdb/custom-timestamps %}} +``` ++----+------+-------------+------+---------------------+ +| co | hum | room | temp | time | ++----+------+-------------+------+---------------------+ +| 0 | 35.9 | Living Room | 21.1 | 2022-01-01T08:00:00 | +| 0 | 35.9 | Kitchen | 21.0 | 2022-01-01T08:00:00 | +| 0 | 35.9 | Living Room | 21.4 | 2022-01-01T09:00:00 | +| 0 | 36.2 | Kitchen | 23.0 | 2022-01-01T09:00:00 | +| 0 | 36.0 | Living Room | 21.8 | 2022-01-01T10:00:00 | ++----+------+-------------+------+---------------------+ +``` +{{% /influxdb/custom-timestamps %}} +{{% /expand %}} +{{% expand "View example JSON-formatted results" %}} +{{% influxdb/custom-timestamps %}} +```json +[{"co":0,"hum":35.9,"room":"Living Room","temp":21.1,"time":"2022-01-01T08:00:00"},{"co":0,"hum":35.9,"room":"Kitchen","temp":21.0,"time":"2022-01-01T08:00:00"},{"co":0,"hum":35.9,"room":"Living Room","temp":21.4,"time":"2022-01-01T09:00:00"},{"co":0,"hum":36.2,"room":"Kitchen","temp":23.0,"time":"2022-01-01T09:00:00"},{"co":0,"hum":36.0,"room":"Living 
Room","temp":21.8,"time":"2022-01-01T10:00:00"}] +``` +{{% /influxdb/custom-timestamps %}} +{{% /expand %}} +{{% expand "View example JSON-line-formatted results" %}} +{{% influxdb/custom-timestamps %}} +```json +{"co":0,"hum":35.9,"room":"Living Room","temp":21.1,"time":"2022-01-01T08:00:00"} +{"co":0,"hum":35.9,"room":"Kitchen","temp":21.0,"time":"2022-01-01T08:00:00"} +{"co":0,"hum":35.9,"room":"Living Room","temp":21.4,"time":"2022-01-01T09:00:00"} +{"co":0,"hum":36.2,"room":"Kitchen","temp":23.0,"time":"2022-01-01T09:00:00"} +{"co":0,"hum":36.0,"room":"Living Room","temp":21.8,"time":"2022-01-01T10:00:00"} +``` +{{% /influxdb/custom-timestamps %}} +{{% /expand %}} +{{% expand "View example CSV-formatted results" %}} +{{% influxdb/custom-timestamps %}} +```csv +co,hum,room,temp,time +0,35.9,Living Room,21.1,2022-01-01T08:00:00 +0,35.9,Kitchen,21.0,2022-01-01T08:00:00 +0,35.9,Living Room,21.4,2022-01-01T09:00:00 +0,36.2,Kitchen,23.0,2022-01-01T09:00:00 +0,36.0,Living Room,21.8,2022-01-01T10:00:00 +``` +{{% /influxdb/custom-timestamps %}} +{{% /expand %}} +{{< /expand-wrapper >}} + +## Output query results to a Parquet file + +To output query results to a Parquet file, provide the following options with +the `influxdb3 query` command: + +- `--format`: `parquet` +- `-o`, `--output`: the filepath to the Parquet file to store results in + +{{% code-placeholders "(DATABASE|AUTH)_(TOKEN|NAME)" %}} +{{% influxdb/custom-timestamps %}} +```sh +influxdb3 query \ + --database DATABASE_NAME \ + --format parquet \ + --output path/to/results.parquet \ + "SELECT * FROM home WHERE time >= '2022-01-01T08:00:00Z' LIMIT 5" +``` +{{% /influxdb/custom-timestamps %}} +{{% /code-placeholders %}} diff --git a/content/shared/influxdb3-query-guides/influxql/_index.md b/content/shared/influxdb3-query-guides/influxql/_index.md new file mode 100644 index 000000000..7e70a0c1c --- /dev/null +++ b/content/shared/influxdb3-query-guides/influxql/_index.md @@ -0,0 +1,8 @@ + +Learn to use InfluxQL to query data stored in {{< product-name >}}. + +{{< children type="anchored-list" >}} + +--- + +{{< children readmore=true hr=true >}} \ No newline at end of file diff --git a/content/shared/influxdb3-query-guides/influxql/aggregate-select.md b/content/shared/influxdb3-query-guides/influxql/aggregate-select.md new file mode 100644 index 000000000..79493e0dd --- /dev/null +++ b/content/shared/influxdb3-query-guides/influxql/aggregate-select.md @@ -0,0 +1,216 @@ + +An InfluxQL query that aggregates data includes the following clauses: + +{{< req type="key" >}} + +- {{< req "\*">}} `SELECT`: Specify fields and calculations to output from a + measurement or use the wildcard alias (`*`) to select all fields and tags + from a measurement. +- {{< req "\*">}} `FROM`: Specify the measurement to query data from. +- `WHERE`: Only retrieve data that meets the specified conditions--for example, + time is in a time range, contains specific tag values, or contains a field + value outside specified thresholds. +- `GROUP BY`: Group data by tag values and time intervals. + +{{% note %}} +For simplicity, the term _"aggregate"_ in this guide refers to applying +both aggregate and selector functions to a dataset. 
+{{% /note %}} + +Learn how to apply aggregate operations to your queried data: + +- [Aggregate and selector functions](#aggregate-and-selector-functions) + - [Aggregate functions](#aggregate-functions) + - [Selector functions](#selector-functions) +- [Example aggregate queries](#example-aggregate-queries) + +{{% influxql/v1-v3-data-model-note %}} + +## Aggregate and selector functions + +Both aggregate and selector functions return a limited number of rows from each group. +Aggregate functions return a single row, whereas some selector functions let you +specify the number of rows to return from each group. +For example, if you `GROUP BY room` and perform an aggregate operation +in your `SELECT` clause, results include an aggregate value for each unique +value of `room`. + +### Aggregate functions + +Use **aggregate functions** to aggregate values in a specified field for each +group and return a single row per group containing the aggregate field value. + +View InfluxQL aggregate functions + +##### Basic aggregate query + +```sql +SELECT MEAN(co) from home +``` + +### Selector functions + +Use **selector functions** to "select" a value from a specified field. + +View InfluxQL selector functions + +##### Basic selector query + +```sql +SELECT TOP(co, 3) from home +``` + +## Example aggregate queries + +- [Perform an ungrouped aggregation](#perform-an-ungrouped-aggregation) +- [Group and aggregate data](#group-and-aggregate-data) + - [Downsample data by applying interval-based aggregates](#downsample-data-by-applying-interval-based-aggregates) +- [Query rows based on aggregate values](#query-rows-based-on-aggregate-values) + +> [!Note] +> +> #### Sample data +> +> The following examples use the [Home sensor data](/influxdb3/version/reference/sample-data/#home-sensor-data). +> To run the example queries and return results, +> [write the sample data](/influxdb3/version/reference/sample-data/#write-the-home-sensor-data-to-influxdb) +> to your {{% product-name %}} database before running the example queries. + +### Perform an ungrouped aggregation + +To aggregate _all_ queried values in a specified field: + +- Use aggregate or selector functions in your `SELECT` statement. +- Do not include a `GROUP BY` clause to leave your data ungrouped. + +```sql +SELECT MEAN(co) AS "average co" FROM home +``` + +{{< expand-wrapper >}} +{{% expand "View example results" %}} +{{% influxql/table-meta %}} +name: home +{{% /influxql/table-meta %}} + +| time | average co | +| :--- | ----------------: | +| 0 | 5.269230769230769 | +{{% /expand %}} +{{< /expand-wrapper >}} + +### Group and aggregate data + +To apply aggregate or selector functions to grouped data: + +- Use aggregate or selector functions in your `SELECT` statement. +- Include a `GROUP BY` clause with a comma-delimited list of tags to group by. + +Keep the following in mind when using `GROUP BY`: + +- `GROUP BY` can use column aliases that are defined in the `SELECT` clause. 
+ +```sql +SELECT + MEAN(temp) AS "average temp" +FROM home +GROUP BY room +``` + +{{< expand-wrapper >}} +{{% expand "View example results" %}} + +{{% influxql/table-meta %}} +name: home +tags: room=Kitchen +{{% /influxql/table-meta %}} + +| time | average temp | +| :--- | -----------------: | +| 0 | 22.623076923076926 | + +{{% influxql/table-meta %}} +name: home +tags: room=Living Room +{{% /influxql/table-meta %}} + +| time | average temp | +| :--- | ----------------: | +| 0 | 22.16923076923077 | +{{% /expand %}} +{{< /expand-wrapper >}} + +#### Downsample data by applying interval-based aggregates + +A common use case when querying time series is downsampling data by applying +aggregates to time-based groups. To group and aggregate data into time-based +groups: + +- In your `SELECT` clause, apply [aggregate](/influxdb3/version/reference/influxql/functions/aggregates/) + or [selector](/influxdb3/version/reference/influxql/functions/selectors/) + functions to queried fields. + +- In your `WHERE` clause, include time bounds for the query. + Interval-based aggregates produce a row for each specified time interval. + If no time bounds are specified in the `WHERE` clause, the query uses the + default time range (1970-01-01T00:00:00Z to now) and returns a row for each + interval in that time range. + +- In your `GROUP BY` clause: + + - Use the [`time()` function](/influxdb3/version/reference/influxql/functions/date-time/#time) + to specify the time interval to group by. + - _Optional_: Specify other tags to group by. + + +The following example retrieves unique combinations of time intervals and rooms with their minimum, maximum, and average temperatures. + +```sql +SELECT + MAX(temp) AS "max temp", + MIN(temp) AS "min temp", + MEAN(temp) AS "average temp" +FROM home +WHERE + time >= '2022-01-01T08:00:00Z' + AND time < '2022-01-01T20:00:00Z' +GROUP BY time(2h), room +``` + +{{< expand-wrapper >}} +{{% expand "View example results" "1" %}} +{{% influxdb/custom-timestamps %}} + +{{% influxql/table-meta %}} +name: home +tags: room=Kitchen +{{% /influxql/table-meta %}} + +| time | max temp | min temp | average temp | +| :------------------- | -------: | -------: | -----------------: | +| 2022-01-01T08:00:00Z | 23 | 21 | 22 | +| 2022-01-01T10:00:00Z | 22.7 | 22.4 | 22.549999999999997 | +| 2022-01-01T12:00:00Z | 22.8 | 22.5 | 22.65 | +| 2022-01-01T14:00:00Z | 22.8 | 22.7 | 22.75 | +| 2022-01-01T16:00:00Z | 22.7 | 22.4 | 22.549999999999997 | +| 2022-01-01T18:00:00Z | 23.3 | 23.1 | 23.200000000000003 | +| 2022-01-01T20:00:00Z | 22.7 | 22.7 | 22.7 | + +{{% influxql/table-meta %}} +name: home +tags: room=Living Room +{{% /influxql/table-meta %}} + +| time | max temp | min temp | average temp | +| :------------------- | -------: | -------: | -----------------: | +| 2022-01-01T08:00:00Z | 21.4 | 21.1 | 21.25 | +| 2022-01-01T10:00:00Z | 22.2 | 21.8 | 22 | +| 2022-01-01T12:00:00Z | 22.4 | 22.2 | 22.299999999999997 | +| 2022-01-01T14:00:00Z | 22.3 | 22.3 | 22.3 | +| 2022-01-01T16:00:00Z | 22.6 | 22.4 | 22.5 | +| 2022-01-01T18:00:00Z | 22.8 | 22.5 | 22.65 | +| 2022-01-01T20:00:00Z | 22.2 | 22.2 | 22.2 | + +{{% /influxdb/custom-timestamps %}} +{{% /expand %}} +{{< /expand-wrapper >}} diff --git a/content/shared/influxdb3-query-guides/influxql/basic-query.md b/content/shared/influxdb3-query-guides/influxql/basic-query.md new file mode 100644 index 000000000..f44441c0d --- /dev/null +++ b/content/shared/influxdb3-query-guides/influxql/basic-query.md @@ -0,0 +1,230 @@ + +InfluxQL (Influx Query Language) is an 
SQL-like query language used to interact
+with InfluxDB and work with time series data.
+
+{{% influxql/v1-v3-data-model-note %}}
+
+A basic InfluxQL query that queries data from InfluxDB most commonly includes the
+following clauses:
+
+{{< req type="key" >}}
+
+- {{< req "\*">}} `SELECT`: Specify fields, tags, and calculations to return
+  from a [table](/influxdb3/version/reference/glossary/#table) or use the
+  wildcard alias (`*`) to select all fields and tags from a table. It requires
+  at least one
+  [field key](/influxdb3/version/reference/glossary/#field-key) or the
+  wildcard alias (`*`). For more information, see
+  [Notable SELECT statement behaviors](/influxdb3/version/reference/influxql/select/#notable-select-statement-behaviors).
+- {{< req "\*">}} `FROM`: Specify the
+  [table](/influxdb3/version/reference/glossary/#table) to query from.
+
+  It requires one or more comma-delimited
+  [measurement expressions](/influxdb3/version/reference/influxql/select/#measurement_expression).
+
+- `WHERE`: Filter data based on
+  [field values](/influxdb3/version/reference/glossary/#field),
+  [tag values](/influxdb3/version/reference/glossary/#tag), or
+  [timestamps](/influxdb3/version/reference/glossary/#timestamp). Only
+  return data that meets the specified conditions--for example, falls within a
+  time range, contains specific tag values, or contains a field value outside a
+  specified range.
+
+{{% influxdb/custom-timestamps %}}
+
+```sql
+SELECT
+  temp,
+  hum,
+  room
+FROM home
+WHERE
+  time >= '2022-01-01T08:00:00Z'
+  AND time <= '2022-01-01T20:00:00Z'
+```
+
+{{% /influxdb/custom-timestamps %}}
+
+## Result set
+
+If at least one row satisfies the query, {{% product-name %}} returns row data
+in the query result set.
+If a query uses a `GROUP BY` clause, the result set
+includes the following:
+
+- Columns listed in the query's `SELECT` clause
+- A `time` column that contains the timestamp for the record or the group
+- An `iox::measurement` column that contains the record's
+  [table](/influxdb3/version/reference/glossary/#table) name
+- Columns listed in the query's `GROUP BY` clause; each row in the result set
+  contains the values used for grouping
+
+### GROUP BY result columns
+
+If a query uses `GROUP BY` and the `WHERE` clause doesn't filter by time, then
+groups are based on the
+[default time range](/influxdb3/version/reference/influxql/group-by/#default-time-range).
+
+## Basic query examples
+
+- [Query data within time boundaries](#query-data-within-time-boundaries)
+- [Query data without time boundaries](#query-data-without-time-boundaries)
+- [Query specific fields and tags](#query-specific-fields-and-tags)
+- [Query fields based on tag values](#query-fields-based-on-tag-values)
+- [Query points based on field values](#query-points-based-on-field-values)
+- [Alias queried fields and tags](#alias-queried-fields-and-tags)
+
+> [!Note]
+>
+> #### Sample data
+>
+> The following examples use the [Home sensor data](/influxdb3/version/reference/sample-data/#home-sensor-data).
+> To run the example queries and return results,
+> [write the sample data](/influxdb3/version/reference/sample-data/#write-the-home-sensor-data-to-influxdb)
+> to your {{% product-name %}} database before running the example queries.
+
+### Query data within time boundaries
+
+- Use the `SELECT` clause to specify what tags and fields to return.
+  Specify at least one field key.
+  To return all tags and fields, use the wildcard alias (`*`).
+- Specify the [table](/influxdb3/version/reference/glossary/#table) to + query in the `FROM` clause. +- Specify time boundaries in the `WHERE` clause. Include time-based predicates + that compare the value of the `time` column to a timestamp. + Use the `AND` logical operator to chain multiple predicates together. + +{{% influxdb/custom-timestamps %}} + +```sql +SELECT * +FROM home +WHERE + time >= '2022-01-01T08:00:00Z' + AND time <= '2022-01-01T12:00:00Z' +``` + +{{% /influxdb/custom-timestamps %}} + +Query time boundaries can be relative or absolute. + +{{< expand-wrapper >}} +{{% expand "Query with relative time boundaries" %}} + +To query data from relative time boundaries, compare the value of the `time` +column to a timestamp calculated by subtracting an interval from a timestamp. +Use `now()` to return the timestamp for the current time (UTC). + +##### Query all data from the last month + +```sql +SELECT * FROM home WHERE time >= now() - 30d +``` + +##### Query one day of data from a week ago + +```sql +SELECT * +FROM home +WHERE + time >= now() - 7d + AND time <= now() - 6d +``` + +{{% /expand %}} + +{{% expand "Query with absolute time boundaries" %}} + +To query data from absolute time boundaries, compare the value of the `time` +column to a timestamp literal. +Use the `AND` logical operator to chain together +multiple predicates and define both start and stop boundaries for the query. + +{{% influxdb/custom-timestamps %}} + +```sql +SELECT + * +FROM + home +WHERE + time >= '2022-01-01T08:00:00Z' + AND time <= '2022-01-01T20:00:00Z' +``` + +{{% /influxdb/custom-timestamps %}} + +{{% /expand %}} +{{< /expand-wrapper >}} + +### Query data without time boundaries + +To query data without time boundaries, do not include any time-based predicates +in your `WHERE` clause. +If a time range is not defined in the `WHERE` clause, +the default time range is the Unix epoch (`1970-01-01T00:00:00Z`) to _now_. + +{{% warn %}} +Querying data _without time bounds_ can return an unexpected amount of data. +The query may take a long time to complete and results may be truncated. +{{% /warn %}} + +```sql +SELECT * FROM home +``` + +### Query specific fields and tags + +To query specific fields, include them in the `SELECT` clause. +If querying multiple fields or tags, comma-delimit each. +If a field or tag key includes special characters or spaces or is +case-sensitive, wrap the key in _double-quotes_. + +```sql +SELECT time, room, temp, hum FROM home +``` + +### Query fields based on tag values + +- In the `SELECT` clause, include fields you want to query and tags you want to + base conditions on. +- In the `WHERE` clause, include predicates that compare the tag identifier to a + string literal. Use + [logical operators](/influxdb3/version/reference/influxql/where/#logical-operators) + to chain multiple predicates together and apply multiple conditions. + +```sql +SELECT * FROM home WHERE room = 'Kitchen' +``` + +### Query points based on field values + +- In the `SELECT` clause, include fields you want to query. +- In the `WHERE` clause, include predicates that compare the field identifier to + a value or expression. + Use + [logical operators](/influxdb3/version/reference/influxql/where/#logical-operators) + (`AND`, `OR`) to chain multiple predicates together and apply multiple + conditions. + +```sql +SELECT co, time FROM home WHERE co >= 10 OR co <= -10 +``` + +### Alias queried fields and tags + +To alias or rename fields and tags that you query, use the `AS` clause. 
+After the tag, field, or expression you want to alias, pass `AS` followed by the +alias name as an identifier (wrap in double quotes (`"`) if the alias includes +spaces or special characters)--for example: + +```sql +SELECT temp AS temperature, hum AS "humidity (%)" FROM home +``` + +{{% note %}} +When aliasing columns in **InfluxQL**, use the `AS` clause and an +[identifier](/influxdb3/version/reference/influxql/#identifiers). When +[aliasing columns in **SQL**](/influxdb3/version/query-data/sql/basic-query/#alias-queried-fields-and-tags), +you can use the `AS` clause to define the alias, but it isn't necessary. +{{% /note %}} diff --git a/content/shared/influxdb3-query-guides/influxql/explore-schema.md b/content/shared/influxdb3-query-guides/influxql/explore-schema.md new file mode 100644 index 000000000..7685878b4 --- /dev/null +++ b/content/shared/influxdb3-query-guides/influxql/explore-schema.md @@ -0,0 +1,323 @@ + +Use InfluxQL `SHOW` statements to return information about your data schema. + +{{% influxql/v1-v3-data-model-note %}} + +- [List measurements in a database](#list-measurements-in-a-database) + - [List measurements that contain specific tag key-value pairs](#list-measurements-that-contain-specific-tag-key-value-pairs) + - [List measurements that match a regular expression](#list-measurements-that-match-a-regular-expression) +- [List field keys in a measurement](#list-field-keys-in-a-measurement) +- [List tag keys in a measurement](#list-tag-keys-in-a-measurement) + - [List tag keys in measurements that contain a specific tag key-value pair](#list-tag-keys-in-measurements-that-contain-a-specific-tag-key-value-pair) +- [List tag values for a specific tag key](#list-tag-values-for-a-specific-tag-key) + - [List tag values for multiple tags](#list-tag-values-for-multiple-tags) + - [List tag values for tags that match a regular expression](#list-tag-values-for-tags-that-match-a-regular-expression) + - [List tag values associated with a specific tag key-value pair](#list-tag-values-associated-with-a-specific-tag-key-value-pair) + +> [!Note] +> +> #### Sample data +> +> The following examples use data provided in [sample data sets](/influxdb3/version/reference/sample-data/). +> To run the example queries and return identical results, follow the instructions +> provided for each sample data set to write the data to your {{% product-name %}} +> database. + +## List measurements in a database + +Use [`SHOW MEASUREMENTS`](/influxdb3/version/reference/influxql/show/#show-measurements) +to list measurements in your InfluxDB database. + +```sql +SHOW MEASUREMENTS +``` + +{{< expand-wrapper >}} +{{% expand "View example output" %}} + +{{% influxql/table-meta %}} +name: measurements +{{% /influxql/table-meta %}} + +| name | +| :----------- | +| bitcoin | +| home | +| home_actions | +| numbers | +| weather | + +{{% /expand %}} +{{< /expand-wrapper >}} + +### List measurements that contain specific tag key-value pairs + +To return only measurements with specific tag key-value pairs, include a `WHERE` +clause with tag key-value pairs to query for. 
+
+```sql
+SHOW MEASUREMENTS WHERE room = 'Kitchen'
+```
+
+{{< expand-wrapper >}}
+{{% expand "View example output" "1" %}}
+
+{{% influxql/table-meta %}}
+name: measurements
+{{% /influxql/table-meta %}}
+
+| name         |
+| :----------- |
+| home         |
+| home_actions |
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+### List measurements that match a regular expression
+
+To return only measurements with names that match a
+[regular expression](/influxdb3/version/reference/influxql/regular-expressions/),
+include a `WITH` clause that compares the `MEASUREMENT` to a regular expression.
+
+```sql
+SHOW MEASUREMENTS WITH MEASUREMENT =~ /^home/
+```
+
+{{< expand-wrapper >}}
+{{% expand "View example output" "2" %}}
+
+{{% influxql/table-meta %}}
+name: measurements
+{{% /influxql/table-meta %}}
+
+| name         |
+| :----------- |
+| home         |
+| home_actions |
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+## List field keys in a measurement
+
+Use [`SHOW FIELD KEYS`](/influxdb3/version/reference/influxql/show/#show-field-keys)
+to return all field keys in a measurement.
+Include a `FROM` clause to specify the measurement.
+If no measurement is specified, the query returns all field keys in the database.
+
+```sql
+SHOW FIELD KEYS FROM home
+```
+
+{{< expand-wrapper >}}
+{{% expand "View example output" "3" %}}
+
+{{% influxql/table-meta %}}
+name: home
+{{% /influxql/table-meta %}}
+
+| fieldKey | fieldType |
+| :------- | :-------- |
+| co       | integer   |
+| hum      | float     |
+| temp     | float     |
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+## List tag keys in a measurement
+
+Use [`SHOW TAG KEYS`](/influxdb3/version/reference/influxql/show/#show-tag-keys)
+to return all tag keys in a measurement.
+Include a `FROM` clause to specify the measurement.
+If no measurement is specified, the query returns all tag keys in the database.
+
+```sql
+SHOW TAG KEYS FROM home_actions
+```
+
+{{< expand-wrapper >}}
+{{% expand "View example output" "4" %}}
+
+{{% influxql/table-meta %}}
+name: home_actions
+{{% /influxql/table-meta %}}
+
+| tagKey |
+| :----- |
+| action |
+| level  |
+| room   |
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+### List tag keys in measurements that contain a specific tag key-value pair
+
+To return all tag keys in measurements that contain specific tag key-value pairs,
+include a `WHERE` clause with the tag key-value pairs to query for.
+
+```sql
+SHOW TAG KEYS WHERE room = 'Kitchen'
+```
+
+{{< expand-wrapper >}}
+{{% expand "View example output" "5" %}}
+
+{{% influxql/table-meta %}}
+name: home
+{{% /influxql/table-meta %}}
+
+| tagKey |
+| :----- |
+| room   |
+
+{{% influxql/table-meta %}}
+name: home_actions
+{{% /influxql/table-meta %}}
+
+| tagKey |
+| :----- |
+| action |
+| level  |
+| room   |
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+## List tag values for a specific tag key
+
+Use [`SHOW TAG VALUES`](/influxdb3/version/reference/influxql/show/#show-tag-values)
+to return all values for specific tags in a measurement.
+
+- Include a `FROM` clause to specify one or more measurements to query.
+- Use the `WITH` clause to compare `KEY` to the tag keys you want to list values for.
+- Use the `WHERE` clause to restrict the search to a specific time range
+  (default time range is the last day).
+
+```sql
+SHOW TAG VALUES FROM weather WITH KEY = location
+```
+
+{{% note %}}
+
+#### Include a FROM clause
+
+We strongly recommend including a `FROM` clause with the `SHOW TAG VALUES`
+statement that specifies 1-50 tables to query.
+Without a `FROM` clause, the InfluxDB query engine must read data from all +tables and return unique tag values from each. + +Depending on the number of tables in your database and the number of unique tag +values in each table, excluding a `FROM` clause can result in poor query performance, +query timeouts, or unnecessary resource allocation that may affect other queries. + +{{% /note %}} + +{{< expand-wrapper >}} +{{% expand "View example output" "5" %}} + +{{% influxql/table-meta %}} +name: weather +{{% /influxql/table-meta %}} + +| key | value | +| :------- | :------------ | +| location | Concord | +| location | Hayward | +| location | San Francisco | + +{{% /expand %}} +{{< /expand-wrapper >}} + +### List tag values for multiple tags + +To return tag values for multiple specific tag keys, use the `IN` operator in +the `WITH` clause to compare `KEY` to a list of tag keys. + +```sql +SHOW TAG VALUES FROM home_actions WITH KEY IN ("level", "action") +``` + +{{< expand-wrapper >}} +{{% expand "View example output" "6" %}} + +{{% influxql/table-meta %}} +name: home_actions +{{% /influxql/table-meta %}} + +| key | value | +| :----- | :---- | +| action | alert | +| action | cool | +| level | ok | +| level | warn | + +{{% /expand %}} +{{< /expand-wrapper >}} + +### List tag values for tags that match a regular expression + +To return only tag values from tags keys that match a regular expression, use +regular expression comparison operators in your `WITH` clause to compare `KEY` +to the regular expression. + +```sql +SHOW TAG VALUES FROM home, home_actions WITH KEY =~ /oo/ +``` + +{{< expand-wrapper >}} +{{% expand "View example output" "7" %}} + +{{% influxql/table-meta %}} +name: home +{{% /influxql/table-meta %}} + +| key | value | +| :--- | :---------- | +| room | Kitchen | +| room | Living Room | + +{{% influxql/table-meta %}} +name: home_actions +{{% /influxql/table-meta %}} + +| key | value | +| :--- | :---------- | +| room | Kitchen | +| room | Living Room | + +{{% /expand %}} +{{< /expand-wrapper >}} + +### List tag values associated with a specific tag key-value pair + +To list tag values for tags associated with a specific tag key-value pair: + +- Use the `WITH` clause to identify what tag keys to return values for. +- Include a `WHERE` clause that identifies the tag key-value pair to query for. + +The following example returns tag values for the `action` and `level` tags for +points where the `room` tag value is `Kitchen`. + +```sql +SHOW TAG VALUES FROM home_actions WITH KEY IN ("action", "level") WHERE room = 'Kitchen' +``` + +{{< expand-wrapper >}} +{{% expand "View example output" "8" %}} + +{{% influxql/table-meta %}} +name: home_actions +{{% /influxql/table-meta %}} + +| key | value | +| :----- | :---- | +| action | alert | +| action | cool | +| level | ok | +| level | warn | + +{{% /expand %}} +{{< /expand-wrapper >}} diff --git a/content/shared/influxdb3-query-guides/influxql/parameterized-queries.md b/content/shared/influxdb3-query-guides/influxql/parameterized-queries.md new file mode 100644 index 000000000..bc2279218 --- /dev/null +++ b/content/shared/influxdb3-query-guides/influxql/parameterized-queries.md @@ -0,0 +1,315 @@ + +Parameterized queries in {{% product-name %}} let you dynamically and safely change values in a query. +If your application code allows user input to customize values or expressions in a query, use a parameterized query to make sure untrusted input is processed strictly as data and not executed as code. 
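+
+For example--a minimal sketch that uses the sample `home` data and the `$room`
+placeholder syntax described later in this guide, with `'USER_INPUT'` standing in
+for a value interpolated from user input--the difference between interpolating
+untrusted input into the query text and binding it as a parameter looks like this:
+
+```sql
+-- Risky: user input concatenated directly into the query text
+SELECT * FROM home WHERE room = 'USER_INPUT'
+
+-- Parameterized: user input is bound to a named placeholder at query time
+SELECT * FROM home WHERE room = $room
+```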
+ +Parameterized queries: + +- help prevent injection attacks, which can occur if input is executed as code +- help make queries more reusable + +{{% note %}} +#### Prevent injection attacks + +For more information on security and query parameterization, +see the [OWASP SQL Injection Prevention Cheat Sheet](https://cheatsheetseries.owasp.org/cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#defense-option-1-prepared-statements-with-parameterized-queries). +{{% /note %}} + +In InfluxDB 3, a parameterized query is an InfluxQL or SQL query that contains one or more named parameter placeholders–variables that represent input data. + +- [Use parameters in `WHERE` expressions](#use-parameters-in-where-expressions) +- [Parameter data types](#parameter-data-types) + - [Data type examples](#data-type-examples) + - [Time expressions](#time-expressions) + - [Not compatible with parameters](#not-compatible-with-parameters) +- [Parameterize an SQL query](#parameterize-an-sql-query) +- [Execute parameterized InfluxQL queries](#execute-parameterized-influxql-queries) + - [Use InfluxDB Flight RPC clients](#use-influxdb-flight-rpc-clients) +- [Client support for parameterized queries](#client-support-for-parameterized-queries) +- [Not supported](#not-supported) + +{{% note %}} + +#### Parameters only supported in `WHERE` expressions + +InfluxDB 3 supports parameters in `WHERE` clause **predicate expressions**. +Parameter values must be one of the [allowed parameter data types](#parameter-data-types). + +If you use parameters in other expressions or clauses, +such as function arguments, `SELECT`, or `GROUP BY`, then your query might not work as you expect. + +{{% /note %}} + +## Use parameters in `WHERE` expressions + +You can use parameters in `WHERE` clause **predicate expressions**-–for example, the following query contains a `$temp` parameter: + +```sql +SELECT * FROM measurement WHERE temp > $temp +``` + +When executing a query, you specify parameter name-value pairs. +The value that you assign to a parameter must be one of the [parameter data types](#parameter-data-types). + +```go +{"temp": 22.0} +``` + +The InfluxDB Querier parses the query text with the parameter placeholders, and then generates query plans that replace the placeholders with the values that you provide. +This separation of query structure from input data ensures that input is treated as one of the allowed [data types](#parameter-data-types) and not as executable code. + +## Parameter data types + +A parameter value can be one of the following data types: + +- Null +- Boolean +- Unsigned integer (`u_int64`) +- Integer (`int64`) +- Double (`float64`) +- String + +### Data type examples + +```js +{ + "string": "Living Room", + "double": 3.14, + "unsigned_integer": 1234, + "integer": -1234, + "boolean": false, + "null": Null, +} +``` + +### Time expressions + +To parameterize time bounds, substitute a parameter for a timestamp literal--for example: + +```sql +SELECT * +FROM home +WHERE time >= $min_time +``` + +For the parameter value, specify the timestamp literal as a string--for example: + +{{% influxdb/custom-timestamps %}} + +```go +// Assign a timestamp string literal to the min_time parameter. 
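+// InfluxDB substitutes this string for the $min_time placeholder when it executes the query.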
+parameters := influxdb3.QueryParameters{ + "min_time": "2022-01-01 00:00:00.00", +} +``` + +{{% /influxdb/custom-timestamps %}} + +InfluxDB executes the query as the following: + +{{% influxdb/custom-timestamps %}} + +```sql +SELECT * +FROM home +WHERE time >= '2022-01-01 00:00:00.00' +``` + +{{% /influxdb/custom-timestamps %}} + +### Not compatible with parameters + +If you use parameters for the following, your query might not work as you expect: + +- In clauses other than `WHERE`, such as `SELECT` or `GROUP BY` +- As function arguments, such as `avg($temp)` +- In place of identifiers, such as column or table names +- In place of duration literals, such as `time > now() - $min_duration` + +## Parameterize an SQL query + +{{% note %}} +#### Sample data + +The following examples use the +[Get started home sensor data](/influxdb3/version/reference/sample-data/#get-started-home-sensor-data). +To run the example queries and return results, +[write the sample data](/influxdb3/version/reference/sample-data/#write-the-home-sensor-data-to-influxdb) +to your {{% product-name %}} database before running the example queries. +{{% /note %}} + +To use a parameterized query, do the following: + +1. In your query text, use the `$parameter` syntax to reference a parameter name--for example, +the following query contains `$room` and `$min_temp` parameter placeholders: + + ```sql + SELECT * + FROM home + WHERE time > now() - 7d + AND temp >= $min_temp + AND room = $room + ``` + +2. Provide a value for each parameter name. + If you don't assign a value for a parameter, InfluxDB returns an error. + The syntax for providing parameter values depends on the client you use--for example: + + + + {{< code-tabs-wrapper >}} + {{% code-tabs %}} + [Go](#) + {{% /code-tabs %}} + {{% code-tab-content %}} + + ```go + // Define a QueryParameters struct--a map of parameters to input values. + parameters := influxdb3.QueryParameters{ + "room": "Kitchen", + "min_temp": 20.0, + } + ``` + + {{% /code-tab-content %}} + {{< /code-tabs-wrapper >}} + +After InfluxDB receives your request and parses the query, it executes the query as + +```sql +SELECT * +FROM home +WHERE time > now() - 7d +AND temp >= 20.0 +AND room = 'Kitchen' +``` + +## Execute parameterized InfluxQL queries + +{{% note %}} +#### Sample data + +The following examples use the +[Get started home sensor data](/influxdb3/version/reference/sample-data/#get-started-home-sensor-data). +To run the example queries and return results, +[write the sample data](/influxdb3/version/reference/sample-data/#write-the-home-sensor-data-to-influxdb) +to your {{% product-name %}} database before running the example queries. +{{% /note %}} + +### Use InfluxDB Flight RPC clients + +Using the InfluxDB 3 native Flight RPC protocol and supported clients, you can send a parameterized query and a list of parameter name-value pairs. +InfluxDB Flight clients that support parameterized queries pass the parameter name-value pairs in a Flight ticket `params` field. 
+ +The following examples show how to use client libraries to execute parameterized InfluxQL queries: + + + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Go](#) +{{% /code-tabs %}} + +{{% code-tab-content %}} + +```go +import ( + "context" + "fmt" + "io" + "os" + "text/tabwriter" + "time" + "github.com/InfluxCommunity/influxdb3-go/v2/influxdb3" +) + +func Query(query string, parameters influxdb3.QueryParameters, + options influxdb3.QueryOptions) error { + url := os.Getenv("INFLUX_HOST") + token := os.Getenv("INFLUX_TOKEN") + database := os.Getenv("INFLUX_DATABASE") + + // Instantiate the influxdb3 client. + client, err := influxdb3.New(influxdb3.ClientConfig{ + Host: url, + Token: token, + Database: database, + }) + + if err != nil { + panic(err) + } + + // Ensure the client is closed after the Query function finishes. + defer func(client *influxdb3.Client) { + err := client.Close() + if err != nil { + panic(err) + } + }(client) + + // Call the client's QueryWithParameters function. + // Provide the query, parameters, and the InfluxQL QueryType option. + iterator, err := client.QueryWithParameters(context.Background(), query, + parameters, influxdb3.WithQueryType(options.QueryType)) + + // Create a buffer for storing rows as you process them. + w := tabwriter.NewWriter(io.Discard, 4, 4, 1, ' ', 0) + w.Init(os.Stdout, 0, 8, 0, '\t', 0) + + fmt.Fprintf(w, "time\troom\tco\thum\ttemp\n") + + // Format and write each row to the buffer. + // Process each row as key-value pairs. + for iterator.Next() { + row := iterator.Value() + // Use Go time package to format unix timestamp + // as a time with timezone layout (RFC3339 format) + time := (row["time"].(time.Time)). + Format(time.RFC3339) + + fmt.Fprintf(w, "%s\t%s\t%d\t%.1f\t%.1f\n", + time, row["room"], row["co"], row["hum"], row["temp"]) + } + w.Flush() + + return nil +} + +func main() { + // Use the $placeholder syntax in a query to reference parameter placeholders + // for input data. + // The following InfluxQL query contains the placeholders $room and $min_temp. + query := ` + SELECT * + FROM home + WHERE time > now() - 7d + AND temp >= $min_temp + AND room = $room` + + // Define a QueryParameters struct--a map of placeholder names to input values. + parameters := influxdb3.QueryParameters{ + "room": "Kitchen", + "min_temp": 20.0, + } + + Query(query, parameters, influxdb3.QueryOptions{ + QueryType: influxdb3.InfluxQL, + }) +} +``` + +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +## Client support for parameterized queries + +- Not all [InfluxDB 3 Flight clients](/influxdb3/version/reference/client-libraries/v3/) support parameterized queries. +- InfluxDB doesn't currently support parameterized queries or DataFusion prepared statements for Flight SQL or Flight SQL clients. +- InfluxDB 3 SQL and InfluxQL parameterized queries aren’t supported in InfluxDB v1 and v2 clients. + +## Not supported + +Currently, parameterized queries in {{% product-name %}} don't provide the following: + +- support for DataFusion prepared statements +- query caching, optimization, or performance benefits diff --git a/content/shared/influxdb3-query-guides/influxql/troubleshoot.md b/content/shared/influxdb3-query-guides/influxql/troubleshoot.md new file mode 100644 index 000000000..d7a1b7436 --- /dev/null +++ b/content/shared/influxdb3-query-guides/influxql/troubleshoot.md @@ -0,0 +1,259 @@ + +Learn how to troubleshoot and fix common InfluxQL errors. 
+ +> [!Note] +> **Disclaimer:** This document does not contain an exhaustive list of all +> possible InfluxQL errors. + +- [error: database name required](#error-database-name-required) +- [error parsing query: found ..., expected identifier at ...](#error-parsing-query-found--expected-identifier-at-) +- [error parsing query: mixing aggregate and non-aggregate queries is not supported](#error-parsing-query-mixing-aggregate-and-non-aggregate-queries-is-not-supported) +- [invalid operation: time and \*influxql.VarRef are not compatible](#invalid-operation-time-and-influxqlvarref-are-not-compatible) + +{{% influxql/v1-v3-data-model-note %}} + +## error: database name required + +``` +error: database name required +``` + +### Cause + +The `database name required` error occurs when certain +[`SHOW` queries](/influxdb3/version/reference/influxql/show/) +do not specify a [database](/influxdb3/version/reference/glossary/#database) +in the query or with the query request. + +For example, the following `SHOW` query doesn't specify the database and assumes +the `db` is not specified in the `/query` API request: + +```sql +SHOW MEASUREMENTS +``` + +### Solution + +To resolve this error, specify a database with your query request by doing one +of the following: + +{{% code-placeholders "DATABASE_(NAME|TOKEN)" %}} + +- Include an `ON` clause with the `SHOW` statement that specifies the database + to query: + + ```sql + SHOW MEASUREMENTS ON DATABASE_NAME + ``` + +- If using the [InfluxDB v1 query API](/enterprise_influxdb/v1/tools/api/#query-string-parameters), + Include the `db` query parameter in your request: + + ```sh + curl --get https://{{< influxdb/host >}}/query \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --data-urlencode "db=DATABASE_NAME" \ + --data-urlencode "q=SHOW MEASUREMENTS" + ``` + +{{% /code-placeholders %}} + +**Related:** +[InfluxQL `SHOW` statements](/influxdb3/version/reference/influxql/show/), +[Explore your schema with InfluxQL](/influxdb3/version/query-data/influxql/explore-schema/) + +--- + +## error parsing query: found ..., expected identifier at ... + +``` +error parsing query: found EXAMPLE, expected identifier at line 1, char 14 +``` + +### Causes + +This error occurs when InfluxDB anticipates an identifier in a query but doesn't find it. +Identifiers are tokens that refer to database names, retention policy names, +measurement names, field keys, and tag keys. + +This error is generally caused by one of the following: + +- [A required identifier is missing](#a-required-identifier-is-missing) +- [A string literal is used instead of an identifier](#a-string-literal-is-used-instead-of-an-identifier) +- [An InfluxQL keyword is used as an unquoted identifier](#an-influxql-keyword-is-used-as-an-unquoted-identifier) + +#### A required identifier is missing + +Some InfluxQL statements and clauses require identifiers to identify databases, +measurements, tags, or fields. If the statement is missing a required identifier, +the query returns the `expected identifier` error. 
+
+For example, the following query omits the measurement name from the
+[`FROM` clause](/influxdb3/version/reference/influxql/select/#from-clause):
+
+```sql
+SELECT * FROM WHERE color = 'blue'
+```
+
+##### Solution
+
+Update the query to include the expected identifier in the `FROM` clause that
+identifies the measurement to query:
+
+```sql
+SELECT * FROM measurement_name WHERE color = 'blue'
+```
+
+#### A string literal is used instead of an identifier
+
+In InfluxQL, string literals are wrapped in single quotes (`''`) while character
+sequences wrapped in double quotes (`""`) are parsed as identifiers. If you use
+single quotes to wrap an identifier, the identifier is parsed as a string
+literal and returns the `expected identifier` error.
+
+For example, the following query wraps the measurement name in single quotes:
+
+```sql
+SELECT * FROM 'measurement-name' WHERE color = 'blue'
+```
+
+The query returns the following error:
+
+```
+error parsing query: found measurement-name, expected identifier at line 1, char 14
+```
+
+##### Solution
+
+Update single-quoted identifiers to use double quotes so they are parsed as
+identifiers and not as string literals.
+
+```sql
+SELECT * FROM "measurement-name" WHERE color = 'blue'
+```
+
+#### An InfluxQL keyword is used as an unquoted identifier
+
+[InfluxQL keywords](/influxdb3/version/reference/influxql/#keywords)
+are character sequences reserved for specific functionality in the InfluxQL syntax.
+It is possible to use a keyword as an identifier, but the identifier must be
+wrapped in double quotes (`""`).
+
+{{% note %}}
+While wrapping identifiers that are InfluxQL keywords in double quotes is an
+acceptable workaround, for simplicity, you should avoid using
+[InfluxQL keywords](/influxdb3/version/reference/influxql/#keywords)
+as identifiers.
+{{% /note %}}
+
+For example, the following query uses the keyword `duration` as an unquoted identifier:
+
+```sql
+SELECT duration FROM runs
+```
+
+Returns the following error:
+
+```
+error parsing query: found DURATION, expected identifier, string, number, bool at line 1, char 8
+```
+
+##### Solution
+
+Double quote [InfluxQL keywords](/influxdb3/version/reference/influxql/#keywords)
+when used as identifiers:
+
+```sql
+SELECT "duration" FROM runs
+```
+
+**Related:**
+[InfluxQL keywords](/influxdb3/version/reference/influxql/#keywords)
+
+---
+
+## error parsing query: mixing aggregate and non-aggregate queries is not supported
+
+```
+error parsing query: mixing aggregate and non-aggregate queries is not supported
+```
+
+### Cause
+
+The `mixing aggregate and non-aggregate` error occurs when a `SELECT` statement
+includes both an [aggregate function](/influxdb3/version/reference/influxql/functions/aggregates/)
+and a standalone [field key](/influxdb3/version/reference/glossary/#field-key) or
+[tag key](/influxdb3/version/reference/glossary/#tag-key).
+
+Aggregate functions return a single calculated value per group and column, and
+there is no obvious single value to return for any un-aggregated fields or tags.
+
+For example, the following query selects two fields from the `home`
+measurement--`temp` and `hum`. However, it only applies the aggregate function,
+`MEAN`, to the `temp` field.
+ +```sql +SELECT MEAN(temp), hum FROM home +``` + +### Solution + +To fix this error, apply an aggregate or selector function to each of the queried +fields: + +```sql +SELECT MEAN(temp), MAX(hum) FROM home +``` + +**Related:** +[InfluxQL functions](/influxdb3/version/reference/influxql/functions/), +[Aggregate data with InfluxQL](/influxdb3/version/query-data/influxql/aggregate-select/) + +--- + +## invalid operation: time and \*influxql.VarRef are not compatible + +``` +invalid operation: time and *influxql.VarRef are not compatible +``` + +### Cause + +The `time and \*influxql.VarRef are not compatible` error occurs when +date-time strings are double-quoted in a query. +Date-time strings should be formatted as string literals and wrapped in single quotes (`''`). + +For example: + +{{% influxdb/custom-timestamps %}} +```sql +SELECT temp +FROM home +WHERE + time >= "2022-01-01T08:00:00Z" + AND time <= "2022-01-01T00:20:00Z" +``` +{{% /influxdb/custom-timestamps %}} + +Returns the following error: + +``` +invalid operation: time and *influxql.VarRef are not compatible +``` + +### Solution + +To fix the error, wrap RFC3339 timestamps in single quotes rather than double quotes. + +{{% influxdb/custom-timestamps %}} +```sql +SELECT temp +FROM home +WHERE + time >= '2022-01-01T08:00:00Z' + AND time <= '2022-01-01T00:20:00Z' +``` +{{% /influxdb/custom-timestamps %}} + +**Related:** +[Query data within time boundaries](/influxdb3/version/query-data/influxql/basic-query/#query-data-within-time-boundaries), +[`WHERE` clause--Time ranges](/influxdb3/version/reference/influxql/where/#time-ranges), +[InfluxQL time syntax](/influxdb3/version/reference/influxql/time-and-timezone/#time-syntax) diff --git a/content/shared/influxdb3-query-guides/sql/_index.md b/content/shared/influxdb3-query-guides/sql/_index.md new file mode 100644 index 000000000..b0c677e4f --- /dev/null +++ b/content/shared/influxdb3-query-guides/sql/_index.md @@ -0,0 +1,8 @@ + +Learn to query data stored in {{< product-name >}} using SQL. + +{{< children type="anchored-list" >}} + +--- + +{{< children readmore=true hr=true >}} \ No newline at end of file diff --git a/content/shared/influxdb3-query-guides/sql/aggregate-select.md b/content/shared/influxdb3-query-guides/sql/aggregate-select.md new file mode 100644 index 000000000..1db4da3c1 --- /dev/null +++ b/content/shared/influxdb3-query-guides/sql/aggregate-select.md @@ -0,0 +1,322 @@ + +An SQL query that aggregates data includes the following clauses: + +{{< req type="key" >}} + +- {{< req "\*">}} `SELECT`: Specify fields, tags, and calculations to output from a + table or use the wildcard alias (`*`) to select all fields and tags + from a table. +- {{< req "\*">}} `FROM`: Specify the table to query data from. +- `WHERE`: Only return rows that meets the specified conditions--for example, + the time is within a time range, a tag has a specific value, or a field value + is above or below a specified threshold. +- `GROUP BY`: Group data that have the same values for specified columns and + expressions (for example, an aggregate function result). + +{{% note %}} +For simplicity, the verb, **"aggregate,"** in this guide refers to applying +both aggregate and selector functions to a dataset. 
+{{% /note %}} + +Learn how to apply aggregate operations to your queried data: + +- [Aggregate and selector functions](#aggregate-and-selector-functions) + - [Aggregate functions](#aggregate-functions) + - [Selector functions](#selector-functions) +- [Example aggregate queries](#example-aggregate-queries) + +## Aggregate and selector functions + +Both aggregate and selector functions return a single row from each SQL group. +For example, if you `GROUP BY room` and perform an aggregate operation +in your `SELECT` clause, results include an aggregate value for each unique +value of `room`. + +### Aggregate functions + +Use **aggregate functions** to aggregate values in a specified column for each +group and return a single row per group containing the aggregate value. + +View SQL aggregate functions + +##### Basic aggregate query + +```sql +SELECT AVG(co) from home +``` + +### Selector functions + +Use **selector functions** to "select" a value from a specified column. +The available selector functions are designed to work with time series data. + +View SQL selector functions + +Each selector function returns a Rust _struct_ (similar to a JSON object) +representing a single time and value from the specified column in the each group. +What time and value get returned depend on the logic in the selector function. +For example, `selector_first` returns the value of specified column in the first +row of the group. `selector_max` returns the maximum value of the specified +column in the group. + +#### Selector struct schema + +The struct returned from a selector function has two properties: + +- **time**: `time` value in the selected row +- **value**: value of the specified column in the selected row + +```js +{time: 2023-01-01T00:00:00Z, value: 72.1} +``` + +#### Use selector functions + +Each selector function has two arguments: + +- The first is the column to operate on. +- The second is the time column to use in the selection logic. + +In your `SELECT` statement, execute a selector function and use bracket notation +to reference properties of the [returned struct](#selector-struct-schema) to +populate the column value: + +```sql +SELECT + selector_first(temp, time)['time'] AS time, + selector_first(temp, time)['value'] AS temp, + room +FROM home +GROUP BY room +``` + +## Example aggregate queries + +- [Perform an ungrouped aggregation](#perform-an-ungrouped-aggregation) +- [Group and aggregate data](#group-and-aggregate-data) + - [Downsample data by applying interval-based aggregates](#downsample-data-by-applying-interval-based-aggregates) +- [Query rows based on aggregate values](#query-rows-based-on-aggregate-values) + +> [!Note] +> #### Sample data +> +> The following examples use the +> [Home sensor sample data](/influxdb3/version/reference/sample-data/#home-sensor-data). +> To run the example queries and return results, +> [write the sample data](/influxdb3/version/reference/sample-data/#write-home-sensor-data-to-influxdb) +> to your {{% product-name %}} database before running the example queries. + +### Perform an ungrouped aggregation + +To aggregate _all_ queried values in a specified column: + +- Use aggregate or selector functions in your `SELECT` statement. +- Do not include a `GROUP BY` clause to leave your data ungrouped. 
+ +```sql +SELECT avg(co) AS 'average co' from home +``` + +{{< expand-wrapper >}} +{{% expand "View example results" %}} +| average co | +| :---------------: | +| 5.269230769230769 | +{{% /expand %}} +{{< /expand-wrapper >}} + +### Group and aggregate data + +To apply aggregate or selector functions to grouped data: + +- Use aggregate or selector functions in your `SELECT` statement. +- Include columns to group by in your `SELECT` statement. +- Include a `GROUP BY` clause with a comma-delimited list of columns and + expressions to group by. + +Keep the following in mind when using `GROUP BY`: + +- `GROUP BY` can use column aliases that are defined in the `SELECT` clause. +- `GROUP BY` won't use an aliased value if the alias is the same as the original + column name. `GROUP BY` will use the original value of the column, not the + transformed, aliased value. + +```sql +SELECT + room, + avg(temp) AS 'average temp' +FROM home +GROUP BY room +``` + +{{< expand-wrapper >}} +{{% expand "View example results" %}} +| room | average temp | +| :---------- | -----------------: | +| Living Room | 22.16923076923077 | +| Kitchen | 22.623076923076926 | +{{% /expand %}} +{{< /expand-wrapper >}} + +#### Downsample data by applying interval-based aggregates + +A common use case when querying time series is downsampling data by applying +aggregates to time-based groups. To group and aggregate data into time-based +groups: + +- In your `SELECT` clause: + + - Use the [`DATE_BIN` function](/influxdb3/version/reference/sql/functions/time-and-date/#date_bin) + to calculate time intervals and output a column that contains the start of + the interval nearest to the `time` timestamp in each row--for example, the + following clause calculates two-hour intervals (originating at the Unix epoch) + and returns a new `time` column that contains the start of the interval + nearest to `home.time`: + + ```sql + SELECT + DATE_BIN(INTERVAL '2 hours', time) AS time + FROM home + ... + ``` + + Given a `time` value + {{% influxdb/custom-timestamps-span %}}`2022-01-01T13:00:50.000Z`{{% /influxdb/custom-timestamps-span %}}, + the output `time` column contains + {{% influxdb/custom-timestamps-span %}}`2022-01-01T12:00:00.000Z`{{% /influxdb/custom-timestamps-span %}}. + + - Use [aggregate](/influxdb3/version/reference/sql/functions/aggregate/) or + [selector](/influxdb3/version/reference/sql/functions/selector/) functions on + specified columns. + +- In your `GROUP BY` clause: + + - Specify the `DATE_BIN(...)` column ordinal reference (`1`). + This lets you group by the transformed `time` value and maintain the `time` + column name. + - Specify other columns (for example, `room`) that are specified in the + `SELECT` clause and aren't used in a selector function. + + ```sql + SELECT + DATE_BIN(INTERVAL '2 hours', time) AS time + ... + GROUP BY 1, room + ... + ``` + + To reference the `DATE_BIN(...)` result column by _name_ in the `GROUP BY` + clause, assign an alias other than "time" in the `SELECT` clause--for example: + + ```sql + SELECT + DATE_BIN(INTERVAL '2 hours', time) AS _time + FROM home + ... + GROUP BY _time, room + ``` + +- Include an `ORDER BY` clause with columns to sort by. 
+ +The following example retrieves unique combinations of time intervals and rooms +with their minimum, maximum, and average temperatures: + +```sql +SELECT + DATE_BIN(INTERVAL '2 hours', time) AS time, + room, + selector_max(temp, time)['value'] AS 'max temp', + selector_min(temp, time)['value'] AS 'min temp', + avg(temp) AS 'average temp' +FROM home +GROUP BY 1, room +ORDER BY room, 1 +``` + +{{< expand-wrapper >}} +{{% expand "View example results" %}} +{{% influxdb/custom-timestamps %}} +| time | room | max temp | min temp | average temp | +| :------------------- | :---------- | -------: | -------: | -----------------: | +| 2022-01-01T08:00:00Z | Kitchen | 23 | 21 | 22 | +| 2022-01-01T10:00:00Z | Kitchen | 22.7 | 22.4 | 22.549999999999997 | +| 2022-01-01T12:00:00Z | Kitchen | 22.8 | 22.5 | 22.65 | +| 2022-01-01T14:00:00Z | Kitchen | 22.8 | 22.7 | 22.75 | +| 2022-01-01T16:00:00Z | Kitchen | 22.7 | 22.4 | 22.549999999999997 | +| 2022-01-01T18:00:00Z | Kitchen | 23.3 | 23.1 | 23.200000000000003 | +| 2022-01-01T20:00:00Z | Kitchen | 22.7 | 22.7 | 22.7 | +| 2022-01-01T08:00:00Z | Living Room | 21.4 | 21.1 | 21.25 | +| 2022-01-01T10:00:00Z | Living Room | 22.2 | 21.8 | 22 | +| 2022-01-01T12:00:00Z | Living Room | 22.4 | 22.2 | 22.299999999999997 | +| 2022-01-01T14:00:00Z | Living Room | 22.3 | 22.3 | 22.3 | +| 2022-01-01T16:00:00Z | Living Room | 22.6 | 22.4 | 22.5 | +| 2022-01-01T18:00:00Z | Living Room | 22.8 | 22.5 | 22.65 | +| 2022-01-01T20:00:00Z | Living Room | 22.2 | 22.2 | 22.2 | +{{% /influxdb/custom-timestamps %}} +{{% /expand %}} +{{< /expand-wrapper >}} + +> [!Note] +> +> #### GROUP BY time +> +> In the `GROUP BY` clause, the name "time" always refers to the `time` column +> in the source table. If you want to reference a calculated time column by name, +> use an alias different from "time" or use the column ordinal--for example: +> +> {{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Column alias](#) +[Column ordinal](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sql +SELECT + DATE_BIN(INTERVAL '2 hours', time) AS _time, + room, + selector_max(temp, time)['value'] AS 'max temp', + selector_min(temp, time)['value'] AS 'min temp', + avg(temp) AS 'average temp' +FROM home +GROUP BY _time, room +ORDER BY room, _time +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```sql +SELECT + DATE_BIN(INTERVAL '2 hours', time) AS time, + room, + selector_max(temp, time)['value'] AS 'max temp', + selector_min(temp, time)['value'] AS 'min temp', + avg(temp) AS 'average temp' +FROM home +GROUP BY 1, room +ORDER BY room, 1 +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +### Query rows based on aggregate values + +To query data based on values after an aggregate operation, include a `HAVING` +clause with defined predicate conditions such as a value threshold. +Predicates in the `WHERE` clause are applied _before_ data is aggregated. +Predicates in the `HAVING` clause are applied _after_ data is aggregated. 
+ +```sql +SELECT + room, + avg(co) AS 'average co' +FROM home +GROUP BY room +HAVING "average co" > 5 +``` + +{{< expand-wrapper >}} +{{% expand "View example results" %}} +| room | average co | +| :------ | -----------------: | +| Kitchen | 6.6923076923076925 | +{{% /expand %}} +{{< /expand-wrapper >}} diff --git a/content/shared/influxdb3-query-guides/sql/basic-query.md b/content/shared/influxdb3-query-guides/sql/basic-query.md new file mode 100644 index 000000000..be4af1e2d --- /dev/null +++ b/content/shared/influxdb3-query-guides/sql/basic-query.md @@ -0,0 +1,208 @@ + +The InfluxDB SQL implementation is powered by the [Apache Arrow DataFusion](https://arrow.apache.org/datafusion/) +query engine which provides an SQL syntax similar to other relational query languages. + +A basic SQL query that queries data from {{< product-name >}} most commonly +includes the following clauses: + +{{< req type="key" >}} + +- {{< req "\*">}} `SELECT`: Specify fields, tags, and calculations to output + from a table or use the wildcard alias (`*`) to select all fields and tags + from a table. +- {{< req "\*">}} `FROM`: Specify the table to query data from. +- `WHERE`: Only return rows that meets the specified conditions--for example, + the time is within a time range, a tag has a specific value, or a field value + is above or below a specified threshold. + +{{% influxdb/custom-timestamps %}} +```sql +SELECT + temp, + hum, + room +FROM home +WHERE + time >= '2022-01-01T08:00:00Z' + AND time <= '2022-01-01T20:00:00Z' +``` +{{% /influxdb/custom-timestamps %}} + +## Result set + +If at least one row satisfies the query, {{% product-name %}} returns row data +in the query result set. An SQL query result set includes columns listed in the +query's `SELECT` statement. + +## Basic query examples + +- [Query data within time boundaries](#query-data-within-time-boundaries) +- [Query data without time boundaries](#query-data-without-time-boundaries) +- [Query specific fields and tags](#query-specific-fields-and-tags) +- [Query fields based on tag values](#query-fields-based-on-tag-values) +- [Query points based on field values](#query-points-based-on-field-values) +- [Alias queried fields and tags](#alias-queried-fields-and-tags) + +> [!Note] +> #### Sample data +> +> The following examples use the +> [Home sensor sample data](/influxdb3/version/reference/sample-data/#home-sensor-data). +> To run the example queries and return results, +> [write the sample data](/influxdb3/version/reference/sample-data/#write-home-sensor-data-to-influxdb) +> to your {{% product-name %}} database before running the example queries. + +### Query data within time boundaries + +- Use the `SELECT` clause to specify what tags and fields to return. + To return all tags and fields, use the wildcard alias (`*`). +- Specify the table to query in the `FROM` clause. +- Specify time boundaries in the `WHERE` clause. + Include time-based predicates that compare the value of the `time` column to a timestamp. + Use the `AND` logical operator to chain multiple predicates together. + +{{% influxdb/custom-timestamps %}} +```sql +SELECT * +FROM home +WHERE + time >= '2022-01-01T08:00:00Z' + AND time <= '2022-01-01T12:00:00Z' +``` +{{% /influxdb/custom-timestamps %}} + +Query time boundaries can be relative or absolute. 
+
+{{< expand-wrapper >}}
+{{% expand "Query with relative time boundaries" %}}
+
+To query data from relative time boundaries, compare the value of the `time`
+column to a timestamp calculated by subtracting an interval from a timestamp.
+Use `now()` to return the timestamp for the current time (UTC).
+
+##### Query all data from the last month
+
+```sql
+SELECT * FROM home WHERE time >= now() - INTERVAL '1 month'
+```
+
+##### Query one day of data from a week ago
+
+```sql
+SELECT *
+FROM home
+WHERE
+  time >= now() - INTERVAL '7 days'
+  AND time <= now() - INTERVAL '6 days'
+```
+{{% /expand %}}
+
+{{% expand "Query with absolute time boundaries" %}}
+
+To query data from absolute time boundaries, compare the value of the `time` column
+to a timestamp literal.
+Use the `AND` logical operator to chain together multiple predicates and define
+both start and stop boundaries for the query.
+
+{{% influxdb/custom-timestamps %}}
+```sql
+SELECT
+  *
+FROM
+  home
+WHERE
+  time >= '2022-01-01T08:00:00Z'
+  AND time <= '2022-01-01T20:00:00Z'
+```
+{{% /influxdb/custom-timestamps %}}
+
+{{% /expand %}}
+
+{{% expand "Query data using a time zone offset" %}}
+
+To query data using a time zone offset, use the
+[`AT TIME ZONE` operator](/influxdb3/version/reference/sql/operators/other/#at-time-zone)
+to apply a time zone offset to timestamps in the `WHERE` clause.
+
+{{% note %}}
+Timestamp types in InfluxDB always represent a UTC time. `AT TIME ZONE` returns
+a UTC timestamp adjusted for the offset of the specified time zone.
+Timestamps in the `time` column are not updated.
+If you need to display timestamps in your current time zone, handle the
+conversion client-side.
+{{% /note %}}
+
+{{% influxdb/custom-timestamps %}}
+```sql
+SELECT
+  *
+FROM
+  home
+WHERE
+  time >= '2022-01-01 00:00:00'::TIMESTAMP AT TIME ZONE 'America/Los_Angeles'
+  AND time <= '2022-01-01 12:00:00'::TIMESTAMP AT TIME ZONE 'America/Los_Angeles'
+```
+{{% /influxdb/custom-timestamps %}}
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+### Query data without time boundaries
+
+To query data without time boundaries, do not include any time-based predicates
+in your `WHERE` clause.
+
+{{% warn %}}
+Querying data _without time bounds_ can return an unexpected amount of data.
+The query may take a long time to complete and results may be truncated.
+{{% /warn %}}
+
+```sql
+SELECT * FROM home
+```
+
+### Query specific fields and tags
+
+To query specific fields, include them in the `SELECT` clause.
+If querying multiple fields or tags, comma-delimit each.
+If a field or tag key includes special characters or spaces or is case-sensitive,
+wrap the key in _double-quotes_.
+
+```sql
+SELECT time, room, temp, hum FROM home
+```
+
+### Query fields based on tag values
+
+- Include the fields you want to query and the tags you want to base conditions
+  on in the `SELECT` clause.
+- Include predicates in the `WHERE` clause that compare the tag identifier to
+  a string literal.
+  Use [logical operators](/influxdb3/version/reference/sql/operators/logical/) to
+  chain multiple predicates together and apply multiple conditions.
+
+```sql
+SELECT * FROM home WHERE room = 'Kitchen'
+```
+
+### Query points based on field values
+
+- In the `SELECT` clause, include fields you want to query.
+- In the `WHERE` clause, include predicates that compare the field identifier to a value or expression.
+ Use [logical operators](/influxdb3/version/reference/sql/where/#logical-operators) (`AND`, `OR`) to chain multiple predicates together + and apply multiple conditions. + +```sql +SELECT co, time FROM home WHERE co >= 10 OR co <= -10 +``` + +### Alias queried fields and tags + +To alias or rename fields and tags that you query, pass a string literal after +the field or tag identifier in the `SELECT` clause. +You can use the `AS` clause to define the alias, but it isn't necessary. +The following queries are functionally the same: + +```sql +SELECT temp 'temperature', hum 'humidity' FROM home + +SELECT temp AS 'temperature', hum AS 'humidity' FROM home +``` diff --git a/content/shared/influxdb3-query-guides/sql/cast-types.md b/content/shared/influxdb3-query-guides/sql/cast-types.md new file mode 100644 index 000000000..5a11f47b0 --- /dev/null +++ b/content/shared/influxdb3-query-guides/sql/cast-types.md @@ -0,0 +1,328 @@ + +Use the `CAST` function or double-colon `::` casting shorthand syntax to cast a +value to a specific type. + +```sql +-- CAST function +SELECT CAST(1234.5 AS BIGINT) + +-- Double-colon casting shorthand +SELECT 1234.5::BIGINT +``` + +- [Cast to a string type](#cast-to-a-string-type) +- [Cast to numeric types](#cast-to-numeric-types) + - [Float](#cast-to-a-float) + - [Integer](#cast-to-an-integer) + - [Unsigned integer](#cast-to-an-unsigned-integer) +- [Cast to a boolean type](#cast-to-a-boolean-type) +- [Cast to a timestamp type](#cast-to-a-timestamp-type) + +Casting operations can be performed on a column expression or a literal value. +For example, the following query uses the +[Home sensor sample data](/influxdb3/version/reference/sample-data/#home-sensor-data) +and: + +- Casts all values in the `time` column to integers (Unix nanosecond timestamps). +- Casts the literal string value `'1234'` to a 64-bit float for each row. + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[:: shorthand](#) +[CAST()](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sql +SELECT + time::BIGINT AS unix_time, + '1234'::DOUBLE AS string_to_float +FROM home +LIMIT 5 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```sql +SELECT + CAST(time AS BIGINT) AS unix_time, + CAST('1234' AS DOUBLE) AS string_to_float +FROM home +LIMIT 5 +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +{{% influxdb/custom-timestamps %}} + +| unix_time | string_to_float | +| :------------------ | --------------: | +| 1641024000000000000 | 1234 | +| 1641027600000000000 | 1234 | +| 1641031200000000000 | 1234 | +| 1641034800000000000 | 1234 | +| 1641038400000000000 | 1234 | + +{{% /influxdb/custom-timestamps %}} + +--- + +## Cast to a string type + +Use the `STRING`, `CHAR`, `VARCHAR`, or `TEXT` type in a casting operation to +cast a value to a string. + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[:: shorthand](#) +[CAST()](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sql +value::STRING +value::CHAR +value::VARCHAR +value::TEXT +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```sql +CAST(value AS STRING) +CAST(value AS CHAR) +CAST(value AS VARCHAR) +CAST(value AS TEXT) +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +SQL supports casting the following to a string value: + +- **Floats** +- **Integers** +- **Unsigned integers** +- **Booleans** +- **Timestamps** + +--- + +## Cast to numeric types + +The InfluxDB SQL implementation supports 64-bit floats (`DOUBLE`), +integers (`BIGINT`), and unsigned integers (`BIGINT UNSIGNED`). 
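+
+For example, the following query uses the shorthand syntax to cast the same
+numeric literal to each of these types (the column aliases are illustrative):
+
+```sql
+SELECT
+  1234.5::DOUBLE AS double_value,
+  1234.5::BIGINT AS bigint_value,
+  1234.5::BIGINT UNSIGNED AS unsigned_value
+```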
+ +### Cast to a float + +Use the `DOUBLE` type in a casting operation to cast a value to a 64-bit float. + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[:: shorthand](#) +[CAST()](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sql +value::DOUBLE +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```sql +CAST(value AS DOUBLE) +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +SQL supports casting the following to a float value: + +- **Strings**: Returns the float equivalent of the numeric string (`[0-9]`). + The following string patterns are also supported: + + - Scientific notation (`'123.4E+10'`) + - Infinity (`'±Inf'`) + - NaN (`'NaN'`) + +- **Integers** +- **Unsigned integers** + +### Cast to an integer + +Use the `BIGINT` type in a casting operation to cast a value to a 64-bit signed integer. + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[:: shorthand](#) +[CAST()](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sql +value::BIGINT +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```sql +CAST(value AS BIGINT) +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +SQL supports casting the following to an integer: + +- **Strings**: Returns the integer equivalent of the numeric string (`[0-9]`). +- **Floats**: Truncates the float value at the decimal. +- **Unsigned integers**: Returns the signed integer equivalent of the unsigned integer. +- **Booleans**: Returns `1` for `true` and `0` for `false`. +- **Timestamps**: Returns the equivalent + [nanosecond epoch timestamp](/influxdb3/version/reference/glossary/#unix-timestamp). + +### Cast to an unsigned integer + +Use the `BIGINT UNSIGNED` type in a casting operation to cast a value to a +64-bit unsigned integer. + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[:: shorthand](#) +[CAST()](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sql +value::BIGINT UNSIGNED +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```sql +CAST(value AS BIGINT UNSIGNED) +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +SQL supports casting the following to an unsigned integer: + +- **Strings**: Returns the unsigned integer equivalent of the numeric string (`[0-9]`). +- **Floats**: Truncates the float value at the decimal. +- **Integers**: Returns the unsigned integer equivalent of the signed integer. +- **Booleans**: Returns `1` for `true` and `0` for `false`. +- **Timestamps**: Returns the equivalent + [nanosecond epoch timestamp](/influxdb3/version/reference/glossary/#unix-timestamp). + +--- + +## Cast to a boolean type + +Use the `BOOLEAN` type in a casting operation to cast a value to a boolean. + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[:: shorthand](#) +[CAST()](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sql +value::BOOLEAN +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```sql +CAST(value AS BOOLEAN) +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +SQL supports casting the following to a boolean: + +- **Strings** + - Return `true`: + - `'true'` _(case-insensitive)_ + - `'t'`, _(case-insensitive)_ + - `'1'` + - Return `false`: + - `'false'` _(case-insensitive)_ + - `'f'` _(case-insensitive)_ + - `'0'` +- **Integers** + - Returns `true`: positive non-zero integer + - Returns `false`: `0` +- **Unsigned integers** + - Returns `true`: non-zero unsigned integer + - Returns `false`: `0` + +--- + +## Cast to a timestamp type + +Use the `TIMESTAMP` type in a casting operation to cast a value to a timestamp. 
+
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[:: shorthand](#)
+[CAST()](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+```sql
+value::TIMESTAMP
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```sql
+CAST(value AS TIMESTAMP)
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+SQL supports casting the following to a timestamp:
+
+- **Strings**: Returns the timestamp equivalent of the string value.
+  The following RFC3339 and RFC3339-like string patterns are supported:
+
+  - `YYYY-MM-DDT00:00:00.000Z`
+  - `YYYY-MM-DDT00:00:00.000-00:00`
+  - `YYYY-MM-DD 00:00:00.000-00:00`
+  - `YYYY-MM-DDT00:00:00Z`
+  - `YYYY-MM-DD 00:00:00.000`
+  - `YYYY-MM-DD 00:00:00`
+  - `YYYY-MM-DD`
+
+- **Integers**: Parses the integer as a Unix _second_ timestamp and returns
+  the equivalent timestamp.
+- **Unsigned integers**: Parses the unsigned integer as a Unix nanosecond timestamp
+  and returns the equivalent timestamp.
+
+{{% note %}}
+#### Cast Unix nanosecond timestamps to a timestamp type
+
+To cast a Unix nanosecond timestamp to a timestamp type, first cast the numeric
+value to an unsigned integer (`BIGINT UNSIGNED`) and then to a timestamp.
+You can also use the [`to_timestamp_nanos`](/influxdb3/version/reference/sql/functions/time-and-date/#to_timestamp_nanos)
+function.
+
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[:: shorthand](#)
+[CAST()](#)
+[to_timestamp_nanos](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+```sql
+1704067200000000000::BIGINT UNSIGNED::TIMESTAMP
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```sql
+CAST(CAST(1704067200000000000 AS BIGINT UNSIGNED) AS TIMESTAMP)
+```
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+```sql
+to_timestamp_nanos(1704067200000000000)
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+{{% /note %}}
+
+### Timestamp functions
+
+You can also use the following SQL functions to cast a value to a timestamp type:
+
+- [`to_timestamp`](/influxdb3/version/reference/sql/functions/time-and-date/#to_timestamp)
+- [`to_timestamp_millis`](/influxdb3/version/reference/sql/functions/time-and-date/#to_timestamp_millis)
+- [`to_timestamp_micros`](/influxdb3/version/reference/sql/functions/time-and-date/#to_timestamp_micros)
+- [`to_timestamp_nanos`](/influxdb3/version/reference/sql/functions/time-and-date/#to_timestamp_nanos)
+- [`to_timestamp_seconds`](/influxdb3/version/reference/sql/functions/time-and-date/#to_timestamp_seconds)
+- [`to_unixtime`](/influxdb3/version/reference/sql/functions/time-and-date/#to_unixtime)
diff --git a/content/shared/influxdb3-query-guides/sql/explore-schema.md b/content/shared/influxdb3-query-guides/sql/explore-schema.md
new file mode 100644
index 000000000..cec04bf86
--- /dev/null
+++ b/content/shared/influxdb3-query-guides/sql/explore-schema.md
@@ -0,0 +1,53 @@
+
+Use SQL to explore the data schema in your {{< product-name >}} database.
+
+## List tables in a database
+
+Use `SHOW TABLES` to list tables in your InfluxDB database.
+
+```sql
+SHOW TABLES
+```
+
+{{< expand-wrapper >}}
+{{% expand "View example output" %}}
+
+Tables with a `table_schema` of `iox` are tables that store your time series data.
+Tables with `system` or `information_schema` table schemas are system tables
+that store internal metadata.
+
+| table_catalog | table_schema       | table_name  | table_type |
+| :------------ | :----------------- | :---------- | ---------: |
+| public        | iox                | home        | BASE TABLE |
+| public        | iox                | noaa        | BASE TABLE |
+| public        | system             | queries     | BASE TABLE |
+| public        | information_schema | tables      | VIEW       |
+| public        | information_schema | views       | VIEW       |
+| public        | information_schema | columns     | VIEW       |
+| public        | information_schema | df_settings | VIEW       |
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+## List columns in a table
+
+Use the `SHOW COLUMNS` statement to view what columns are in a table.
+Use the `IN` clause to specify the table.
+
+```sql
+SHOW COLUMNS IN home
+```
+
+{{< expand-wrapper >}}
+{{% expand "View example output" %}}
+
+| table_catalog | table_schema | table_name | column_name | data_type                   | is_nullable |
+| :------------ | :----------- | :--------- | :---------- | :-------------------------- | ----------: |
+| public        | iox          | home       | co          | Int64                       | YES         |
+| public        | iox          | home       | hum         | Float64                     | YES         |
+| public        | iox          | home       | room        | Dictionary(Int32, Utf8)     | YES         |
+| public        | iox          | home       | temp        | Float64                     | YES         |
+| public        | iox          | home       | time        | Timestamp(Nanosecond, None) | NO          |
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
diff --git a/content/shared/influxdb3-query-guides/sql/fill-gaps.md b/content/shared/influxdb3-query-guides/sql/fill-gaps.md
new file mode 100644
index 000000000..e786a8ff7
--- /dev/null
+++ b/content/shared/influxdb3-query-guides/sql/fill-gaps.md
@@ -0,0 +1,112 @@
+
+Use [`date_bin_gapfill`](/influxdb3/version/reference/sql/functions/time-and-date/#date_bin_gapfill)
+with [`interpolate`](/influxdb3/version/reference/sql/functions/misc/#interpolate)
+or [`locf`](/influxdb3/version/reference/sql/functions/misc/#locf) to
+fill gaps of time where no data is returned.
+Gap-filling SQL queries handle missing data in time series by filling gaps
+with interpolated values or by carrying forward the last available observation.
+
+**To fill gaps in data:**
+
+1. Use the `date_bin_gapfill` function to window your data into time-based groups
+   and apply an [aggregate function](/influxdb3/version/reference/sql/functions/aggregate/)
+   to each window. If no data exists in a window, `date_bin_gapfill` inserts
+   a new row with the starting timestamp of the window, all columns in the
+   `GROUP BY` clause populated, and null values for the queried fields.
+
+2. Use either `interpolate` or `locf` to fill the inserted null values in the
+   specified column.
+
+   - **interpolate**: fills null values by interpolating values between non-null values.
+   - **locf**: fills null values by carrying the last observed value forward.
+
+   > [!Note]
+   > The expression passed to `interpolate` or `locf` must use an
+   > [aggregate function](/influxdb3/version/reference/sql/functions/aggregate/).
+
+3. Include a `WHERE` clause that sets upper and lower time bounds.
+   For example:
+
+{{% influxdb/custom-timestamps %}}
+```sql
+WHERE time >= '2022-01-01T08:00:00Z' AND time <= '2022-01-01T10:00:00Z'
+```
+{{% /influxdb/custom-timestamps %}}
+
+
+## Example of filling gaps in data
+
+The following examples use the [Home sensor sample data](/influxdb3/version/reference/sample-data/#home-sensor-data)
+to show how to use `date_bin_gapfill` and the different results of `interpolate`
+and `locf`.
+ +{{< tabs-wrapper >}} +{{% tabs "small" %}} +[interpolate](#) +[locf](#) +{{% /tabs %}} +{{% tab-content %}} + +{{% influxdb/custom-timestamps %}} + +```sql +SELECT + date_bin_gapfill(INTERVAL '30 minutes', time) as time, + room, + interpolate(avg(temp)) +FROM home +WHERE + time >= '2022-01-01T08:00:00Z' + AND time <= '2022-01-01T10:00:00Z' +GROUP BY 1, room +``` + +| time | room | AVG(home.temp) | +| :------------------- | :---------- | -------------: | +| 2022-01-01T08:00:00Z | Kitchen | 21 | +| 2022-01-01T08:30:00Z | Kitchen | 22 | +| 2022-01-01T09:00:00Z | Kitchen | 23 | +| 2022-01-01T09:30:00Z | Kitchen | 22.85 | +| 2022-01-01T10:00:00Z | Kitchen | 22.7 | +| 2022-01-01T08:00:00Z | Living Room | 21.1 | +| 2022-01-01T08:30:00Z | Living Room | 21.25 | +| 2022-01-01T09:00:00Z | Living Room | 21.4 | +| 2022-01-01T09:30:00Z | Living Room | 21.6 | +| 2022-01-01T10:00:00Z | Living Room | 21.8 | + +{{% /influxdb/custom-timestamps %}} + +{{% /tab-content %}} +{{% tab-content %}} + +{{% influxdb/custom-timestamps %}} + +```sql +SELECT + date_bin_gapfill(INTERVAL '30 minutes', time) as time, + room, + locf(avg(temp)) +FROM home +WHERE + time >= '2022-01-01T08:00:00Z' + AND time <= '2022-01-01T10:00:00Z' +GROUP BY 1, room +``` + +| time | room | AVG(home.temp) | +| :------------------- | :---------- | -------------: | +| 2022-01-01T08:00:00Z | Kitchen | 21 | +| 2022-01-01T08:30:00Z | Kitchen | 21 | +| 2022-01-01T09:00:00Z | Kitchen | 23 | +| 2022-01-01T09:30:00Z | Kitchen | 23 | +| 2022-01-01T10:00:00Z | Kitchen | 22.7 | +| 2022-01-01T08:00:00Z | Living Room | 21.1 | +| 2022-01-01T08:30:00Z | Living Room | 21.1 | +| 2022-01-01T09:00:00Z | Living Room | 21.4 | +| 2022-01-01T09:30:00Z | Living Room | 21.4 | +| 2022-01-01T10:00:00Z | Living Room | 21.8 | + +{{% /influxdb/custom-timestamps %}} + +{{% /tab-content %}} +{{< /tabs-wrapper >}} diff --git a/content/shared/influxdb3-query-guides/sql/parameterized-queries.md b/content/shared/influxdb3-query-guides/sql/parameterized-queries.md new file mode 100644 index 000000000..07efb64a8 --- /dev/null +++ b/content/shared/influxdb3-query-guides/sql/parameterized-queries.md @@ -0,0 +1,310 @@ + +Parameterized queries in {{% product-name %}} let you dynamically and safely change values in a query. +If your application code allows user input to customize values or expressions in a query, use a parameterized query to make sure untrusted input is processed strictly as data and not executed as code. + +Parameterized queries: + +- help prevent injection attacks, which can occur if input is executed as code +- help make queries more reusable + +{{% note %}} +#### Prevent injection attacks + +For more information on security and query parameterization, +see the [OWASP SQL Injection Prevention Cheat Sheet](https://cheatsheetseries.owasp.org/cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#defense-option-1-prepared-statements-with-parameterized-queries). +{{% /note %}} + +In InfluxDB 3, a parameterized query is an InfluxQL or SQL query that contains one or more named parameter placeholders–variables that represent input data. 
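+
+For example, in the following query, `$room` is a parameter placeholder for a
+tag value that you provide when you execute the query:
+
+```sql
+SELECT * FROM home WHERE room = $room
+```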
+ +- [Use parameters in `WHERE` expressions](#use-parameters-in-where-expressions) +- [Parameter data types](#parameter-data-types) + - [Data type examples](#data-type-examples) + - [Time expressions](#time-expressions) + - [Not compatible with parameters](#not-compatible-with-parameters) +- [Parameterize an SQL query](#parameterize-an-sql-query) +- [Execute parameterized SQL queries](#execute-parameterized-sql-queries) + - [Use InfluxDB Flight RPC clients](#use-influxdb-flight-rpc-clients) +- [Client support for parameterized queries](#client-support-for-parameterized-queries) +- [Not supported](#not-supported) + +{{% note %}} + +#### Parameters only supported in `WHERE` expressions + +InfluxDB 3 supports parameters in `WHERE` clause **predicate expressions**. +Parameter values must be one of the [allowed parameter data types](#parameter-data-types). + +If you use parameters in other expressions or clauses, +such as function arguments, `SELECT`, or `GROUP BY`, then your query might not work as you expect. + +{{% /note %}} + +## Use parameters in `WHERE` expressions + +You can use parameters in `WHERE` clause **predicate expressions**-–for example, the following query contains a `$temp` parameter: + +```sql +SELECT * FROM measurement WHERE temp > $temp +``` + +When executing a query, you specify parameter name-value pairs. +The value that you assign to a parameter must be one of the [parameter data types](#parameter-data-types). + +```go +{"temp": 22.0} +``` + +The InfluxDB Querier parses the query text with the parameter placeholders, and then generates query plans that replace the placeholders with the values that you provide. +This separation of query structure from input data ensures that input is treated as one of the allowed [data types](#parameter-data-types) and not as executable code. + +## Parameter data types + +A parameter value can be one of the following data types: + +- Null +- Boolean +- Unsigned integer (`u_int64`) +- Integer (`int64`) +- Double (`float64`) +- String + +### Data type examples + +```js +{ + "string": "Living Room", + "double": 3.14, + "unsigned_integer": 1234, + "integer": -1234, + "boolean": false, + "null": Null, +} +``` + +### Time expressions + +To parameterize time bounds, substitute a parameter for a timestamp literal--for example: + +```sql +SELECT * +FROM home +WHERE time >= $min_time +``` + +For the parameter value, specify the timestamp literal as a string--for example: + +{{% influxdb/custom-timestamps %}} + +```go +// Assign a timestamp string literal to the min_time parameter. +parameters := influxdb3.QueryParameters{ + "min_time": "2022-01-01 00:00:00.00", +} +``` + +{{% /influxdb/custom-timestamps %}} + +InfluxDB executes the query as the following: + +{{% influxdb/custom-timestamps %}} + +```sql +SELECT * +FROM home +WHERE time >= '2022-01-01 00:00:00.00' +``` + +{{% /influxdb/custom-timestamps %}} + +### Not compatible with parameters + +If you use parameters for the following, your query might not work as you expect: + +- In clauses other than `WHERE`, such as `SELECT` or `GROUP BY` +- As function arguments, such as `avg($temp)` +- In place of identifiers, such as column or table names +- In place of duration literals, such as `INTERVAL $minutes` + +## Parameterize an SQL query + +{{% note %}} +#### Sample data + +The following examples use the +[Get started home sensor data](/influxdb3/version/reference/sample-data/#get-started-home-sensor-data). 
+To run the example queries and return results, +[write the sample data](/influxdb3/version/reference/sample-data/#write-the-home-sensor-data-to-influxdb) +to your {{% product-name %}} database before running the example queries. +{{% /note %}} + +To use a parameterized query, do the following: + +1. In your query text, use the `$parameter` syntax to reference a parameter name--for example, +the following query contains `$room` and `$min_temp` parameter placeholders: + + ```sql + SELECT * + FROM home + WHERE time > now() - INTERVAL '7 days' + AND temp >= $min_temp + AND room = $room + ``` + +2. Provide a value for each parameter name. + If you don't assign a value for a parameter, InfluxDB returns an error. + The syntax for providing parameter values depends on the client you use--for example: + + + + {{< code-tabs-wrapper >}} + {{% code-tabs %}} + [Go](#) + {{% /code-tabs %}} + {{% code-tab-content %}} + + ```go + // Define a QueryParameters struct--a map of parameters to input values. + parameters := influxdb3.QueryParameters{ + "room": "Kitchen", + "min_temp": 20.0, + } + ``` + + {{% /code-tab-content %}} + {{< /code-tabs-wrapper >}} + +After InfluxDB receives your request and parses the query, it executes the query as + +```sql +SELECT * +FROM home +WHERE time > now() - INTERVAL '7 days' +AND temp >= 20.0 +AND room = 'Kitchen' +``` + +## Execute parameterized SQL queries + +{{% note %}} +#### Sample data + +The following examples use the +[Get started home sensor data](/influxdb3/version/reference/sample-data/#get-started-home-sensor-data). +To run the example queries and return results, +[write the sample data](/influxdb3/version/reference/sample-data/#write-the-home-sensor-data-to-influxdb) +to your {{% product-name %}} database before running the example queries. +{{% /note %}} + +### Use InfluxDB Flight RPC clients + +Using the InfluxDB 3 native Flight RPC protocol and supported clients, you can send a parameterized query and a list of parameter name-value pairs. +InfluxDB Flight clients that support parameterized queries pass the parameter name-value pairs in a Flight ticket `params` field. + +The following examples show how to use client libraries to execute parameterized SQL queries: + + + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Go](#) +{{% /code-tabs %}} + +{{% code-tab-content %}} + +```go +import ( + "context" + "fmt" + "io" + "os" + "text/tabwriter" + "time" + "github.com/InfluxCommunity/influxdb3-go/v2/influxdb3" +) + +func Query(query string, parameters influxdb3.QueryParameters) error { + url := os.Getenv("INFLUX_HOST") + token := os.Getenv("INFLUX_TOKEN") + database := os.Getenv("INFLUX_DATABASE") + + // Instantiate the influxdb3 client. + client, err := influxdb3.New(influxdb3.ClientConfig{ + Host: url, + Token: token, + Database: database, + }) + + if err != nil { + panic(err) + } + + // Ensure the client is closed after the Query function finishes. + defer func(client *influxdb3.Client) { + err := client.Close() + if err != nil { + panic(err) + } + }(client) + + // Call the client's QueryWithParameters function. + // Provide the query and parameters. The default QueryType is SQL. + iterator, err := client.QueryWithParameters(context.Background(), query, + parameters) + + // Create a buffer for storing rows as you process them. + w := tabwriter.NewWriter(io.Discard, 4, 4, 1, ' ', 0) + w.Init(os.Stdout, 0, 8, 0, '\t', 0) + + fmt.Fprintf(w, "time\troom\tco\thum\ttemp\n") + + // Format and write each row to the buffer. + // Process each row as key-value pairs. 
+ for iterator.Next() { + row := iterator.Value() + // Use Go time package to format unix timestamp + // as a time with timezone layout (RFC3339 format) + time := (row["time"].(time.Time)). + Format(time.RFC3339) + + fmt.Fprintf(w, "%s\t%s\t%d\t%.1f\t%.1f\n", + time, row["room"], row["co"], row["hum"], row["temp"]) + } + w.Flush() + + return nil +} + +func main() { + // Use the $placeholder syntax in a query to reference parameter placeholders + // for input data. + // The following SQL query contains the placeholders $room and $min_temp. + query := ` + SELECT * + FROM home + WHERE time > now() - INTERVAL '7 days' + AND temp >= $min_temp + AND room = $room` + + // Define a QueryParameters struct--a map of placeholder names to input values. + parameters := influxdb3.QueryParameters{ + "room": "Kitchen", + "min_temp": 20.0, + } +} +``` + +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +## Client support for parameterized queries + +- Not all [InfluxDB 3 Flight clients](/influxdb3/version/reference/client-libraries/v3/) support parameterized queries. +- InfluxDB doesn't currently support parameterized queries or DataFusion prepared statements for Flight SQL or Flight SQL clients. +- InfluxDB 3 SQL and InfluxQL parameterized queries aren’t supported in InfluxDB v1 and v2 clients. + +## Not supported + +Currently, parameterized queries in {{% product-name %}} don't provide the following: + +- support for DataFusion prepared statements +- query caching, optimization, or performance benefits diff --git a/content/shared/influxdb3-sample-data/sample-data.md b/content/shared/influxdb3-sample-data/sample-data.md new file mode 100644 index 000000000..326ad35c3 --- /dev/null +++ b/content/shared/influxdb3-sample-data/sample-data.md @@ -0,0 +1,506 @@ + +Sample datasets are used throughout the {{< product-name >}} documentation to +demonstrate functionality. +Use the following sample datasets to replicate provided examples. + +- [Home sensor data](#home-sensor-data) +- [Home sensor actions data](#home-sensor-actions-data) +- [NOAA Bay Area weather data](#noaa-bay-area-weather-data) +- [Bitcoin price data](#bitcoin-price-data) +- [Random numbers sample data](#random-numbers-sample-data) + +## Home sensor data + +Includes simulated hourly home sensor data with anomalous sensor readings to +demonstrate processing and alerting on time series data. +To customize timestamps in the dataset, use the {{< icon "clock" >}} button in +the lower right corner of the page. + +##### Time Range + +**{{% influxdb/custom-timestamps-span %}}2022-01-01T08:00:00Z{{% /influxdb/custom-timestamps-span %}}** +to +**{{% influxdb/custom-timestamps-span %}}2022-01-01T20:00:00Z{{% /influxdb/custom-timestamps-span %}}** +(Customizable) + +##### Schema + +- home (measurement) + - **tags**: + - room + - Kitchen + - Living Room + - **fields**: + - co (integer) + - temp (float) + - hum (float) + +{{< expand-wrapper >}} +{{% expand "Write home sensor data to InfluxDB" %}} + +#### Write the home sensor data to InfluxDB + +Use the InfluxDB v2 or v1 API to write the home sensor sample data to {{< product-name >}}. 
+ +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[v2 API](#) +[v1 API](#) +{{% /code-tabs %}} +{{% code-tab-content %}} + +{{% influxdb/custom-timestamps %}} +{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME" %}} +```sh +curl --request POST \ + http://{{< influxdb/host >}}/api/v2/write?bucket=DATABASE_NAME&precision=s \ + --header "Authorization: Bearer AUTH_TOKEN" \ + --header "Content-Type: text/plain; charset=utf-8" \ + --header "Accept: application/json" \ + --data-binary " +home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000 +home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000 +home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600 +home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600 +home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200 +home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200 +home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800 +home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800 +home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400 +home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400 +home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000 +home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000 +home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1641045600 +home,room=Kitchen temp=22.8,hum=36.3,co=1i 1641045600 +home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1641049200 +home,room=Kitchen temp=22.7,hum=36.2,co=3i 1641049200 +home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1641052800 +home,room=Kitchen temp=22.4,hum=36.0,co=7i 1641052800 +home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1641056400 +home,room=Kitchen temp=22.7,hum=36.0,co=9i 1641056400 +home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1641060000 +home,room=Kitchen temp=23.3,hum=36.9,co=18i 1641060000 +home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1641063600 +home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641063600 +home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1641067200 +home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200 +" +``` +{{% /code-placeholders %}} +{{% /influxdb/custom-timestamps %}} + +{{% /code-tab-content %}} +{{% code-tab-content %}} + +{{% influxdb/custom-timestamps %}} +{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME" %}} +```sh +curl --request POST \ + https://{{< influxdb/host >}}/write?db=DATABASE_NAME&precision=s \ + --header "Authorization: Bearer AUTH_TOKEN" \ + --header "Content-type: text/plain; charset=utf-8" \ + --data-binary " +home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000 +home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000 +home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600 +home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600 +home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200 +home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200 +home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800 +home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800 +home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400 +home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400 +home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000 +home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000 +home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1641045600 +home,room=Kitchen temp=22.8,hum=36.3,co=1i 1641045600 +home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1641049200 +home,room=Kitchen temp=22.7,hum=36.2,co=3i 1641049200 +home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1641052800 +home,room=Kitchen temp=22.4,hum=36.0,co=7i 1641052800 +home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1641056400 +home,room=Kitchen 
temp=22.7,hum=36.0,co=9i 1641056400 +home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1641060000 +home,room=Kitchen temp=23.3,hum=36.9,co=18i 1641060000 +home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1641063600 +home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641063600 +home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1641067200 +home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200 +" +``` +{{% /code-placeholders %}} +{{% /influxdb/custom-timestamps %}} + +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +Replace the following in the sample script: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: + the name of database to write to +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: + your InfluxDB authorization token + + > [!Note] + > While in alpha, {{< product-name >}} does not require an authorization token. + > You can either omit the `Authorization` header or you can provide an + > arbitrary token string. + +{{% /expand %}} +{{< /expand-wrapper >}} + +## Home sensor actions data + +Includes hypothetical actions triggered by data in the [Get started home sensor data](#get-started-home-sensor-data) +and is a companion dataset to that sample dataset. +To customize timestamps in the dataset, use the {{< icon "clock" >}} button in +the lower right corner of the page. +This lets you modify the sample dataset to stay within the retention period of +the database you write it to. + +##### Time Range + +**{{% influxdb/custom-timestamps-span %}}2022-01-01T08:00:00Z{{% /influxdb/custom-timestamps-span %}}** +to +**{{% influxdb/custom-timestamps-span %}}2022-01-01T20:00:00Z{{% /influxdb/custom-timestamps-span %}}** +(Customizable) + +##### Schema + +- home_actions (measurement) + - **tags**: + - room + - Kitchen + - Living Room + - action + - alert + - cool + - level + - ok + - warn + - **fields**: + - description (string) + +{{< expand-wrapper >}} +{{% expand "Write home sensor actions data to InfluxDB" %}} + +#### Write the home sensor actions data to InfluxDB + +Use the InfluxDB v2 or v1 API to write the home sensor actions sample data +to {{< product-name >}}. + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[v2 API](#) +[v1 API](#) +{{% /code-tabs %}} +{{% code-tab-content %}} + +{{% influxdb/custom-timestamps %}} +{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME" %}} +```sh +curl --request POST \ + https://{{< influxdb/host >}}/api/v2/write?bucket=DATABASE_NAME&precision=s \ + --header "Authorization: Bearer AUTH_TOKEN" \ + --header "Content-Type: text/plain; charset=utf-8" \ + --header "Accept: application/json" \ + --data-binary ' +home_actions,room=Kitchen,action=cool,level=ok description="Temperature at or above 23°C (23°C). Cooling to 22°C." 1641027600 +home_actions,room=Kitchen,action=cool,level=ok description="Temperature at or above 23°C (23.3°C). Cooling to 22°C." 1641060000 +home_actions,room=Kitchen,action=cool,level=ok description="Temperature at or above 23°C (23.1°C). Cooling to 22°C." 1641063600 +home_actions,room=Kitchen,action=alert,level=warn description="Carbon monoxide level above normal: 18 ppm." 1641060000 +home_actions,room=Kitchen,action=alert,level=warn description="Carbon monoxide level above normal: 22 ppm." 1641063600 +home_actions,room=Kitchen,action=alert,level=warn description="Carbon monoxide level above normal: 26 ppm." 1641067200 +home_actions,room=Living\ Room,action=alert,level=warn description="Carbon monoxide level above normal: 14 ppm." 
1641063600 +home_actions,room=Living\ Room,action=alert,level=warn description="Carbon monoxide level above normal: 17 ppm." 1641067200 +' +``` +{{% /code-placeholders %}} +{{% /influxdb/custom-timestamps %}} + +{{% /code-tab-content %}} +{{% code-tab-content %}} + +{{% influxdb/custom-timestamps %}} +{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME" %}} +```sh +curl --request POST \ + https://{{< influxdb/host >}}/write?db=DATABASE_NAME&precision=s \ + --header "Authorization: Bearer AUTH_TOKEN" \ + --header "Content-type: text/plain; charset=utf-8" \ + --data-binary ' +home_actions,room=Kitchen,action=cool,level=ok description="Temperature at or above 23°C (23°C). Cooling to 22°C." 1641027600 +home_actions,room=Kitchen,action=cool,level=ok description="Temperature at or above 23°C (23.3°C). Cooling to 22°C." 1641060000 +home_actions,room=Kitchen,action=cool,level=ok description="Temperature at or above 23°C (23.1°C). Cooling to 22°C." 1641063600 +home_actions,room=Kitchen,action=alert,level=warn description="Carbon monoxide level above normal: 18 ppm." 1641060000 +home_actions,room=Kitchen,action=alert,level=warn description="Carbon monoxide level above normal: 22 ppm." 1641063600 +home_actions,room=Kitchen,action=alert,level=warn description="Carbon monoxide level above normal: 26 ppm." 1641067200 +home_actions,room=Living\ Room,action=alert,level=warn description="Carbon monoxide level above normal: 14 ppm." 1641063600 +home_actions,room=Living\ Room,action=alert,level=warn description="Carbon monoxide level above normal: 17 ppm." 1641067200 +' +``` +{{% /code-placeholders %}} +{{% /influxdb/custom-timestamps %}} + +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +Replace the following in the sample script: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: + the name of database to write to +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: + your InfluxDB authorization token + + > [!Note] + > While in alpha, {{< product-name >}} does not require an authorization token. + > You can either omit the `Authorization` header or you can provide an + > arbitrary token string. + +{{% /expand %}} +{{< /expand-wrapper >}} + +## NOAA Bay Area weather data + +Includes daily weather metrics from three San Francisco Bay Area airports from +**January 1, 2020 to December 31, 2022**. +This sample dataset includes seasonal trends and is good for exploring time +series use cases that involve seasonality. + +##### Time Range + +**2020-01-01T00:00:00Z** to **2022-12-31T00:00:00Z** + +##### Schema + +- weather (measurement) + - **tags**: + - location + - Concord + - Hayward + - San Francisco + - **fields** + - precip (float) + - temp_avg (float) + - temp_max (float) + - temp_min (float) + - wind_avg (float) + +{{< expand-wrapper >}} +{{% expand "Write the NOAA Bay Area weather data to InfluxDB" %}} + +#### Write the NOAA Bay Area weather data to InfluxDB + +Use the InfluxDB v2 or v1 API to write the NOAA Bay Area weather sample data to +{{< product-name >}}. 
+ +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[v2 API](#) +[v1 API](#) +{{% /code-tabs %}} +{{% code-tab-content %}} + +{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME" %}} +```sh +curl --request POST \ + http://{{< influxdb/host >}}/api/v2/write?bucket=DATABASE_NAME \ + --header "Authorization: Bearer AUTH_TOKEN" \ + --header "Content-Type: text/plain; charset=utf-8" \ + --header "Accept: application/json" \ + --data-binary "$(curl --request GET https://docs.influxdata.com/downloads/bay-area-weather.lp)" +``` +{{% /code-placeholders %}} + +{{% /code-tab-content %}} +{{% code-tab-content %}} + +{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME" %}} +```sh +curl --request POST \ + http://{{< influxdb/host >}}/write?db=DATABASE_NAME \ + --header "Authorization: Bearer AUTH_TOKEN" \ + --header "Content-type: text/plain; charset=utf-8" \ + --data-binary "$(curl --request GET https://docs.influxdata.com/downloads/bay-area-weather.lp)" +``` +{{% /code-placeholders %}} + +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +Replace the following in the sample script: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: + the name of database to write to +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: + your InfluxDB authorization token + + > [!Note] + > While in alpha, {{< product-name >}} does not require an authorization token. + > You can either omit the `Authorization` header or you can provide an + > arbitrary token string. + +{{% /expand %}} +{{< /expand-wrapper >}} + +## Bitcoin price data + +The Bitcoin price sample dataset provides Bitcoin prices from +**2023-05-01T00:00:00Z to 2023-05-15T00:00:00Z**—_[Powered by CoinDesk](https://www.coindesk.com/price/bitcoin)_. + +##### Time Range + +**2023-05-01T00:19:00Z** to **2023-05-14T23:48:00Z** + +##### Schema + +- bitcoin (measurement) + - **tags**: + - code + - EUR + - GBP + - USD + - crypto + - bitcoin + - description + - Euro + - British Pound Sterling + - United States Dollar + - symbol + - \€ (€) + - \£ (£) + - \$ ($) + - **fields** + - price (float) + +{{< expand-wrapper >}} +{{% expand "Write the Bitcoin sample data to InfluxDB" %}} + +#### Write the Bitcoin price sample data to InfluxDB + +Use the InfluxDB v2 or v1 API to write the Bitcoin price sample data to +{{< product-name >}}. 
+ +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[v2 API](#) +[v1 API](#) +{{% /code-tabs %}} +{{% code-tab-content %}} + +{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME" %}} +```sh +curl --request POST \ + http://{{< influxdb/host >}}/api/v2/write?bucket=DATABASE_NAME \ + --header "Authorization: Bearer AUTH_TOKEN" \ + --header "Content-Type: text/plain; charset=utf-8" \ + --header "Accept: application/json" \ + --data-binary "$(curl --request GET https://docs.influxdata.com/downloads/bitcoin.lp)" +``` +{{% /code-placeholders %}} + +{{% /code-tab-content %}} +{{% code-tab-content %}} + +{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME" %}} +```sh +curl --request POST \ + http://{{< influxdb/host >}}/write?db=DATABASE_NAME \ + --header "Authorization: Bearer AUTH_TOKEN" \ + --header "Content-type: text/plain; charset=utf-8" \ + --data-binary "$(curl --request GET https://docs.influxdata.com/downloads/bitcoin.lp)" +``` +{{% /code-placeholders %}} + +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +Replace the following in the sample script: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: + the name of database to write to +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: + your InfluxDB authorization token + + > [!Note] + > While in alpha, {{< product-name >}} does not require an authorization token. + > You can either omit the `Authorization` header or you can provide an + > arbitrary token string. + +{{% /expand %}} +{{< /expand-wrapper >}} + +## Random numbers sample data + +Includes two fields with randomly generated numbers reported every minute. +Each field has a specific range of randomly generated numbers. +This sample dataset is used to demonstrate mathematic operations and +transformation functions. + +##### Time Range + +**2023-01-01T00:00:00Z** to **2023-01-01T12:00:00Z** + +##### Schema + +- numbers (measurement) + - **fields** + - a (float between -1 and 1) + - b (float between -3 and 3) + +{{< expand-wrapper >}} +{{% expand "Write the random number sample data to InfluxDB" %}} + +#### Write the random number sample data to InfluxDB + +Use the InfluxDB v2 or v1 API to write the random number sample data to +{{< product-name >}}. 
+
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[v2 API](#)
+[v1 API](#)
+{{% /code-tabs %}}
+{{% code-tab-content %}}
+
+{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME" %}}
+```sh
+curl --request POST \
+  http://{{< influxdb/host >}}/api/v2/write?bucket=DATABASE_NAME \
+  --header "Authorization: Bearer AUTH_TOKEN" \
+  --header "Content-Type: text/plain; charset=utf-8" \
+  --header "Accept: application/json" \
+  --data-binary "$(curl --request GET https://docs.influxdata.com/downloads/random-numbers.lp)"
+```
+{{% /code-placeholders %}}
+
+{{% /code-tab-content %}}
+{{% code-tab-content %}}
+
+{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME" %}}
+```sh
+curl --request POST \
+  http://{{< influxdb/host >}}/write?db=DATABASE_NAME \
+  --header "Authorization: Bearer AUTH_TOKEN" \
+  --header "Content-type: text/plain; charset=utf-8" \
+  --data-binary "$(curl --request GET https://docs.influxdata.com/downloads/random-numbers.lp)"
+```
+{{% /code-placeholders %}}
+
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+Replace the following in the sample script:
+
+- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
+  the name of database to write to
+- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
+  your InfluxDB authorization token
+
+  > [!Note]
+  > While in alpha, {{< product-name >}} does not require an authorization token.
+  > You can either omit the `Authorization` header or you can provide an
+  > arbitrary token string.
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
diff --git a/content/shared/influxdb3-write-guides/_index.md b/content/shared/influxdb3-write-guides/_index.md
new file mode 100644
index 000000000..ed2c7854d
--- /dev/null
+++ b/content/shared/influxdb3-write-guides/_index.md
@@ -0,0 +1,79 @@
+
+Use tools like the `influxctl` CLI, Telegraf, and InfluxDB client libraries to
+write time series data to {{< product-name >}}. [Line protocol](#line-protocol)
+is the text-based format used to write data to InfluxDB. There are tools
+available to convert other formats (for example, [CSV](/influxdb3/version/write-data/use-telegraf/csv/))
+to line protocol.
+
+- [Line protocol](#line-protocol)
+  - [Line protocol elements](#line-protocol-elements)
+- [Write data to InfluxDB](#write-data-to-influxdb)
+  {{< children type="anchored-list" >}}
+
+> [!Note]
+>
+> #### Choose the write endpoint for your workload
+>
+> When bringing existing v1 write workloads, use the {{% product-name %}}
+> HTTP API [`/write` endpoint](/influxdb3/version/guides/api-compatibility/v1/).
+> When creating new write workloads, use the HTTP API
+> [`/api/v2/write` endpoint](/influxdb3/version/guides/api-compatibility/v2/).
+
+## Line protocol
+
+All data written to InfluxDB is written using
+[line protocol](/influxdb3/version/reference/line-protocol/), a text-based format
+that lets you provide the necessary information to write a data point to InfluxDB.
+
+### Line protocol elements
+
+In InfluxDB, a point contains a table name, one or more fields, a timestamp,
+and optional tags that provide metadata about the observation.
+
+Each line of line protocol contains the following elements:
+
+{{< req type="key" >}}

+- {{< req "\*" >}} **table**: A string that identifies the
+  table to store the data in.
+- **tag set**: Comma-delimited list of key value pairs, each representing a tag.
+  Tag keys and values are unquoted strings. _Spaces, commas, and equal characters
+  must be escaped._
+- {{< req "\*" >}} **field set**: Comma-delimited list of key value pairs, each
+  representing a field.
+  Field keys are unquoted strings. _Spaces and commas must be escaped._
+  Field values can be [strings](/influxdb3/version/reference/line-protocol/#string)
+  (quoted),
+  [floats](/influxdb3/version/reference/line-protocol/#float),
+  [integers](/influxdb3/version/reference/line-protocol/#integer),
+  [unsigned integers](/influxdb3/version/reference/line-protocol/#uinteger),
+  or [booleans](/influxdb3/version/reference/line-protocol/#boolean).
+- **timestamp**: [Unix timestamp](/influxdb3/version/reference/line-protocol/#unix-timestamp)
+  associated with the data. InfluxDB supports up to nanosecond precision.
+  _If the precision of the timestamp is not in nanoseconds, you must specify the
+  precision when writing the data to InfluxDB._
+
+#### Line protocol element parsing
+
+- **table**: Everything before the _first unescaped comma before the first
+  whitespace_.
+- **tag set**: Key-value pairs between the _first unescaped comma_ and the _first
+  unescaped whitespace_.
+- **field set**: Key-value pairs between the _first and second unescaped whitespaces_.
+- **timestamp**: Integer value after the _second unescaped whitespace_.
+- Lines are separated by the newline character (`\n`).
+  Line protocol is whitespace sensitive.
+
+---
+
+{{< influxdb/line-protocol version="v3" >}}
+
+---
+
+_For schema design recommendations, see
+[InfluxDB schema design](/influxdb3/version/write-data/best-practices/schema-design/)._
+
+## Write data to InfluxDB
+
+{{< children >}}
+ 
\ No newline at end of file
diff --git a/content/shared/influxdb3-write-guides/best-practices/_index.md b/content/shared/influxdb3-write-guides/best-practices/_index.md
new file mode 100644
index 000000000..f1fb980de
--- /dev/null
+++ b/content/shared/influxdb3-write-guides/best-practices/_index.md
@@ -0,0 +1,5 @@
+
+The following articles walk through recommendations and best practices for
+writing data to {{< product-name >}}.
+
+{{< children >}}
diff --git a/content/shared/influxdb3-write-guides/best-practices/optimize-writes.md b/content/shared/influxdb3-write-guides/best-practices/optimize-writes.md
new file mode 100644
index 000000000..99ebbff55
--- /dev/null
+++ b/content/shared/influxdb3-write-guides/best-practices/optimize-writes.md
@@ -0,0 +1,809 @@
+
+Use these tips to optimize performance and system overhead when writing data to
+{{< product-name >}}.
+ +- [Batch writes](#batch-writes) +- [Sort tags by key](#sort-tags-by-key) +- [Use the coarsest time precision possible](#use-the-coarsest-time-precision-possible) +- [Use gzip compression](#use-gzip-compression) + - [Enable gzip compression in Telegraf](#enable-gzip-compression-in-telegraf) + - [Enable gzip compression in InfluxDB client libraries](#enable-gzip-compression-in-influxdb-client-libraries) + - [Use gzip compression with the InfluxDB API](#use-gzip-compression-with-the-influxdb-api) +- [Synchronize hosts with NTP](#synchronize-hosts-with-ntp) +- [Write multiple data points in one request](#write-multiple-data-points-in-one-request) +- [Pre-process data before writing](#pre-process-data-before-writing) + - [Prerequisites](#prerequisites) + - [Filter data from a batch](#filter-data-from-a-batch) + - [Coerce data types to avoid rejected point errors](#coerce-data-types-to-avoid-rejected-point-errors) + - [Merge lines to optimize memory and bandwidth](#merge-lines-to-optimize-memory-and-bandwidth) + - [Avoid sending duplicate data](#avoid-sending-duplicate-data) + - [Run custom preprocessing code](#run-custom-preprocessing-code) + +> [!Note] +> The following tools write to InfluxDB and employ _most_ write optimizations by +> default: +> +> - [Telegraf](/influxdb3/version/write-data/use-telegraf/) +> - InfluxDB client libraries + +## Batch writes + +Write data in batches to minimize network overhead when writing data to InfluxDB. + +> [!Note] +> The optimal batch size is 10,000 lines of line protocol or 10 MBs, whichever +> threshold is met first. + +## Sort tags by key + +Before writing data points to InfluxDB, sort tags by key in lexicographic order. +_Verify sort results match results from the [Go `bytes.Compare` function](http://golang.org/pkg/bytes/#Compare)._ + + + +```bash +# Line protocol example with unsorted tags +measurement,tagC=therefore,tagE=am,tagA=i,tagD=i,tagB=think fieldKey=fieldValue 1562020262 + +# Optimized line protocol example with tags sorted by key +measurement,tagA=i,tagB=think,tagC=therefore,tagD=i,tagE=am fieldKey=fieldValue 1562020262 +``` + +## Use the coarsest time precision possible + +{{< product-name >}} supports up to nanosecond timestamp precision. However, +if your data isn't collected in nanoseconds, there is no need to write at that precision. +For better performance, use the coarsest timestamp precision you can for your +use case. + +By default, {{< product-name >}} attempts to auto-detect the precision of +timestamps in line protocol by identifying what precision would be relatively +close to "now." You can also specify your timestamp precision in your write +request. {{< product-name >}} supports the following timestamp precisions: + +- `ns` (nanoseconds) +- `us` (microseconds) +- `ms` (milliseconds) +- `s` (seconds) + +## Use gzip compression + +Use gzip compression to speed up writes to {{< product-name >}}. +Benchmarks have shown up to a 5x speed improvement when data is compressed. + +{{< tabs-wrapper >}} +{{% tabs %}} +[Telegraf](#) +[Client libraries](#) +[InfluxDB API](#) +{{% /tabs %}} +{{% tab-content %}} + +### Enable gzip compression in Telegraf + +In the `influxdb_v2` output plugin configuration in your `telegraf.conf`, set the +`content_encoding` option to `gzip`: + +```toml +[[outputs.influxdb_v2]] + urls = ["https://{{< influxdb/host >}}"] + # ... 
+
+  content_encoding = "gzip"
+```
+
+{{% /tab-content %}}
+{{% tab-content %}}
+
+### Enable gzip compression in InfluxDB client libraries
+
+Each [InfluxDB client library](/influxdb3/version/reference/client-libraries/) provides
+options for compressing write requests or enforces compression by default.
+The method for enabling compression is different for each library.
+For specific instructions, see the
+[InfluxDB client libraries documentation](/influxdb3/version/reference/client-libraries/).
+{{% /tab-content %}}
+{{% tab-content %}}
+
+### Use gzip compression with the InfluxDB API
+
+When using the InfluxDB API `/api/v2/write` endpoint to write data,
+compress the data with `gzip` and set the `Content-Encoding` header to `gzip`--for example:
+
+{{% influxdb/custom-timestamps %}}
+{{% code-placeholders "(AUTH|DATABASE)_(TOKEN|NAME)" %}}
+{{% code-callout "Content-Encoding: gzip" "orange" %}}
+```bash
+echo "mem,host=host1 used_percent=23.43234543 1641024000
+mem,host=host2 used_percent=26.81522361 1641027600
+mem,host=host1 used_percent=22.52984738 1641031200
+mem,host=host2 used_percent=27.18294630 1641034800" | gzip > system.gzip
+
+curl --request POST "https://{{< influxdb/host >}}/api/v2/write?org=ignored&bucket=DATABASE_NAME" \
+  --header "Authorization: Token AUTH_TOKEN" \
+  --header "Content-Type: text/plain; charset=utf-8" \
+  --header "Content-Encoding: gzip" \
+  --data-binary @system.gzip
+```
+
+{{% /code-callout %}}
+{{% /code-placeholders %}}
+{{% /influxdb/custom-timestamps %}}
+
+Replace the following:
+
+- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
+  the name of the database to write data to
+- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
+  your {{< product-name >}} authorization token.
+  _Store this in a secret store or environment variable to avoid exposing the raw token string._
+
+  > [!Note]
+  > While in alpha, {{< product-name >}} does not require an authorization token.
+  > You can either omit the `Authorization` header or you can provide an
+  > arbitrary token string.
+
+{{% /tab-content %}}
+{{< /tabs-wrapper >}}
+
+## Synchronize hosts with NTP
+
+Use the Network Time Protocol (NTP) to synchronize time between hosts.
+If a timestamp isn't included in line protocol, InfluxDB uses its host's local
+time (in UTC) to assign timestamps to each point.
+If a host's clock isn't synchronized with NTP, timestamps may be inaccurate.
+
+## Write multiple data points in one request
+
+To write multiple lines in one request, each line of line protocol must be
+delimited by a newline character (`\n`).
+
+## Pre-process data before writing
+
+Pre-processing data in your write workload can help you avoid
+[write failures](/influxdb3/version/write-data/troubleshoot/#troubleshoot-failures)
+due to schema conflicts or resource use.
+For example, if you have many devices that write to the same table, and some
+devices use different data types for the same field, then you might want to
+generate an alert or convert field data to fit your schema before you send the
+data to InfluxDB.
+
+With [Telegraf](/telegraf/v1/), you can process data from other services and
+files and then write it to InfluxDB.
+In addition to processing data with Telegraf's included plugins, you can use the
+[Execd processor plugin](/telegraf/v1/plugins/#processor-execd) to integrate
+your own code and external applications.
+
+The following examples show how to [configure the Telegraf agent](/telegraf/v1/configuration)
+and [plugins](/telegraf/v1/plugins/) to optimize writes.
+The examples use the [File input plugin](/telegraf/v1/plugins/#input-file) to
+read data from a file and use the [InfluxDB v2 output plugin](/telegraf/v1/plugins/#output-influxdb_v2)
+to write data to a database, but you can use any input and output plugin.
+
+### Prerequisites
+
+[Install Telegraf](/telegraf/v1/install/) if you haven't already.
+
+### Filter data from a batch
+
+Use Telegraf and metric filtering to filter data before writing it to InfluxDB.
+
+Configure [metric filters](/telegraf/v1/configuration/#filters) to retain or
+remove data elements (before processor and aggregator plugins run).
+
+1. Enter the following command to create a Telegraf configuration that parses
+   system usage data, keeps only the specified fields, removes the specified
+   tags, and then writes the data to InfluxDB:
+
+   {{< code-placeholders "DATABASE_NAME|AUTH_TOKEN" >}}
+
+```sh
+cat << EOF > ./telegraf.conf
+  [[inputs.cpu]]
+  # Emit only the specified fields from points.
+  fieldpass = ["usage_system", "usage_idle"]
+  # Remove the specified tags from points.
+  tagexclude = ["host"]
+  [[outputs.influxdb_v2]]
+  urls = ["http://{{< influxdb/host >}}"]
+  token = "AUTH_TOKEN"
+  organization = ""
+  bucket = "DATABASE_NAME"
+EOF
+```
+
+   {{< /code-placeholders >}}
+
+   Replace the following:
+
+   - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
+     the name of the database to write data to
+   - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
+     your {{< product-name >}} authorization token.
+     _Store this in a secret store or environment variable to avoid exposing the raw token string._
+
+     > [!Note]
+     > While in alpha, {{< product-name >}} does not require an authorization token.
+     > For the `token` option, provide an empty or arbitrary token string.
+
+2. To test the input and processor, enter the following command:
+
+   ```sh
+   telegraf --test --config telegraf.conf
+   ```
+
+   The output is similar to the following.
+   For each row of input data, the filters pass the metric name, tags,
+   specified fields, and timestamp.
+
+   ```text
+   > cpu,cpu=cpu0 usage_idle=100,usage_system=0 1702067201000000000
+   ...
+   > cpu,cpu=cpu-total usage_idle=99.80198019802448,usage_system=0.1980198019802045 1702067201000000000
+   ```
+
+### Coerce data types to avoid rejected point errors
+
+Use Telegraf and the [Converter processor plugin](/telegraf/v1/plugins/#processor-converter)
+to convert field data types to fit your schema.
+
+For example, if you write the sample data in
+[Home sensor sample data](/influxdb3/version/reference/sample-data/#home-sensor-data)
+to a database and then try to write the following batch to the same table:
+
+{{% influxdb/custom-timestamps %}}
+
+```text
+home,room=Kitchen temp=23.1,hum=36.6,co=22.1 1641063600
+home,room=Living\ Room temp=22i,hum=36.4,co=17i 1641067200
+home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200
+```
+
+{{% /influxdb/custom-timestamps %}}
+
+InfluxDB expects `co` to contain an integer value and rejects points whose `co`
+value is a floating-point decimal (`22.1`).
+To avoid the error, configure Telegraf to convert fields to the data types in
+your schema columns.
+
+The following example converts the `temp`, `hum`, and `co` fields to fit the
+[sample data](/influxdb3/version/reference/sample-data/#home-sensor-data) schema:
+
+1. 
In your terminal, enter the following command to create the sample data file:
+
+   ```sh
+   cat << EOF > ./home.lp
+   home,room=Kitchen temp=23.1,hum=36.6,co=22.1 1641063600
+   home,room=Living\ Room temp=22i,hum=36.4,co=17i 1641067200
+   home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200
+   EOF
+   ```
+
+2. Enter the following command to create a Telegraf configuration that parses
+   the sample data, converts the field values to the specified data types, and
+   then writes the data to InfluxDB:
+
+   {{< code-placeholders "DATABASE_NAME|AUTH_TOKEN" >}}
+
+```sh
+cat << EOF > ./telegraf.conf
+[[inputs.file]]
+  ## For each interval, parse data from files in the list.
+  files = ["home.lp"]
+  influx_timestamp_precision = "1s"
+  precision = "1s"
+  tagexclude = ["host"]
+[[processors.converter]]
+  [processors.converter.fields]
+    ## A data type and a list of fields to convert to the data type.
+    float = ["temp", "hum"]
+    integer = ["co"]
+[[outputs.influxdb_v2]]
+  ## InfluxDB v2 API credentials and the database to write to.
+  urls = ["https://{{< influxdb/host >}}"]
+  token = "AUTH_TOKEN"
+  organization = ""
+  bucket = "DATABASE_NAME"
+EOF
+```
+
+   {{< /code-placeholders >}}
+
+   Replace the following:
+
+   - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
+     the name of the database to write data to
+   - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
+     your {{< product-name >}} authorization token.
+     _Store this in a secret store or environment variable to avoid exposing the raw token string._
+
+     > [!Note]
+     > While in alpha, {{< product-name >}} does not require an authorization token.
+     > For the `token` option, provide an empty or arbitrary token string.
+
+3. To test the input and processor, enter the following command:
+
+   ```bash
+   telegraf --test --config telegraf.conf
+   ```
+
+   Telegraf outputs the following to stdout, and then exits:
+
+   ```
+   > home,room=Kitchen co=22i,hum=36.6,temp=23.1 1641063600000000000
+   > home,room=Living\ Room co=17i,hum=36.4,temp=22 1641067200000000000
+   > home,room=Kitchen co=26i,hum=36.5,temp=22.7 1641067200000000000
+   ```
+
+### Merge lines to optimize memory and bandwidth
+
+Use Telegraf and the [Merge aggregator plugin](/telegraf/v1/plugins/#aggregator-merge)
+to merge points that share the same measurement, tag set, and timestamp.
+
+The following example creates sample data for two series (the combination of
+table, tag set, and timestamp), and then merges points in each series:
+
+1. In your terminal, enter the following command to create the sample data file
+   and calculate the number of seconds between the earliest timestamp and _now_.
+   The command assigns the calculated value to a `grace_duration` variable that
+   you'll use in the next step.
+
+   ```bash
+   cat << EOF > ./home.lp
+   home,room=Kitchen temp=23.1 1641063600
+   home,room=Kitchen hum=36.6 1641063600
+   home,room=Kitchen co=22i 1641063600
+   home,room=Living\ Room temp=22.7 1641063600
+   home,room=Living\ Room hum=36.4 1641063600
+   home,room=Living\ Room co=17i 1641063600
+   EOF
+   grace_duration="$(($(date +%s)-1641063000))s"
+   ```
+
+2. Enter the following command to configure Telegraf to parse the file, merge
+   the points, and write the data to InfluxDB--specifically, the configuration
+   sets the following properties:
+
+   - `influx_timestamp_precision`: for parsers, specifies the timestamp
+     precision in the input data
+   - Optional: `aggregators.merge.grace` extends the duration for merging points.
+     To ensure the sample data is included, the configuration uses the
+     calculated variable from the preceding step.
+
+   {{< code-placeholders "DATABASE_NAME|AUTH_TOKEN" >}}
+
+   ```bash
+   cat << EOF > ./telegraf.conf
+   # Parse metrics from a file
+   [[inputs.file]]
+     ## A list of files to parse during each interval.
+     files = ["home.lp"]
+     ## The precision of timestamps in your data.
+     influx_timestamp_precision = "1s"
+     tagexclude = ["host"]
+   # Merge separate metrics that share a series key
+   [[aggregators.merge]]
+     grace = "$grace_duration"
+     ## If true, drops the original metric.
+     drop_original = true
+   # Writes metrics as line protocol to the InfluxDB v2 API
+   [[outputs.influxdb_v2]]
+     ## InfluxDB v2 API credentials and the database to write data to.
+     urls = ["https://{{< influxdb/host >}}"]
+     token = "AUTH_TOKEN"
+     organization = ""
+     bucket = "DATABASE_NAME"
+   EOF
+   ```
+
+   {{< /code-placeholders >}}
+
+   Replace the following:
+
+   - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
+     the name of the database to write data to
+   - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
+     your {{< product-name >}} authorization token.
+     _Store this in a secret store or environment variable to avoid exposing the raw token string._
+
+     > [!Note]
+     > While in alpha, {{< product-name >}} does not require an authorization token.
+     > For the `token` option, provide an empty or arbitrary token string.
+
+3. To test the input and aggregator, enter the following command:
+
+   ```bash
+   telegraf --test --config telegraf.conf
+   ```
+
+   Telegraf outputs the following to stdout, and then exits:
+
+   ```
+   > home,room=Kitchen co=22i,hum=36.6,temp=23.1 1641063600000000000
+   > home,room=Living\ Room co=17i,hum=36.4,temp=22.7 1641063600000000000
+   ```
+
+### Avoid sending duplicate data
+
+Use Telegraf and the [Dedup processor plugin](/telegraf/v1/plugins/#processor-dedup)
+to filter data whose field values are exact repetitions of previous values.
+Deduplicating your data can reduce your write payload size and resource usage.
+
+The following example shows how to use Telegraf to remove points that repeat
+field values, and then write the data to InfluxDB:
+
+1. In your terminal, enter the following command to create the sample data file
+   and calculate the number of seconds between the earliest timestamp and _now_.
+   The command assigns the calculated value to a `dedup_duration` variable that
+   you'll use in the next step.
+
+   ```bash
+   cat << EOF > ./home.lp
+   home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641063600
+   home,room=Living\ Room temp=22.5,hum=36.4,co=17i 1641063600
+   home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641063605
+   home,room=Living\ Room temp=22.5,hum=36.4,co=17i 1641063605
+   home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641063610
+   home,room=Living\ Room temp=23.0,hum=36.4,co=17i 1641063610
+   EOF
+   dedup_duration="$(($(date +%s)-1641063000))s"
+   ```
+
+2. Enter the following command to configure Telegraf to parse the file, drop
+   duplicate points, and write the data to InfluxDB--specifically, the sample
+   configuration sets the following:
+
+   - `influx_timestamp_precision`: for parsers, specifies the timestamp
+     precision in the input data
+   - `processors.dedup`: configures the Dedup processor plugin
+   - Optional: `processors.dedup.dedup_interval`. Points in the range
+     `dedup_interval` _to now_ are considered for removal.
+     To ensure the sample data is included, the configuration uses the
+     calculated variable from the preceding step.
+
+   {{< code-placeholders "DATABASE_NAME|AUTH_TOKEN" >}}
+   ```bash
+   cat << EOF > ./telegraf.conf
+   # Parse metrics from a file
+   [[inputs.file]]
+     ## A list of files to parse during each interval.
+     files = ["home.lp"]
+     ## The precision of timestamps in your data.
+     influx_timestamp_precision = "1s"
+     tagexclude = ["host"]
+   # Filter metrics that repeat previous field values
+   [[processors.dedup]]
+     ## Drops duplicates within the specified duration
+     dedup_interval = "$dedup_duration"
+   # Writes metrics as line protocol to the InfluxDB v2 API
+   [[outputs.influxdb_v2]]
+     ## InfluxDB v2 API credentials and the database to write data to.
+     urls = ["https://{{< influxdb/host >}}"]
+     token = "AUTH_TOKEN"
+     organization = ""
+     bucket = "DATABASE_NAME"
+   EOF
+   ```
+
+   {{< /code-placeholders >}}
+
+   Replace the following:
+
+   - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
+     the name of the database to write data to
+   - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
+     your {{< product-name >}} authorization token.
+     _Store this in a secret store or environment variable to avoid exposing the raw token string._
+
+     > [!Note]
+     > While in alpha, {{< product-name >}} does not require an authorization token.
+     > For the `token` option, provide an empty or arbitrary token string.
+
+3. To test the input and processor, enter the following command:
+
+   ```bash
+   telegraf --test --config telegraf.conf
+   ```
+
+   Telegraf outputs the following to stdout, and then exits:
+
+   ```
+   > home,room=Kitchen co=22i,hum=36.6,temp=23.1 1641063600000000000
+   > home,room=Living\ Room co=17i,hum=36.4,temp=22.5 1641063600000000000
+   > home,room=Kitchen co=26i,hum=36.5,temp=22.7 1641063605000000000
+   > home,room=Kitchen co=22i,hum=36.6,temp=23.1 1641063610000000000
+   > home,room=Living\ Room co=17i,hum=36.4,temp=23 1641063610000000000
+   ```
+
+### Run custom preprocessing code
+
+Use Telegraf and the [Execd processor plugin](/telegraf/v1/plugins/#processor-execd)
+to execute code external to Telegraf and then write the processed data.
+The Execd plugin expects line protocol data on stdin, passes the data to the
+configured executable, and then outputs line protocol to stdout.
+
+The following example shows how to use Telegraf to execute Go code for
+processing metrics and then write the output to InfluxDB.
+The Go `multiplier.go` sample code does the following:
+
+  1. Imports `influx` parser and serializer plugins from Telegraf.
+  2. Parses each line of data into a Telegraf metric.
+  3. If the metric contains a `count` field, multiplies the field value by `2`;
+     otherwise, prints a message to stderr and exits.
+
+1. 
In your editor, enter the following sample code and save the file as `multiplier.go`:
+
+   ```go
+   package main
+
+   import (
+       "fmt"
+       "os"
+
+       "github.com/influxdata/telegraf/plugins/parsers/influx"
+       influxSerializer "github.com/influxdata/telegraf/plugins/serializers/influx"
+   )
+
+   func main() {
+       parser := influx.NewStreamParser(os.Stdin)
+       serializer := influxSerializer.Serializer{}
+       if err := serializer.Init(); err != nil {
+           fmt.Fprintf(os.Stderr, "serializer init failed: %v\n", err)
+           os.Exit(1)
+       }
+
+       for {
+           metric, err := parser.Next()
+           if err != nil {
+               if err == influx.EOF {
+                   return // stream ended
+               }
+               if parseErr, isParseError := err.(*influx.ParseError); isParseError {
+                   fmt.Fprintf(os.Stderr, "parse ERR %v\n", parseErr)
+                   os.Exit(1)
+               }
+               fmt.Fprintf(os.Stderr, "ERR %v\n", err)
+               os.Exit(1)
+           }
+
+           c, found := metric.GetField("count")
+           if !found {
+               fmt.Fprintf(os.Stderr, "metric has no count field\n")
+               os.Exit(1)
+           }
+           switch t := c.(type) {
+           case float64:
+               t *= 2
+               metric.AddField("count", t)
+           case int64:
+               t *= 2
+               metric.AddField("count", t)
+           default:
+               fmt.Fprintf(os.Stderr, "count is not a float64 or int64; it's a %T\n", c)
+               os.Exit(1)
+           }
+           b, err := serializer.Serialize(metric)
+           if err != nil {
+               fmt.Fprintf(os.Stderr, "ERR %v\n", err)
+               os.Exit(1)
+           }
+           fmt.Fprint(os.Stdout, string(b))
+       }
+   }
+   ```
+
+2. Initialize the module and install dependencies:
+
+   ```bash
+   go mod init processlp
+   go mod tidy
+   ```
+
+3. In your terminal, enter the following command to create the sample data file:
+
+   ```bash
+   cat << EOF > ./home.lp
+   home,room=Kitchen temp=23.1,count=1 1641063600
+   home,room=Living\ Room temp=22.7,count=1 1641063600
+   home,room=Kitchen temp=23.1 1641063601
+   home,room=Living\ Room temp=22.7 1641063601
+   EOF
+   ```
+
+4. Enter the following command to configure Telegraf to parse the file, run the
+   Go program, and write the data--specifically, the sample configuration
+   sets the following:
+
+   - `influx_timestamp_precision`: for parsers, specifies the timestamp
+     precision in the input data
+   - `processors.execd`: configures the Execd plugin
+   - `processors.execd.command`: sets the executable and arguments for Execd to run
+
+   {{< code-placeholders "DATABASE_NAME|AUTH_TOKEN" >}}
+
+```bash
+cat << EOF > ./telegraf.conf
+# Parse metrics from a file
+[[inputs.file]]
+  ## A list of files to parse during each interval.
+  files = ["home.lp"]
+  ## The precision of timestamps in your data.
+  influx_timestamp_precision = "1s"
+  tagexclude = ["host"]
+# Run custom preprocessing code on each metric
+[[processors.execd]]
+  ## A list that contains the executable command and arguments to run as a daemon.
+  command = ["go", "run", "multiplier.go"]
+# Writes metrics as line protocol to the InfluxDB v2 API
+[[outputs.influxdb_v2]]
+  ## InfluxDB v2 API credentials and the database to write data to.
+  urls = ["https://{{< influxdb/host >}}"]
+  token = "AUTH_TOKEN"
+  organization = ""
+  bucket = "DATABASE_NAME"
+EOF
+```
+
+   {{< /code-placeholders >}}
+
+   Replace the following:
+
+   - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
+     the name of the database to write data to
+   - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
+     your {{< product-name >}} authorization token.
+     _Store this in a secret store or environment variable to avoid exposing the raw token string._
+
+     > [!Note]
+     > While in alpha, {{< product-name >}} does not require an authorization token.
+ > For the `token` option, provide an empty or arbitrary token string. + +5. To test the input and processor, enter the following command: + + + + ```bash + telegraf --test --config telegraf.conf + ``` + + Telegraf outputs the following to stdout, and then exits: + + + + ``` + > home,room=Kitchen count=2,temp=23.1 1641063600000000000 + > home,room=Living\ Room count=2,temp=22.7 1641063600000000000 + ``` + + diff --git a/content/shared/influxdb3-write-guides/best-practices/schema-design.md b/content/shared/influxdb3-write-guides/best-practices/schema-design.md new file mode 100644 index 000000000..d17ad27fb --- /dev/null +++ b/content/shared/influxdb3-write-guides/best-practices/schema-design.md @@ -0,0 +1,393 @@ + +Use the following guidelines to design your [schema](/influxdb3/version/reference/glossary/#schema) +for simpler and more performant queries. + +- [InfluxDB data structure](#influxdb-data-structure) + - [Primary keys](#primary-keys) + - [Tags versus fields](#tags-versus-fields) +- [Schema restrictions](#schema-restrictions) + - [Do not use duplicate names for tags and fields](#do-not-use-duplicate-names-for-tags-and-fields) + - [Maximum number of columns per table](#maximum-number-of-columns-per-table) +- [Design for performance](#design-for-performance) + - [Avoid wide schemas](#avoid-wide-schemas) + - [Avoid sparse schemas](#avoid-sparse-schemas) + - [Table schemas should be homogenous](#table-schemas-should-be-homogenous) + - [Use the best data type for your data](#use-the-best-data-type-for-your-data) +- [Design for query simplicity](#design-for-query-simplicity) + - [Keep table names, tags, and fields simple](#keep-table-names-tags-and-fields-simple) + - [Avoid keywords and special characters](#avoid-keywords-and-special-characters) + +## InfluxDB data structure + +The {{% product-name %}} data model organizes time series data into databases and tables. +A database can contain multiple tables. +Tables contain multiple tags and fields. + + + +- **Database**: A named location where time series data is stored. + In {{% product-name %}}, _database_ is synonymous with _bucket_ in InfluxDB + Cloud Serverless and InfluxDB TSM implementations. + + A database can contain multiple _tables_. + - **Table**: A logical grouping for time series data. + In {{% product-name %}}, _table_ is synonymous with _measurement_ in + InfluxDB Cloud Serverless and InfluxDB TSM implementations. + All _points_ in a given table should have the same _tags_. + A table contains multiple _tags_ and _fields_. + - **Tags**: Key-value pairs that store metadata string values for each point--for example, + a value that identifies or differentiates the data source or context--for example, host, + location, station, etc. + Tag values may be null. + - **Fields**: Key-value pairs that store data for each point--for example, + temperature, pressure, stock price, etc. + Field values may be null, but at least one field value is not null on any given row. + - **Timestamp**: Timestamp associated with the data. + When stored on disk and queried, all data is ordered by time. + In InfluxDB, a timestamp is a nanosecond-scale + [Unix timestamp](/influxdb3/version/reference/glossary/#unix-timestamp) + in UTC. + A timestamp is never null. + +> [!Note] +> +> #### What happened to buckets and measurements? 
+> +> If coming from earlier versions of InfluxDB, InfluxDB Cloud (TSM), or +> InfluxDB Cloud Serverless, you're likely familiar with the concepts _bucket_ +> and _measurement_: +> +> - _**Bucket**_ in InfluxDB v2 or InfluxDB Cloud Serverless is synonymous with +> _**database**_ in {{% product-name %}}. +> - _**Measurement**_ in InfluxDB v1, v2, or InfluxDB Cloud Serverless is synonymous +> with _**table**_ in {{% product-name %}}. + + + +### Primary keys + +In time series data, the primary key for a row of data is typically a combination +of timestamp and other attributes that uniquely identify each data point. +In {{% product-name %}}, the primary key for a row is the combination of the +point's timestamp and _tag set_—the collection of +[tag keys](/influxdb3/version/reference/glossary/#tag-key) and +[tag values](/influxdb3/version/reference/glossary/#tag-value) on the point. +A row's primary key tag set does not include tags with null values. + +### Tags versus fields + +When designing your schema for InfluxDB, a common question is, "what should be a +tag and what should be a field?" The following guidelines should help answer that +question as you design your schema. + +- Use tags to store metadata, or identifying information, about the source or context of the data. +- Use fields to store measured values. +- Tag values can only be strings. +- Field values can be any of the following data types: + - Integer + - Unsigned integer + - Float + - String + - Boolean + +> [!Note] +> The InfluxDB 3 storage engine supports infinite tag value and series cardinality. +> Unlike previous versions of InfluxDB, **tag value** cardinality doesn't affect +> the overall performance of your database. + +--- + +## Schema restrictions + +### Do not use duplicate names for tags and fields + +Use unique names for tags and fields within the same table. +{{% product-name %}} stores tags and fields as unique columns in a table. +If you attempt to write a table that contains tags or fields with the same name, +the write fails due to a column conflict. + +### Maximum number of columns per table + +A table has a [maximum number of columns](/influxdb3/version/admin/databases/#column-limit). +Each row must include a time column. +As a result, a table can have the following: + +- a time column +- field and tag columns up to the configured maximum + +If you attempt to write to a table and exceed the column limit, then the write +request fails and InfluxDB returns an error. + +InfluxData identified the +[column limit](/influxdb3/version/admin/databases/#column-limit) +as the safe limit for maintaining system performance and stability. +Exceeding this threshold can result in +[wide schemas](#avoid-wide-schemas), which can negatively impact performance +and resource use, [depending on your queries](#avoid-non-specific-queries), +the shape of your schema, and data types in the schema. + +--- + +## Design for performance + +How you structure your schema within a table can affect resource use and +the performance of queries against that table. + +The following guidelines help to optimize query performance: + +- [Avoid wide schemas](#avoid-wide-schemas) +- [Avoid sparse schemas](#avoid-sparse-schemas) +- [Table schemas should be homogenous](#table-schemas-should-be-homogenous) +- [Use the best data type for your data](#use-the-best-data-type-for-your-data) + +### Avoid wide schemas + +A wide schema refers to a schema with a large number of columns (tags and fields). 
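+
+For example--a hypothetical sketch, not a schema recommendation--line protocol
+like the following creates one column per tag and field, so a source that
+reports dozens of metrics as separate fields in a single table widens the
+schema quickly:
+
+```text
+device,site=A,rack=12 cpu=0.82,mem=0.64,disk=0.41,net_in=1200i,net_out=980i,temp=47.2,fan1=3400i,fan2=3600i,volt_3v=3.29,volt_5v=4.98,volt_12v=11.9 1641024000
+```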
+ +Wide schemas can lead to the following issues: + +- Increased resource usage for persisting data during ingestion. +- Reduced sorting performance due to complex primary keys with [too many tags](#avoid-too-many-tags). +- Reduced query performance when selecting too many columns + +To prevent wide schema issues, limit the number of tags and fields stored in a table. +If you need to store more than the [maximum number of columns](/influxdb3/version/admin/databases/#column-limit), +consider segmenting your fields into separate tables. + +#### Avoid too many tags + +In {{% product-name %}}, the primary key for a row is the combination of the +point's timestamp and _tag set_ - the collection of +[tag keys](/influxdb3/version/reference/glossary/#tag-key) +and [tag values](/influxdb3/version/reference/glossary/#tag-value) on the point. +A point that contains more tags has a more complex primary key, which could +impact sorting performance if you sort using all parts of the key. + +### Avoid sparse schemas + +A sparse schema is one where, for many rows, columns contain null values. + +These generally stem from the following: + +- [non-homogenous table schemas](#table-schemas-should-be-homogenous) +- [writing individual fields with different timestamps](#writing-individual-fields-with-different-timestamps) + +Sparse schemas require the InfluxDB query engine to evaluate many +null columns, adding unnecessary overhead to storing and querying data. + +_For an example of a sparse schema, +[view the non-homogenous schema example below](#view-example-of-a-sparse-non-homogenous-schema)._ + +#### Writing individual fields with different timestamps + +Reporting fields at different times with different timestamps creates distinct +rows that contain null values--for example: + +You report `fieldA` with `tagset`, and then report `field B` with the same +`tagset`, but with a different timestamp. +The result is two rows: one row has a _null_ value for **field A** and the other +has a _null_ value for **field B**. + +In contrast, if you report fields at different times while using the same tagset +and timestamp, the existing row is updated. +This requires slightly more resources at ingestion time, but then gets resolved +at persistence time or compaction time and avoids a sparse schema. + +### Table schemas should be homogenous + +Data stored in a table should be "homogenous," meaning each row should have the +same tag and field keys. +All rows stored in a table share the same columns, but if a point doesn't +include a value for a column, the column value is _null_. +A table full of _null_ values has a ["sparse" schema](#avoid-sparse-schemas). + +{{< expand-wrapper >}} +{{% expand "View example of a sparse, non-homogenous schema" %}} + +Non-homogenous schemas are often caused by writing points to a table with +inconsistent tag or field sets. +In the following example, data is collected from two +different sources and each source returns data with different tag and field sets. 
+ +{{< flex >}} +{{% flex-content %}} + +##### Source 1 tags and fields: + +- tags: + - source + - code + - crypto +- fields: + - price + {{% /flex-content %}} + {{% flex-content %}} + +##### Source 2 tags and fields: + +- tags: + - src + - currency + - crypto +- fields: + - cost + - volume + {{% /flex-content %}} + {{< /flex >}} + +These sets of data written to the same table result in a table +full of null values (also known as a _sparse schema_): + +| time | source | src | code | currency | crypto | price | cost | volume | +| :------------------- | :----- | --: | :--- | :------- | :------ | ----------: | ---------: | ----------: | +| 2025-01-01T12:00:00Z | src1 | | USD | | bitcoin | 16588.45865 | | | +| 2025-01-01T12:00:00Z | | 2 | | EUR | bitcoin | | 16159.5806 | 16749450200 | +| 2025-01-01T13:00:00Z | src1 | | USD | | bitcoin | 16559.49871 | | | +| 2025-01-01T13:00:00Z | | 2 | | EUR | bitcoin | | 16131.3694 | 16829683245 | +| 2025-01-01T14:00:00Z | src1 | | USD | | bitcoin | 16577.46667 | | | +| 2025-01-01T14:00:00Z | | 2 | | EUR | bitcoin | | 16148.8727 | 17151722208 | +| 2025-01-01T15:00:00Z | src1 | | USD | | bitcoin | 16591.36998 | | | +| 2025-01-01T15:00:00Z | | 2 | | EUR | bitcoin | | 16162.4167 | 17311854919 | + +{{% /expand %}} +{{< /expand-wrapper >}} + +### Use the best data type for your data + +When writing data to a field, use the most appropriate +[data type](/influxdb3/version/reference/glossary/#data-type) for your data--write +integers as integers, decimals as floats, and booleans as booleans. +A query against a field that stores integers outperforms a query against string data; +querying over many long string values can negatively affect performance. + +## Design for query simplicity + +Naming conventions for tables, tag keys, and field keys can simplify or +complicate the process of writing queries for your data. +The following guidelines help to ensure writing queries for your data is as +simple as possible. + +- [Keep table names, tags, and fields simple](#keep-table-names-tags-and-fields-simple) +- [Avoid keywords and special characters](#avoid-keywords-and-special-characters) + +### Keep table names, tags, and fields simple + +Use one tag or one field for each data attribute. +If your source data contains multiple data attributes in a single parameter, +split each attribute into its own tag or field. + +Table names, tag keys, and field keys should be simple and accurately +describe what each contains. +Keep names free of data. +The most common cause of a complex naming convention is when you try to "embed" +data attributes into a table name, tag key, or field key. + +When each key and value represents one attribute (not multiple concatenated attributes) +of your data, you'll reduce the need for regular expressions in your queries. +Without regular expressions, your queries will be easier to write and more performant. 
+ +#### Not recommended {.orange} + +For example, consider the following [line protocol](/influxdb3/version/reference/syntax/line-protocol/) +that embeds multiple attributes (location, model, and ID) into a `sensor` tag value: + +```text +home,sensor=loc-kitchen.model-A612.id-1726ZA temp=72.1 +home,sensor=loc-bath.model-A612.id-2635YB temp=71.8 +``` + +{{< expand-wrapper >}} +{{% expand "View written data" %}} + +{{% influxql/table-meta %}} +**table**: home +{{% /influxql/table-meta %}} + +| time | sensor | temp | +| :------------------- | :------------------------------- | ---: | +| 2025-01-01T00:00:00Z | loc-kitchen.model-A612.id-1726ZA | 72.1 | +| 2025-01-01T00:00:00Z | loc-bath.model-A612.id-2635YB | 71.8 | + +{{% /expand %}} +{{< /expand-wrapper >}} + +To query data from the sensor with ID `1726ZA`, you have to use either SQL +pattern matching or regular expressions to evaluate the `sensor` tag: + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[SQL](#) +[InfluxQL](#) +{{% /code-tabs %}} +{{% code-tab-content %}} + +```sql +SELECT * FROM home WHERE sensor LIKE '%id-1726ZA%' +``` + +{{% /code-tab-content %}} +{{% code-tab-content %}} + +```sql +SELECT * FROM home WHERE sensor =~ /id-1726ZA/ +``` + +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +SQL pattern matching and regular expressions both complicate the query and +are less performant than simple equality expressions. + +#### Recommended {.green} + +The better approach would be to write each sensor attribute as a separate tag: + +```text +home,location=kitchen,sensor_model=A612,sensor_id=1726ZA temp=72.1 +home,location=bath,sensor_model=A612,sensor_id=2635YB temp=71.8 +``` + +{{< expand-wrapper >}} +{{% expand "View written data" %}} + +{{% influxql/table-meta %}} +**table**: home +{{% /influxql/table-meta %}} + +| time | location | sensor_model | sensor_id | temp | +| :------------------- | :------- | :----------- | :-------- | ---: | +| 2023-01-01T00:00:00Z | kitchen | A612 | 1726ZA | 72.1 | +| 2023-01-01T00:00:00Z | bath | A612 | 2635YB | 71.8 | + +{{% /expand %}} +{{< /expand-wrapper >}} + +To query data from the sensor with ID `1726ZA` using this schema, you can use a +simple equality expression: + +```sql +SELECT * FROM home WHERE sensor_id = '1726ZA' +``` + +This query is easier to write and is more performant than using pattern matching +or regular expressions. + +### Avoid keywords and special characters + +To simplify query writing, avoid using reserved keywords or special characters +in table names, tag keys, and field keys. + +- [SQL keywords](/influxdb3/version/reference/sql/#keywords) +- [InfluxQL keywords](/influxdb3/version/reference/influxql/#keywords) + +When using SQL or InfluxQL to query tables, tags, and fields with special +characters or keywords, you have to wrap these identifiers in **double quotes**. + +```sql +SELECT + "example-field", "tag@1-23" +FROM + "example-table" +WHERE + "tag@1-23" = 'ABC' +``` diff --git a/content/shared/influxdb3-write-guides/client-libraries.md b/content/shared/influxdb3-write-guides/client-libraries.md new file mode 100644 index 000000000..d0c8e42ae --- /dev/null +++ b/content/shared/influxdb3-write-guides/client-libraries.md @@ -0,0 +1,451 @@ + +Use InfluxDB 3 client libraries to construct data as time series points, and +then write them as line protocol to an {{% product-name %}} database. 
+ +- [Construct line protocol](#construct-line-protocol) + - [Example home schema](#example-home-schema) +- [Set up your project](#set-up-your-project) +- [Construct points and write line protocol](#construct-points-and-write-line-protocol) + +## Construct line protocol + +With a [basic understanding of line protocol](/influxdb3/version/write-data/#line-protocol), +you can construct line protocol data and write it to {{% product-name %}}. + +All InfluxDB client libraries write data in line protocol format to InfluxDB. +Client library `write` methods let you provide data as raw line protocol or as +`Point` objects that the client library converts to line protocol. If your +program creates the data you write to InfluxDB, use the client library `Point` +interface to take advantage of type safety in your program. + +### Example home schema + +Consider a use case where you collect data from sensors in your home. Each +sensor collects temperature, humidity, and carbon monoxide readings. + +To collect this data, use the following schema: + + + +- **table**: `home` + - **tags** + - `room`: Living Room or Kitchen + - **fields** + - `temp`: temperature in °C (float) + - `hum`: percent humidity (float) + - `co`: carbon monoxide in parts per million (integer) + - **timestamp**: Unix timestamp in _second_ precision + + + +The following example shows how to construct and write points that follow the +`home` schema. + +## Set up your project + +After setting up {{< product-name >}} and your project, you should have the following: + +- {{< product-name >}} credentials: + + - [Database](/influxdb3/version/admin/databases/) + - Authorization token + + > [!Note] + > While in alpha, {{< product-name >}} does not require an authorization token. + + - {{% product-name %}} URL + +- A directory for your project. + +- Credentials stored as environment variables or in a project configuration + file--for example, a `.env` ("dotenv") file. + +- Client libraries installed for writing data to {{< product-name >}}. + +The following examples use InfluxDB 3 client libraries to show how to construct +`Point` objects that follow the [example `home` schema](#example-home-schema), +and then write the data as line protocol to an {{% product-name %}} database. + +{{< tabs-wrapper >}} +{{% tabs %}} +[Go](#) +[Node.js](#) +[Python](#) +{{% /tabs %}} +{{% tab-content %}} + +The following steps set up a Go project using the +[InfluxDB 3 Go client](https://github.com/InfluxCommunity/influxdb3-go/): + + + +1. Install [Go 1.13 or later](https://golang.org/doc/install). + +1. Create a directory for your Go module and change to the directory--for + example: + + ```sh + mkdir iot-starter-go && cd $_ + ``` + +1. Initialize a Go module--for example: + + ```sh + go mod init iot-starter + ``` + +1. Install [`influxdb3-go`](https://github.com/InfluxCommunity/influxdb3-go/), + which provides the InfluxDB `influxdb3` Go client library module. + + ```sh + go get github.com/InfluxCommunity/influxdb3-go/v2 + ``` + + + +{{% /tab-content %}} {{% tab-content %}} + + + +The following steps set up a JavaScript project using the +[InfluxDB 3 JavaScript client](https://github.com/InfluxCommunity/influxdb3-js/). + +1. Install [Node.js](https://nodejs.org/en/download/). + +1. Create a directory for your JavaScript project and change to the + directory--for example: + + ```sh + mkdir -p iot-starter-js && cd $_ + ``` + +1. Initialize a project--for example, using `npm`: + + + + ```sh + npm init + ``` + +1. 
Install the `@influxdata/influxdb3-client` InfluxDB 3 JavaScript client + library. + + ```sh + npm install @influxdata/influxdb3-client + ``` + + + +{{% /tab-content %}} {{% tab-content %}} + + + +The following steps set up a Python project using the +[InfluxDB 3 Python client](https://github.com/InfluxCommunity/influxdb3-python/): + +1. Install [Python](https://www.python.org/downloads/) + +1. Inside of your project directory, create a directory for your Python module + and change to the module directory--for example: + + ```sh + mkdir -p iot-starter-py && cd $_ + ``` + +1. **Optional, but recommended**: Use + [`venv`](https://docs.python.org/3/library/venv.html) or + [`conda`](https://docs.continuum.io/anaconda/install/) to activate a virtual + environment for installing and executing code--for example, enter the + following command using `venv` to create and activate a virtual environment + for the project: + + ```bash + python3 -m venv envs/iot-starter && source ./envs/iot-starter/bin/activate + ``` + +1. Install + [`influxdb3-python`](https://github.com/InfluxCommunity/influxdb3-python), + which provides the InfluxDB `influxdb_client_3` Python client library module + and also installs the + [`pyarrow` package](https://arrow.apache.org/docs/python/index.html) for + working with Arrow data. + + ```sh + pip install influxdb3-python + ``` + + + +{{% /tab-content %}} +{{< /tabs-wrapper >}} + +## Construct points and write line protocol + +Client libraries provide one or more `Point` constructor methods. Some libraries +support language-native data structures, such as Go's `struct`, for creating +points. + +{{< tabs-wrapper >}} +{{% tabs %}} +[Go](#) +[Node.js](#) +[Python](#) +{{% /tabs %}} +{{% tab-content %}} + + + +1. Create a file for your module--for example: `main.go`. + +1. In `main.go`, enter the following sample code: + + ```go + package main + + import ( + "context" + "os" + "fmt" + "time" + "github.com/InfluxCommunity/influxdb3-go/v2/influxdb3" + "github.com/influxdata/line-protocol/v2/lineprotocol" + ) + + func Write() error { + url := os.Getenv("INFLUX_HOST") + token := os.Getenv("INFLUX_TOKEN") + database := os.Getenv("INFLUX_DATABASE") + + // To instantiate a client, call New() with InfluxDB credentials. + client, err := influxdb3.New(influxdb3.ClientConfig{ + Host: url, + Token: token, + Database: database, + }) + + /** Use a deferred function to ensure the client is closed when the + * function returns. + **/ + defer func (client *influxdb3.Client) { + err = client.Close() + if err != nil { + panic(err) + } + }(client) + + /** Use the NewPoint method to construct a point. + * NewPoint(measurement, tags map, fields map, time) + **/ + point := influxdb3.NewPoint("home", + map[string]string{ + "room": "Living Room", + }, + map[string]any{ + "temp": 24.5, + "hum": 40.5, + "co": 15i}, + time.Now(), + ) + + /** Use the NewPointWithMeasurement method to construct a point with + * method chaining. + **/ + point2 := influxdb3.NewPointWithMeasurement("home"). + SetTag("room", "Living Room"). + SetField("temp", 23.5). + SetField("hum", 38.0). + SetField("co", 16i). + SetTimestamp(time.Now()) + + fmt.Println("Writing points") + points := []*influxdb3.Point{point, point2} + + /** Write points to InfluxDB. + * You can specify WriteOptions, such as Gzip threshold, + * default tags, and timestamp precision. 
Default precision is lineprotocol.Nanosecond + **/ + err = client.WritePoints(context.Background(), points, + influxdb3.WithPrecision(lineprotocol.Second)) + return nil + } + + func main() { + Write() + } + ``` + +1. To run the module and write the data to your {{% product-name %}} database, + enter the following command in your terminal: + + + + ```sh + go run main.go + ``` + + + +{{% /tab-content %}} {{% tab-content %}} + + + +1. Create a file for your module--for example: `write-points.js`. + +1. In `write-points.js`, enter the following sample code: + + ```js + // write-points.js + import { InfluxDBClient, Point } from '@influxdata/influxdb3-client'; + + /** + * Set InfluxDB credentials. + */ + const host = process.env.INFLUX_HOST ?? ''; + const database = process.env.INFLUX_DATABASE; + const token = process.env.INFLUX_TOKEN; + + /** + * Write line protocol to InfluxDB using the JavaScript client library. + */ + export async function writePoints() { + /** + * Instantiate an InfluxDBClient. + * Provide the host URL and the database token. + */ + const client = new InfluxDBClient({ host, token }); + + /** Use the fluent interface with chained methods to construct Points. */ + const point = Point.measurement('home') + .setTag('room', 'Living Room') + .setFloatField('temp', 22.2) + .setFloatField('hum', 35.5) + .setIntegerField('co', 7) + .setTimestamp(new Date().getTime() / 1000); + + const point2 = Point.measurement('home') + .setTag('room', 'Kitchen') + .setFloatField('temp', 21.0) + .setFloatField('hum', 35.9) + .setIntegerField('co', 0) + .setTimestamp(new Date().getTime() / 1000); + + /** Write points to InfluxDB. + * The write method accepts an array of points, the target database, and + * an optional configuration object. + * You can specify WriteOptions, such as Gzip threshold, default tags, + * and timestamp precision. Default precision is lineprotocol.Nanosecond + **/ + + try { + await client.write([point, point2], database, '', { precision: 's' }); + console.log('Data has been written successfully!'); + } catch (error) { + console.error(`Error writing data to InfluxDB: ${error.body}`); + } + + client.close(); + } + + writePoints(); + ``` + +1. To run the module and write the data to your {{\< product-name >}} database, + enter the following command in your terminal: + + + + ```sh + node writePoints.js + ``` + + + + {{% /tab-content %}} {{% tab-content %}} + + + +1. Create a file for your module--for example: `write-points.py`. + +1. In `write-points.py`, enter the following sample code to write data in + batching mode: + + ```python + import os + from influxdb_client_3 import ( + InfluxDBClient3, InfluxDBError, Point, WritePrecision, + WriteOptions, write_client_options) + + host = os.getenv('INFLUX_HOST') + token = os.getenv('INFLUX_TOKEN') + database = os.getenv('INFLUX_DATABASE') + + # Create an array of points with tags and fields. + points = [Point("home") + .tag("room", "Kitchen") + .field("temp", 25.3) + .field('hum', 20.2) + .field('co', 9)] + + # With batching mode, define callbacks to execute after a successful or + # failed write request. + # Callback methods receive the configuration and data sent in the request. 
+ def success(self, data: str): + print(f"Successfully wrote batch: data: {data}") + + def error(self, data: str, exception: InfluxDBError): + print(f"Failed writing batch: config: {self}, data: {data} due: {exception}") + + def retry(self, data: str, exception: InfluxDBError): + print(f"Failed retry writing batch: config: {self}, data: {data} retry: {exception}") + + # Configure options for batch writing. + write_options = WriteOptions(batch_size=500, + flush_interval=10_000, + jitter_interval=2_000, + retry_interval=5_000, + max_retries=5, + max_retry_delay=30_000, + exponential_base=2) + + # Create an options dict that sets callbacks and WriteOptions. + wco = write_client_options(success_callback=success, + error_callback=error, + retry_callback=retry, + write_options=write_options) + + # Instantiate a synchronous instance of the client with your + # InfluxDB credentials and write options, such as Gzip threshold, default tags, + # and timestamp precision. Default precision is nanosecond ('ns'). + with InfluxDBClient3(host=host, + token=token, + database=database, + write_client_options=wco) as client: + + client.write(points, write_precision='s') + ``` + +1. To run the module and write the data to your {{< product-name >}} database, + enter the following command in your terminal: + + + + ```sh + python write-points.py + ``` + + + + {{% /tab-content %}} {{< /tabs-wrapper >}} + +The sample code does the following: + + + +1. Instantiates a client configured with the InfluxDB URL and API token. +2. Constructs `home`table `Point` objects. +3. Sends data as line protocol format to InfluxDB and waits for the response. +4. If the write succeeds, logs the success message to stdout; otherwise, logs + the failure message and error details. +5. Closes the client to release resources. + + diff --git a/content/shared/influxdb3-write-guides/influxdb3-cli.md b/content/shared/influxdb3-write-guides/influxdb3-cli.md new file mode 100644 index 000000000..9012ffbb3 --- /dev/null +++ b/content/shared/influxdb3-write-guides/influxdb3-cli.md @@ -0,0 +1,193 @@ + +Use the [`influxdb3` CLI](/influxdb3/version/reference/cli/influxdb3/) +to write line protocol data to {{< product-name >}}. + +- [Construct line protocol](#construct-line-protocol) +- [Write the line protocol to InfluxDB](#write-the-line-protocol-to-influxdb) + +## Construct line protocol + +With a [basic understanding of line protocol](/influxdb3/version/write-data/#line-protocol), +you can now construct line protocol and write data to {{< product-name >}}. +Consider a use case where you collect data from sensors in your home. +Each sensor collects temperature, humidity, and carbon monoxide readings. 
+To collect this data, use the following schema: + +- **table**: `home` + - **tags** + - `room`: Living Room or Kitchen + - **fields** + - `temp`: temperature in °C (float) + - `hum`: percent humidity (float) + - `co`: carbon monoxide in parts per million (integer) + - **timestamp**: Unix timestamp in _second_ precision + +The following line protocol represent the schema described above: + +{{% influxdb/custom-timestamps %}} + +```text +home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000 +home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000 +home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600 +home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600 +home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200 +home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200 +home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800 +home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800 +home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400 +home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400 +home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000 +home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000 +``` + +{{% /influxdb/custom-timestamps %}} + +For this tutorial, you can either pass this line protocol directly to the +`influxdb3 write` command as a string, via `stdin`, or you can save it to and +read it from a file. + +## Write the line protocol to InfluxDB + +Use the [`influxdb3 write` command](/influxdb3/version/reference/cli/influxdb3/write/) +to write the home sensor sample data to {{< product-name >}}. +Provide the following: + +- The [database](/influxdb3/version/admin/databases/) name using the + `--database` option +- Your {{< product-name >}} authorization token using the `-t`, `--token` option +- [Line protocol](#construct-line-protocol). + Provide the line protocol in one of the following ways: + + - a string + - a path to a file that contains the line protocol using the `--file` option + - from stdin + +> [!Note] +> {{< product-name >}} auto-detects the timestamp precision by identifying which +> precision results in timestamps relatively close to "now." + + + +{{< tabs-wrapper >}} +{{% tabs %}} +[string](#) +[file](#) +[stdin](#) +{{% /tabs %}} +{{% tab-content %}} + +{{% influxdb/custom-timestamps %}} +{{% code-placeholders "(DATABASE|AUTH)_(NAME|TOKEN)|(LINE_PROTOCOL_FILEPATH)" %}} + +```sh +influxdb3 write \ + --database DATABASE_NAME \ + --token AUTH_TOKEN \ + 'home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000 +home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000 +home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600 +home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600 +home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200 +home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200 +home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800 +home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800 +home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400 +home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400 +home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000 +home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000' +``` + +{{% /code-placeholders %}} +{{% /influxdb/custom-timestamps %}} + +{{% /tab-content %}} +{{% tab-content %}} + +{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME" %}} + +1. 
In your terminal, enter the following command to create the sample data file: + + ```sh + echo 'home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000 + home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000 + home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600 + home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600 + home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200 + home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200 + home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800 + home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800 + home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400 + home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400 + home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000 + home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000' > ./home.lp + ``` + + + +2. Enter the following CLI command to write the data from the sample file: + + ```sh + influxdb3 write \ + --database DATABASE_NAME \ + --token AUTH_TOKEN \ + ./home.lp + ``` + +{{% /code-placeholders %}} + +{{% /tab-content %}} +{{% tab-content %}} + +{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME" %}} + +1. In your terminal, enter the following command to create the sample data file: + + ```sh + echo 'home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000 + home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000 + home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600 + home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600 + home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200 + home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200 + home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800 + home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800 + home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400 + home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400 + home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000 + home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000' > ./home.lp + ``` + + + +2. Enter the following CLI command to write the data from the sample file: + + ```sh + cat ./home.lp | influxdb3 write \ + --database DATABASE_NAME \ + --token AUTH_TOKEN + ``` + +{{% /code-placeholders %}} + +{{% /tab-content %}} +{{< /tabs-wrapper >}} + +Replace the following: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: + the name of the database to write to +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: + your {{< product-name >}} authorization token + + > [!Note] + > While in alpha, {{< product-name >}} does not require an authorization token. diff --git a/content/shared/influxdb3-write-guides/troubleshoot.md b/content/shared/influxdb3-write-guides/troubleshoot.md new file mode 100644 index 000000000..1db4781c5 --- /dev/null +++ b/content/shared/influxdb3-write-guides/troubleshoot.md @@ -0,0 +1,67 @@ + +Learn how to avoid unexpected results and recover from errors when writing to +{{% product-name %}}. + +- [Handle write responses](#handle-write-responses) + - [Review HTTP status codes](#review-http-status-codes) +- [Troubleshoot failures](#troubleshoot-failures) +- [Troubleshoot rejected points](#troubleshoot-rejected-points) + +## Handle write responses + +{{% product-name %}} does the following when you send a write request: + +1. Validates the request. +2. If successful, attempts to ingest data from the request body; otherwise, + responds with an [error status](#review-http-status-codes). +3. 
Ingests or rejects data in the batch and returns one of the following HTTP + status codes: + + - `204 No Content`: All data in the batch is ingested. + - `400 Bad Request`: Some or all of the data has been rejected. + Data that has not been rejected is ingested and queryable. + +The response body contains error details about +[rejected points](#troubleshoot-rejected-points), up to 100 points. + +Writes are synchronous--the response status indicates the final status of the +write and all ingested data is queryable. + +To ensure that InfluxDB handles writes in the order you request them, +wait for the response before you send the next request. + +### Review HTTP status codes + +{{< product-name >}} uses conventional HTTP status codes to indicate the success +or failure of a request. The `message` property of the response body may contain +additional details about the error. +Write requests return the following status codes: + +| HTTP response code | Message | Description | +| :-------------------------------| :--------------------------------------------------------------- | :------------- | +| `204 "Success"` | | If InfluxDB ingested the data | +| `400 "Bad request"` | error details about rejected points, up to 100 points: `line` contains the first rejected line, `message` describes rejections | If some or all request data isn't allowed (for example, if it is malformed or falls outside of the bucket's retention period)--the response body indicates whether a partial write has occurred or if all data has been rejected | +| `401 "Unauthorized"` | | If the `Authorization` header is missing or malformed or if the [token](/influxdb3/version/admin/tokens/) doesn't have [permission](/influxdb3/version/reference/cli/influxctl/token/create/#examples) to write to the database. See [examples using credentials](/influxdb3/version/get-started/write/#write-line-protocol-to-influxdb) in write requests. | +| `404 "Not found"` | requested **resource type** (for example, "organization" or "database"), and **resource name** | If a requested resource (for example, organization or database) wasn't found | +| `500 "Internal server error"` | | Default status for an error | +| `503` "Service unavailable" | | If the server is temporarily unavailable to accept writes. The `Retry-After` header describes when to try the write again. + +If your data did not write to the database, see how to [troubleshoot rejected points](#troubleshoot-rejected-points). + +## Troubleshoot failures + +If you notice data is missing in your database, do the following: + +- Check the `message` property in the response body for details about the error. +- If the `message` describes a field error, [troubleshoot rejected points](#troubleshoot-rejected-points). +- Verify all lines contain valid syntax ([line protocol](/influxdb3/version/reference/syntax/line-protocol/)). +- Verify the timestamps in your data match the [precision parameter](/influxdb3/version/reference/glossary/#precision) in your request. +- Minimize payload size and network errors by [optimizing writes](/influxdb3/version/write-data/best-practices/optimize-writes/). + +## Troubleshoot rejected points + +InfluxDB rejects points that don't match the schema of existing data. + +Check for [field data type](/influxdb3/version/reference/syntax/line-protocol/#data-types-and-format) +differences between the rejected data point and points within the same +database--for example, did you attempt to write `string` data to an `int` field? 
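+
+For example--a minimal sketch that assumes the `home` sample schema, where `co`
+is stored as an integer--the following request writes a float `co` value to the
+same table. InfluxDB rejects that point, responds with a `400` status code and
+a `message` that describes the rejected line, and still ingests the points that
+match the schema:
+
+{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
+
+```bash
+# The first line writes co as a float (22.1), which conflicts with the existing
+# integer co column; the second line matches the schema and is ingested.
+curl --request POST "https://{{< influxdb/host >}}/api/v2/write?org=ignored&bucket=DATABASE_NAME&precision=s" \
+  --header "Authorization: Token AUTH_TOKEN" \
+  --header "Content-Type: text/plain; charset=utf-8" \
+  --data-binary 'home,room=Kitchen temp=23.1,hum=36.6,co=22.1 1641063600
+home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1641063600'
+```
+
+{{% /code-placeholders %}}
+
+Correct the data type in the rejected line (for example, write `co=22i`) and
+resend it.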
diff --git a/content/shared/influxdb3-write-guides/use-telegraf/_index.md b/content/shared/influxdb3-write-guides/use-telegraf/_index.md new file mode 100644 index 000000000..d1a174c8f --- /dev/null +++ b/content/shared/influxdb3-write-guides/use-telegraf/_index.md @@ -0,0 +1,62 @@ + +[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) is a data +collection agent for collecting and reporting metrics. +Its vast library of input plugins and "plug-and-play" architecture lets you +quickly and easily collect metrics from many different sources. + +For a list of available plugins, see [Telegraf plugins](/telegraf/v1/plugins/). + +#### Requirements + +- **Telegraf 1.9.2 or greater**. + _For information about installing Telegraf, see the + [Telegraf Installation instructions](/telegraf/v1/install/)._ + +## Basic Telegraf usage + +Telegraf is a plugin-based agent with plugins that are enabled and configured in +your Telegraf configuration file (`telegraf.conf`). +Each Telegraf configuration must **have at least one input plugin and one output plugin**. + +Telegraf input plugins retrieve metrics from different sources. +Telegraf output plugins write those metrics to a destination. + +Use the [`outputs.influxdb_v2`](/telegraf/v1/plugins/#output-influxdb_v2) plugin +to connect to the InfluxDB v2 write API included in {{% product-name %}} and +write metrics collected by Telegraf to {{< product-name >}}. + +{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME" %}} + +```toml +# ... + +[[outputs.influxdb_v2]] + urls = ["http://{{< influxdb/host >}}"] + token = "AUTH_TOKEN" + organization = "" + bucket = "DATABASE_NAME" + +# ... +``` + +{{% /code-placeholders %}} + +Replace the following: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: + the name of the database to write data to +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: + your {{< product-name >}} authorization token. + _Store this in a secret store or environment variable to avoid exposing the raw token string._ + + > [!Note] + > While in alpha, {{< product-name >}} does not require an authorization token. + > For the `token` option, provide an empty or arbitrary token string. + +_See how to [Configure Telegraf to write to {{% product-name %}}](/influxdb3/version/write-data/use-telegraf/configure/)._ + +## Use Telegraf with InfluxDB + +{{< children >}} + +{{< influxdbu "telegraf-102" >}} diff --git a/content/shared/influxdb3-write-guides/use-telegraf/configure.md b/content/shared/influxdb3-write-guides/use-telegraf/configure.md new file mode 100644 index 000000000..565e28f74 --- /dev/null +++ b/content/shared/influxdb3-write-guides/use-telegraf/configure.md @@ -0,0 +1,134 @@ + +Use the Telegraf [`influxdb_v2` output plugin](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/influxdb_v2/README.md) +to collect and write metrics to {{< product-name >}}. +This plugin uses the InfluxDB v2 HTTP API write endpoint available with +{{% product-name %}}. +Learn how to enable and configure the `influxdb_v2` output plugin to write data +to {{% product-name %}}. 
+ +> [!Note] +> _View the [requirements](/influxdb3/version/write-data/use-telegraf#requirements) +> for using Telegraf with {{< product-name >}}._ + + + +- [Configure Telegraf input and output plugins](#configure-telegraf-input-and-output-plugins) + - [Add Telegraf plugins](#add-telegraf-plugins) + - [Enable and configure the InfluxDB v2 output plugin](#enable-and-configure-the-influxdb-v2-output-plugin) + - [urls](#urls) + - [token](#token) + - [organization](#organization) + - [bucket](#bucket) + - [Other Telegraf configuration options](#other-telegraf-configuration-options) +- [Start Telegraf](#start-telegraf) + +## Configure Telegraf input and output plugins + +Configure Telegraf input and output plugins in the Telegraf configuration file +(typically named `telegraf.conf`). +Input plugins collect metrics. +Output plugins define destinations where metrics are sent. + +This guide assumes you have already [installed {{% product-name %}}](/influxdb3/version/install/) +and have been through the [getting started guide](/influxdb3/version/get-started/). + +### Add Telegraf plugins + +To add any of the available [Telegraf plugins](/telegraf/v1/plugins/), follow +the steps below. + +1. Find the plugin you want to enable from the complete list of available + [Telegraf plugins](/telegraf/v1/plugins/). +2. Click **View** to the right of the plugin name to open the plugin page on GitHub. + For example, view the [MQTT plugin GitHub page](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/mqtt_consumer/README.md). +3. Copy and paste the example configuration into your Telegraf configuration file + (typically named `telegraf.conf`). + +### Enable and configure the InfluxDB v2 output plugin + +To send data to {{< product-name >}}, enable the +[`influxdb_v2` output plugin](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/influxdb_v2/README.md) +in the `telegraf.conf`. + +{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME" %}} +```toml +[[outputs.influxdb_v2]] + urls = ["http://{{< influxdb/host >}}"] + token = "AUTH_TOKEN" + organization = "" + bucket = "DATABASE_NAME" +``` +{{% /code-placeholders %}} + +Replace the following: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: + the name of the database to write data to +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: + your {{< product-name >}} authorization token. + _Store this in a secret store or environment variable to avoid exposing the raw token string._ + + > [!Note] + > While in alpha, {{< product-name >}} does not require an authorization token. + > For the `token` option, provide an empty or arbitrary token string. + +The InfluxDB output plugin configuration contains the following options: + +#### urls + +An array of URL strings. +To write to {{% product-name %}}, include your {{% product-name %}} URL: + +```toml +["http://{{< influxdb/host >}}"] +``` + +#### token + +Your {{% product-name %}} authorization token. + +> [!Note] +> While in alpha, {{< product-name >}} does not require an authorization token. +> For the `token` option, provide an empty or arbitrary token string. + +> [!Tip] +> +> ##### Store your authorization token as an environment variable +> +> To prevent a plain text token in your Telegraf configuration file, we +> recommend that you store the token as an environment variable and then +> reference the environment variable in your configuration file using string +> interpolation. 
For example: +> +> ```toml +> [[outputs.influxdb_v2]] +> urls = ["http://{{< influxdb/host >}}"] +> token = "${INFLUX_TOKEN}" +> # ... +> ``` + +#### organization + +For {{% product-name %}}, set this to an empty string (`""`). + +#### bucket + +The name of the {{% product-name %}} database to write data to. + +> [!Note] +> An InfluxDB v2 _**bucket**_ is synonymous with an {{% product-name %}} _**database**_. + +### Other Telegraf configuration options + +For more plugin configuration options, see the +[`influxdb_v2` output plugin README](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/influxdb_v2/README.md) +on GitHub. + +## Start Telegraf + +Start the Telegraf service using the `--config` flag to specify the location of +your `telegraf.conf`. + +```sh +telegraf --config /path/to/custom/telegraf.conf +``` diff --git a/content/shared/influxdb3-write-guides/use-telegraf/csv.md b/content/shared/influxdb3-write-guides/use-telegraf/csv.md new file mode 100644 index 000000000..3764d85a5 --- /dev/null +++ b/content/shared/influxdb3-write-guides/use-telegraf/csv.md @@ -0,0 +1,131 @@ + +Use the Telegraf `file` input plugin to read and parse CSV data into +[line protocol](/influxdb3/version/reference/syntax/line-protocol/) +and write it to {{< product-name >}}. +[Telegraf](/telegraf/v1/) is a plugin-based agent that collects +metrics from different sources and writes them to specified destinations. + + + +- [Configure Telegraf to read CSV files](#configure-telegraf-to-read-csv-files) +- [Configure Telegraf to write to InfluxDB](#configure-telegraf-to-write-to-influxdb) + - [Other Telegraf configuration options](#other-telegraf-configuration-options) + + + +## Configure Telegraf to read CSV files + +1. Add and enable the [`inputs.file` plugin](/telegraf/v1/plugins/#input-file) + in your Telegraf configuration file. +2. Use the `files` option to specify the list of CSV files to read. + CSV files must be accessible by the Telegraf agent. +3. Set the `data_format` option to `csv`. +4. Define all other `csv_` configuration options specific to the CSV data you + want to write to {{< product-name >}}. + _For detailed information about each of the CSV format configuration options, + see [CSV input data format](/telegraf/v1/data_formats/input/csv/)._ + +```toml +[[inputs.file]] + files = ["/path/to/example.csv"] + data_format = "csv" + csv_header_row_count = 0 + csv_column_names = [] + csv_column_types = [] + csv_skip_rows = 0 + csv_metadata_rows = 0 + csv_metadata_separators = [":", "="] + csv_metadata_trim_set = "" + csv_skip_columns = 0 + csv_delimiter = "," + csv_comment = "" + csv_trim_space = false + csv_tag_columns = [] + csv_measurement_column = "" + csv_timestamp_column = "" + csv_timestamp_format = "" + csv_timezone = "" + csv_skip_values = [] + csv_skip_errors = false + csv_reset_mode = "none" +``` + +## Configure Telegraf to write to InfluxDB + +To send data to {{< product-name >}}, enable and configure the +[`influxdb_v2` output plugin](/influxdb3/version/write-data/use-telegraf/configure/#enable-and-configure-the-influxdb-v2-output-plugin) +in your `telegraf.conf`. 
+
+{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME" %}}
+```toml
+[[inputs.file]]
+  files = ["/path/to/example.csv"]
+  data_format = "csv"
+  csv_header_row_count = 0
+  csv_column_names = []
+  csv_column_types = []
+  csv_skip_rows = 0
+  csv_metadata_rows = 0
+  csv_metadata_separators = [":", "="]
+  csv_metadata_trim_set = ""
+  csv_skip_columns = 0
+  csv_delimiter = ","
+  csv_comment = ""
+  csv_trim_space = false
+  csv_tag_columns = []
+  csv_measurement_column = ""
+  csv_timestamp_column = ""
+  csv_timestamp_format = ""
+  csv_timezone = ""
+  csv_skip_values = []
+  csv_skip_errors = false
+  csv_reset_mode = "none"
+
+[[outputs.influxdb_v2]]
+  urls = ["http://{{< influxdb/host >}}"]
+  token = "AUTH_TOKEN"
+  organization = ""
+  bucket = "DATABASE_NAME"
+  content_encoding = "gzip"
+```
+{{% /code-placeholders %}}
+
+Replace the following:
+
+- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
+  the name of the database to write data to
+- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
+  your {{< product-name >}} authorization token.
+  _Store this in a secret store or environment variable to avoid exposing the raw token string._
+
+  > [!Note]
+  > While in alpha, {{< product-name >}} does not require an authorization token.
+  > For the `token` option, provide an empty or arbitrary token string.
+
+  > [!Tip]
+  >
+  > ##### Store your authorization token as an environment variable
+  >
+  > Avoid storing a plain text token in your Telegraf configuration file.
+  > Store the token as an environment variable and then
+  > reference the environment variable in your configuration file using string
+  > interpolation. For example:
+  >
+  > ```toml
+  > [[outputs.influxdb_v2]]
+  >   urls = ["http://{{< influxdb/host >}}"]
+  >   token = "${INFLUX_TOKEN}"
+  >   # ...
+  > ```
+
+
+**Restart the Telegraf agent** to apply the configuration change and write the
+CSV data to {{% product-name %}}.
+
+#### Other Telegraf configuration options
+
+The preceding examples describe Telegraf configurations necessary for writing to
+{{% product-name %}}. The `influxdb_v2` output plugin provides several other
+configuration options. For more information, see the
+[`influxdb_v2` plugin options](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/influxdb_v2/README.md)
+on GitHub.
diff --git a/content/shared/influxdb3-write-guides/use-telegraf/dual-write.md b/content/shared/influxdb3-write-guides/use-telegraf/dual-write.md
new file mode 100644
index 000000000..83896c395
--- /dev/null
+++ b/content/shared/influxdb3-write-guides/use-telegraf/dual-write.md
@@ -0,0 +1,60 @@
+
+Use Telegraf to write your data simultaneously to multiple InfluxDB instances or clusters.
+This method, known as "dual writing," is useful for backing up data
+to a separate instance or for migrating from other versions of InfluxDB to
+{{< product-name >}}.
+
+The following example configures Telegraf for dual writing to {{% product-name %}} and an InfluxDB v2 OSS instance.
+
+The configuration includes the following:
+
+  - The [InfluxDB v2 output plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/influxdb_v2)
+    twice--one pointing to {{< product-name >}} and the other to an
+    InfluxDB v2 OSS instance.
+  - Two different tokens--one for InfluxDB v2 OSS and one for {{% product-name %}}.
+    Configure both tokens as environment variables (as shown in the example
+    after this list) and use string interpolation in your Telegraf
+    configuration file to reference each environment variable.
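+
+For example, you might export both tokens before starting Telegraf so that the
+interpolated values in the sample configuration below resolve at runtime. This
+is a minimal sketch--the variable names match the sample configuration, and the
+values shown are placeholders.
+
+```bash
+# Export the tokens that the Telegraf configuration references through
+# string interpolation. Replace the placeholder values with your own tokens.
+export INFLUX_TOKEN=AUTH_TOKEN          # {{% product-name %}} token
+export INFLUX_TOKEN_OSS=OSS_AUTH_TOKEN  # InfluxDB v2 OSS token
+
+# Start Telegraf with the dual-write configuration.
+telegraf --config /path/to/telegraf.conf
+```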
+ + > [!Note] + > While in alpha, {{< product-name >}} does not require an authorization token. + > For the `token` option, provide an empty or arbitrary token string. + + +## Sample configuration + +```toml +# Include any other input, processor, or aggregator plugins that you want to +# include in your configuration. + +# Send data to {{% product-name %}} +[[outputs.influxdb_v2]] + ## The {{% product-name %}} URL + urls = ["http://{{< influxdb/host >}}"] + ## {{% product-name %}} authorization token + token = "${INFLUX_TOKEN}" + ## For {{% product-name %}}, set organization to an empty string + organization = "" + ## Destination database to write into + bucket = "DATABASE_NAME" + +# Send data to InfluxDB v2 OSS +[[outputs.influxdb_v2]] + ## The InfluxDB v2 OSS URL + urls = ["http://localhost:8086"] + ## OSS token for authentication + token = "${INFLUX_TOKEN_OSS}" + ## Organization is the name of the organization you want to write to. + organization = "ORG_NAME_OSS" + ## Destination bucket to write to + bucket = "BUCKET_NAME_OSS" +``` + +Telegraf lets you dual write data to any version of InfluxDB using the +[`influxdb` (InfluxDB v1)](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/influxdb/README.md) +and [`influxdb_v2` output plugins](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/influxdb_v2/README.md). +A single Telegraf agent sends identical data sets to all target outputs. +You cannot filter data based on the output. + +> [!Note] +> InfluxDB v1 does _not_ support the unsigned integer data type. +> You can only write unsigned integer field values to InfluxDB v2- and 3-based +> products. diff --git a/content/shared/influxql-v3-reference/functions/date-time.md b/content/shared/influxql-v3-reference/functions/date-time.md index 2647bdb5f..0da11223f 100644 --- a/content/shared/influxql-v3-reference/functions/date-time.md +++ b/content/shared/influxql-v3-reference/functions/date-time.md @@ -102,7 +102,7 @@ tz(time_zone) {{% expand "Return the UTC offset for Chicago's time zone" %}} The following example uses the -[Get started home sensor sample dataset](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample dataset](/influxdb/version/reference/sample-data/#home-sensor-data). {{% influxdb/custom-timestamps %}} diff --git a/content/shared/influxql-v3-reference/functions/misc.md b/content/shared/influxql-v3-reference/functions/misc.md index 3bb523161..629c0dce4 100644 --- a/content/shared/influxql-v3-reference/functions/misc.md +++ b/content/shared/influxql-v3-reference/functions/misc.md @@ -201,7 +201,7 @@ tz(time_zone) {{% expand "Return the UTC offset for Chicago's time zone" %}} The following example uses the -[Get started home sensor sample dataset](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample dataset](/influxdb/version/reference/sample-data/#home-sensor-data). 
{{% influxdb/custom-timestamps %}} diff --git a/content/shared/influxql-v3-reference/functions/technical-analysis.md b/content/shared/influxql-v3-reference/functions/technical-analysis.md index c81d0efbd..b631db1d6 100644 --- a/content/shared/influxql-v3-reference/functions/technical-analysis.md +++ b/content/shared/influxql-v3-reference/functions/technical-analysis.md @@ -278,7 +278,7 @@ CHANDE_MOMENTUM_OSCILLATOR(field_expression, period[, hold_period[, warmup_type] {{% expand "Apply `CHANDE_MOMENTUM_OSCILLATOR` to a field" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -313,7 +313,7 @@ name: home {{% expand "Apply `CHANDE_MOMENTUM_OSCILLATOR` to each field" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -348,7 +348,7 @@ name: home {{% expand "Apply `CHANDE_MOMENTUM_OSCILLATOR` with a custom hold period" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -380,7 +380,7 @@ name: home {{% expand "Apply `CHANDE_MOMENTUM_OSCILLATOR` with a default non-default warmup type" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -502,7 +502,7 @@ DOUBLE_EXPONENTIAL_MOVING_AVERAGE(field_expression, period[, hold_period[, warmu {{% expand "Apply `DOUBLE_EXPONENTIAL_MOVING_AVERAGE` to a field" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -537,7 +537,7 @@ name: home {{% expand "Apply `DOUBLE_EXPONENTIAL_MOVING_AVERAGE` to each field" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -572,7 +572,7 @@ name: home {{% expand "Apply `DOUBLE_EXPONENTIAL_MOVING_AVERAGE` with a custom hold period" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -604,7 +604,7 @@ name: home {{% expand "Apply `DOUBLE_EXPONENTIAL_MOVING_AVERAGE` with a default non-default warmup type" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). 
```sql SELECT @@ -724,7 +724,7 @@ EXPONENTIAL_MOVING_AVERAGE(field_expression, period[, hold_period[, warmup_type] {{% expand "Apply `EXPONENTIAL_MOVING_AVERAGE` to a field" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -759,7 +759,7 @@ name: home {{% expand "Apply `EXPONENTIAL_MOVING_AVERAGE` to each field" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -794,7 +794,7 @@ name: home {{% expand "Apply `EXPONENTIAL_MOVING_AVERAGE` with a custom hold period" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -826,7 +826,7 @@ name: home {{% expand "Apply `EXPONENTIAL_MOVING_AVERAGE` with a default non-default warmup type" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -938,7 +938,7 @@ KAUFMANS_EFFICIENCY_RATIO(field_expression, period[, hold_period]) {{% expand "Apply `KAUFMANS_EFFICIENCY_RATIO` to a field" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -972,7 +972,7 @@ name: home {{% expand "Apply `KAUFMANS_EFFICIENCY_RATIO` to each field" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -1006,7 +1006,7 @@ name: home {{% expand "Apply `KAUFMANS_EFFICIENCY_RATIO` with a custom hold period" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -1111,7 +1111,7 @@ KAUFMANS_ADAPTIVE_MOVING_AVERAGE(field_expression, period[, hold_period]) {{% expand "Apply `KAUFMANS_ADAPTIVE_MOVING_AVERAGE` to a field" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -1145,7 +1145,7 @@ name: home {{% expand "Apply `KAUFMANS_ADAPTIVE_MOVING_AVERAGE` to each field" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). 
```sql SELECT @@ -1179,7 +1179,7 @@ name: home {{% expand "Apply `KAUFMANS_ADAPTIVE_MOVING_AVERAGE` with a custom hold period" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -1289,7 +1289,7 @@ RELATIVE_STRENGTH_INDEX(field_expression, period[, hold_period[, warmup_type]]) {{% expand "Apply `RELATIVE_STRENGTH_INDEX` to a field" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -1323,7 +1323,7 @@ name: home {{% expand "Apply `RELATIVE_STRENGTH_INDEX` to each field" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -1357,7 +1357,7 @@ name: home {{% expand "Apply `RELATIVE_STRENGTH_INDEX` with a custom hold period" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -1389,7 +1389,7 @@ name: home {{% expand "Apply `RELATIVE_STRENGTH_INDEX` with a default non-default warmup type" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -1510,7 +1510,7 @@ TRIPLE_EXPONENTIAL_MOVING_AVERAGE(field_expression, period[, hold_period[, warmu {{% expand "Apply `TRIPLE_EXPONENTIAL_MOVING_AVERAGE` to a field" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -1545,7 +1545,7 @@ name: home {{% expand "Apply `TRIPLE_EXPONENTIAL_MOVING_AVERAGE` to each field" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -1580,7 +1580,7 @@ name: home {{% expand "Apply `TRIPLE_EXPONENTIAL_MOVING_AVERAGE` with a custom hold period" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -1612,7 +1612,7 @@ name: home {{% expand "Apply `TRIPLE_EXPONENTIAL_MOVING_AVERAGE` with a default non-default warmup type" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). 
```sql SELECT @@ -1738,7 +1738,7 @@ TRIPLE_EXPONENTIAL_DERIVATIVE(field_expression, period[, hold_period[, warmup_ty {{% expand "Apply `TRIPLE_EXPONENTIAL_DERIVATIVE` to a field" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -1772,7 +1772,7 @@ name: home {{% expand "Apply `TRIPLE_EXPONENTIAL_DERIVATIVE` to each field" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -1806,7 +1806,7 @@ name: home {{% expand "Apply `TRIPLE_EXPONENTIAL_DERIVATIVE` with a custom hold period" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT @@ -1838,7 +1838,7 @@ name: home {{% expand "Apply `TRIPLE_EXPONENTIAL_DERIVATIVE` with a default non-default warmup type" %}} The following example uses the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). ```sql SELECT diff --git a/content/shared/influxql-v3-reference/limit-and-slimit.md b/content/shared/influxql-v3-reference/limit-and-slimit.md index 29b6a737e..f476d75ca 100644 --- a/content/shared/influxql-v3-reference/limit-and-slimit.md +++ b/content/shared/influxql-v3-reference/limit-and-slimit.md @@ -36,7 +36,7 @@ SELECT_clause FROM_clause [WHERE_clause] [GROUP_BY_clause] [ORDER_BY_clause] LIM ### Examples {#limit-examples} The following examples use the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). {{< expand-wrapper >}} diff --git a/content/shared/influxql-v3-reference/offset-and-soffset.md b/content/shared/influxql-v3-reference/offset-and-soffset.md index 1954e0138..918de06c2 100644 --- a/content/shared/influxql-v3-reference/offset-and-soffset.md +++ b/content/shared/influxql-v3-reference/offset-and-soffset.md @@ -46,7 +46,7 @@ SELECT_clause FROM_clause [WHERE_clause] [GROUP_BY_clause] [ORDER_BY_clause] [LI ### Examples {#offset-examples} The following examples use the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). {{< expand-wrapper >}} diff --git a/content/shared/influxql-v3-reference/order-by.md b/content/shared/influxql-v3-reference/order-by.md index 7942e9119..fc24520b0 100644 --- a/content/shared/influxql-v3-reference/order-by.md +++ b/content/shared/influxql-v3-reference/order-by.md @@ -26,7 +26,7 @@ SELECT_clause FROM_clause [WHERE_clause] [GROUP_BY_clause] ORDER BY time [ASC|DE ## Examples The following examples use the -[Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data). 
{{< expand-wrapper >}} diff --git a/content/shared/influxql-v3-reference/regular-expressions.md b/content/shared/influxql-v3-reference/regular-expressions.md index 13acb02c6..ebeae6b67 100644 --- a/content/shared/influxql-v3-reference/regular-expressions.md +++ b/content/shared/influxql-v3-reference/regular-expressions.md @@ -63,7 +63,7 @@ the [`WHERE` clause](/influxdb/version/reference/influxql/where/). The examples below use the following sample data sets: - [NOAA Bay Area weather data](/influxdb/version/reference/sample-data/#noaa-bay-area-weather-data) -- [Get started home sensor data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data) +- [Get started home sensor data](/influxdb/version/reference/sample-data/#home-sensor-data) {{< expand-wrapper >}} diff --git a/content/shared/influxql-v3-reference/select.md b/content/shared/influxql-v3-reference/select.md index c0a3c0566..66a0e56a1 100644 --- a/content/shared/influxql-v3-reference/select.md +++ b/content/shared/influxql-v3-reference/select.md @@ -198,7 +198,7 @@ is truncated at the decimal point. No rounding is performed. The examples below use the following sample data sets: -- [Get started home sensor data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data) +- [Get started home sensor data](/influxdb/version/reference/sample-data/#home-sensor-data) - [NOAA Bay Area weather data](/influxdb/version/reference/sample-data/#noaa-bay-area-weather-data) {{< expand-wrapper >}} diff --git a/content/shared/influxql-v3-reference/subqueries.md b/content/shared/influxql-v3-reference/subqueries.md index befdfd710..e15156e34 100644 --- a/content/shared/influxql-v3-reference/subqueries.md +++ b/content/shared/influxql-v3-reference/subqueries.md @@ -36,7 +36,7 @@ SELECT_clause FROM ( SELECT_clause FROM ( SELECT_statement ) [...] ) [...] > > The examples below use the following sample data sets: > -> - [Get started home sensor data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data) +> - [Get started home sensor data](/influxdb/version/reference/sample-data/#home-sensor-data) > - [Random numbers sample data](/influxdb/version/reference/sample-data/#random-numbers-sample-data) {{< expand-wrapper >}} diff --git a/content/shared/influxql-v3-reference/time-and-timezone.md b/content/shared/influxql-v3-reference/time-and-timezone.md index 8ba494b1b..5793f031a 100644 --- a/content/shared/influxql-v3-reference/time-and-timezone.md +++ b/content/shared/influxql-v3-reference/time-and-timezone.md @@ -116,7 +116,7 @@ Conditional expressions with time operands support the following comparison oper ## Query examples The following examples use the -[Get started home sensor sample dataset](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample dataset](/influxdb/version/reference/sample-data/#home-sensor-data). {{< expand-wrapper >}} @@ -284,7 +284,7 @@ SELECT_clause FROM_clause [WHERE_clause] [GROUP_BY_clause] [ORDER_BY_clause] [LI {{% influxdb/custom-timestamps %}} The following example uses the -[Get started home sensor sample dataset](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample dataset](/influxdb/version/reference/sample-data/#home-sensor-data). 
```sql SELECT * diff --git a/content/shared/influxql-v3-reference/where.md b/content/shared/influxql-v3-reference/where.md index af5468482..12c9b3639 100644 --- a/content/shared/influxql-v3-reference/where.md +++ b/content/shared/influxql-v3-reference/where.md @@ -115,7 +115,7 @@ For more information about InfluxQL regular expression syntax, see ## WHERE clause examples The following examples use the -[Get started home sensor sample dataset](/influxdb/version/reference/sample-data/#get-started-home-sensor-data). +[Home sensor sample dataset](/influxdb/version/reference/sample-data/#home-sensor-data). {{< expand-wrapper >}} {{% expand "Select data with a specific tag value" %}} diff --git a/content/shared/sql-reference/group-by.md b/content/shared/sql-reference/group-by.md index 7720787dd..58d08afd3 100644 --- a/content/shared/sql-reference/group-by.md +++ b/content/shared/sql-reference/group-by.md @@ -5,9 +5,16 @@ To output an aggregation for each group, include an aggregate or selector functi When `GROUP BY` appears in a query, the `SELECT` list can only use columns that appear in the `GROUP BY` list or in aggregate expressions. -`GROUP BY` can use column aliases that are defined in the `SELECT` clause. -`GROUP BY` can't use an alias named `time`. -In a `GROUP BY` list, `time` always refers to the measurement `time` column. +> [!Note] +> +> #### Group by aliases +> +> - `GROUP BY` can use column aliases that are defined in the `SELECT` clause. +> - `GROUP BY` won't use an aliased value if the alias is the same as the +> original column name. `GROUP BY` uses the original value of the column, +> not the transformed, aliased value. We recommended using column ordinals in +> in the `GROUP BY` clause to group by transformed values and maintain the +> alias identifier. - [Syntax](#syntax) - [Examples](#examples) @@ -28,13 +35,13 @@ GROUP BY tag1 ```sql SELECT - AVG("water_level") AS "avg_water_level", - "location" -FROM "h2o_feet" -GROUP BY "location" + AVG(water_level) AS avg_water_level, + location +FROM h2o_feet +GROUP BY location ``` -{{< expand-wrapper >}}} +{{< expand-wrapper >}} {{% expand "View example results" %}} | avg_water_level | location | @@ -45,43 +52,39 @@ GROUP BY "location" {{% /expand %}} {{< /expand-wrapper >}} -Group results in 15 minute time intervals by tag: +### Group data into 15 minute time intervals by tag ```sql SELECT - "location", - DATE_BIN(INTERVAL '15 minutes', time, TIMESTAMP '2022-01-01 00:00:00Z') AS _time, - COUNT("water_level") AS count -FROM "h2o_feet" + location, + DATE_BIN(INTERVAL '15 minutes', time) AS time, + COUNT(water_level) AS count +FROM h2o_feet WHERE time >= timestamp '2019-09-17T00:00:00Z' AND time <= timestamp '2019-09-17T01:00:00Z' -GROUP BY - _time, - location -ORDER BY - location, - _time +GROUP BY 1, location +ORDER BY location, 1 ``` -{{< expand-wrapper >}}} +{{< expand-wrapper >}} {{% expand "View example results" %}} The query uses the `COUNT()` function to count the number of `water_level` points per 15 minute interval. Results are then ordered by location and time. 
-| location | _time | count | -| :----------- | :-------------------- | ----: | -| coyote_creek | 2019-09-16T23:45:00Z | 1 | -| coyote_creek | 2019-09-17T00:00:00Z | 2 | -| coyote_creek | 2019-09-17T00:15:00Z | 3 | -| coyote_creek | 2019-09-17T00:30:00Z | 2 | -| coyote_creek | 2019-09-17T00:45:00Z | 3 | -| santa_monica | 2019-09-16T23:45:00Z | 1 | -| santa_monica | 2019-09-17T00:00:00Z | 2 | -| santa_monica | 2019-09-17T00:15:00Z | 3 | -| santa_monica | 2019-09-17T00:30:00Z | 2 | -| santa_monica | 2019-09-17T00:45:00Z | 3 | +| location | time | count | +| :----------- | :------------------- | ----: | +| coyote_creek | 2019-09-16T23:45:00Z | 1 | +| coyote_creek | 2019-09-17T00:00:00Z | 2 | +| coyote_creek | 2019-09-17T00:15:00Z | 3 | +| coyote_creek | 2019-09-17T00:30:00Z | 2 | +| coyote_creek | 2019-09-17T00:45:00Z | 3 | +| santa_monica | 2019-09-16T23:45:00Z | 1 | +| santa_monica | 2019-09-17T00:00:00Z | 2 | +| santa_monica | 2019-09-17T00:15:00Z | 3 | +| santa_monica | 2019-09-17T00:30:00Z | 2 | +| santa_monica | 2019-09-17T00:45:00Z | 3 | {{% /expand %}} {{< /expand-wrapper >}} diff --git a/content/shared/sql-reference/operators/logical.md b/content/shared/sql-reference/operators/logical.md index 092e19170..424dab488 100644 --- a/content/shared/sql-reference/operators/logical.md +++ b/content/shared/sql-reference/operators/logical.md @@ -16,7 +16,7 @@ Logical operators combine or manipulate conditions in a SQL query. > > Query examples on this page use the following sample data sets: > -> - [Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data) +> - [Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data) > - [Home sensor actions sample data](/influxdb/version/reference/sample-data/#home-sensor-actions-data) ## AND {.monospace} diff --git a/content/shared/sql-reference/subqueries.md b/content/shared/sql-reference/subqueries.md index adae0c0b4..924b9bc74 100644 --- a/content/shared/sql-reference/subqueries.md +++ b/content/shared/sql-reference/subqueries.md @@ -20,7 +20,7 @@ Subqueries can be used in `SELECT`, `FROM`, `WHERE`, and `HAVING` clauses. > > Query examples on this page use the following sample data sets: > -> - [Get started home sensor sample data](/influxdb/version/reference/sample-data/#get-started-home-sensor-data) +> - [Home sensor sample data](/influxdb/version/reference/sample-data/#home-sensor-data) > - [Home sensor actions sample data](/influxdb/version/reference/sample-data/#home-sensor-actions-data) > - [NOAA Bay Area weather sample data](/influxdb/version/reference/sample-data/#noaa-bay-area-weather-data) diff --git a/content/shared/v3-core-get-started/_index.md b/content/shared/v3-core-get-started/_index.md index ef4091306..d96cb7aaa 100644 --- a/content/shared/v3-core-get-started/_index.md +++ b/content/shared/v3-core-get-started/_index.md @@ -139,21 +139,21 @@ To start your InfluxDB instance, use the `influxdb3 serve` command and provide the following: - `--object-store`: Specifies the type of Object store to use. InfluxDB supports the following: local file system (`file`), `memory`, S3 (and compatible services like Ceph or Minio) (`s3`), Google Cloud Storage (`google`), and Azure Blob Storage (`azure`). 
-- `--writer-id`: A string identifier that determines the server's storage path within the configured storage location +- `--node-id`: A string identifier that determines the server's storage path within the configured storage location The following examples show how to start InfluxDB 3 with different object store configurations: ```bash # MEMORY # Stores data in RAM; doesn't persist data -influxdb3 serve --writer-id=local01 --object-store=memory +influxdb3 serve --node-id=local01 --object-store=memory ``` ```bash # FILESYSTEM # Provide the filesystem directory influxdb3 serve \ - --writer-id=local01 \ + --node-id=local01 \ --object-store=file \ --data-dir ~/.influxdb3 ``` @@ -170,7 +170,7 @@ To run the [Docker image](/influxdb3/core/install/#docker-image) and persist dat docker run -it \ -v /path/on/host:/path/in/container \ quay.io/influxdb/influxdb3-core:latest serve \ - --writer-id my_host \ + --node-id my_host \ --object-store file \ --data-dir /path/in/container ``` @@ -178,13 +178,13 @@ docker run -it \ ```bash # S3 (defaults to us-east-1 for region) # Specify the Object store type and associated options -influxdb3 serve --writer-id=local01 --object-store=s3 --bucket=[BUCKET] --aws-access-key=[AWS ACCESS KEY] --aws-secret-access-key=[AWS SECRET ACCESS KEY] +influxdb3 serve --node-id=local01 --object-store=s3 --bucket=[BUCKET] --aws-access-key=[AWS ACCESS KEY] --aws-secret-access-key=[AWS SECRET ACCESS KEY] ``` ```bash # Minio/Open Source Object Store (Uses the AWS S3 API, with additional parameters) # Specify the Object store type and associated options -influxdb3 serve --writer-id=local01 --object-store=s3 --bucket=[BUCKET] --aws-access-key=[AWS ACCESS KEY] --aws-secret-access-key=[AWS SECRET ACCESS KEY] --aws-endpoint=[ENDPOINT] --aws-allow-http +influxdb3 serve --node-id=local01 --object-store=s3 --bucket=[BUCKET] --aws-access-key=[AWS ACCESS KEY] --aws-secret-access-key=[AWS SECRET ACCESS KEY] --aws-endpoint=[ENDPOINT] --aws-allow-http ``` _For more information about server options, run `influxdb3 serve --help`._ @@ -219,7 +219,7 @@ InfluxDB is a schema-on-write database. You can start writing data and InfluxDB After a schema is created, InfluxDB validates future write requests against it before accepting the data. Subsequent requests can add new fields on-the-fly, but can't add new tags. -InfluxDB 3 Core is optimized for recent data only--it accepts writes for data with timestamps from the last 72 hours. It persists that data in Parquet files for access by third-party systems for longer term historical analysis and queries. If you require longer historical queries with a compactor that optimizes data organization, consider using [InfluxDB 3 Enterprise](/influxdb3/enterprise/get-started/). +InfluxDB 3 Core is optimized for recent data, but accepts writes from any time period. It persists that data in Parquet files for access by third-party systems for longer term historical analysis and queries. If you require longer historical queries with a compactor that optimizes data organization, consider using [InfluxDB 3 Enterprise](/influxdb3/enterprise/get-started/). The database has three write API endpoints that respond to HTTP `POST` requests: @@ -320,7 +320,9 @@ influxdb3 create -h ### Query the database -InfluxDB 3 now supports native SQL for querying, in addition to InfluxQL, an SQL-like language customized for time series queries. +InfluxDB 3 now supports native SQL for querying, in addition to InfluxQL, an +SQL-like language customized for time series queries. 
{{< product-name >}} limits +query time ranges to 72 hours (both recent and historical) to ensure query performance. > [!Note] > Flux, the language introduced in InfluxDB 2.0, is **not** supported in InfluxDB 3. @@ -335,7 +337,7 @@ The `query` subcommand includes options to help ensure that the right database i | `--database` | The name of the database to operate on | Yes | | `--token` | The authentication token for the {{% product-name %}} server | No | | `--language` | The query language of the provided query string [default: `sql`] [possible values: `sql`, `influxql`] | No | -| `--format` | The format in which to output the query [default: `pretty`] [possible values: `pretty`, `json`, `json_lines`, `csv`, `parquet`] | No | +| `--format` | The format in which to output the query [default: `pretty`] [possible values: `pretty`, `json`, `jsonl`, `csv`, `parquet`] | No | | `--output` | The path to output data to | No | #### Example: query `“SHOW TABLES”` on the `servers` database: @@ -472,19 +474,18 @@ For more information about the Python client library, see the [`influxdb3-python You can use the `influxdb3` CLI to create a last value cache. ``` -Usage: $ influxdb3 create last-cache [OPTIONS] -d -t +Usage: $ influxdb3 create last_cache [OPTIONS] -d -t
[CACHE_NAME] Options: - -h, --host URL of the running InfluxDB 3 server - -d, --database The database to run the query against - --token The token for authentication + -h, --host URL of the running InfluxDB 3 Core server [env: INFLUXDB3_HOST_URL=] + -d, --database The database to run the query against [env: INFLUXDB3_DATABASE_NAME=] + --token The token for authentication [env: INFLUXDB3_AUTH_TOKEN=] -t, --table
The table for which the cache is created - --cache-name Give a name for the cache - --help Print help information --key-columns Columns used as keys in the cache --value-columns Columns to store as values in the cache --count Number of entries per unique key:column --ttl The time-to-live for entries (seconds) + --help Print help information ``` @@ -501,7 +502,7 @@ An example of creating this cache in use: | Alpha | webserver | 2024-12-11T10:02:00 | 25.3 | Warn | ```bash -influxdb3 create last-cache --database=servers --table=cpu --cache-name=cpuCache --key-columns=host,application --value-columns=usage_percent,status --count=5 +influxdb3 create last_cache --database=servers --table=cpu --key-columns=host,application --value-columns=usage_percent,status --count=5 cpuCache ``` #### Query a Last values cache diff --git a/content/shared/v3-enterprise-get-started/_index.md b/content/shared/v3-enterprise-get-started/_index.md index e9f6b8c3b..63d6fbf9b 100644 --- a/content/shared/v3-enterprise-get-started/_index.md +++ b/content/shared/v3-enterprise-get-started/_index.md @@ -130,21 +130,21 @@ To start your InfluxDB instance, use the `influxdb3 serve` command and provide the following: - `--object-store`: Specifies the type of Object store to use. InfluxDB supports the following: local file system (`file`), `memory`, S3 (and compatible services like Ceph or Minio) (`s3`), Google Cloud Storage (`google`), and Azure Blob Storage (`azure`). -- `--writer-id`: A string identifier that determines the server's storage path within the configured storage location, and, in a multi-node setup, is used to reference the node +- `--node-id`: A string identifier that determines the server's storage path within the configured storage location, and, in a multi-node setup, is used to reference the node The following examples show how to start InfluxDB 3 with different object store configurations: ```bash # MEMORY # Stores data in RAM; doesn't persist data -influxdb3 serve --writer-id=local01 --object-store=memory +influxdb3 serve --node-id=local01 --object-store=memory ``` ```bash # FILESYSTEM # Provide the filesystem directory influxdb3 serve \ - --writer-id=local01 \ + --node-id=local01 \ --object-store=file \ --data-dir ~/.influxdb3 ``` @@ -161,7 +161,7 @@ To run the [Docker image](/influxdb3/enterprise/install/#docker-image) and persi docker run -it \ -v /path/on/host:/path/in/container \ quay.io/influxdb/influxdb3-enterprise:latest serve \ - --writer-id my_host \ + --node-id my_host \ --object-store file \ --data-dir /path/in/container ``` @@ -169,13 +169,13 @@ docker run -it \ ```bash # S3 (defaults to us-east-1 for region) # Specify the Object store type and associated options -influxdb3 serve --writer-id=local01 --object-store=s3 --bucket=[BUCKET] --aws-access-key=[AWS ACCESS KEY] --aws-secret-access-key=[AWS SECRET ACCESS KEY] +influxdb3 serve --node-id=local01 --object-store=s3 --bucket=[BUCKET] --aws-access-key=[AWS ACCESS KEY] --aws-secret-access-key=[AWS SECRET ACCESS KEY] ``` ```bash # Minio/Open Source Object Store (Uses the AWS S3 API, with additional parameters) # Specify the Object store type and associated options -influxdb3 serve --writer-id=local01 --object-store=s3 --bucket=[BUCKET] --aws-access-key=[AWS ACCESS KEY] --aws-secret-access-key=[AWS SECRET ACCESS KEY] --aws-endpoint=[ENDPOINT] --aws-allow-http +influxdb3 serve --node-id=local01 --object-store=s3 --bucket=[BUCKET] --aws-access-key=[AWS ACCESS KEY] --aws-secret-access-key=[AWS SECRET ACCESS KEY] --aws-endpoint=[ENDPOINT] 
--aws-allow-http ``` _For more information about server options, run `influxdb3 serve --help`._ @@ -330,7 +330,7 @@ The `query` subcommand includes options to help ensure that the right database i | `--database` | The name of the database to operate on | Yes | | `--token` | The authentication token for the {{% product-name %}} server | No | | `--language` | The query language of the provided query string [default: `sql`] [possible values: `sql`, `influxql`] | No | -| `--format` | The format in which to output the query [default: `pretty`] [possible values: `pretty`, `json`, `json_lines`, `csv`, `parquet`] | No | +| `--format` | The format in which to output the query [default: `pretty`] [possible values: `pretty`, `json`, `jsonl`, `csv`, `parquet`] | No | | `--output` | The path to output data to | No | #### Example: query `“SHOW TABLES”` on the `servers` database: @@ -467,19 +467,18 @@ For more information about the Python client library, see the [`influxdb3-python You can use the `influxdb3` CLI to create a last value cache. ``` -Usage: $ influxdb3 create last-cache [OPTIONS] -d -t
+Usage: $ influxdb3 create last_cache [OPTIONS] -d -t
[CACHE_NAME] Options: - -h, --host URL of the running InfluxDB 3 server - -d, --database The database to run the query against - --token The token for authentication + -h, --host URL of the running InfluxDB 3 Enterprise server [env: INFLUXDB3_HOST_URL=] + -d, --database The database to run the query against [env: INFLUXDB3_DATABASE_NAME=] + --token The token for authentication [env: INFLUXDB3_AUTH_TOKEN=] -t, --table
The table for which the cache is created - --cache-name Give a name for the cache - --help Print help information --key-columns Columns used as keys in the cache --value-columns Columns to store as values in the cache --count Number of entries per unique key:column --ttl The time-to-live for entries (seconds) + --help Print help information ``` @@ -496,7 +495,7 @@ An example of creating this cache in use: | Alpha | webserver | 2024-12-11T10:02:00 | 25.3 | Warn | ```bash -influxdb3 create last-cache --database=servers --table=cpu --cache-name=cpuCache --key-columns=host,application --value-columns=usage_percent,status --count=5 +influxdb3 create last_cache --database=servers --table=cpu --key-columns=host,application --value-columns=usage_percent,status --count=5 cpuCache ``` #### Query a Last values cache @@ -783,7 +782,7 @@ The following examples show how to configure and start two nodes for a basic HA setup. The example commands pass the following options: -- `--read-from-writer-ids`: makes the node a _read replica_, which checks the Object store for data arriving from other nodes +- `--read-from-node-ids`: makes the node a _read replica_, which checks the Object store for data arriving from other nodes - `--compactor-id`: activates the Compactor for a node. Only one node can run compaction - `--run-compactions`: ensures the Compactor runs the compaction process @@ -791,22 +790,22 @@ The example commands pass the following options: ## NODE 1 # Example variables -# writer-id: 'host01' +# node-id: 'host01' # bucket: 'influxdb-3-enterprise-storage' # compactor-id: 'c01' -influxdb3 serve --writer-id=host01 --read-from-writer-ids=host02 --compactor-id=c01 --run-compactions --object-store=s3 --bucket=influxdb-3-enterprise-storage --http-bind=0.0.0.0:8181 --aws-access-key-id= --aws-secret-access-key= +influxdb3 serve --node-id=host01 --read-from-node-ids=host02 --compactor-id=c01 --run-compactions --object-store=s3 --bucket=influxdb-3-enterprise-storage --http-bind=0.0.0.0:8181 --aws-access-key-id= --aws-secret-access-key= ``` ``` ## NODE 2 # Example variables -# writer-id: 'host02' +# node-id: 'host02' # bucket: 'influxdb-3-enterprise-storage' -influxdb3 serve --writer-id=host02 --read-from-writer-ids=host01 --object-store=s3 --bucket=influxdb-3-enterprise-storage --http-bind=0.0.0.0:8282 +influxdb3 serve --node-id=host02 --read-from-node-ids=host01 --object-store=s3 --bucket=influxdb-3-enterprise-storage --http-bind=0.0.0.0:8282 --aws-access-key-id= --aws-secret-access-key= ``` @@ -814,7 +813,7 @@ After the nodes have started, querying either node returns data for both nodes, To add nodes to this setup, start more read replicas: ```bash -influxdb3 serve --read-from-writer-ids=host01,host02 [...OPTIONS] +influxdb3 serve --read-from-node-ids=host01,host02 [...OPTIONS] ``` > [!Note] @@ -822,12 +821,12 @@ influxdb3 serve --read-from-writer-ids=host01,host02 [...OPTIONS] > > ```bash > # In terminal 1 -> influxdb3 serve --writer-id=host01 --http-bind=http://127.0.0.1:8181 [...OPTIONS] +> influxdb3 serve --node-id=host01 --http-bind=http://127.0.0.1:8181 [...OPTIONS] > ``` > > ```bash > # In terminal 2 -> influxdb3 serve --writer-id=host01 --http-bind=http://127.0.0.1:8181 [...OPTIONS] +> influxdb3 serve --node-id=host01 --http-bind=http://127.0.0.1:8181 [...OPTIONS] ### High availability with a dedicated Compactor @@ -845,20 +844,20 @@ The following examples show how to set up HA with a dedicated Compactor node: ## NODE 1 — Writer/Reader Node #1 # Example variables - # writer-id: 'host01' + # 
node-id: 'host01' # bucket: 'influxdb-3-enterprise-storage' - influxdb3 serve --writer-id=host01 --compactor-id=c01 --read-from-writer-ids=host02 --object-store=s3 --bucket=influxdb-3-enterprise-storage --http-bind=0.0.0.0:8181 --aws-access-key-id= --aws-secret-access-key= + influxdb3 serve --node-id=host01 --compactor-id=c01 --read-from-node-ids=host02 --object-store=s3 --bucket=influxdb-3-enterprise-storage --http-bind=0.0.0.0:8181 --aws-access-key-id= --aws-secret-access-key= ``` ```bash ## NODE 2 — Writer/Reader Node #2 # Example variables - # writer-id: 'host02' + # node-id: 'host02' # bucket: 'influxdb-3-enterprise-storage' - influxdb3 serve --writer-id=host02 --compactor-id=c01 --read-from-writer-ids=host01 --object-store=s3 --bucket=influxdb-3-enterprise-storage --http-bind=0.0.0.0:8282 --aws-access-key-id= --aws-secret-access-key= + influxdb3 serve --node-id=host02 --compactor-id=c01 --read-from-node-ids=host01 --object-store=s3 --bucket=influxdb-3-enterprise-storage --http-bind=0.0.0.0:8282 --aws-access-key-id= --aws-secret-access-key= ``` 2. Start the dedicated compactor node, which uses the following options: @@ -866,18 +865,18 @@ The following examples show how to set up HA with a dedicated Compactor node: - `--mode=compactor`: Ensures the node **only** runs compaction. - `--compaction-hosts`: Specifies a comma-delimited list of hosts to run compaction for. - _**Don't include the replicas (`--read-from-writer-ids`) parameter because this node doesn't replicate data._ + _**Don't include the replicas (`--read-from-node-ids`) parameter because this node doesn't replicate data._ ```bash ## NODE 3 — Compactor Node # Example variables - # writer-id: 'host03' + # node-id: 'host03' # bucket: 'influxdb-3-enterprise-storage' # compactor-id: 'c01' - influxdb3 serve --writer-id=host03 --mode=compactor --compactor-id=c01 --compaction-hosts=host01,host02 --run-compactions --object-store=s3 --bucket=influxdb-3-enterprise-storage --aws-access-key-id= --aws-secret-access-key= + influxdb3 serve --node-id=host03 --mode=compactor --compactor-id=c01 --compaction-hosts=host01,host02 --run-compactions --object-store=s3 --bucket=influxdb-3-enterprise-storage --aws-access-key-id= --aws-secret-access-key= ``` ### High availability with read replicas and a dedicated Compactor @@ -893,10 +892,10 @@ For a very robust and effective setup for managing time-series data, you can run ## NODE 1 — Writer Node #1 # Example variables - # writer-id: 'host01' + # node-id: 'host01' # bucket: 'influxdb-3-enterprise-storage' - influxdb3 serve --writer-id=host01 --mode=read_write --object-store=s3 --bucket=influxdb-3-enterprise-storage --http-bind=0.0.0.0:8181 --aws-access-key-id= --aws-secret-access-key= + influxdb3 serve --node-id=host01 --mode=read_write --object-store=s3 --bucket=influxdb-3-enterprise-storage --http-bind=0.0.0.0:8181 --aws-access-key-id= --aws-secret-access-key= ``` @@ -904,10 +903,10 @@ For a very robust and effective setup for managing time-series data, you can run ## NODE 2 — Writer Node #2 # Example variables - # writer-id: 'host02' + # node-id: 'host02' # bucket: 'influxdb-3-enterprise-storage' - Usage: $ influxdb3 serve --writer-id=host02 --mode=read_write --object-store=s3 --bucket=influxdb-3-enterprise-storage --http-bind=0.0.0.0:8282 --aws-access-key-id= --aws-secret-access-key= + Usage: $ influxdb3 serve --node-id=host02 --mode=read_write --object-store=s3 --bucket=influxdb-3-enterprise-storage --http-bind=0.0.0.0:8282 --aws-access-key-id= --aws-secret-access-key= ``` 2. 
Start the dedicated Compactor node (`--mode=compactor`) and ensure it runs compactions on the specified `compaction-hosts`. @@ -916,36 +915,36 @@ For a very robust and effective setup for managing time-series data, you can run ## NODE 3 — Compactor Node # Example variables - # writer-id: 'host03' + # node-id: 'host03' # bucket: 'influxdb-3-enterprise-storage' - influxdb3 serve --writer-id=host03 --mode=compactor --compaction-hosts=host01,host02 --run-compactions --object-store=s3 --bucket=influxdb-3-enterprise-storage --aws-access-key-id= --aws-secret-access-key= + influxdb3 serve --node-id=host03 --mode=compactor --compaction-hosts=host01,host02 --run-compactions --object-store=s3 --bucket=influxdb-3-enterprise-storage --aws-access-key-id= --aws-secret-access-key= ``` 3. Finally, start the query nodes as _read-only_. Include the following options: - `--mode=read`: Sets the node to _read-only_ - - `--read-from-writer-ids=host01,host02`: A comma-demlimited list of host IDs to read data from + - `--read-from-node-ids=host01,host02`: A comma-demlimited list of host IDs to read data from ```bash ## NODE 4 — Read Node #1 # Example variables - # writer-id: 'host04' + # node-id: 'host04' # bucket: 'influxdb-3-enterprise-storage' - influxdb3 serve --writer-id=host04 --mode=read --object-store=s3 --read-from-writer-ids=host01,host02 --bucket=influxdb-3-enterprise-storage --http-bind=0.0.0.0:8383 --aws-access-key-id= --aws-secret-access-key= + influxdb3 serve --node-id=host04 --mode=read --object-store=s3 --read-from-node-ids=host01,host02 --bucket=influxdb-3-enterprise-storage --http-bind=0.0.0.0:8383 --aws-access-key-id= --aws-secret-access-key= ``` ``` ## NODE 5 — Read Node #2 # Example variables - # writer-id: 'host05' + # node-id: 'host05' # bucket: 'influxdb-3-enterprise-storage' - influxdb3 serve --writer-id=host05 --mode=read --object-store=s3 --read-from-writer-ids=host01,host02 --bucket=influxdb-3-enterprise-storage --http-bind=0.0.0.0:8484 --aws-access-key-id= --aws-secret-access-key= + influxdb3 serve --node-id=host05 --mode=read --object-store=s3 --read-from-node-ids=host01,host02 --bucket=influxdb-3-enterprise-storage --http-bind=0.0.0.0:8484 --aws-access-key-id= --aws-secret-access-key= ``` Congratulations, you have a robust setup to workload isolation using {{% product-name %}}. @@ -983,11 +982,11 @@ This feature is only available in Enterprise and is not available in Core. # Example variables on a query # HTTP-bound Port: 8585 -influxdb3 file-index create --host=http://127.0.0.1:8585 -d -t
+influxdb3 create file_index --host=http://127.0.0.1:8585 -d -t
``` #### Delete a file index ```bash -influxdb3 file-index delete --host=http://127.0.0.1:8585 -d -t
+influxdb3 delete file_index --host=http://127.0.0.1:8585 -d -t
``` diff --git a/data/products.yml b/data/products.yml index 0aa71136e..e64de8237 100644 --- a/data/products.yml +++ b/data/products.yml @@ -37,7 +37,7 @@ influxdb3_cloud_dedicated: list_order: 3 latest: cloud-dedicated link: "https://www.influxdata.com/contact-sales-form/" - latest_cli: 2.9.8 + latest_cli: 2.9.9 placeholder_host: cluster-id.a.influxdb.io influxdb3_clustered: diff --git a/layouts/partials/article.html b/layouts/partials/article.html index 110045713..faf1170ce 100644 --- a/layouts/partials/article.html +++ b/layouts/partials/article.html @@ -1,7 +1,7 @@
-

{{ .Title }}

+

{{ .Title | .RenderString }}

{{ partial "article/supported-versions.html" . }} {{ partial "article/page-meta.html" . }}
diff --git a/layouts/partials/article/related.html b/layouts/partials/article/related.html index b02551af4..184b7f6dd 100644 --- a/layouts/partials/article/related.html +++ b/layouts/partials/article/related.html @@ -26,7 +26,7 @@ {{ else }} {{ $sanitizedPath := replaceRE `\/$` "" (print $relatedItem) }} {{ with $.Page.GetPage $sanitizedPath }} -
  • {{ .Title }}
  • +
  • {{ .Title | .RenderString }}
  • {{ end }} {{ end }} {{ end }} diff --git a/layouts/partials/article/stable-version.html b/layouts/partials/article/stable-version.html index dd89bda21..cc53ed736 100644 --- a/layouts/partials/article/stable-version.html +++ b/layouts/partials/article/stable-version.html @@ -25,7 +25,7 @@ View this page in the {{ $stableVersion }} documentation. {{ else if $stablePageExists }} - See the equivalent InfluxDB {{ $stableVersion }} documentation: {{ $stableEquivalentPage.Title }}. + See the equivalent InfluxDB {{ $stableVersion }} documentation: {{ $stableEquivalentPage.Title | .RenderString }}. {{ else }} See the InfluxDB {{ $stableVersion }} documentation. {{ end }} @@ -38,7 +38,7 @@

    {{ if $stablePageExists }} - See the equivalent InfluxDB {{ $stableVersion }} documentation: {{ $stableEquivalentPage.Title }}. + See the equivalent InfluxDB {{ $stableVersion }} documentation: {{ $stableEquivalentPage.Title | .RenderString }}. {{ else }} See the equivalent InfluxDB {{ $stableVersion }} documentation. {{ end }} diff --git a/layouts/partials/header.html b/layouts/partials/header.html index b852cbc8a..be1c71a9c 100644 --- a/layouts/partials/header.html +++ b/layouts/partials/header.html @@ -11,7 +11,7 @@ {{ partial "header/title" . }} - + diff --git a/layouts/partials/header/title.html b/layouts/partials/header/title.html index 235b38d31..e6b1c8910 100644 --- a/layouts/partials/header/title.html +++ b/layouts/partials/header/title.html @@ -54,4 +54,4 @@ {{ $pageTitle := $scratch.Get "pageTitle" }} {{ $siteTitle := $scratch.Get "siteTitle" }} -{{ $pageTitle }}{{ cond (ne (len $pageTitle) 0) " | " "" }}{{ $siteTitle }} +{{ $pageTitle | .RenderString }}{{ cond (ne (len $pageTitle) 0) " | " "" }}{{ $siteTitle }} diff --git a/layouts/shortcodes/children.html b/layouts/shortcodes/children.html index 658cae0cc..a4520df88 100644 --- a/layouts/shortcodes/children.html +++ b/layouts/shortcodes/children.html @@ -18,7 +18,7 @@ {{ if eq $type "articles" }}