diff --git a/.circleci/config.yml b/.circleci/config.yml index 1db280c45..b90ba6693 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,4 +1,4 @@ -version: 2 +version: 2.1 jobs: build: docker: @@ -41,7 +41,7 @@ jobs: - /home/circleci/bin - run: name: Hugo Build - command: npx hugo --logLevel info --minify --destination workspace/public + command: yarn hugo --environment production --logLevel info --gc --destination workspace/public - persist_to_workspace: root: workspace paths: @@ -68,7 +68,6 @@ jobs: when: on_success workflows: - version: 2 build: jobs: - build diff --git a/.context/README.md b/.context/README.md new file mode 100644 index 000000000..79b6a4459 --- /dev/null +++ b/.context/README.md @@ -0,0 +1,44 @@ +# Context Files for LLMs and AI Tools + +This directory contains plans, reports, and other context files that are: +- Used to provide context to LLMs during development +- Not committed to the repository +- May be transient or belong in other repositories + +## Directory Structure + +- `plans/` - Documentation plans and roadmaps +- `reports/` - Generated reports and analyses +- `research/` - Research notes and findings +- `templates/` - Reusable templates for Claude interactions + +## Usage + +Place files here that you want to reference--for example, using @ mentions in Claude--such as: +- Documentation planning documents +- API migration guides +- Performance reports +- Architecture decisions + +## Example Structure + +``` +.context/ +├── plans/ +│ ├── v3.2-release-plan.md +│ └── api-migration-guide.md +├── reports/ +│ ├── weekly-progress-2025-07.md +│ └── pr-summary-2025-06.md +├── research/ +│ └── competitor-analysis.md +└── templates/ + └── release-notes-template.md +``` + +## Best Practices + +1. Use descriptive filenames that indicate the content and date +2. Keep files organized in appropriate subdirectories +3. Consider using date prefixes for time-sensitive content (e.g., `2025-07-01-meeting-notes.md`) +4. 
Remove outdated files periodically to keep the context relevant \ No newline at end of file diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index 505688122..a4c0b7aaa 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -1,13 +1,16 @@ -# GitHub Copilot Instructions for InfluxData Documentation +# Instructions for InfluxData Documentation ## Purpose and scope -GitHub Copilot should help document InfluxData products by creating clear, accurate technical content with proper code examples, frontmatter, and formatting. +Help document InfluxData products by creating clear, accurate technical content with proper code examples, frontmatter, and formatting. ## Documentation structure - **Product version data**: `/data/products.yml` -- **Products**: +- **InfluxData products**: + - InfluxDB 3 Explorer + - Documentation source path: `/content/influxdb3/explorer` + - Published for the web: https://docs.influxdata.com/influxdb3/explorer/ - InfluxDB 3 Core - Documentation source path: `/content/influxdb3/core` - Published for the web: https://docs.influxdata.com/influxdb3/core/ @@ -92,7 +95,8 @@ GitHub Copilot should help document InfluxData products by creating clear, accur ## Markdown and shortcodes -- Include proper frontmatter for each page: +- Include proper frontmatter for Markdown pages in `content/**/*.md` (except for + shared content files in `content/shared/`): ```yaml title: # Page title (h1) @@ -180,3 +184,17 @@ Table: keys: [_start, _stop, _field, _measurement] ## Related repositories - **Internal documentation assistance requests**: https://github.com/influxdata/DAR/issues Documentation + +## Additional instruction files + +For specific workflows and content types, also refer to: + +- **InfluxDB 3 code placeholders**: `.github/instructions/influxdb3-code-placeholders.instructions.md` - Guidelines for placeholder formatting, descriptions, and shortcode usage in InfluxDB 3 documentation +- **Contributing 
guidelines**: `.github/instructions/contributing.instructions.md` - Detailed style guidelines, shortcode usage, frontmatter requirements, and development workflows +- **Content-specific instructions**: Check `.github/instructions/` directory for specialized guidelines covering specific documentation patterns and requirements + +## Integration with specialized instructions + +When working on InfluxDB 3 documentation (Core/Enterprise), prioritize the placeholder guidelines from `influxdb3-code-placeholders.instructions.md`. + +For general documentation structure, shortcodes, and development workflows, follow the comprehensive guidelines in `contributing.instructions.md`. diff --git a/.github/instructions/contributing.instructions.md b/.github/instructions/contributing.instructions.md index 4fb3b1efe..b5b5fdd59 100644 --- a/.github/instructions/contributing.instructions.md +++ b/.github/instructions/contributing.instructions.md @@ -2,11 +2,11 @@ applyTo: "content/**/*.md, layouts/**/*.html" --- -# GitHub Copilot Instructions for InfluxData Documentation +# Contributing instructions for InfluxData Documentation ## Purpose and scope -GitHub Copilot should help document InfluxData products +Help document InfluxData products by creating clear, accurate technical content with proper code examples, frontmatter, shortcodes, and formatting. @@ -46,184 +46,8 @@ We strongly recommend letting them run, but you can skip them (and avoid installing related dependencies) by including the `--no-verify` flag with your commit--for example, enter the following command in your terminal: -```sh +sh git commit -m "" --no-verify -``` - -### Install Node.js dependencies - -To install dependencies listed in package.json: - -1. Install [Node.js](https://nodejs.org/en) for your system. -2. Install [Yarn](https://yarnpkg.com/getting-started/install) for your system. -3. Run `yarn` to install dependencies (including Hugo). -4. 
Install the Yarn package manager and run `yarn` to install project dependencies. - -`package.json` contains dependencies used in `/assets/js` JavaScript code and -dev dependencies used in pre-commit hooks for linting, syntax-checking, and testing. - -Dev dependencies include: - -- [Lefthook](https://github.com/evilmartians/lefthook): configures and -manages git pre-commit and pre-push hooks for linting and testing Markdown content. -- [prettier](https://prettier.io/docs/en/): formats code, including Markdown, according to style rules for consistency -- [Cypress]: e2e testing for UI elements and URLs in content - -### Install Docker - -docs-v2 includes Docker configurations (`compose.yaml` and Dockerfiles) for running the Vale style linter and tests for code blocks (Shell, Bash, and Python) in Markdown files. - -Install [Docker](https://docs.docker.com/get-docker/) for your system. - -#### Build the test dependency image - -After you have installed Docker, run the following command to build the test -dependency image, `influxdata:docs-pytest`. -The tests defined in `compose.yaml` use the dependencies and execution -environment from this image. - -```bash -docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest . -``` - -### Run the documentation locally (optional) - -To run the documentation locally, follow the instructions provided in the README. - -### Install Visual Studio Code extensions - -If you use Microsoft Visual Studio (VS) Code, you can install extensions -to help you navigate, check, and edit files. - -docs-v2 contains a `./.vscode/settings.json` that configures the following extensions: - -- Comment Anchors: recognizes tags (for example, `//SOURCE`) and makes links and filepaths clickable in comments. -- Vale: shows linter errors and suggestions in the editor. -- YAML Schemas: validates frontmatter attributes. 
- -### Make your changes - -Make your suggested changes being sure to follow the [style and formatting guidelines](#style--formatting) outline below. - -## Lint and test your changes - -`package.json` contains scripts for running tests and linting. - -### Automatic pre-commit checks - -docs-v2 uses Lefthook to manage Git hooks that run during pre-commit and pre-push. The hooks run the scripts defined in `package.json` to lint Markdown and test code blocks. -When you try to commit changes (`git commit`), Git runs -the commands configured in `lefthook.yml` which pass your **staged** files to Vale, -Prettier, Cypress (for UI tests and link-checking), and Pytest (for testing Python and shell code in code blocks). - -### Skip pre-commit hooks - -**We strongly recommend running linting and tests**, but you can skip them -(and avoid installing dependencies) -by including the `LEFTHOOK=0` environment variable or the `--no-verify` flag with -your commit--for example: - -```sh -git commit -m "" --no-verify -``` - -```sh -LEFTHOOK=0 git commit -``` - -### Set up test scripts and credentials - -Tests for code blocks require your InfluxDB credentials and other typical -InfluxDB configuration. - -To set up your docs-v2 instance to run tests locally, do the following: - -1. **Set executable permissions on test scripts** in `./test/src`: - - ```sh - chmod +x ./test/src/*.sh - ``` - -2. **Create credentials for tests**: - - - Create databases, buckets, and tokens for the product(s) you're testing. - - If you don't have access to a Clustered instance, you can use your -Cloud Dedicated instance for testing in most cases. To avoid conflicts when - running tests, create separate Cloud Dedicated and Clustered databases. - -1. **Create .env.test**: Copy the `./test/env.test.example` file into each - product directory to test and rename the file as `.env.test`--for example: - - ```sh - ./content/influxdb/cloud-dedicated/.env.test - ``` - -2. 
Inside each product's `.env.test` file, assign your InfluxDB credentials to - environment variables: - - - Include the usual `INFLUX_` environment variables - - In - `cloud-dedicated/.env.test` and `clustered/.env.test` files, also define the - following variables: - - - `ACCOUNT_ID`, `CLUSTER_ID`: You can find these values in your `influxctl` - `config.toml` configuration file. - - `MANAGEMENT_TOKEN`: Use the `influxctl management create` command to generate - a long-lived management token to authenticate Management API requests - - See the substitution - patterns in `./test/src/prepare-content.sh` for the full list of variables you may need to define in your `.env.test` files. - -3. For influxctl commands to run in tests, move or copy your `config.toml` file - to the `./test` directory. - -> [!Warning] -> -> - The database you configure in `.env.test` and any written data may -be deleted during test runs. -> - Don't add your `.env.test` files to Git. To prevent accidentally adding credentials to the docs-v2 repo, -Git is configured to ignore `.env*` files. Consider backing them up on your local machine in case of accidental deletion. - -#### Test shell and python code blocks - -[pytest-codeblocks](https://github.com/nschloe/pytest-codeblocks/tree/main) extracts code from python and shell Markdown code blocks and executes assertions for the code. -If you don't assert a value (using a Python `assert` statement), `--codeblocks` considers a non-zero exit code to be a failure. - -**Note**: `pytest --codeblocks` uses Python's `subprocess.run()` to execute shell code. - -You can use this to test CLI and interpreter commands, regardless of programming -language, as long as they return standard exit codes. 
- -To make the documented output of a code block testable, precede it with the -`` tag and **omit the code block language -descriptor**--for example, in your Markdown file: - -##### Example markdown - -```python -print("Hello, world!") -``` - - - -The next code block is treated as an assertion. -If successful, the output is the following: - -``` -Hello, world! -``` - -For commands, such as `influxctl` CLI commands, that require launching an -OAuth URL in a browser, wrap the command in a subshell and redirect the output -to `/shared/urls.txt` in the container--for example: - -```sh -# Test the preceding command outside of the code block. -# influxctl authentication requires TTY interaction-- -# output the auth URL to a file that the host can open. -script -c "influxctl user list " \ - /dev/null > /shared/urls.txt -``` You probably don't want to display this syntax in the docs, which unfortunately means you'd need to include the test block separately from the displayed code @@ -236,25 +60,6 @@ pytest-codeblocks will still collect and run the code block. pytest-codeblocks has features for skipping tests and marking blocks as failed. To learn more, see the pytest-codeblocks README and tests. -#### Troubleshoot tests - -### Pytest collected 0 items - -Potential reasons: - -- See the test discovery options in `pytest.ini`. -- For Python code blocks, use the following delimiter: - - ```python - # Codeblocks runs this block. - ``` - - `pytest --codeblocks` ignores code blocks that use the following: - - ```py - # Codeblocks ignores this block. - ``` - ### Vale style linting docs-v2 includes Vale writing style linter configurations to enforce documentation writing style rules, guidelines, branding, and vocabulary terms. @@ -262,97 +67,10 @@ docs-v2 includes Vale writing style linter configurations to enforce documentati To run Vale, use the Vale extension for your editor or the included Docker configuration. 
For example, the following command runs Vale in a container and lints `*.md` (Markdown) files in the path `./content/influxdb/cloud-dedicated/write-data/` using the specified configuration for `cloud-dedicated`: -```sh -docker compose run -T vale --config=content/influxdb/cloud-dedicated/.vale.ini --minAlertLevel=error content/influxdb/cloud-dedicated/write-data/**/*.md -``` - -The output contains error-level style alerts for the Markdown content. - -**Note**: We strongly recommend running Vale, but it's not included in the -docs-v2 pre-commit hooks](#automatic-pre-commit-checks) for now. -You can include it in your own Git hooks. - -If a file contains style, spelling, or punctuation problems, -the Vale linter can raise one of the following alert levels: - -- **Error**: - - Problems that can cause content to render incorrectly - - Violations of branding guidelines or trademark guidelines - - Rejected vocabulary terms -- **Warning**: General style guide rules and best practices -- **Suggestion**: Style preferences that may require refactoring or updates to an exceptions list - -### Integrate Vale with your editor - -To integrate Vale with VSCode: - -1. Install the [Vale VSCode](https://marketplace.visualstudio.com/items?itemName=ChrisChinchilla.vale-vscode) extension. -2. In the extension settings, set the `Vale:Vale CLI:Path` value to the path of your Vale binary (`${workspaceFolder}/node_modules/.bin/vale` for Yarn-installed Vale). - -To use with an editor other than VSCode, see the [Vale integration guide](https://vale.sh/docs/integrations/guide/). - -### Configure style rules - -`/.ci/vale/styles/` contains configuration files for the custom `InfluxDataDocs` style. - -The easiest way to add accepted or rejected spellings is to enter your terms (or regular expression patterns) into the Vocabulary files at `.ci/vale/styles/config/vocabularies`. 
- -To add accepted/rejected terms for specific products, configure a style for the product and include a `Branding.yml` configuration. As an example, see `content/influxdb/cloud-dedicated/.vale.ini` and `.ci/vale/styles/Cloud-Dedicated/Branding.yml`. - -To learn more about configuration and rules, see [Vale configuration](https://vale.sh/docs/topics/config). - -### Submit a pull request - -Push your changes up to your forked repository, then [create a new pull request](https://help.github.com/articles/creating-a-pull-request/). - -## Style & Formatting - -### Markdown - -Most docs-v2 documentation content uses [Markdown](https://en.wikipedia.org/wiki/Markdown). - -_Some parts of the documentation, such as `./api-docs`, contain Markdown within YAML and rely on additional tooling._ - -### Semantic line feeds - -Use [semantic line feeds](http://rhodesmill.org/brandon/2012/one-sentence-per-line/). -Separating each sentence with a new line makes it easy to parse diffs with the human eye. - -**Diff without semantic line feeds:** - -```diff +diff -Data is taking off. This data is time series. You need a database that specializes in time series. You should check out InfluxDB. +Data is taking off. This data is time series. You need a database that specializes in time series. You need InfluxDB. -``` - -**Diff with semantic line feeds:** - -```diff -Data is taking off. -This data is time series. -You need a database that specializes in time series. --You should check out InfluxDB. -+You need InfluxDB. -``` - -### Article headings - -Use only h2-h6 headings in markdown content. -h1 headings act as the page title and are populated automatically from the `title` frontmatter. -h2-h6 headings act as section headings. - -### Image naming conventions - -Save images using the following naming format: `project/version-context-description.png`. -For example, `influxdb/2-0-visualizations-line-graph.png` or `influxdb/2-0-tasks-add-new.png`. 
-Specify a version other than 2.0 only if the image is specific to that version. - -## Page frontmatter - -Every documentation page includes frontmatter which specifies information about the page. -Frontmatter populates variables in page templates and the site's navigation menu. - -```yaml +yaml title: # Title of the page used in the page's h1 seotitle: # Page title used in the html title and used in search engine results list_title: # Title used in article lists generated using the {{< children >}} shortcode @@ -375,6 +93,9 @@ list_query_example:# Code examples included with article descriptions in childre # References to examples in data/query_examples canonical: # Path to canonical page, overrides auto-gen'd canonical URL v2: # Path to v2 equivalent page +alt_links: # Alternate pages in other products/versions for cross-product navigation + cloud-dedicated: /influxdb3/cloud-dedicated/path/to/page/ + core: /influxdb3/core/path/to/page/ prepend: # Prepend markdown content to an article (especially powerful with cascade) block: # (Optional) Wrap content in a block style (note, warn, cloud) content: # Content to prepend to article @@ -384,72 +105,12 @@ append: # Append markdown content to an article (especially powerful with cascad metadata: [] # List of metadata messages to include under the page h1 updated_in: # Product and version the referenced feature was updated in (displayed as a unique metadata) source: # Specify a file to pull page content from (typically in /content/shared/) -``` - -### Title usage - -##### `title` - -The `title` frontmatter populates each page's HTML `h1` heading tag. -It shouldn't be overly long, but should set the context for users coming from outside sources. - -##### `seotitle` - -The `seotitle` frontmatter populates each page's HTML `title` attribute. -Search engines use this in search results (not the page's h1) and therefore it should be keyword optimized. 
- -##### `list_title` - -The `list_title` frontmatter determines an article title when in a list generated -by the [`{{< children >}}` shortcode](#generate-a-list-of-children-articles). - -##### `menu > name` - -The `name` attribute under the `menu` frontmatter determines the text used in each page's link in the site navigation. -It should be short and assume the context of its parent if it has one. - -#### Page Weights - -To ensure pages are sorted both by weight and their depth in the directory -structure, pages should be weighted in "levels." -All top level pages are weighted 1-99. -The next level is 101-199. -Then 201-299 and so on. - -_**Note:** `_index.md` files should be weighted one level up from the other `.md` files in the same directory._ - -### Related content - -Use the `related` frontmatter to include links to specific articles at the bottom of an article. - -- If the page exists inside of this documentation, just include the path to the page. - It will automatically detect the title of the page. -- If the page exists inside of this documentation, but you want to customize the link text, - include the path to the page followed by a comma, and then the custom link text. - The path and custom text must be in that order and separated by a comma and a space. -- If the page exists outside of this documentation, include the full URL and a title for the link. - The link and title must be in that order and separated by a comma and a space. - -```yaml +yaml related: - /v2.0/write-data/quick-start - /v2.0/write-data/quick-start, This is custom text for an internal link - https://influxdata.com, This is an external link -``` - -### Canonical URLs - -Search engines use canonical URLs to accurately rank pages with similar or identical content. -The `canonical` HTML meta tag identifies which page should be used as the source of truth. 
- -By default, canonical URLs are automatically generated for each page in the InfluxData -documentation using the latest version of the current product and the current path. - -Use the `canonical` frontmatter to override the auto-generated canonical URL. - -_**Note:** The `canonical` frontmatter supports the [`{{< latest >}}` shortcode](#latest-links)._ - -```yaml +yaml canonical: /path/to/canonical/doc/ # OR @@ -466,6 +127,29 @@ add the following frontmatter to the 1.x page: v2: /influxdb/v2.0/get-started/ ``` +### Alternative links for cross-product navigation + +Use the `alt_links` frontmatter to specify equivalent pages in other InfluxDB products, +for example, when a page exists at a different path in a different version or if +the feature doesn't exist in that product. +This enables the product switcher to navigate users to the corresponding page when they +switch between products. If a page doesn't exist in another product (for example, an +Enterprise-only feature), point to the nearest parent page if relevant. + +```yaml +alt_links: + cloud-dedicated: /influxdb3/cloud-dedicated/admin/tokens/create-token/ + cloud-serverless: /influxdb3/cloud-serverless/admin/tokens/create-token/ + core: /influxdb3/core/reference/cli/influxdb3/update/ # Points to parent if exact page doesn't exist +``` + +Supported product keys for InfluxDB 3: +- `core` +- `enterprise` +- `cloud-serverless` +- `cloud-dedicated` +- `clustered` + ### Prepend and append content to a page Use the `prepend` and `append` frontmatter to add content to the top or bottom of a page. @@ -481,47 +165,7 @@ append: | Use this frontmatter with [cascade](#cascade) to add the same content to all children pages as well. -```yaml -cascade: - append: | - > [!Note] - > #### This is example markdown content - > This is just an example note block that gets appended to the article. 
-``` - -### Cascade - -To automatically apply frontmatter to a page and all of its children, use the -[`cascade` frontmatter](https://gohugo.io/content-management/front-matter/#front-matter-cascade) -built in into Hugo. - -```yaml -title: Example page -description: Example description -cascade: - layout: custom-layout -``` - -`cascade` applies the frontmatter to all children unless the child already includes -those frontmatter keys. Frontmatter defined on the page overrides frontmatter -"cascaded" from a parent. - -## Use shared content in a page - -Use the `source` frontmatter to specify a shared file to use to populate the -page content. Shared files are typically stored in the `/content/shared` directory. - -When building shared content, use the `show-in` and `hide-in` shortcodes to show -or hide blocks of content based on the current InfluxDB product/version. -For more information, see [show-in](#show-in) and [hide-in](#hide-in). - -## Shortcodes - -### Notes and warnings - -Shortcodes are available for formatting notes and warnings in each article: - -```md +md {{% note %}} Insert note markdown content here. {{% /note %}} @@ -529,148 +173,18 @@ Insert note markdown content here. {{% warn %}} Insert warning markdown content here. {{% /warn %}} -``` -### Product data - -Display the full product name and version name for the current page--for example: - -- InfluxDB 3 Core -- InfluxDB 3 Cloud Dedicated - -```md -{{% product-name %}} -``` - -Display the short version name (part of the key used in `products.yml`) from the current page URL--for example: - -- `/influxdb3/core` returns `core` - -```md -{{% product-key %}} -``` - -#### Enterprise name - -The name used to refer to InfluxData's enterprise offering is subject to change. -To facilitate easy updates in the future, use the `enterprise-name` shortcode -when referencing the enterprise product. -This shortcode accepts a `"short"` parameter which uses the "short-name". 
- -``` This is content that references {{< enterprise-name >}}. This is content that references {{< enterprise-name "short" >}}. -``` - -Product names are stored in `data/products.yml`. - -#### Enterprise link - -References to InfluxDB Enterprise are often accompanied with a link to a page where -visitors can get more information about the Enterprise offering. -This link is subject to change. -Use the `enterprise-link` shortcode when including links to more information about -InfluxDB Enterprise. - -``` -Find more info [here][{{< enterprise-link >}}] -``` - -### Latest patch version - -Use the `{{< latest-patch >}}` shortcode to add the latest patch version of a product. -By default, this shortcode parses the product and minor version from the URL. -To specify a specific product and minor version, use the `product` and `version` arguments. -Easier to maintain being you update the version number in the `data/products.yml` file instead of updating individual links and code examples. - -```md +md {{< latest-patch >}} {{< latest-patch product="telegraf" >}} {{< latest-patch product="chronograf" version="1.7" >}} -``` - -### Latest influx CLI version - -Use the `{{< latest-patch cli=true >}}` shortcode to add the latest version of the `influx` -CLI supported by the minor version of InfluxDB. -By default, this shortcode parses the minor version from the URL. -To specify a specific minor version, use the `version` argument. -Maintain CLI version numbers in the `data/products.yml` file instead of updating individual links and code examples. - -```md -{{< latest-patch cli=true >}} - -{{< latest-cli version="2.1" >}} -``` - -### API endpoint - -Use the `{{< api-endpoint >}}` shortcode to generate a code block that contains -a colored request method, a specified API endpoint, and an optional link to -the API reference documentation. 
-Provide the following arguments: - -- **method**: HTTP request method (get, post, patch, put, or delete) -- **endpoint**: API endpoint -- **api-ref**: Link the endpoint to a specific place in the API documentation -- **influxdb_host**: Specify which InfluxDB product host to use - _if the `endpoint` contains the `influxdb/host` shortcode_. - Uses the current InfluxDB product as default. - Supports the following product values: - - - oss - - cloud - - serverless - - dedicated - - clustered - -```md +md {{< api-endpoint method="get" endpoint="/api/v2/tasks" api-ref="/influxdb/cloud/api/#operation/GetTasks">}} -``` - -```md -{{< api-endpoint method="get" endpoint="{{< influxdb/host >}}/api/v2/tasks" influxdb_host="cloud">}} -``` - -### Tabbed Content - -To create "tabbed" content (content that is changed by a users' selection), use the following three shortcodes in combination: - -`{{< tabs-wrapper >}}` -This shortcode creates a wrapper or container for the tabbed content. -All UI interactions are limited to the scope of each container. -If you have more than one "group" of tabbed content in a page, each needs its own `tabs-wrapper`. -This shortcode must be closed with `{{< /tabs-wrapper >}}`. - -**Note**: The `<` and `>` characters used in this shortcode indicate that the contents should be processed as HTML. - -`{{% tabs %}}` -This shortcode creates a container for buttons that control the display of tabbed content. -It should contain simple markdown links with anonymous anchors (`#`). -The link text is used as the button text. -This shortcode must be closed with `{{% /tabs %}}`. - -**Note**: The `%` characters used in this shortcode indicate that the contents should be processed as Markdown. - -The `{{% tabs %}}` shortcode has an optional `style` argument that lets you -assign CSS classes to the tags HTML container. The following classes are available: - -- **small**: Tab buttons are smaller and don't scale to fit the width. 
-- **even-wrap**: Prevents uneven tab widths when tabs are forced to wrap. - -`{{% tab-content %}}` -This shortcode creates a container for a content block. -Each content block in the tab group needs to be wrapped in this shortcode. -**The number of `tab-content` blocks must match the number of links provided in the `tabs` shortcode** -This shortcode must be closed with `{{% /tab-content %}}`. - -**Note**: The `%` characters used in this shortcode indicate that the contents should be processed as Markdown. - -#### Example tabbed content group - -```md +md {{< tabs-wrapper >}} {{% tabs %}} @@ -687,592 +201,77 @@ Markdown content for tab 2. {{% /tab-content %}} {{< /tabs-wrapper >}} -``` - -#### Tabbed code blocks - -Shortcodes are also available for tabbed code blocks primarily used to give users -the option to choose between different languages and syntax. -The shortcode structure is the same as above, but the shortcode names are different: - -`{{< code-tabs-wrapper >}}` -`{{% code-tabs %}}` -`{{% code-tab-content %}}` - -````md -{{< code-tabs-wrapper >}} - -{{% code-tabs %}} -[Flux](#) -[InfluxQL](#) -{{% /code-tabs %}} - -{{% code-tab-content %}} - -```js -data = from(bucket: "example-bucket") - |> range(start: -15m) - |> filter(fn: (r) => - r._measurement == "mem" and - r._field == "used_percent" - ) -``` {{% /code-tab-content %}} {{% code-tab-content %}} -```sql -SELECT "used_percent" -FROM "telegraf"."autogen"."mem" -WHERE time > now() - 15m -``` - -{{% /code-tab-content %}} - -{{< /code-tabs-wrapper >}} -```` - -#### Link to tabbed content - -To link to tabbed content, click on the tab and use the URL parameter shown. -It will have the form `?t=`, plus a string. -For example: - -``` -[Windows installation](/influxdb/v2.0/install/?t=Windows) -``` - -### Required elements - -Use the `{{< req >}}` shortcode to identify required elements in documentation with -orange text and/or asterisks. 
By default, the shortcode outputs the text, "Required," but -you can customize the text by passing a string argument with the shortcode. - -```md +md {{< req >}} -``` - -**Output:** Required - -```md -{{< req "This is Required" >}} -``` - -**Output:** This is required - -If using other named arguments like `key` or `color`, use the `text` argument to -customize the text of the required message. - -```md -{{< req text="Required if ..." color="blue" type="key" >}} -``` - -#### Required elements in a list - -When identifying required elements in a list, use `{{< req type="key" >}}` to generate -a "\* Required" key before the list. For required elements in the list, include -{{< req "\*" >}} before the text of the list item. For example: - -```md +md {{< req type="key" >}} - {{< req "\*" >}} **This element is required** - {{< req "\*" >}} **This element is also required** - **This element is NOT required** -``` +md -#### Change color of required text - -Use the `color` argument to change the color of required text. -The following colors are available: - -- blue -- green -- magenta - -```md -{{< req color="magenta" text="This is required" >}} -``` - -### Page navigation buttons - -Use the `{{< page-nav >}}` shortcode to add page navigation buttons to a page. -These are useful for guiding users through a set of docs that should be read in sequential order. -The shortcode has the following parameters: - -- **prev:** path of the previous document _(optional)_ -- **next:** path of the next document _(optional)_ -- **prevText:** override the button text linking to the previous document _(optional)_ -- **nextText:** override the button text linking to the next document _(optional)_ -- **keepTab:** include the currently selected tab in the button link _(optional)_ - -The shortcode generates buttons that link to both the previous and next documents. 
-By default, the shortcode uses either the `list_title` or the `title` of the linked -document, but you can use `prevText` and `nextText` to override button text. - -```md - {{ page-nav prev="/path/to/prev/" next="/path/to/next" >}} - + {{ page-nav prev="/path/to/prev/" prevText="Previous" next="/path/to/next" nextText="Next" >}} - + {{ page-nav prev="/path/to/prev/" next="/path/to/next" keepTab=true>}} -``` - -### Keybinds - -Use the `{{< keybind >}}` shortcode to include OS-specific keybindings/hotkeys. -The following parameters are available: - -- mac -- linux -- win -- all -- other - -```md - - -{{< keybind mac="⇧⌘P" other="Ctrl+Shift+P" >}} - - - -{{< keybind all="Ctrl+Shift+P" >}} - - - -{{< keybind mac="⇧⌘P" linux="Ctrl+Shift+P" win="Ctrl+Shift+Alt+P" >}} -``` - -### Diagrams - -Use the `{{< diagram >}}` shortcode to dynamically build diagrams. -The shortcode uses [mermaid.js](https://github.com/mermaid-js/mermaid) to convert -simple text into SVG diagrams. -For information about the syntax, see the [mermaid.js documentation](https://mermaid-js.github.io/mermaid/#/). - -```md +md {{< diagram >}} flowchart TB This --> That That --> There {{< /diagram >}} -``` - -### File system diagrams - -Use the `{{< filesystem-diagram >}}` shortcode to create a styled file system -diagram using a Markdown unordered list. - -##### Example filesystem diagram shortcode - -```md -{{< filesystem-diagram >}} - -- Dir1/ -- Dir2/ - - ChildDir/ - - Child - - Child -- Dir3/ - {{< /filesystem-diagram >}} -``` - -### High-resolution images - -In many cases, screenshots included in the docs are taken from high-resolution (retina) screens. -Because of this, the actual pixel dimension is 2x larger than it needs to be and is rendered 2x bigger than it should be. -The following shortcode automatically sets a fixed width on the image using half of its actual pixel dimension. 
-This preserves the detail of the image and renders it at a size where there should be little to no "blur" -cause by browser image resizing. - -```html +html {{< img-hd src="/path/to/image" alt="Alternate title" />}} -``` - -###### Notes - -- This should only be used on screenshots takes from high-resolution screens. -- The `src` should be relative to the `static` directory. -- Image widths are limited to the width of the article content container and will scale accordingly, - even with the `width` explicitly set. - -### Truncated content blocks - -In some cases, it may be appropriate to shorten or truncate blocks of content. -Use cases include long examples of output data or tall images. -The following shortcode truncates blocks of content and allows users to opt into -to seeing the full content block. - -```md +md {{% truncate %}} Truncated markdown content here. {{% /truncate %}} -``` - -### Expandable accordion content blocks - -Use the `{{% expand "Item label" %}}` shortcode to create expandable, accordion-style content blocks. -Each expandable block needs a label that users can click to expand or collapse the content block. -Pass the label as a string to the shortcode. - -```md -{{% expand "Label 1" %}} -Markdown content associated with label 1. -{{% /expand %}} - -{{% expand "Label 2" %}} -Markdown content associated with label 2. -{{% /expand %}} - -{{% expand "Label 3" %}} -Markdown content associated with label 3. -{{% /expand %}} -``` Use the optional `{{< expand-wrapper >}}` shortcode around a group of `{{% expand %}}` shortcodes to ensure proper spacing around the expandable elements: -```md -{{< expand-wrapper >}} -{{% expand "Label 1" %}} -Markdown content associated with label 1. -{{% /expand %}} - -{{% expand "Label 2" %}} -Markdown content associated with label 2. -{{% /expand %}} -{{< /expand-wrapper >}} -``` - -### Captions - -Use the `{{% caption %}}` shortcode to add captions to images and code blocks. 
-Captions are styled with a smaller font size, italic text, slight transparency, -and appear directly under the previous image or code block. - -```md -{{% caption %}} -Markdown content for the caption. -{{% /caption %}} -``` - ### Generate a list of children articles Section landing pages often contain just a list of articles with links and descriptions for each. This can be cumbersome to maintain as content is added. To automate the listing of articles in a section, use the `{{< children >}}` shortcode. -```md -{{< children >}} -``` - -The children shortcode can also be used to list only "section" articles (those with their own children), -or only "page" articles (those with no children) using the `show` argument: - -```md -{{< children show="sections" >}} - - - -{{< children show="pages" >}} -``` - -_By default, it displays both sections and pages._ - -Use the `type` argument to specify the format of the children list. - -```md -{{< children type="functions" >}} -``` - -The following list types are available: - -- **articles:** lists article titles as headers with the description or summary - of the article as a paragraph. Article headers link to the articles. -- **list:** lists children article links in an unordered list. -- **anchored-list:** lists anchored children article links in an unordered list - meant to act as a page navigation and link to children header. -- **functions:** a special use-case designed for listing Flux functions. - -#### Include a "Read more" link - -To include a "Read more" link with each child summary, set `readmore=true`. -_Only the `articles` list type supports "Read more" links._ - -```md +md {{< children readmore=true >}} -``` - -#### Include a horizontal rule - -To include a horizontal rule after each child summary, set `hr=true`. 
-_Only the `articles` list type supports horizontal rules._ - -```md -{{< children hr=true >}} -``` - -#### Include a code example with a child summary - -Use the `list_code_example` frontmatter to provide a code example with an article -in an articles list. - -````yaml -list_code_example: | - ```sh - This is a code example - ``` -```` - -#### Organize and include native code examples - -To include text from a file in `/shared/text/`, use the -`{{< get-shared-text >}}` shortcode and provide the relative path and filename. - -This is useful for maintaining and referencing sample code variants in their -native file formats. - -1. Store code examples in their native formats at `/shared/text/`. - -```md +md /shared/text/example1/example.js /shared/text/example1/example.py -``` - -2. Include the files--for example, in code tabs: - - ````md - {{% code-tabs-wrapper %}} - {{% code-tabs %}} - [Javascript](#js) - [Python](#py) - {{% /code-tabs %}} - {{% code-tab-content %}} - - ```js - {{< get-shared-text "example1/example.js" >}} - ``` - - {{% /code-tab-content %}} - {{% code-tab-content %}} - - ```py - {{< get-shared-text "example1/example.py" >}} - ``` - - {{% /code-tab-content %}} - {{% /code-tabs-wrapper %}} - ```` - -#### Include specific files from the same directory - -To include the text from one file in another file in the same -directory, use the `{{< get-leaf-text >}}` shortcode. -The directory that contains both files must be a -Hugo [_Leaf Bundle_](https://gohugo.io/content-management/page-bundles/#leaf-bundles), -a directory that doesn't have any child directories. - -In the following example, `api` is a leaf bundle. `content` isn't. 
- -```md +md content | |--- api | query.pdmc | query.sh | \_index.md -``` - -##### query.pdmc - -```md -# Query examples -``` - -##### query.sh - -```md -curl https://localhost:8086/query -``` - -To include `query.sh` and `query.pdmc` in `api/_index.md`, use the following code: - -````md -{{< get-leaf-text "query.pdmc" >}} - -# Curl example - -```sh -{{< get-leaf-text "query.sh" >}} -``` -```` - -Avoid using the following file extensions when naming included text files since Hugo interprets these as markup languages: -`.ad`, `.adoc`, `.asciidoc`, `.htm`, `.html`, `.markdown`, `.md`, `.mdown`, `.mmark`, `.pandoc`, `.pdc`, `.org`, or `.rst`. - -#### Reference a query example in children - -To include a query example with the children in your list, update `data/query_examples.yml` -with the example code, input, and output, and use the `list_query_example` -frontmatter to reference the corresponding example. - -```yaml +yaml list_query_example: cumulative_sum -``` - -#### Children frontmatter - -Each children list `type` uses [frontmatter properties](#page-frontmatter) when generating the list of articles. -The following table shows which children types use which frontmatter properties: - -| Frontmatter | articles | list | functions | -| :------------------- | :------: | :--: | :-------: | -| `list_title` | ✓ | ✓ | ✓ | -| `description` | ✓ | | | -| `external_url` | ✓ | ✓ | | -| `list_image` | ✓ | | | -| `list_note` | | ✓ | | -| `list_code_example` | ✓ | | | -| `list_query_example` | ✓ | | | - -### Authentication token link - -Use the `{{% token-link "" "%}}` shortcode to -automatically generate links to token management documentation. The shortcode -accepts two _optional_ arguments: - -- **descriptor**: An optional token descriptor -- **link_append**: An optional path to append to the token management link path, - `///admin/tokens/`. 
- -```md +md {{% token-link "database" "resource/" }} - + [database token](/influxdb3/enterprise/admin/tokens/resource/) -``` -InfluxDB 3 Enterprise and InfluxDB 3 Core support different kinds of tokens. -The shortcode has a blacklist of token descriptors for each that will prevent -unsupported descriptors from appearing in the rendered output based on the -current product. - -### Inline icons - -The `icon` shortcode allows you to inject icons in paragraph text. -It's meant to clarify references to specific elements in the InfluxDB user interface. -This shortcode supports Clockface (the UI) v2 and v3. -Specify the version to use as the second argument. The default version is `v3`. - -``` {{< icon "icon-name" "v2" >}} -``` -Below is a list of available icons (some are aliases): - -- add-cell -- add-label -- alert -- calendar -- chat -- checkmark -- clone -- cloud -- cog -- config -- copy -- dashboard -- dashboards -- data-explorer -- delete -- download -- duplicate -- edit -- expand -- export -- eye -- eye-closed -- eye-open -- feedback -- fullscreen -- gear -- graph -- hide -- influx -- influx-icon -- nav-admin -- nav-config -- nav-configuration -- nav-dashboards -- nav-data-explorer -- nav-organizations -- nav-orgs -- nav-tasks -- note -- notebook -- notebooks -- org -- orgs -- pause -- pencil -- play -- plus -- refresh -- remove -- replay -- save-as -- search -- settings -- tasks -- toggle -- trash -- trashcan -- triangle -- view -- wrench -- x - -### InfluxDB UI left navigation icons - -In many cases, documentation references an item in the left nav of the InfluxDB UI. -Provide a visual example of the navigation item using the `nav-icon` shortcode. -This shortcode supports Clockface (the UI) v2 and v3. -Specify the version to use as the second argument. The default version is `v3`. 
- -``` {{< nav-icon "tasks" "v2" >}} -``` - -The following case insensitive values are supported: - -- admin, influx -- data-explorer, data explorer -- notebooks, books -- dashboards -- tasks -- monitor, alerts, bell -- cloud, usage -- data, load data, load-data -- settings -- feedback - -### Flexbox-formatted content blocks - -CSS Flexbox formatting lets you create columns in article content that adjust and -flow based on the viewable width. -In article content, this helps if you have narrow tables that could be displayed -side-by-side, rather than stacked vertically. -Use the `{{< flex >}}` shortcode to create the Flexbox wrapper. -Use the `{{% flex-content %}}` shortcode to identify each column content block. - -```md +md {{< flex >}} {{% flex-content %}} Column 1 @@ -1281,30 +280,6 @@ Column 1 Column 2 {{% /flex-content %}} {{< /flex >}} -``` - -`{{% flex-content %}}` has an optional width argument that determines the maximum -width of the column. - -```md -{{% flex-content "half" %}} -``` - -The following options are available: - -- half _(Default)_ -- third -- quarter - -### Tooltips - -Use the `{{< tooltip >}}` shortcode to add tooltips to text. -The **first** argument is the text shown in the tooltip. -The **second** argument is the highlighted text that triggers the tooltip. - -```md -I like {{< tooltip "Butterflies are awesome!" "butterflies" >}}. -``` The rendered output is "I like butterflies" with "butterflies" highlighted. When you hover over "butterflies," a tooltip appears with the text: "Butterflies are awesome!" @@ -1342,28 +317,7 @@ This is only recommended when showing how functions that require a time range (such as `window()`) operate on input data. Use either `includeRange` argument name or provide the boolean value as the third argument. 
-##### Example Flux sample data shortcodes - -```md - - -{{% flux/sample %}} - - - -{{% flux/sample set="string" includeNull=false %}} - - - -{{% flux/sample "int" true %}} - - - - -{{% flux/sample set="int" includeNull=true includeRange=true %}} -{{% flux/sample "int" true true %}} -``` - +# ### Duplicate OSS content in Cloud Docs for InfluxDB OSS and InfluxDB Cloud share a majority of content. @@ -1373,97 +327,14 @@ To prevent duplication of content between versions, use the following shortcodes - `{{% oss-only %}}` - `{{% cloud-only %}}` -#### duplicate-oss - -The `{{< duplicate-oss >}}` shortcode copies the page content of the file located -at the identical file path in the most recent InfluxDB OSS version. -The Cloud version of this markdown file should contain the frontmatter required -for all pages, but the body content should just be the `{{< duplicate-oss >}}` shortcode. - -#### oss-only - -Wrap content that should only appear in the OSS version of the doc with the `{{% oss-only %}}` shortcode. +[Similar patterns apply - see full CONTRIBUTING.md for complete examples]}` shortcode. Use the shortcode on both inline and content blocks: -```md -{{% oss-only %}}This is inline content that only renders in the InfluxDB OSS docs{{% /oss-only %}} - -{{% oss-only %}} - -This is a multi-paragraph content block that spans multiple paragraphs and will -only render in the InfluxDB OSS documentation. - -**Note:** Notice the blank newline after the opening short-code tag. -This is necessary to get the first sentence/paragraph to render correctly. - -{{% /oss-only %}} - -- {{% oss-only %}}This is a list item that will only render in InfluxDB OSS docs.{{% /oss-only %}} -- {{% oss-only %}} - - This is a multi-paragraph list item that will only render in the InfluxDB OSS docs. - - **Note:** Notice shortcode is _inside_ of the line item. - There also must be blank newline after the opening short-code tag. 
- This is necessary to get the first sentence/paragraph to render correctly. - - {{% /oss-only %}} - -1. Step 1 -2. {{% oss-only %}}This is a list item that will only render in InfluxDB OSS docs.{{% /oss-only %}} -3. {{% oss-only %}} - - This is a list item that contains multiple paragraphs or nested list items and will only render in the InfluxDB OSS docs. - - **Note:** Notice shortcode is _inside_ of the line item. - There also must be blank newline after the opening short-code tag. - This is necessary to get the first sentence/paragraph to render correctly. - - {{% /oss-only %}} -``` - #### cloud-only Wrap content that should only appear in the Cloud version of the doc with the `{{% cloud-only %}}` shortcode. Use the shortcode on both inline and content blocks: -```md -{{% cloud-only %}}This is inline content that only renders in the InfluxDB Cloud docs{{% /cloud-only %}} - -{{% cloud-only %}} - -This is a multi-paragraph content block that spans multiple paragraphs and will -only render in the InfluxDB Cloud documentation. - -**Note:** Notice the blank newline after the opening short-code tag. -This is necessary to get the first sentence/paragraph to render correctly. - -{{% /cloud-only %}} - -- {{% cloud-only %}}This is a list item that will only render in InfluxDB Cloud docs.{{% /cloud-only %}} -- {{% cloud-only %}} - - This is a list item that contains multiple paragraphs or nested list items and will only render in the InfluxDB Cloud docs. - - **Note:** Notice shortcode is _inside_ of the line item. - There also must be blank newline after the opening short-code tag. - This is necessary to get the first sentence/paragraph to render correctly. - - {{% /cloud-only %}} - -1. Step 1 -2. {{% cloud-only %}}This is a list item that will only render in InfluxDB Cloud docs.{{% /cloud-only %}} -3. {{% cloud-only %}} - - This is a multi-paragraph list item that will only render in the InfluxDB Cloud docs. - - **Note:** Notice shortcode is _inside_ of the line item. 
- There also must be blank newline after the opening short-code tag. - This is necessary to get the first sentence/paragraph to render correctly. - - {{% /cloud-only %}} -``` - ### Show or hide content blocks in shared content The `source` frontmatter lets you source page content from another file and is @@ -1485,107 +356,22 @@ The `show-in` shortcode accepts a comma-delimited string of InfluxDB "versions" to show the content block in. The version is the second level of the page path--for example: `/influxdb//...`. -```md -{{% show-in "core,enterprise" %}} - -This content will appear in pages in the InfluxDB 3 Core and InfluxDB 3 Enterprise -documentation, but not any other InfluxDB documentation this content is shared in. - -{{% /show-in %}} -``` - -#### hide-in - -The `hide-in` shortcode accepts a comma-delimited string of InfluxDB "versions" -to hide the content block in. The version is the second level of the page -path--for example: `/influxdb//...`. - -```md -{{% hide-in "core,enterprise" %}} - -This content will not appear in pages in the InfluxDB 3 Core and InfluxDB 3 -Enterprise documentation, but will in all other InfluxDB documentation this -content is shared in. - -{{% /hide-in %}} -``` - ### All-Caps Clockface v3 introduces many buttons with text formatted as all-caps. Use the `{{< caps >}}` shortcode to format text to match those buttons. -```md -Click {{< caps >}}Add Data{{< /caps >}} -``` +html -### Code callouts - -Use the `{{< code-callout >}}` shortcode to highlight and emphasize a specific -piece of code (for example, a variable, placeholder, or value) in a code block. -Provide the string to highlight in the code block. -Include a syntax for the codeblock to properly style the called out code. - -````md -{{< code-callout "03a2bbf46249a000" >}} - -```sh -http://localhost:8086/orgs/03a2bbf46249a000/... 
-``` - -{{< /code-callout >}} -```` - -### InfluxDB University banners - -Use the `{{< influxdbu >}}` shortcode to add an InfluxDB University banner that -points to the InfluxDB University site or a specific course. -Use the default banner template, a predefined course template, or fully customize -the content of the banner. - -```html - {{< influxdbu >}} - + {{< influxdbu "influxdb-101" >}} - + {{< influxdbu title="Course title" summary="Short course summary." action="Take the course" link="https://university.influxdata.com/" >}} -``` - -#### Course templates - -Use one of the following course templates: - -- influxdb-101 -- telegraf-102 -- flux-103 - -#### Custom banner content - -Use the following shortcode parameters to customize the content of the InfluxDB -University banner: - -- **title**: Course or banner title -- **summary**: Short description shown under the title -- **action**: Text of the button -- **link**: URL the button links to - -### Reference content - -The InfluxDB documentation is "task-based," meaning content primarily focuses on -what a user is **doing**, not what they are **using**. -However, there is a need to document tools and other things that don't necessarily -fit in the task-based style. -This is referred to as "reference content." - -Reference content is styled just as the rest of the InfluxDB documentation. -The only difference is the `menu` reference in the page's frontmatter. -When defining the menu for reference content, use the following pattern: - -```yaml +yaml # Pattern menu: ___ref: @@ -1595,111 +381,49 @@ menu: menu: influxdb_2_0_ref: # ... -``` -## InfluxDB URLs +` -When a user selects an InfluxDB product and region, example URLs in code blocks -throughout the documentation are updated to match their product and region. -InfluxDB URLs are configured in `/data/influxdb_urls.yml`. - -By default, the InfluxDB URL replaced inside of code blocks is `http://localhost:8086`. 
-Use this URL in all code examples that should be updated with a selected provider and region. - -For example: - -```` -```sh -# This URL will get updated -http://localhost:8086 - -# This URL will NOT get updated -http://example.com -``` -```` - -If the user selects the **US West (Oregon)** region, all occurrences of `http://localhost:8086` -in code blocks will get updated to `https://us-west-2-1.aws.cloud2.influxdata.com`. - -### Exempt URLs from getting updated - -To exempt a code block from being updated, include the `{{< keep-url >}}` shortcode -just before the code block. - -```` -{{< keep-url >}} -``` -// This URL won't get updated -http://localhost:8086 -``` -```` - -### Code examples only supported in InfluxDB Cloud - -Some functionality is only supported in InfluxDB Cloud and code examples should -only use InfluxDB Cloud URLs. In these cases, use `https://cloud2.influxdata.com` -as the placeholder in the code block. It will get updated on page load and when -users select a Cloud region in the URL select modal. - -```` -```sh -# This URL will get updated -https://cloud2.influxdata.com -``` -```` - -### Automatically populate InfluxDB host placeholder - -The InfluxDB host placeholder that gets replaced by custom domains differs -between each InfluxDB product/version. -Use the `influxdb/host` shortcode to automatically render the correct -host placeholder value for the current product. You can also pass a single -argument to specify a specific InfluxDB product to use. -Supported argument values: - -- oss -- cloud -- cloud-serverless -- cloud-dedicated -- clustered -- core -- enterprise - -``` {{< influxdb/host >}} {{< influxdb/host "serverless" >}} -``` -### User-populated placeholders - -Use the `code-placeholders` shortcode to format placeholders -as text fields that users can populate with their own values. -The shortcode takes a regular expression for matching placeholder names. 
-Use the `code-placeholder-key` shortcode to format the placeholder names in -text that describes the placeholder--for example: - -``` -{{% code-placeholders "DATABASE_NAME|USERNAME|PASSWORD_OR_TOKEN|API_TOKEN|exampleuser@influxdata.com" %}} -```sh -curl --request POST http://localhost:8086/write?db=DATABASE_NAME \ - --header "Authorization: Token API_TOKEN" \ - --data-binary @path/to/line-protocol.txt -``` {{% /code-placeholders %}} Replace the following: - {{% code-placeholder-key %}}`DATABASE_NAME` and `RETENTION_POLICY`{{% /code-placeholder-key %}}: the [database and retention policy mapping (DBRP)](/influxdb/v2/reference/api/influxdb-1x/dbrp/) for the InfluxDB v2 bucket that you want to write to -- {{% code-placeholder-key %}}`USERNAME`{{% /code-placeholder-key %}}: your [InfluxDB 1.x username](/influxdb/v2/reference/api/influxdb-1x/#manage-credentials) +- {{% code-placeholder-key %} + +[Similar patterns apply - see full CONTRIBUTING.md for complete examples]}`USERNAME`{{% /code-placeholder-key %}}: your [InfluxDB 1.x username](/influxdb/v2/reference/api/influxdb-1x/#manage-credentials) - {{% code-placeholder-key %}}`PASSWORD_OR_TOKEN`{{% /code-placeholder-key %}}: your [InfluxDB 1.x password or InfluxDB API token](/influxdb/v2/reference/api/influxdb-1x/#manage-credentials) - {{% code-placeholder-key %}}`API_TOKEN`{{% /code-placeholder-key %}}: your [InfluxDB API token](/influxdb/v2/admin/tokens/) -``` +html +
+ js + import { debugLog, debugBreak, debugInspect } from './utils/debug-helpers.js'; -## InfluxDB API documentation + const data = debugInspect(someData, 'Data'); + debugLog('Processing data', 'myFunction'); -InfluxData uses [Redoc](https://github.com/Redocly/redoc) to generate the full -InfluxDB API documentation when documentation is deployed. -Redoc generates HTML documentation using the InfluxDB `swagger.yml`. -For more information about generating InfluxDB API documentation, see the -[API Documentation README](https://github.com/influxdata/docs-v2/tree/master/api-docs#readme). + function processData() { + // Add a breakpoint that works with DevTools + debugBreak(); + + // Your existing code... + } + ``` + +3. Start Hugo in development mode--for example: + + ```bash + yarn hugo server + ``` + +4. In VS Code, go to Run > Start Debugging, and select the "Debug JS (debug-helpers)" configuration. + +Your system uses the configuration in `launch.json` to launch the site in Chrome +and attach the debugger to the Developer Tools console. + +Make sure to remove the debug statements before merging your changes. +The debug helpers are designed to be used in development and should not be used in production. 
diff --git a/.gitignore b/.gitignore index 650f31962..4cb5a9eae 100644 --- a/.gitignore +++ b/.gitignore @@ -15,13 +15,18 @@ node_modules !telegraf-build/templates !telegraf-build/scripts !telegraf-build/README.md -/cypress/downloads +/cypress/downloads/* /cypress/screenshots/* /cypress/videos/* test-results.xml /influxdb3cli-build-scripts/content .vscode/* +!.vscode/launch.json .idea **/config.toml package-lock.json -tmp \ No newline at end of file +tmp + +# Context files for LLMs and AI tools +.context/* +!.context/README.md diff --git a/.prettierignore b/.prettierignore index 004c23fb4..b7974b235 100644 --- a/.prettierignore +++ b/.prettierignore @@ -3,3 +3,4 @@ **/.svn **/.hg **/node_modules +assets/jsconfig.json \ No newline at end of file diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 000000000..9aacd8f89 --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,47 @@ +{ + "version": "0.2.0", + "configurations": [ + { + "name": "Debug JS (debug-helpers)", + "type": "chrome", + "request": "launch", + "url": "http://localhost:1313", + "webRoot": "${workspaceFolder}", + "skipFiles": [ + "/**" + ], + "sourceMaps": false, + "trace": true, + "smartStep": false + }, + { + "name": "Debug JS (source maps)", + "type": "chrome", + "request": "launch", + "url": "http://localhost:1313", + "webRoot": "${workspaceFolder}", + "sourceMaps": true, + "sourceMapPathOverrides": { + "*": "${webRoot}/assets/js/*", + "main.js": "${webRoot}/assets/js/main.js", + "page-context.js": "${webRoot}/assets/js/page-context.js", + "ask-ai-trigger.js": "${webRoot}/assets/js/ask-ai-trigger.js", + "ask-ai.js": "${webRoot}/assets/js/ask-ai.js", + "utils/*": "${webRoot}/assets/js/utils/*", + "services/*": "${webRoot}/assets/js/services/*" + }, + "skipFiles": [ + "/**", + "node_modules/**", + "chrome-extension://**" + ], + "trace": true, + "smartStep": true, + "disableNetworkCache": true, + "userDataDir": "${workspaceFolder}/.vscode/chrome-user-data", + "runtimeArgs": [ 
+ "--disable-features=VizDisplayCompositor" + ] + }, + ] +} \ No newline at end of file diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 000000000..97f250d72 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,25 @@ +# Instructions for InfluxData Documentation + +## Purpose and scope + +Claude should help document InfluxData products by creating clear, accurate technical content with proper code examples, frontmatter, and formatting. + +## Project overview + +See @README.md + +## Available NPM commands + +@package.json + +## Instructions for contributing + +See @.github/copilot-instructions.md for style guidelines and +product-specific documentation paths and URLs managed in this project. + +See @.github/instructions/contributing.instructions.md for contributing +information including using shortcodes and running tests. + +See @.github/instructions/influxdb3-code-placeholders.instructions.md for using +placeholders in code samples and CLI commands. + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 479578424..e9f5fd25c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -363,6 +363,9 @@ list_query_example:# Code examples included with article descriptions in childre # References to examples in data/query_examples canonical: # Path to canonical page, overrides auto-gen'd canonical URL v2: # Path to v2 equivalent page +alt_links: # Alternate pages in other products/versions for cross-product navigation + cloud-dedicated: /influxdb3/cloud-dedicated/path/to/page/ + core: /influxdb3/core/path/to/page/ prepend: # Prepend markdown content to an article (especially powerful with cascade) block: # (Optional) Wrap content in a block style (note, warn, cloud) content: # Content to prepend to article @@ -454,6 +457,29 @@ add the following frontmatter to the 1.x page: v2: /influxdb/v2.0/get-started/ ``` +### Alternative links for cross-product navigation + +Use the `alt_links` frontmatter to specify equivalent pages in other InfluxDB products, +for example, when a page 
exists at a different path in a different version or if +the feature doesn't exist in that product. +This enables the product switcher to navigate users to the corresponding page when they +switch between products. If a page doesn't exist in another product (for example, an +Enterprise-only feature), point to the nearest parent page if relevant. + +```yaml +alt_links: + cloud-dedicated: /influxdb3/cloud-dedicated/admin/tokens/create-token/ + cloud-serverless: /influxdb3/cloud-serverless/admin/tokens/create-token/ + core: /influxdb3/core/reference/cli/influxdb3/update/ # Points to parent if exact page doesn't exist +``` + +Supported product keys for InfluxDB 3: +- `core` +- `enterprise` +- `cloud-serverless` +- `cloud-dedicated` +- `clustered` + ### Prepend and append content to a page Use the `prepend` and `append` frontmatter to add content to the top or bottom of a page. @@ -1667,7 +1693,7 @@ The shortcode takes a regular expression for matching placeholder names. Use the `code-placeholder-key` shortcode to format the placeholder names in text that describes the placeholder--for example: -``` +```markdown {{% code-placeholders "DATABASE_NAME|USERNAME|PASSWORD_OR_TOKEN|API_TOKEN|exampleuser@influxdata.com" %}} ```sh curl --request POST http://localhost:8086/write?db=DATABASE_NAME \ @@ -1691,3 +1717,83 @@ InfluxDB API documentation when documentation is deployed. Redoc generates HTML documentation using the InfluxDB `swagger.yml`. For more information about generating InfluxDB API documentation, see the [API Documentation README](https://github.com/influxdata/docs-v2/tree/master/api-docs#readme). + +## JavaScript in the documentation UI + +The InfluxData documentation UI uses JavaScript with ES6+ syntax and +`assets/js/main.js` as the entry point to import modules from +`assets/js`. +Only `assets/js/main.js` should be imported in HTML files. + +`assets/js/main.js` registers components and initializes them on page load. 
+ +If you're adding UI functionality that requires JavaScript, follow these steps: + +1. In your HTML file, add a `data-component` attribute to the element that + should be initialized by your JavaScript code. For example: + + ```html +
+ ``` + +2. Following the component pattern, create a single-purpose JavaScript module + (`assets/js/components/my-component.js`) + that exports a single function that receives the component element and initializes it. +3. In `assets/js/main.js`, import the module and register the component to ensure + the component is initialized on page load. + +### Debugging JavaScript + +To debug JavaScript code used in the InfluxData documentation UI, choose one of the following methods: + +- Use source maps and the Chrome DevTools debugger. +- Use debug helpers that provide breakpoints and console logging as a workaround or alternative for using source maps and the Chrome DevTools debugger. + +#### Using source maps and Chrome DevTools debugger + +1. In VS Code, select Run > Start Debugging. +2. Select the "Debug Docs (source maps)" configuration. +3. Click the play button to start the debugger. +5. Set breakpoints in the JavaScript source files--files in the + `assets/js/ns-hugo-imp:` namespace-- in the + VS Code editor or in the Chrome Developer Tools Sources panel: + + - In the VS Code Debugger panel > "Loaded Scripts" section, find the + `assets/js/ns-hugo-imp:` namespace. + - In the Chrome Developer Tools Sources panel, expand + `js/ns-hugo-imp://assets/js/`. + +#### Using debug helpers + +1. In your JavaScript module, import debug helpers from `assets/js/utils/debug-helpers.js`. + These helpers provide breakpoints and console logging as a workaround or alternative for + using source maps and the Chrome DevTools debugger. +2. Insert debug statements by calling the helper functions in your code--for example: + + ```js + import { debugLog, debugBreak, debugInspect } from './utils/debug-helpers.js'; + + const data = debugInspect(someData, 'Data'); + debugLog('Processing data', 'myFunction'); + + function processData() { + // Add a breakpoint that works with DevTools + debugBreak(); + + // Your existing code... + } + ``` + +3. 
Start Hugo in development mode--for example: + + ```bash + yarn hugo server + ``` + +4. In VS Code, go to Run > Start Debugging, and select the "Debug JS (debug-helpers)" configuration. + +Your system uses the configuration in `launch.json` to launch the site in Chrome +and attach the debugger to the Developer Tools console. + +Make sure to remove the debug statements before merging your changes. +The debug helpers are designed to be used in development and should not be used in production. diff --git a/api-docs/getswagger.sh b/api-docs/getswagger.sh index c3c9a36a1..1ff077a45 100755 --- a/api-docs/getswagger.sh +++ b/api-docs/getswagger.sh @@ -62,7 +62,7 @@ function showHelp { subcommand=$1 case "$subcommand" in - cloud-dedicated-v2|cloud-dedicated-management|cloud-serverless-v2|clustered-v2|cloud-v2|v2|v1-compat|core-v3|enterprise-v3|all) + cloud-dedicated-v2|cloud-dedicated-management|cloud-serverless-v2|clustered-management|clustered-v2|cloud-v2|v2|v1-compat|core-v3|enterprise-v3|all) product=$1 shift @@ -187,6 +187,22 @@ function updateCloudServerlessV2 { postProcess $outFile 'influxdb3/cloud-serverless/.config.yml' v2@2 } +function updateClusteredManagement { + outFile="influxdb3/clustered/management/openapi.yml" + if [[ -z "$baseUrl" ]]; + then + echo "Using existing $outFile" + else + # Clone influxdata/granite and fetch the latest openapi.yaml file. 
+ echo "Fetching the latest openapi.yaml file from influxdata/granite" + tmp_dir=$(mktemp -d) + git clone --depth 1 --branch main https://github.com/influxdata/granite.git "$tmp_dir" + cp "$tmp_dir/openapi.yaml" "$outFile" + rm -rf "$tmp_dir" + fi + postProcess $outFile 'influxdb3/clustered/.config.yml' management@0 +} + function updateClusteredV2 { outFile="influxdb3/clustered/v2/ref.yml" if [[ -z "$baseUrl" ]]; @@ -278,6 +294,9 @@ then elif [ "$product" = "cloud-serverless-v2" ]; then updateCloudServerlessV2 +elif [ "$product" = "clustered-management" ]; +then + updateClusteredManagement elif [ "$product" = "clustered-v2" ]; then updateClusteredV2 @@ -305,6 +324,6 @@ then updateOSSV2 updateV1Compat else - echo "Provide a product argument: cloud-v2, cloud-serverless-v2, cloud-dedicated-v2, cloud-dedicated-management, clustered-v2, core-v3, enterprise-v3, v2, v1-compat, or all." + echo "Provide a product argument: cloud-v2, cloud-serverless-v2, cloud-dedicated-v2, cloud-dedicated-management, clustered-management, clustered-v2, core-v3, enterprise-v3, v2, v1-compat, or all." 
showHelp fi diff --git a/api-docs/influxdb/v2/.config.yml b/api-docs/influxdb/v2/.config.yml index c99715a57..c0b0b9205 100644 --- a/api-docs/influxdb/v2/.config.yml +++ b/api-docs/influxdb/v2/.config.yml @@ -10,7 +10,5 @@ apis: root: v2/ref.yml x-influxdata-docs-aliases: - /influxdb/v2/api/ - v1-compatibility@2: - root: v1-compatibility/swaggerV1Compat.yml - x-influxdata-docs-aliases: + - /influxdb/v2/api/v1-compatibility/ - /influxdb/v2/api/v1/ diff --git a/api-docs/influxdb/v2/v2/content/tag-groups.yml b/api-docs/influxdb/v2/v2/content/tag-groups.yml index 7fcd8cc8d..905c380ef 100644 --- a/api-docs/influxdb/v2/v2/content/tag-groups.yml +++ b/api-docs/influxdb/v2/v2/content/tag-groups.yml @@ -6,5 +6,6 @@ - Headers - Pagination - Response codes + - Compatibility endpoints - name: All endpoints tags: [] diff --git a/api-docs/influxdb/v2/v2/ref.yml b/api-docs/influxdb/v2/v2/ref.yml index 547f37265..ffc826338 100644 --- a/api-docs/influxdb/v2/v2/ref.yml +++ b/api-docs/influxdb/v2/v2/ref.yml @@ -58,6 +58,7 @@ tags: - [Manage API tokens](/influxdb/v2/security/tokens/) - [Assign a token to a specific user](/influxdb/v2/security/tokens/create-token/) name: Authorizations (API tokens) + - name: Authorizations (v1-compatible) - name: Backup - description: | Store your data in InfluxDB [buckets](/influxdb/v2/reference/glossary/#bucket). @@ -88,6 +89,15 @@ tags: | `orgID` | 16-byte string | The organization ID ([find your organization](/influxdb/v2/organizations/view-orgs/). | name: Common parameters x-traitTag: true + - name: Compatibility endpoints + description: | + InfluxDB v2 provides a v1-compatible API for backward compatibility with InfluxDB 1.x clients and integrations. + + Use these endpoints with InfluxDB 1.x client libraries and third-party integrations such as Grafana, Telegraf, and other tools designed for InfluxDB 1.x. 
The compatibility layer maps InfluxDB 1.x concepts (databases, retention policies) to InfluxDB v2 resources (buckets, organizations) through database retention policy (DBRP) mappings. + + - [Write data (v1-compatible)](#tag/Write-data-(v1-compatible)) + - [Query data using InfluxQL (v1-compatible)](#tag/Query-data-(v1-compatible)) + - [Manage v1-compatible users and permissions](#tag/Authorizations-(v1-compatible)) - name: Config - name: Dashboards - name: Data I/O endpoints @@ -99,7 +109,7 @@ tags: databases and retention policies are mapped to buckets using the database and retention policy (DBRP) mapping service. The DBRP mapping service uses the database and retention policy - specified in 1.x compatibility API requests to route operations to a bucket. + specified in v1 compatibility API requests to route operations to a bucket. ### Related guides @@ -139,9 +149,6 @@ tags: x-traitTag: true - name: Health - name: Labels - - name: Legacy Authorizations - - name: Legacy Query - - name: Legacy Write - name: Metrics - name: NotificationEndpoints - name: NotificationRules @@ -194,6 +201,7 @@ tags: - description: | Retrieve data, analyze queries, and get query suggestions. name: Query + - name: Query data (v1-compatible) - description: | See the [**API Quick Start**](/influxdb/v2/api-guide/api_intro/) to get up and running authenticating with tokens, writing to buckets, and querying data. @@ -314,6 +322,7 @@ tags: - description: | Write time series data to [buckets](/influxdb/v2/reference/glossary/#bucket). name: Write + - name: Write data (v1-compatible) paths: /api/v2: get: @@ -12756,7 +12765,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/Error' + $ref: '#/components/schemas/Error' description: The request was well-formed, but some or all the points were rejected due to semantic errors--for example, schema conflicts or retention policy violations. Error message contains details for one or more rejected points. 
'429': description: | @@ -12869,7 +12878,7 @@ paths: description: Unexpected error summary: List all legacy authorizations tags: - - Legacy Authorizations + - Authorizations (v1-compatible) post: description: | Creates a legacy authorization and returns the legacy authorization. @@ -12932,7 +12941,7 @@ paths: description: Unexpected error summary: Create a legacy authorization tags: - - Legacy Authorizations + - Authorizations (v1-compatible) servers: - url: /private /legacy/authorizations/{authID}: @@ -12954,7 +12963,7 @@ paths: description: Unexpected error summary: Delete a legacy authorization tags: - - Legacy Authorizations + - Authorizations (v1-compatible) get: operationId: GetLegacyAuthorizationsID parameters: @@ -12977,7 +12986,7 @@ paths: description: Unexpected error summary: Retrieve a legacy authorization tags: - - Legacy Authorizations + - Authorizations (v1-compatible) patch: operationId: PatchLegacyAuthorizationsID parameters: @@ -13007,7 +13016,7 @@ paths: description: Unexpected error summary: Update a legacy authorization to be active or inactive tags: - - Legacy Authorizations + - Authorizations (v1-compatible) servers: - url: /private /legacy/authorizations/{authID}/password: @@ -13040,94 +13049,29 @@ paths: description: Unexpected error summary: Set a legacy authorization password tags: - - Legacy Authorizations + - Authorizations (v1-compatible) servers: - url: /private /query: get: - description: Queries InfluxDB using InfluxQL. + summary: Execute InfluxQL query (v1-compatible) + description: | + Executes an InfluxQL query to retrieve data from the specified database. + + This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. + Use query parameters to specify the database and the InfluxQL query. 
operationId: GetLegacyQuery parameters: - $ref: '#/components/parameters/TraceSpan' - - in: header - name: Accept - schema: - default: application/json - description: | - Media type that the client can understand. - - **Note**: With `application/csv`, query results include [**unix timestamps**](/influxdb/v2/reference/glossary/#unix-timestamp) instead of [RFC3339 timestamps](/influxdb/v2/reference/glossary/#rfc3339-timestamp). - enum: - - application/json - - application/csv - - text/csv - - application/x-msgpack - type: string - - description: The content encoding (usually a compression algorithm) that the client can understand. - in: header - name: Accept-Encoding - schema: - default: identity - description: The content coding. Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. - enum: - - gzip - - identity - type: string - - in: header - name: Content-Type - schema: - enum: - - application/json - type: string - - description: The InfluxDB 1.x username to authenticate the request. - in: query - name: u - schema: - type: string - - description: The InfluxDB 1.x password to authenticate the request. - in: query - name: p - schema: - type: string - - description: | - The database to query data from. - This is mapped to an InfluxDB [bucket](/influxdb/v2/reference/glossary/#bucket). - For more information, see [Database and retention policy mapping](/influxdb/v2/api/influxdb-1x/dbrp/). - in: query - name: db - required: true - schema: - type: string - - description: | - The retention policy to query data from. - This is mapped to an InfluxDB [bucket](/influxdb/v2/reference/glossary/#bucket). - For more information, see [Database and retention policy mapping](/influxdb/v2/api/influxdb-1x/dbrp/). - in: query - name: rp - schema: - type: string - - description: The InfluxQL query to execute. To execute multiple queries, delimit queries with a semicolon (`;`). 
- in: query - name: q - required: true - schema: - type: string - - description: | - A unix timestamp precision. - Formats timestamps as [unix (epoch) timestamps](/influxdb/v2/reference/glossary/#unix-timestamp) with the specified precision - instead of [RFC3339 timestamps](/influxdb/v2/reference/glossary/#rfc3339-timestamp) with nanosecond precision. - in: query - name: epoch - schema: - enum: - - ns - - u - - µ - - ms - - s - - m - - h - type: string + - $ref: '#/components/parameters/AuthV1Username' + - $ref: '#/components/parameters/AuthV1Password' + - $ref: '#/components/parameters/Accept' + - $ref: '#/components/parameters/AcceptEncoding' + - $ref: '#/components/parameters/Content-Type' + - $ref: '#/components/parameters/V1Database' + - $ref: '#/components/parameters/V1RetentionPolicy' + - $ref: '#/components/parameters/V1Epoch' + - $ref: '#/components/parameters/V1Query' responses: '200': content: @@ -13191,19 +13135,87 @@ paths: schema: $ref: '#/components/schemas/Error' description: Error processing query - summary: Query with the 1.x compatibility API tags: - - Legacy Query + - Query data (v1-compatible) + post: + operationId: PostQueryV1 + summary: Execute InfluxQL query (v1-compatible) + description: | + Executes an InfluxQL query to retrieve data from the specified database. + + This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. + Use query parameters to specify the database and the InfluxQL query. + tags: + - Query data (v1-compatible) + requestBody: + description: InfluxQL query to execute. 
+ content: + text/plain: + schema: + type: string + parameters: + - $ref: '#/components/parameters/TraceSpan' + - $ref: '#/components/parameters/AuthV1Username' + - $ref: '#/components/parameters/AuthV1Password' + - $ref: '#/components/parameters/Accept' + - $ref: '#/components/parameters/AcceptEncoding' + - $ref: '#/components/parameters/Content-Type' + - $ref: '#/components/parameters/V1Database' + - $ref: '#/components/parameters/V1RetentionPolicy' + - $ref: '#/components/parameters/V1Epoch' + responses: + '200': + description: Query results + headers: + Content-Encoding: + description: The Content-Encoding entity header is used to compress the media-type. When present, its value indicates which encodings were applied to the entity-body + schema: + type: string + description: Specifies that the response in the body is encoded with gzip or not encoded with identity. + default: identity + enum: + - gzip + - identity + Trace-Id: + description: The Trace-Id header reports the request's trace ID, if one was generated. + schema: + type: string + description: Specifies the request's trace ID. + content: + application/csv: + schema: + $ref: '#/components/schemas/InfluxqlCsvResponse' + application/json: + schema: + $ref: '#/components/schemas/InfluxqlJsonResponse' + text/csv: + schema: + $ref: '#/components/schemas/InfluxqlCsvResponse' + examples: + influxql-chunk_size_2: + value: | + {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:55Z",90,"1"],["2016-05-19T18:37:56Z",90,"1"]],"partial":true}],"partial":true}]} + {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:57Z",90,"1"],["2016-05-19T18:37:58Z",90,"1"]]}]}]} + application/x-msgpack: + schema: + type: string + format: binary + '429': + description: Token is temporarily over quota. The Retry-After header describes when to try the read again. 
+ headers: + Retry-After: + description: A non-negative decimal integer indicating the seconds to delay after the response is received. + schema: + type: integer + format: int32 + default: + description: Error processing query + content: + application/json: + schema: + $ref: '#/components/schemas/Error' /write: post: - description: |- - Writes line protocol to the specified bucket. - - This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools. - - Use this endpoint to send data in [line protocol](https://docs.influxdata.com/influxdb/v2/reference/syntax/line-protocol/) format to InfluxDB. - Use query parameters to specify options for writing data. - operationId: PostLegacyWrite parameters: - $ref: '#/components/parameters/TraceSpan' @@ -13281,7 +13293,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/Error' + $ref: '#/components/schemas/Error' description: The request was well-formed, but some or all the points were rejected due to semantic errors--for example, schema conflicts or retention policy violations. Error message contains details for one or more rejected points. '429': description: Token is temporarily over quota. The Retry-After header describes when to try the write again. @@ -13305,9 +13317,31 @@ paths: schema: $ref: '#/components/schemas/Error' description: Internal server error - summary: Write time series data into InfluxDB in a V1-compatible format + summary: Write data using a v1-compatible request + description: | + Writes data in [line protocol](/influxdb/v2/reference/syntax/line-protocol/) syntax to the specified bucket using a v1-compatible request. + + This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools. 
+ + Use query parameters to specify options for writing data. + + #### InfluxDB Cloud + + - Validates and queues the request. + - Handles the write asynchronously - the write might not have completed yet. + - Returns a `Retry-After` header that describes when to try the write again. + + #### InfluxDB OSS v2 + + - Validates the request and handles the write synchronously. + - If all points were written successfully, responds with HTTP `2xx` status code + - If any points were rejected, responds with HTTP `4xx` status code and details about the problem. + + #### Related guides + + - [Write data with the InfluxDB API](/influxdb/v2/write-data/developer-tools/api) tags: - - Legacy Write + - Write data (v1-compatible) components: examples: AuthorizationPostRequest: @@ -13412,6 +13446,96 @@ components: required: false schema: type: string + Accept: + in: header + name: Accept + schema: + default: application/json + description: | + Media type that the client can understand. + + **Note**: With `application/csv`, query results include [**unix timestamps**](/influxdb/v2/reference/glossary/#unix-timestamp) instead of [RFC3339 timestamps](/influxdb/v2/reference/glossary/#rfc3339-timestamp). + enum: + - application/json + - application/csv + - text/csv + - application/x-msgpack + type: string + AcceptEncoding: + description: The content encoding (usually a compression algorithm) that the client can understand. + in: header + name: Accept-Encoding + schema: + default: identity + description: The content coding. Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. + enum: + - gzip + - identity + type: string + Content-Type: + in: header + name: Content-Type + schema: + enum: + - application/json + type: string + AuthV1Username: + description: | + The InfluxDB 1.x username to authenticate the request. + If you provide an API token as the password, `u` is required, but can be any value. 
+ in: query + name: u + schema: + type: string + AuthV1Password: + description: The InfluxDB 1.x password to authenticate the request. + in: query + name: p + schema: + type: string + V1Database: + description: | + The database to query data from. + This is mapped to an InfluxDB [bucket](/influxdb/v2/reference/glossary/#bucket). + For more information, see [Database and retention policy mapping](/influxdb/v2/api/influxdb-1x/dbrp/). + in: query + name: db + required: true + schema: + type: string + V1RetentionPolicy: + description: | + The retention policy to query data from. + This is mapped to an InfluxDB [bucket](/influxdb/v2/reference/glossary/#bucket). + For more information, see [Database and retention policy mapping](/influxdb/v2/api/influxdb-1x/dbrp/). + in: query + name: rp + schema: + type: string + V1Query: + description: The InfluxQL query to execute. To execute multiple queries, delimit queries with a semicolon (`;`). + in: query + name: q + required: true + schema: + type: string + V1Epoch: + description: | + A unix timestamp precision. + Formats timestamps as [unix (epoch) timestamps](/influxdb/v2/reference/glossary/#unix-timestamp) with the specified precision + instead of [RFC3339 timestamps](/influxdb/v2/reference/glossary/#rfc3339-timestamp) with nanosecond precision. 
+ in: query + name: epoch + schema: + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string responses: AuthorizationError: content: @@ -20058,13 +20182,16 @@ x-tagGroups: - Headers - Pagination - Response codes + - Compatibility endpoints - name: All endpoints tags: - Authorizations (API tokens) + - Authorizations (v1-compatible) - Backup - Buckets - Cells - Checks + - Compatibility endpoints - Config - Dashboards - DBRPs @@ -20072,15 +20199,13 @@ x-tagGroups: - Delete - Health - Labels - - Legacy Authorizations - - Legacy Query - - Legacy Write - Metrics - NotificationEndpoints - NotificationRules - Organizations - Ping - Query + - Query data (v1-compatible) - Ready - RemoteConnections - Replications @@ -20102,3 +20227,4 @@ x-tagGroups: - Variables - Views - Write + - Write data (v1-compatible) diff --git a/api-docs/influxdb3/cloud-dedicated/management/content/tag-groups.yml b/api-docs/influxdb3/cloud-dedicated/management/content/tag-groups.yml index 775a53762..57e8c8484 100644 --- a/api-docs/influxdb3/cloud-dedicated/management/content/tag-groups.yml +++ b/api-docs/influxdb3/cloud-dedicated/management/content/tag-groups.yml @@ -1,6 +1,6 @@ - name: Using the Management API tags: - Authentication - - Examples + - Quickstart - name: All endpoints tags: [] diff --git a/api-docs/influxdb3/cloud-dedicated/management/openapi.yml b/api-docs/influxdb3/cloud-dedicated/management/openapi.yml index 374a9ab72..a74165c29 100644 --- a/api-docs/influxdb3/cloud-dedicated/management/openapi.yml +++ b/api-docs/influxdb3/cloud-dedicated/management/openapi.yml @@ -7,10 +7,10 @@ info: This documentation is generated from the InfluxDB OpenAPI specification. 
+ version: '' license: name: MIT url: https://opensource.org/licenses/MIT - version: '' contact: name: InfluxData url: https://www.influxdata.com @@ -31,7 +31,7 @@ tags: - name: Authentication x-traitTag: true description: | - The InfluxDB Management API endpoints require the following credentials: + With InfluxDB 3 Cloud Dedicated, the InfluxDB Management API endpoints require the following credentials: - `ACCOUNT_ID`: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the cluster belongs to. To view account ID and cluster ID, [list cluster details](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json). - `CLUSTER_ID`: The ID of the [cluster](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that you want to manage. To view account ID and cluster ID, [list cluster details](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json). @@ -45,7 +45,7 @@ tags: description: Manage database read/write tokens for a cluster - name: Databases description: Manage databases for a cluster - - name: Example + - name: Quickstart x-traitTag: true description: | The following example script shows how to use `curl` to make database and token management requests: @@ -630,7 +630,7 @@ paths: maxTables: 300 maxColumnsPerTable: 150 retentionPeriod: 600000000000 - maxTablsOnly: + maxTablesOnly: summary: Update Max Tables Only value: maxTables: 300 @@ -681,7 +681,7 @@ paths: maxTables: 300 maxColumnsPerTable: 150 retentionPeriod: 600000000000 - maxTablsOnly: + maxTablesOnly: summary: Update Max Tables Only value: accountId: 11111111-1111-4111-8111-111111111111 @@ -975,6 +975,10 @@ paths: $ref: '#/components/schemas/DatabaseTokenPermissions' createdAt: $ref: '#/components/schemas/DatabaseTokenCreatedAt' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' required: - 
accountId - clusterId @@ -1078,6 +1082,8 @@ paths: $ref: '#/components/schemas/DatabaseTokenDescription' permissions: $ref: '#/components/schemas/DatabaseTokenPermissions' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' required: - description examples: @@ -1127,6 +1133,10 @@ paths: $ref: '#/components/schemas/DatabaseTokenCreatedAt' accessToken: $ref: '#/components/schemas/DatabaseTokenAccessToken' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' required: - accountId - clusterId @@ -1270,6 +1280,10 @@ paths: $ref: '#/components/schemas/DatabaseTokenPermissions' createdAt: $ref: '#/components/schemas/DatabaseTokenCreatedAt' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' required: - accountId - clusterId @@ -1427,6 +1441,10 @@ paths: $ref: '#/components/schemas/DatabaseTokenPermissions' createdAt: $ref: '#/components/schemas/DatabaseTokenCreatedAt' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' required: - accountId - clusterId @@ -1876,6 +1894,18 @@ components: examples: - '2023-12-21T17:32:28.000Z' - '2024-03-02T04:20:19.000Z' + DatabaseTokenExpiresAt: + description: | + The date and time that the database token expires, if applicable + + Uses RFC3339 format + $ref: '#/components/schemas/DateTimeRfc3339' + DatabaseTokenRevokedAt: + description: | + The date and time that the database token was revoked, if applicable + + Uses RFC3339 format + $ref: '#/components/schemas/DateTimeRfc3339' DatabaseTokenAccessToken: description: | The access token that can be used to authenticate query and write requests to the cluster @@ -1986,7 +2016,7 @@ x-tagGroups: - name: Using the Management API tags: - Authentication - - Examples + - Quickstart - name: All endpoints tags: - Database tokens diff --git 
a/api-docs/influxdb3/clustered/.config.yml b/api-docs/influxdb3/clustered/.config.yml index 454f39d94..1715e1bf9 100644 --- a/api-docs/influxdb3/clustered/.config.yml +++ b/api-docs/influxdb3/clustered/.config.yml @@ -6,6 +6,8 @@ extends: x-influxdata-product-name: InfluxDB 3 Clustered apis: + management@0: + root: management/openapi.yml v2@2: root: v2/ref.yml x-influxdata-docs-aliases: diff --git a/api-docs/influxdb3/clustered/management/content/info.yml b/api-docs/influxdb3/clustered/management/content/info.yml new file mode 100644 index 000000000..0d324fadb --- /dev/null +++ b/api-docs/influxdb3/clustered/management/content/info.yml @@ -0,0 +1,15 @@ +title: InfluxDB 3 Clustered Management API +x-influxdata-short-title: Management API +description: | + The Management API for InfluxDB 3 Clustered provides a programmatic interface for managing an InfluxDB 3 cluster. + The Management API lets you integrate functions such as creating and managing databases, permissions, and tokens into your workflow or application. + + This documentation is generated from the + InfluxDB 3 Management API OpenAPI specification. 
+license: + name: MIT + url: 'https://opensource.org/licenses/MIT' +contact: + name: InfluxData + url: https://www.influxdata.com + email: support@influxdata.com \ No newline at end of file diff --git a/api-docs/influxdb3/clustered/management/content/servers.yml b/api-docs/influxdb3/clustered/management/content/servers.yml new file mode 100644 index 000000000..edec580b8 --- /dev/null +++ b/api-docs/influxdb3/clustered/management/content/servers.yml @@ -0,0 +1,8 @@ +- url: 'https://{baseurl}/api/v0' + description: InfluxDB 3 Clustered Management API URL + variables: + baseurl: + enum: + - 'console.influxdata.com' + default: 'console.influxdata.com' + description: InfluxDB 3 Clustered Console URL diff --git a/api-docs/influxdb3/clustered/management/content/tag-groups.yml b/api-docs/influxdb3/clustered/management/content/tag-groups.yml new file mode 100644 index 000000000..57e8c8484 --- /dev/null +++ b/api-docs/influxdb3/clustered/management/content/tag-groups.yml @@ -0,0 +1,6 @@ +- name: Using the Management API + tags: + - Authentication + - Quickstart +- name: All endpoints + tags: [] diff --git a/api-docs/influxdb3/clustered/management/openapi.yml b/api-docs/influxdb3/clustered/management/openapi.yml new file mode 100644 index 000000000..410d10fc8 --- /dev/null +++ b/api-docs/influxdb3/clustered/management/openapi.yml @@ -0,0 +1,1730 @@ +openapi: 3.1.0 +info: + title: InfluxDB 3 Clustered Management API + description: | + The Management API for InfluxDB 3 Clustered provides a programmatic interface for managing an InfluxDB 3 cluster. + The Management API lets you integrate functions such as creating and managing databases, permissions, and tokens into your workflow or application. + + This documentation is generated from the + InfluxDB 3 Management API OpenAPI specification. 
+ version: '' + license: + name: MIT + url: https://opensource.org/licenses/MIT + contact: + name: InfluxData + url: https://www.influxdata.com + email: support@influxdata.com +servers: + - url: https://{baseurl}/api/v0 + description: InfluxDB 3 Clustered Management API URL + variables: + baseurl: + enum: + - console.influxdata.com + default: console.influxdata.com + description: InfluxDB 3 Clustered Console URL +security: + - bearerAuthManagementToken: [] + bearerAuthJwt: [] +tags: + - name: Authentication + x-traitTag: true + description: | + With InfluxDB 3 Clustered, InfluxDB Management API endpoints require the following credential: + + - `Authorization MANAGEMENT_TOKEN`: the `Authorization` HTTP header with a [management token](/influxdb3/clustered/admin/tokens/management/). + + See how to [create a management token](/influxdb3/clustered/admin/tokens/management/). + + By default, management tokens in InfluxDB 3 are short-lived tokens issued by an OAuth2 identity provider that grant a specific user administrative access to your InfluxDB cluster. However, for automation purposes, you can manually create management tokens that authenticate directly with your InfluxDB cluster and do not require human interaction with your identity provider. + - name: Database tokens + description: Manage database read/write tokens for a cluster + - name: Databases + description: Manage databases for a cluster + - name: Quickstart + x-traitTag: true + description: | + The following example script shows how to use `curl` to make database and token management requests: + + ```shell + #!/bin/bash + + # Usage: + # Note the leading space in the command below to keep secrets out of the shell history + # + # ``` + # MANAGEMENT_TOKEN= ./scripts/test_http_api_v0_endpoints.sh + # ``` + + # Env var validation + if [ -z "${MANAGEMENT_TOKEN}" ]; then + echo " + [Error]: ❌ + \$MANAGEMENT_TOKEN env var is required. 
+ " + exit 1 + fi + + HOST="https://cluster-host.com" + + # Database request functions + list_databases () { + local response=$( \ + curl \ + --location "$HOST/api/v0/databases" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + + create_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/databases" \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "name": "'$databaseName'", + "maxTables": 75, + "maxColumnsPerTable": 90, + "retentionPeriod": 600000000000, + "partitionTemplate": [ + { + "type": "tag", + "value": "abc" + }, + { + "type": "bucket", + "value": { + "tagName": "def", + "numberOfBuckets": 5 + } + } + ] + }' \ + ) + echo "$response" + } + + update_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/databases/$databaseName" \ + --request PATCH \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "maxTables": 150, + "maxColumnsPerTable": 180, + "retentionPeriod": 1200000000000 + }' \ + ) + echo "$response" + } + + delete_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/databases/$databaseName" \ + --request DELETE \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + + # Token request functions + list_tokens () { + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + + create_token () { + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens" \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + 
--header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "description": "my test token", + "permissions": [ + { + "action": "write", + "resource": "database_one" + }, + { + "action": "read", + "resource": "database_two" + } + ] + }' \ + ) + echo "$response" + } + + get_token () { + local token_id=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens/$tokenId" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + + update_token () { + local token_id=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens/$tokenId" \ + --request PATCH \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "description": "my updated test token", + "permissions": [ + { + "action": "database_one", + "resource": "read" + } + ] + }' \ + ) + echo "$response" + } + + delete_token () { + local token_id=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens/$tokenId" \ + --request DELETE \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + + + # Test database endpoints + databaseName="test_database_$RANDOM" + + printf "\n🏗️ Creating database... 🏗️\n\n" + response="$(create_database $databaseName)" + echo $response | jq + printf "\n🏗️ Creating database successful 🏗️\n\n" + + printf "\n⬆️ Updating database... ⬆️\n\n" + response="$(update_database $databaseName)" + echo $response | jq + printf "\n⬆️ Updating database successful ⬆️\n\n" + + printf "\n⬇️ Listing databases... ⬇️\n\n" + response="$(list_databases)" + echo $response | jq + printf "\n⬇️ Listing databases successful ⬇️\n\n" + + printf "\n🗑️ Deleting database... 
🗑️\n\n" + response="$(delete_database $databaseName)" + echo $response | jq + printf "\n🗑️ Deleting database successful 🗑️\n\n" + + + # Test token endpoints + printf "\n🏗️ Creating token... 🏗️\n\n" + response="$(create_token)" + echo $response | jq + tokenId=$(echo $response | jq '.id') + printf "\n🏗️ Creating token successful 🏗️\n\n" + + printf "\n⬇️ Getting token... ⬇️\n\n" + response="$(get_token $tokenId)" + echo $response | jq + printf "\n⬇️ Getting token successful ⬇️\n\n" + + printf "\n⬆️ Updating token... ⬆️\n\n" + response="$(update_token $tokenId)" + echo $response | jq + printf "\n⬆️ Updating token successful ⬆️\n\n" + + printf "\n📋 Listing tokens... 📋\n\n" + response="$(list_tokens)" + echo $response | jq + printf "\n📋 Listing tokens successful 📋\n\n" + + printf "\n🗑️ Deleting token... 🗑️\n\n" + response="$(delete_token $tokenId)" + echo $response | jq + printf "\n🗑️ Deleting token successful 🗑️\n\n" + ``` + - name: Tables + description: Manage tables in a database +paths: + /databases: + get: + operationId: GetClusterDatabases + summary: Get all databases for a cluster + responses: + '200': + description: The cluster databases were successfully retrieved + content: + application/json: + schema: + type: array + items: + type: object + properties: + name: + $ref: '#/components/schemas/ClusterDatabaseName' + maxTables: + $ref: '#/components/schemas/ClusterDatabaseMaxTables' + maxColumnsPerTable: + $ref: '#/components/schemas/ClusterDatabaseMaxColumnsPerTable' + retentionPeriod: + $ref: '#/components/schemas/ClusterDatabaseRetentionPeriod' + partitionTemplate: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplate' + required: + - name + - maxTables + - maxColumnsPerTable + - retentionPeriod + example: + - name: DatabaseOne + maxTables: 500 + maxColumnsPerTable: 200 + retentionPeriod: 0 + - name: DatabaseTwo + maxTables: 100 + maxColumnsPerTable: 50 + retentionPeriod: 300000000000 + partitionTemplate: + - type: time + value: '%Y' + - type: tag + 
value: bananas + - type: tag + value: plátanos + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: '' + lang: Shell + source: | + HOST="https://cluster-host.com" + + list_databases () { + local response=$( \ + curl \ + --location "$HOST/api/v0/databases" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + tags: + - Databases + post: + operationId: CreateClusterDatabase + summary: Create a database + tags: + - Databases + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + name: + $ref: '#/components/schemas/ClusterDatabaseName' + maxTables: + $ref: '#/components/schemas/ClusterDatabaseMaxTables' + maxColumnsPerTable: + $ref: '#/components/schemas/ClusterDatabaseMaxColumnsPerTable' + retentionPeriod: + $ref: '#/components/schemas/ClusterDatabaseRetentionPeriod' + partitionTemplate: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplate' + required: + - name + examples: + requiredFieldsOnly: + summary: Required Fields Only + value: + name: DatabaseOne + allFields: + summary: All Fields + value: + name: DatabaseTwo + maxTables: 100 + maxColumnsPerTable: 50 + retentionPeriod: 300000000000 + partitionTemplate: + - type: time + value: '%Y' + - type: tag + value: bananas + - type: tag + value: plátanos + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + responses: + '200': + description: The cluster database was successfully created + content: + application/json: + schema: + type: object + properties: + name: + $ref: '#/components/schemas/ClusterDatabaseName' + maxTables: + $ref: '#/components/schemas/ClusterDatabaseMaxTables' 
+ maxColumnsPerTable: + $ref: '#/components/schemas/ClusterDatabaseMaxColumnsPerTable' + retentionPeriod: + $ref: '#/components/schemas/ClusterDatabaseRetentionPeriod' + partitionTemplate: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplate' + required: + - name + - maxTables + - maxColumnsPerTable + - retentionPeriod + examples: + requiredFieldsOnly: + summary: Required Fields Only + value: + name: DatabaseOne + maxTables: 500 + maxColumnsPerTable: 200 + retentionPeriod: 0 + allFields: + summary: All Fields + value: + name: DatabaseTwo + maxTables: 100 + maxColumnsPerTable: 50 + retentionPeriod: 300000000000 + partitionTemplate: + - type: time + value: '%Y' + - type: tag + value: a + - type: tag + value: c + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '409': + $ref: '#/components/responses/Conflict' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + create_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/databases" \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "name": "'$databaseName'", + "maxTables": 75, + "maxColumnsPerTable": 90, + "retentionPeriod": 600000000000, + "partitionTemplate": [ + { + "type": "tag", + "value": "abc" + }, + { + "type": "bucket", + "value": { + "tagName": "def", + "numberOfBuckets": 5 + } + } + ] + }' \ + ) + echo "$response" + } + /databases/{databaseName}: + patch: + operationId: UpdateClusterDatabase + summary: Update a database + tags: + - Databases + parameters: + - name: databaseName + in: path + description: The name of the database 
to update + required: true + schema: + $ref: '#/components/schemas/ClusterDatabaseName' + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + maxTables: + $ref: '#/components/schemas/ClusterDatabaseMaxTables' + maxColumnsPerTable: + $ref: '#/components/schemas/ClusterDatabaseMaxColumnsPerTable' + retentionPeriod: + $ref: '#/components/schemas/ClusterDatabaseRetentionPeriod' + minProperties: 1 + examples: + allFields: + summary: Update All Fields + value: + maxTables: 300 + maxColumnsPerTable: 150 + retentionPeriod: 600000000000 + maxTablesOnly: + summary: Update Max Tables Only + value: + maxTables: 300 + maxColumnsPerTableOnly: + summary: Update Max Columns Per Table Only + value: + maxColumnsPerTable: 150 + retentionPeriodOnly: + summary: Update Retention Period Only + value: + retentionPeriod: 600000000000 + responses: + '200': + description: The cluster database was successfully updated. + content: + application/json: + schema: + type: object + properties: + name: + $ref: '#/components/schemas/ClusterDatabaseName' + maxTables: + $ref: '#/components/schemas/ClusterDatabaseMaxTables' + maxColumnsPerTable: + $ref: '#/components/schemas/ClusterDatabaseMaxColumnsPerTable' + retentionPeriod: + $ref: '#/components/schemas/ClusterDatabaseRetentionPeriod' + required: + - maxTables + - maxColumnsPerTable + - retentionPeriod + - name + examples: + allFields: + summary: Update All Fields + value: + name: DatabaseOne + maxTables: 300 + maxColumnsPerTable: 150 + retentionPeriod: 600000000000 + maxTablesOnly: + summary: Update Max Tables Only + value: + name: DatabaseOne + maxTables: 300 + maxColumnsPerTable: 200 + retentionPeriod: 0 + maxColumnsPerTableOnly: + summary: Update Max Columns Per Table Only + value: + name: DatabaseOne + maxTables: 500 + maxColumnsPerTable: 150 + retentionPeriod: 0 + retentionPeriodOnly: + summary: Update Retention Period Only + value: + name: DatabaseOne + maxTables: 500 + maxColumnsPerTable: 200 
+ retentionPeriod: 600000000000 + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + update_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/databases/$databaseName" \ + --request PATCH \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "maxTables": 150, + "maxColumnsPerTable": 180, + "retentionPeriod": 1200000000000 + }' \ + ) + echo "$response" + } + delete: + operationId: DeleteClusterDatabase + summary: Delete a database + tags: + - Databases + parameters: + - name: databaseName + in: path + description: The name of the database to delete + required: true + schema: + $ref: '#/components/schemas/ClusterDatabaseName' + responses: + '204': + description: The cluster database was successfully deleted + $ref: '#/components/responses/NoContent' + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + delete_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/databases/$databaseName" \ + --request DELETE \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + /databases/{databaseName}/tables: + post: + operationId: CreateClusterDatabaseTable + summary: Create a database table + tags: + - 
Tables + parameters: + - name: databaseName + in: path + description: The name of the database to create the database table for + required: true + schema: + $ref: '#/components/schemas/ClusterDatabaseName' + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + name: + $ref: '#/components/schemas/ClusterDatabaseTableName' + partitionTemplate: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplate' + required: + - name + examples: + requiredFieldsOnly: + summary: Required Fields Only + value: + name: TableOne + allFields: + summary: All Fields + value: + name: TableTwo + partitionTemplate: + - type: time + value: '%Y' + - type: tag + value: bananas + - type: tag + value: plátanos + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + responses: + '200': + description: The cluster database table was successfully created + content: + application/json: + schema: + type: object + properties: + databaseName: + description: The name of the database that the database table belongs to + $ref: '#/components/schemas/ClusterDatabaseName' + name: + description: The name of the database table + $ref: '#/components/schemas/ClusterDatabaseTableName' + partitionTemplate: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplate' + required: + - databaseName + - name + examples: + requiredFieldsOnly: + summary: Required Fields Only + value: + databaseName: DatabaseOne + name: TableOne + allFields: + summary: All Fields + value: + databaseName: DatabaseOne + name: TableTwo + partitionTemplate: + - type: time + value: '%Y' + - type: tag + value: a + - type: tag + value: c + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '409': + $ref: '#/components/responses/Conflict' + '500': + $ref: 
'#/components/responses/InternalServerError' + /tokens: + get: + operationId: GetDatabaseTokens + summary: Get all database tokens for a cluster + tags: + - Database tokens + responses: + '200': + description: The database tokens were successfully retrieved + content: + application/json: + schema: + type: array + items: + type: object + properties: + id: + description: The ID of the database token + $ref: '#/components/schemas/UuidV4' + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + createdAt: + $ref: '#/components/schemas/DatabaseTokenCreatedAt' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' + required: + - id + - description + - permissions + - createdAt + example: + - id: 55555555-5555-4555-8555-555555555555 + description: Limited Access Token + permissions: + - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + createdAt: '2023-12-21T17:32:28.000Z' + - id: 66666666-6666-4666-8666-666666666666 + description: Full Access Token + permissions: + - action: write + resource: '*' + createdAt: '2024-03-02T04:20:19.000Z' + - id: 77777777-7777-4777-8777-777777777777 + description: No Access Token + permissions: [] + createdAt: '2024-03-02T04:20:19.000Z' + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + list_tokens () { + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + post: + operationId: CreateDatabaseToken + 
summary: Create a database token + tags: + - Database tokens + description: | + Create a [database token](/influxdb3/clustered/admin/tokens/database/) for a cluster. + + The token returned on the `accessToken` property in the response can be used to authenticate query and write requests to the cluster. + + ### Notable behaviors + + - InfluxDB might take some time--from a few seconds to a few minutes--to activate and synchronize new tokens. If a new database token doesn't immediately work (you receive a `401 Unauthorized` error) for querying or writing, wait and then try your request again. + + - Token strings are viewable _only_ on token creation and aren't stored by InfluxDB; you can't recover a lost token. + + #### Store secure tokens in a secret store + + We recommend storing database tokens in a **secure secret store**. + For example, see how to [authenticate Telegraf using tokens in your OS secret store](https://github.com/influxdata/telegraf/tree/master/plugins/secretstores/os). + + If you lose a token, [delete the token from InfluxDB](/influxdb3/clustered/admin/tokens/database/delete/) and create a new one. 
+ requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + required: + - description + examples: + limitedAccessToken: + summary: Limited Access Token + value: + description: Limited Access Token + permissions: + - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + fullAccessToken: + summary: Full Access Token + value: + description: Full Access Token + permissions: + - action: write + resource: '*' + noAccessToken: + summary: No Access Token + value: + description: No Access Token + permissions: [] + responses: + '200': + description: The database token was successfully created + content: + application/json: + schema: + type: object + properties: + id: + description: The ID of the database token + $ref: '#/components/schemas/UuidV4' + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + createdAt: + $ref: '#/components/schemas/DatabaseTokenCreatedAt' + accessToken: + $ref: '#/components/schemas/DatabaseTokenAccessToken' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' + required: + - id + - description + - permissions + - createdAt + - accessToken + examples: + limitedAccessToken: + summary: Limited Access Token + value: + id: 55555555-5555-4555-8555-555555555555 + description: Limited Access Token + permissions: + - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + createdAt: '2023-12-21T17:32:28.000Z' + accessToken: apiv1_5555555555555555555555555555555555555555555555555555555555555555 + fullAccessToken: + summary: Full Access Token + value: + id: 
66666666-6666-4666-8666-666666666666 + description: Full Access Token + permissions: + - action: write + resource: '*' + createdAt: '2024-03-02T04:20:19.000Z' + accessToken: apiv1_6666666666666666666666666666666666666666666666666666666666666666 + noAccessToken: + summary: No Access Token + value: + id: 66666666-6666-4666-8666-666666666666 + description: No Access Token + permissions: [] + createdAt: '2024-03-02T04:20:19.000Z' + accessToken: apiv1_7777777777777777777777777777777777777777777777777777777777777777 + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '409': + $ref: '#/components/responses/Conflict' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + create_token () { + local description=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens" \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "description": "'$description'", + "permissions": [ + { + "action": "read", + "resource": "DatabaseOne" + }, + { + "action": "write", + "resource": "DatabaseTwo" + } + ] + }' \ + ) + echo "$response" + } + /tokens/{tokenId}: + get: + operationId: GetDatabaseToken + summary: Get a database token + tags: + - Database tokens + parameters: + - name: tokenId + in: path + description: The ID of the [database token](/influxdb3/clustered/admin/tokens/database/) to get + required: true + schema: + $ref: '#/components/schemas/UuidV4' + responses: + '200': + description: The database token was successfully retrieved. 
+ content: + application/json: + schema: + type: object + properties: + id: + description: The ID of the database token + $ref: '#/components/schemas/UuidV4' + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + createdAt: + $ref: '#/components/schemas/DatabaseTokenCreatedAt' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' + required: + - id + - description + - permissions + - createdAt + examples: + limitedAccessToken: + summary: Limited Access Token + value: + id: 55555555-5555-4555-8555-555555555555 + description: Limited Access Token + permissions: + - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + createdAt: '2023-12-21T17:32:28.000Z' + fullAccessToken: + summary: Full Access Token + value: + id: 66666666-6666-4666-8666-666666666666 + description: Full Access Token + permissions: + - action: write + resource: '*' + createdAt: '2024-03-02T04:20:19.000Z' + noAccessToken: + summary: No Access Token + value: + id: 77777777-7777-4777-8777-777777777777 + description: No Access Token + permissions: [] + createdAt: '2024-03-02T04:20:19.000Z' + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + get_token () { + local tokenId=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens/$tokenId" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + patch: + operationId: UpdateDatabaseToken + summary: Update a database token + tags: + - Database tokens + parameters: + - name: 
tokenId + in: path + description: The ID of the [database token](/influxdb3/clustered/admin/tokens/database/) to update + required: true + schema: + $ref: '#/components/schemas/UuidV4' + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + minProperties: 1 + examples: + allFields: + summary: Update All Fields + value: + description: Updated Limited Access Token + permissions: + - action: write + resource: DatabaseOne + - action: read + resource: DatabaseTwo + - action: write + resource: DatabaseThree + descriptionOnly: + summary: Update Description Only + value: + description: Updated Limited Access Token + permissionsOnly: + summary: Update Permissions Only + value: + permissions: + - action: write + resource: DatabaseOne + - action: read + resource: DatabaseTwo + - action: write + resource: DatabaseThree + removeAllPermissions: + summary: Remove All Permissions + value: + permissions: [] + responses: + '200': + description: The database token was successfully updated + content: + application/json: + schema: + type: object + properties: + id: + description: The ID of the database token + $ref: '#/components/schemas/UuidV4' + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + createdAt: + $ref: '#/components/schemas/DatabaseTokenCreatedAt' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' + required: + - id + - description + - permissions + - createdAt + examples: + allFields: + summary: Update All Fields + value: + id: 55555555-5555-4555-8555-555555555555 + description: Updated Limited Access Token + permissions: + - action: write + resource: DatabaseOne + - action: read + resource: DatabaseTwo + - 
action: write + resource: DatabaseThree + createdAt: '2023-12-21T17:32:28.000Z' + descriptionOnly: + summary: Update Description Only + value: + id: 55555555-5555-4555-8555-555555555555 + description: Updated Limited Access Token + permissions: + - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + createdAt: '2023-12-21T17:32:28.000Z' + permissionsOnly: + summary: Update Permissions Only + value: + id: 55555555-5555-4555-8555-555555555555 + description: Limited Access Token + permissions: + - action: write + resource: DatabaseOne + - action: read + resource: DatabaseTwo + - action: write + resource: DatabaseThree + createdAt: '2023-12-21T17:32:28.000Z' + removeAllPermissions: + summary: Remove All Permissions + value: + id: 55555555-5555-4555-8555-555555555555 + description: Limited Access Token + permissions: [] + createdAt: '2023-12-21T17:32:28.000Z' + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '409': + $ref: '#/components/responses/Conflict' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + update_token () { + local tokenId=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens/$tokenId" \ + --request PATCH \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "description": "Updated Limited Access Token", + "permissions": [ + { + "action": "write", + "resource": "DatabaseOne" + }, + { + "action": "read", + "resource": "DatabaseTwo" + }, + { + "action": "write", + "resource": "DatabaseThree" + } + ] + }' \ + ) + echo "$response" + } + delete: + operationId: DeleteDatabaseToken + summary: Delete a database token + tags: + - Database tokens + 
parameters: + - name: tokenId + in: path + description: The ID of the [database token](/influxdb3/clustered/admin/tokens/database/) to delete + required: true + schema: + $ref: '#/components/schemas/UuidV4' + responses: + '204': + description: The database token was successfully deleted + $ref: '#/components/responses/NoContent' + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + delete_token () { + local tokenId=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens/$tokenId" \ + --request DELETE \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } +components: + schemas: + Error: + type: object + properties: + code: + type: integer + message: + type: string + examples: + - code: 400 + message: bad request + - code: 401 + message: unauthorized + - code: 403 + message: forbidden + - code: 404 + message: not found + - code: 409 + message: conflict + - code: 500 + message: internal server error + required: + - code + - message + DateTimeRfc3339: + type: string + format: date-time + examples: + - '2023-12-21T17:32:28Z' + UuidV4: + type: string + format: uuid + examples: + - 11111111-1111-4111-8111-111111111111 + - 22222222-1111-4111-8111-111111111111 + ClusterDatabaseName: + description: The name of the cluster database + type: string + examples: + - DatabaseOne + - DatabaseTwo + maxLength: 64 + minLength: 1 + ClusterDatabaseRetentionPeriod: + description: | + The retention period of the [cluster database](/influxdb3/clustered/admin/databases/) in nanoseconds, if applicable + + If the retention period is not set or is set to 0, the database will have infinite retention + 
type: integer + format: int64 + default: 0 + examples: + - 300000000000 + - 600000000000 + minimum: 0 + ClusterDatabaseMaxTables: + description: The maximum number of tables for the cluster database + type: integer + format: int32 + default: 500 + examples: + - 100 + - 300 + minimum: 1 + ClusterDatabaseMaxColumnsPerTable: + description: The maximum number of columns per table for the cluster database + type: integer + format: int32 + default: 200 + examples: + - 50 + - 150 + minimum: 1 + ClusterDatabasePartitionTemplate: + description: | + A template for [partitioning](/influxdb3/clustered/admin/custom-partitions/) a cluster database. + + Each template part is evaluated in sequence, concatenating the final + partition key from the output of each part, delimited by the partition + key delimiter `|`. + + For example, using the partition template below: + + ```json + [ + { + "type": "time", + "value": "%Y" + }, + { + "type": "tag", + "value": "bananas" + }, + { + "type": "tag", + "value": "plátanos" + }, + { + "type": "bucket", + "value": { + "tagName": "c", + "numberOfBuckets": 10 + } + } + ] + ``` + + The following partition keys are derived: + + * `time=2023-01-01, a=bananas, b=plátanos, c=ananas` -> `2023|bananas|plátanos|5` + * `time=2023-01-01, b=plátanos` -> `2023|!|plátanos|!` + * `time=2023-01-01, another=cat, b=plátanos` -> `2023|!|plátanos|!` + * `time=2023-01-01` -> `2023|!|!|!` + * `time=2023-01-01, a=cat|dog, b=!, c=!` -> `2023|cat%7Cdog|%21|8` + * `time=2023-01-01, a=%50, c=%50` -> `2023|%2550|!|9` + * `time=2023-01-01, a=, c=` -> `2023|^|!|0` + * `time=2023-01-01, a=` -> `2023|#|!|!` + * `time=2023-01-01, c=` -> `2023|!|!|` + + When using the default [partitioning](/influxdb3/clustered/admin/custom-partitions/) template (YYYY-MM-DD) there is no + encoding necessary, as the derived partition key contains a single part, and + no reserved characters. 
[`TemplatePart::Bucket`] parts by definition will + always be within the part length limit and contain no restricted characters + so are also not percent-encoded and/or truncated. + type: array + items: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplatePart' + examples: + - - type: time + value: '%Y' + - type: tag + value: bananas + - type: tag + value: plátanos + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + maxItems: 8 + minItems: 1 + uniqueItems: true + ClusterDatabasePartitionTemplatePart: + description: A sub-part of a `PartitionTemplate` + anyOf: + - $ref: '#/components/schemas/ClusterDatabasePartitionTemplatePartTagValue' + - $ref: '#/components/schemas/ClusterDatabasePartitionTemplatePartTimeFormat' + - $ref: '#/components/schemas/ClusterDatabasePartitionTemplatePartBucket' + examples: + - type: time + value: '%Y' + - type: tag + value: bananas + - type: tag + value: plátanos + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + ClusterDatabasePartitionTemplatePartTagValue: + description: | + A tag value matcher that extracts a string value from the specified tag name + + If a row does not contain a value for the specified tag name, the NULL/missing partition key part `!` is rendered. + type: object + properties: + type: + type: string + enum: + - tag + value: + type: string + minLength: 1 + examples: + - type: tag + value: bananas + - type: tag + value: plátanos + ClusterDatabasePartitionTemplatePartTimeFormat: + description: A time format matcher that accepts a "strftime"-like format string and evaluates it against the "time" column + type: object + properties: + type: + type: string + enum: + - time + value: + type: string + minLength: 1 + examples: + - type: time + value: '%Y' + ClusterDatabasePartitionTemplatePartBucket: + description: | + A bucketing matcher that sorts data through a uniform hash function on the values of the given tag name. 
+ + If a row does not contain a value for the specified tag name, the NULL/missing partition key part `!` is rendered. + type: object + properties: + type: + type: string + enum: + - bucket + value: + type: object + properties: + tagName: + description: The name of the tag used to derive the bucket the data belongs in + type: string + minLength: 1 + numberOfBuckets: + description: The number of buckets tag values are distributed across + type: integer + format: int32 + maximum: 100000 + minimum: 1 + examples: + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + ClusterDatabaseTableName: + description: The name of the [cluster database](/influxdb3/clustered/admin/databases/) table + type: string + examples: + - TableOne + - TableTwo + minLength: 1 + DatabaseTokenDescription: + description: The description of the database token + type: string + examples: + - Limited Access Token + - Full Access Token + DatabaseTokenResourceAllDatabases: + description: A resource value for a [database token](/influxdb3/clustered/admin/tokens/database/) permission that refers to all databases + type: string + enum: + - '*' + DatabaseTokenPermissionAction: + description: The action the [database token](/influxdb3/clustered/admin/tokens/database/) permission allows + type: string + DatabaseTokenPermissionResource: + description: The resource the [database token](/influxdb3/clustered/admin/tokens/database/) permission applies to + anyOf: + - $ref: '#/components/schemas/ClusterDatabaseName' + - $ref: '#/components/schemas/DatabaseTokenResourceAllDatabases' + examples: + - DatabaseOne + - DatabaseTwo + - '*' + DatabaseTokenPermission: + description: The description of the database token + type: object + properties: + action: + $ref: '#/components/schemas/DatabaseTokenPermissionAction' + resource: + $ref: '#/components/schemas/DatabaseTokenPermissionResource' + examples: + - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + - action: write + resource: 
'*' + DatabaseTokenPermissions: + description: The list of permissions the [database token](/influxdb3/clustered/admin/tokens/database/) allows + type: array + items: + $ref: '#/components/schemas/DatabaseTokenPermission' + examples: + - - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + - - action: write + resource: '*' + DatabaseTokenCreatedAt: + description: | + The date and time that the [database token](/influxdb3/clustered/admin/tokens/database/) was created + + Uses RFC3339 format + $ref: '#/components/schemas/DateTimeRfc3339' + examples: + - '2023-12-21T17:32:28.000Z' + - '2024-03-02T04:20:19.000Z' + DatabaseTokenExpiresAt: + description: | + The date and time that the database token expires, if applicable + + Uses RFC3339 format + $ref: '#/components/schemas/DateTimeRfc3339' + DatabaseTokenRevokedAt: + description: | + The date and time that the database token was revoked, if applicable + + Uses RFC3339 format + $ref: '#/components/schemas/DateTimeRfc3339' + DatabaseTokenAccessToken: + description: | + The access token that can be used to authenticate query and write requests to the cluster + + The access token is never stored by InfluxDB and is only returned once when the token is created. If the access token is lost, a new token must be created. 
+ type: string + examples: + - apiv1_5555555555555555555555555555555555555555555555555555555555555555 + - apiv1_6666666666666666666666666666666666666666666666666666666666666666 + minLength: 64 + responses: + BadRequest: + description: Bad Request + content: + application/json: + schema: + properties: + code: + type: integer + enum: + - 400 + $ref: '#/components/schemas/Error' + example: + code: 400 + message: bad request + Unauthorized: + description: Unauthorized + content: + application/json: + schema: + properties: + code: + type: integer + enum: + - 401 + $ref: '#/components/schemas/Error' + example: + code: 401 + message: unauthorized + Forbidden: + description: Forbidden + content: + application/json: + schema: + properties: + code: + type: integer + enum: + - 403 + $ref: '#/components/schemas/Error' + example: + code: 403 + message: forbidden + NotFound: + description: Not Found + content: + application/json: + schema: + properties: + code: + type: integer + enum: + - 404 + $ref: '#/components/schemas/Error' + example: + code: 404 + message: not found + Conflict: + description: Conflict + content: + application/json: + schema: + properties: + code: + type: integer + enum: + - 409 + $ref: '#/components/schemas/Error' + example: + code: 409 + message: conflict + InternalServerError: + description: Internal Server Error + content: + application/json: + schema: + properties: + code: + type: integer + enum: + - 500 + $ref: '#/components/schemas/Error' + example: + code: 500 + message: internal server error + NoContent: + description: No Content + securitySchemes: + bearerAuthManagementToken: + type: http + scheme: bearer + bearerFormat: Management Token + bearerAuthJwt: + type: http + scheme: bearer + bearerFormat: JWT +x-tagGroups: + - name: Using the Management API + tags: + - Authentication + - Quickstart + - name: All endpoints + tags: + - Database tokens + - Databases + - Tables diff --git a/api-docs/influxdb3/core/v3/ref.yml 
b/api-docs/influxdb3/core/v3/ref.yml index 3f4a738e8..e15c42f9f 100644 --- a/api-docs/influxdb3/core/v3/ref.yml +++ b/api-docs/influxdb3/core/v3/ref.yml @@ -922,9 +922,25 @@ paths: summary: Delete a database description: | Soft deletes a database. - The database is scheduled for deletion and unavailable for querying. + The database is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. parameters: - $ref: '#/components/parameters/db' + - name: hard_delete_at + in: query + required: false + schema: + type: string + format: date-time + description: | + Schedule the database for hard deletion at the specified time. + If not provided, the database will be soft deleted. + Use ISO 8601 date-time format (for example, "2025-12-31T23:59:59Z"). + + #### Deleting a database cannot be undone + + Deleting a database is a destructive action. + Once a database is deleted, data stored in that database cannot be recovered. responses: '200': description: Success. Database deleted. @@ -961,7 +977,13 @@ paths: summary: Delete a table description: | Soft deletes a table. - The table is scheduled for deletion and unavailable for querying. + The table is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + + #### Deleting a table cannot be undone + + Deleting a table is a destructive action. + Once a table is deleted, data stored in that table cannot be recovered. parameters: - $ref: '#/components/parameters/db' - name: table @@ -969,6 +991,16 @@ paths: required: true schema: type: string + - name: hard_delete_at + in: query + required: false + schema: + type: string + format: date-time + description: | + Schedule the table for hard deletion at the specified time. + If not provided, the table will be soft deleted. + Use ISO 8601 format (for example, "2025-12-31T23:59:59Z"). responses: '200': description: Success (no content). The table has been deleted. 
@@ -1078,7 +1110,7 @@ paths: In `"cron:CRON_EXPRESSION"`, `CRON_EXPRESSION` uses extended 6-field cron format. The cron expression `0 0 6 * * 1-5` means the trigger will run at 6:00 AM every weekday (Monday to Friday). value: - db: DATABASE_NAME + db: mydb plugin_filename: schedule.py trigger_name: schedule_cron_trigger trigger_specification: cron:0 0 6 * * 1-5 @@ -1136,7 +1168,7 @@ paths: db: mydb plugin_filename: request.py trigger_name: hello_world_trigger - trigger_specification: path:hello-world + trigger_specification: request:hello-world cron_friday_afternoon: summary: Cron trigger for Friday afternoons description: | @@ -1365,16 +1397,16 @@ paths: description: Plugin not enabled. tags: - Processing engine - /api/v3/engine/{plugin_path}: + /api/v3/engine/{request_path}: parameters: - - name: plugin_path + - name: request_path description: | - The path configured in the request trigger specification "path:"` for the plugin. + The path configured in the request trigger specification for the plugin. For example, if you define a trigger with the following: ```json - trigger-spec: "path:hello-world" + trigger_specification: "request:hello-world" ``` then, the HTTP API exposes the following plugin endpoint: @@ -1390,7 +1422,7 @@ paths: operationId: GetProcessingEnginePluginRequest summary: On Request processing engine plugin request description: | - Executes the On Request processing engine plugin specified in ``. + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. The request can include request headers, query string parameters, and a request body, which InfluxDB passes to the plugin. An On Request plugin implements the following signature: @@ -1417,7 +1449,7 @@ paths: operationId: PostProcessingEnginePluginRequest summary: On Request processing engine plugin request description: | - Executes the On Request processing engine plugin specified in ``. 
+ Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. The request can include request headers, query string parameters, and a request body, which InfluxDB passes to the plugin. An On Request plugin implements the following signature: @@ -1868,7 +1900,7 @@ components: `schedule.py` or `endpoints/report.py`. The path can be absolute or relative to the `--plugins-dir` directory configured when starting InfluxDB 3. - The plugin file must implement the trigger interface associated with the trigger's specification (`trigger_spec`). + The plugin file must implement the trigger interface associated with the trigger's specification. trigger_name: type: string trigger_specification: @@ -1911,12 +1943,12 @@ components: - `table:TABLE_NAME` - Triggers on write events to a specific table ### On-demand triggers - Format: `path:ENDPOINT_NAME` + Format: `request:REQUEST_PATH` - Creates an HTTP endpoint `/api/v3/engine/ENDPOINT_NAME` for manual invocation: - - `path:hello-world` - Creates endpoint `/api/v3/engine/hello-world` - - `path:data-export` - Creates endpoint `/api/v3/engine/data-export` - pattern: ^(cron:[0-9 *,/-]+|every:[0-9]+[smhd]|all_tables|table:[a-zA-Z_][a-zA-Z0-9_]*|path:[a-zA-Z0-9_-]+)$ + Creates an HTTP endpoint `/api/v3/engine/REQUEST_PATH` for manual invocation: + - `request:hello-world` - Creates endpoint `/api/v3/engine/hello-world` + - `request:data-export` - Creates endpoint `/api/v3/engine/data-export` + pattern: ^(cron:[0-9 *,/-]+|every:[0-9]+[smhd]|all_tables|table:[a-zA-Z_][a-zA-Z0-9_]*|request:[a-zA-Z0-9_-]+)$ example: cron:0 0 6 * * 1-5 trigger_arguments: type: object @@ -2013,6 +2045,65 @@ components: - m - h type: string + UpdateDatabaseRequest: + type: object + properties: + retention_period: + type: string + description: | + The retention period for the database. Specifies how long data should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). 
+ example: "7d" + description: Request schema for updating database configuration. + UpdateTableRequest: + type: object + properties: + db: + type: string + description: The name of the database containing the table. + table: + type: string + description: The name of the table to update. + retention_period: + type: string + description: | + The retention period for the table. Specifies how long data in this table should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: "30d" + required: + - db + - table + description: Request schema for updating table configuration. + LicenseResponse: + type: object + properties: + license_type: + type: string + description: The type of license (for example, "enterprise", "trial"). + example: "enterprise" + expires_at: + type: string + format: date-time + description: The expiration date of the license in ISO 8601 format. + example: "2025-12-31T23:59:59Z" + features: + type: array + items: + type: string + description: List of features enabled by the license. + example: + - "clustering" + - "processing_engine" + - "advanced_auth" + status: + type: string + enum: + - "active" + - "expired" + - "invalid" + description: The current status of the license. + example: "active" + description: Response schema for license information. responses: Unauthorized: description: Unauthorized access. diff --git a/api-docs/influxdb3/enterprise/v3/ref.yml b/api-docs/influxdb3/enterprise/v3/ref.yml index 9f1a7fca1..34b2bd961 100644 --- a/api-docs/influxdb3/enterprise/v3/ref.yml +++ b/api-docs/influxdb3/enterprise/v3/ref.yml @@ -922,9 +922,25 @@ paths: summary: Delete a database description: | Soft deletes a database. - The database is scheduled for deletion and unavailable for querying. + The database is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. 
parameters: - $ref: '#/components/parameters/db' + - name: hard_delete_at + in: query + required: false + schema: + type: string + format: date-time + description: | + Schedule the database for hard deletion at the specified time. + If not provided, the database will be soft deleted. + Use ISO 8601 date-time format (for example, "2025-12-31T23:59:59Z"). + + #### Deleting a database cannot be undone + + Deleting a database is a destructive action. + Once a database is deleted, data stored in that database cannot be recovered. responses: '200': description: Success. Database deleted. @@ -961,7 +977,13 @@ paths: summary: Delete a table description: | Soft deletes a table. - The table is scheduled for deletion and unavailable for querying. + The table is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + + #### Deleting a table cannot be undone + + Deleting a table is a destructive action. + Once a table is deleted, data stored in that table cannot be recovered. parameters: - $ref: '#/components/parameters/db' - name: table @@ -969,6 +991,16 @@ paths: required: true schema: type: string + - name: hard_delete_at + in: query + required: false + schema: + type: string + format: date-time + description: | + Schedule the table for hard deletion at the specified time. + If not provided, the table will be soft deleted. + Use ISO 8601 format (for example, "2025-12-31T23:59:59Z"). responses: '200': description: Success (no content). The table has been deleted. @@ -978,6 +1010,77 @@ paths: description: Table not found. tags: - Table + patch: + operationId: PatchConfigureTable + summary: Update a table + description: | + Updates table configuration, such as retention period. + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateTableRequest' + responses: + '200': + description: Success. The table has been updated. + '400': + description: Bad request. 
+ '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Table not found. + tags: + - Table + /api/v3/configure/database/{db}: + patch: + operationId: PatchConfigureDatabase + summary: Update a database + description: | + Updates database configuration, such as retention period. + parameters: + - name: db + in: path + required: true + schema: + type: string + description: The name of the database to update. + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateDatabaseRequest' + responses: + '200': + description: Success. The database has been updated. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Database not found. + tags: + - Database + /api/v3/show/license: + get: + operationId: GetShowLicense + summary: Show license information + description: | + Retrieves information about the current InfluxDB 3 Enterprise license. + responses: + '200': + description: Success. The response body contains license information. + content: + application/json: + schema: + $ref: '#/components/schemas/LicenseResponse' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + tags: + - Server information /api/v3/configure/distinct_cache: post: operationId: PostConfigureDistinctCache @@ -1136,7 +1239,7 @@ paths: db: mydb plugin_filename: request.py trigger_name: hello_world_trigger - trigger_specification: path:hello-world + trigger_specification: request:hello-world cron_friday_afternoon: summary: Cron trigger for Friday afternoons description: | @@ -1365,16 +1468,16 @@ paths: description: Plugin not enabled. tags: - Processing engine - /api/v3/engine/{plugin_path}: + /api/v3/engine/{request_path}: parameters: - - name: plugin_path + - name: request_path description: | - The path configured in the request trigger specification "path:"` for the plugin. 
+ The path configured in the request trigger specification for the plugin. For example, if you define a trigger with the following: ```json - trigger-spec: "path:hello-world" + trigger_specification: "request:hello-world" ``` then, the HTTP API exposes the following plugin endpoint: @@ -1390,7 +1493,7 @@ paths: operationId: GetProcessingEnginePluginRequest summary: On Request processing engine plugin request description: | - Executes the On Request processing engine plugin specified in ``. + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. The request can include request headers, query string parameters, and a request body, which InfluxDB passes to the plugin. An On Request plugin implements the following signature: @@ -1417,7 +1520,7 @@ paths: operationId: PostProcessingEnginePluginRequest summary: On Request processing engine plugin request description: | - Executes the On Request processing engine plugin specified in ``. + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. The request can include request headers, query string parameters, and a request body, which InfluxDB passes to the plugin. An On Request plugin implements the following signature: @@ -1812,6 +1915,16 @@ components: properties: db: type: string + pattern: '^[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]$|^[a-zA-Z0-9]$' + description: |- + The database name. Database names cannot contain underscores (_). + Names must start and end with alphanumeric characters and can contain hyphens (-) in the middle. + retention_period: + type: string + description: |- + The retention period for the database. Specifies how long data should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: "7d" required: - db CreateTableRequest: @@ -1843,6 +1956,12 @@ components: required: - name - type + retention_period: + type: string + description: |- + The retention period for the table. 
Specifies how long data in this table should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: "30d" required: - db - table @@ -1929,11 +2048,10 @@ components: `schedule.py` or `endpoints/report.py`. The path can be absolute or relative to the `--plugins-dir` directory configured when starting InfluxDB 3. - The plugin file must implement the trigger interface associated with the trigger's specification (`trigger_spec`). + The plugin file must implement the trigger interface associated with the trigger's specification. trigger_name: type: string trigger_specification: - type: string description: | Specifies when and how the processing engine trigger should be invoked. @@ -1972,12 +2090,12 @@ components: - `table:TABLE_NAME` - Triggers on write events to a specific table ### On-demand triggers - Format: `path:ENDPOINT_NAME` + Format: `request:REQUEST_PATH` - Creates an HTTP endpoint `/api/v3/engine/ENDPOINT_NAME` for manual invocation: - - `path:hello-world` - Creates endpoint `/api/v3/engine/hello-world` - - `path:data-export` - Creates endpoint `/api/v3/engine/data-export` - pattern: ^(cron:[0-9 *,/-]+|every:[0-9]+[smhd]|all_tables|table:[a-zA-Z_][a-zA-Z0-9_]*|path:[a-zA-Z0-9_-]+)$ + Creates an HTTP endpoint `/api/v3/engine/REQUEST_PATH` for manual invocation: + - `request:hello-world` - Creates endpoint `/api/v3/engine/hello-world` + - `request:data-export` - Creates endpoint `/api/v3/engine/data-export` + pattern: ^(cron:[0-9 *,/-]+|every:[0-9]+[smhd]|all_tables|table:[a-zA-Z_][a-zA-Z0-9_]*|request:[a-zA-Z0-9_-]+)$ example: cron:0 0 6 * * 1-5 trigger_arguments: type: object @@ -2074,6 +2192,65 @@ components: - m - h type: string + UpdateDatabaseRequest: + type: object + properties: + retention_period: + type: string + description: | + The retention period for the database. Specifies how long data should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). 
+ example: "7d" + description: Request schema for updating database configuration. + UpdateTableRequest: + type: object + properties: + db: + type: string + description: The name of the database containing the table. + table: + type: string + description: The name of the table to update. + retention_period: + type: string + description: | + The retention period for the table. Specifies how long data in this table should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: "30d" + required: + - db + - table + description: Request schema for updating table configuration. + LicenseResponse: + type: object + properties: + license_type: + type: string + description: The type of license (for example, "enterprise", "trial"). + example: "enterprise" + expires_at: + type: string + format: date-time + description: The expiration date of the license in ISO 8601 format. + example: "2025-12-31T23:59:59Z" + features: + type: array + items: + type: string + description: List of features enabled by the license. + example: + - "clustering" + - "processing_engine" + - "advanced_auth" + status: + type: string + enum: + - "active" + - "expired" + - "invalid" + description: The current status of the license. + example: "active" + description: Response schema for license information. responses: Unauthorized: description: Unauthorized access. 
diff --git a/assets/js/api-libs.js b/assets/js/api-libs.js index 17cb76c85..edfbdebe0 100644 --- a/assets/js/api-libs.js +++ b/assets/js/api-libs.js @@ -2,7 +2,7 @@ ///////////////// Preferred Client Library programming language /////////////// //////////////////////////////////////////////////////////////////////////////// import { activateTabs, updateBtnURLs } from './tabbed-content.js'; -import { getPreference, setPreference } from './local-storage.js'; +import { getPreference, setPreference } from './services/local-storage.js'; function getVisitedApiLib() { const path = window.location.pathname.match( diff --git a/assets/js/ask-ai.js b/assets/js/ask-ai.js index 292fb0a4c..f315711a2 100644 --- a/assets/js/ask-ai.js +++ b/assets/js/ask-ai.js @@ -8,29 +8,31 @@ function setUser(userid, email) { window[NAMESPACE] = { user: { uniqueClientId: userid, - email: email, - } - } + email: email, + }, + }; } // Initialize the chat widget -function initializeChat({onChatLoad, chatAttributes}) { - /* See https://docs.kapa.ai/integrations/website-widget/configuration for +function initializeChat({ onChatLoad, chatAttributes }) { + /* See https://docs.kapa.ai/integrations/website-widget/configuration for * available configuration options. * All values are strings. */ - // If you make changes to data attributes here, you also need to port the changes to the api-docs/template.hbs API reference template. + // If you make changes to data attributes here, you also need to + // port the changes to the api-docs/template.hbs API reference template. const requiredAttributes = { websiteId: 'a02bca75-1dd3-411e-95c0-79ee1139be4d', projectName: 'InfluxDB', projectColor: '#020a47', projectLogo: '/img/influx-logo-cubo-white.png', - } + }; const optionalAttributes = { - - modalDisclaimer: 'This AI can access [documentation for InfluxDB, clients, and related tools](https://docs.influxdata.com). 
Information you submit is used in accordance with our [Privacy Policy](https://www.influxdata.com/legal/privacy-policy/).', - modalExampleQuestions: 'Use Python to write data to InfluxDB 3,How do I query using SQL?,How do I use MQTT with Telegraf?', + modalDisclaimer: + 'This AI can access [documentation for InfluxDB, clients, and related tools](https://docs.influxdata.com). Information you submit is used in accordance with our [Privacy Policy](https://www.influxdata.com/legal/privacy-policy/).', + modalExampleQuestions: + 'Use Python to write data to InfluxDB 3,How do I query using SQL?,How do I use MQTT with Telegraf?', buttonHide: 'true', exampleQuestionButtonWidth: 'auto', modalOpenOnCommandK: 'true', @@ -52,28 +54,32 @@ function initializeChat({onChatLoad, chatAttributes}) { modalHeaderBorderBottom: 'none', modalTitleColor: '#fff', modalTitleFontSize: '1.25rem', - } + }; const scriptUrl = 'https://widget.kapa.ai/kapa-widget.bundle.js'; const script = document.createElement('script'); script.async = true; script.src = scriptUrl; - script.onload = function() { + script.onload = function () { onChatLoad(); window.influxdatadocs.AskAI = AskAI; }; - script.onerror = function() { + script.onerror = function () { console.error('Error loading AI chat widget script'); }; - const dataset = {...requiredAttributes, ...optionalAttributes, ...chatAttributes}; - Object.keys(dataset).forEach(key => { - // Assign dataset attributes from the object + const dataset = { + ...requiredAttributes, + ...optionalAttributes, + ...chatAttributes, + }; + Object.keys(dataset).forEach((key) => { + // Assign dataset attributes from the object script.dataset[key] = dataset[key]; }); // Check for an existing script element to remove - const oldScript= document.querySelector(`script[src="${scriptUrl}"]`); + const oldScript = document.querySelector(`script[src="${scriptUrl}"]`); if (oldScript) { oldScript.remove(); } @@ -82,22 +88,21 @@ function initializeChat({onChatLoad, chatAttributes}) { 
function getProductExampleQuestions() { const questions = productData?.product?.ai_sample_questions; - return questions?.join(',') || ''; + return questions?.join(',') || ''; } -/** +/** * chatParams: specify custom (for example, page-specific) attribute values for the chat, pass the dataset key-values (collected in ...chatParams). See https://docs.kapa.ai/integrations/website-widget/configuration for available configuration options. * onChatLoad: function to call when the chat widget has loaded * userid: optional, a unique user ID for the user (not currently used for public docs) -*/ + */ export default function AskAI({ userid, email, onChatLoad, ...chatParams }) { - const modalExampleQuestions = getProductExampleQuestions(); const chatAttributes = { ...(modalExampleQuestions && { modalExampleQuestions }), ...chatParams, - } - initializeChat({onChatLoad, chatAttributes}); + }; + initializeChat({ onChatLoad, chatAttributes }); if (userid) { setUser(userid, email); diff --git a/assets/js/code-controls.js b/assets/js/code-controls.js index ffdf02003..19ec0ef7c 100644 --- a/assets/js/code-controls.js +++ b/assets/js/code-controls.js @@ -1,8 +1,9 @@ import $ from 'jquery'; +import { context } from './page-context.js'; function initialize() { var codeBlockSelector = '.article--content pre'; - var codeBlocks = $(codeBlockSelector); + var $codeBlocks = $(codeBlockSelector); var appendHTML = `
@@ -15,7 +16,7 @@ function initialize() { `; // Wrap all codeblocks with a new 'codeblock' div - $(codeBlocks).each(function () { + $codeBlocks.each(function () { $(this).wrap("
"); }); @@ -68,7 +69,94 @@ function initialize() { // Trigger copy failure state lifecycle $('.copy-code').click(function () { - let text = $(this).closest('.code-controls').prevAll('pre:has(code)')[0].innerText; + let codeElement = $(this) + .closest('.code-controls') + .prevAll('pre:has(code)')[0]; + + let text = codeElement.innerText; + + // Extract additional code block information + const codeBlockInfo = extractCodeBlockInfo(codeElement); + + // Add Google Analytics event tracking + const currentUrl = new URL(window.location.href); + + // Determine which tracking parameter to add based on product context + switch (context) { + case 'cloud': + currentUrl.searchParams.set('dl', 'cloud'); + break; + case 'core': + /** Track using the same value used by www.influxdata.com pages */ + currentUrl.searchParams.set('dl', 'oss3'); + break; + case 'enterprise': + /** Track using the same value used by www.influxdata.com pages */ + currentUrl.searchParams.set('dl', 'enterprise'); + break; + case 'serverless': + currentUrl.searchParams.set('dl', 'serverless'); + break; + case 'dedicated': + currentUrl.searchParams.set('dl', 'dedicated'); + break; + case 'clustered': + currentUrl.searchParams.set('dl', 'clustered'); + break; + case 'oss/enterprise': + currentUrl.searchParams.set('dl', 'oss'); + break; + case 'other': + default: + // No tracking parameter for other/unknown products + break; + } + + // Add code block specific tracking parameters + if (codeBlockInfo.language) { + currentUrl.searchParams.set('code_lang', codeBlockInfo.language); + } + if (codeBlockInfo.lineCount) { + currentUrl.searchParams.set('code_lines', codeBlockInfo.lineCount); + } + if (codeBlockInfo.hasPlaceholders) { + currentUrl.searchParams.set('has_placeholders', 'true'); + } + if (codeBlockInfo.blockType) { + currentUrl.searchParams.set('code_type', codeBlockInfo.blockType); + } + if (codeBlockInfo.sectionTitle) { + currentUrl.searchParams.set( + 'section', + 
encodeURIComponent(codeBlockInfo.sectionTitle) + ); + } + if (codeBlockInfo.firstLine) { + currentUrl.searchParams.set( + 'first_line', + encodeURIComponent(codeBlockInfo.firstLine.substring(0, 100)) + ); + } + + // Update browser history without triggering page reload + if (window.history && window.history.replaceState) { + window.history.replaceState(null, '', currentUrl.toString()); + } + + // Send custom Google Analytics event if gtag is available + if (typeof window.gtag !== 'undefined') { + window.gtag('event', 'code_copy', { + language: codeBlockInfo.language, + line_count: codeBlockInfo.lineCount, + has_placeholders: codeBlockInfo.hasPlaceholders, + dl: codeBlockInfo.dl || null, + section_title: codeBlockInfo.sectionTitle, + first_line: codeBlockInfo.firstLine + ? codeBlockInfo.firstLine.substring(0, 100) + : null, + product: context, + }); + } const copyContent = async () => { try { @@ -82,6 +170,71 @@ function initialize() { copyContent(); }); + /** + * Extract contextual information about a code block + * @param {HTMLElement} codeElement - The code block element + * @returns {Object} Information about the code block + */ + function extractCodeBlockInfo(codeElement) { + const codeTag = codeElement.querySelector('code'); + const info = { + language: null, + lineCount: 0, + hasPlaceholders: false, + blockType: 'code', + dl: null, // Download script type + sectionTitle: null, + firstLine: null, + }; + + // Extract language from class attribute + if (codeTag && codeTag.className) { + const langMatch = codeTag.className.match( + /language-(\w+)|hljs-(\w+)|(\w+)/ + ); + if (langMatch) { + info.language = langMatch[1] || langMatch[2] || langMatch[3]; + } + } + + // Count lines + const text = codeElement.innerText || ''; + const lines = text.split('\n'); + info.lineCount = lines.length; + + // Get first non-empty line + info.firstLine = lines.find((line) => line.trim() !== '') || null; + + // Check for placeholders (common patterns) + info.hasPlaceholders = + 
/\b[A-Z_]{2,}\b|\{\{[^}]+\}\}|\$\{[^}]+\}|<[^>]+>/.test(text); + + // Determine if this is a download script + if (text.includes('https://www.influxdata.com/d/install_influxdb3.sh')) { + if (text.includes('install_influxdb3.sh enterprise')) { + info.dl = 'enterprise'; + } else { + info.dl = 'oss3'; + } + } else if (text.includes('docker pull influxdb:3-enterprise')) { + info.dl = 'enterprise'; + } else if (text.includes('docker pull influxdb3-core')) { + info.dl = 'oss3'; + } + + // Find nearest section heading + let element = codeElement; + while (element && element !== document.body) { + element = element.previousElementSibling || element.parentElement; + if (element && element.tagName && /^H[1-6]$/.test(element.tagName)) { + info.sectionTitle = element.textContent.trim(); + break; + } + } + + return info; + } + /////////////////////////////// FULL WINDOW CODE /////////////////////////////// /* @@ -90,7 +243,10 @@ Disable scrolling on the body. Disable user selection on everything but the fullscreen codeblock. */ $('.fullscreen-toggle').click(function () { - var code = $(this).closest('.code-controls').prevAll('pre:has(code)').clone(); + var code = $(this) + .closest('.code-controls') + .prevAll('pre:has(code)') + .clone(); $('#fullscreen-code-placeholder').replaceWith(code[0]); $('body').css('overflow', 'hidden'); diff --git a/assets/js/components/diagram.js b/assets/js/components/diagram.js new file mode 100644 index 000000000..17f07dbe5 --- /dev/null +++ b/assets/js/components/diagram.js @@ -0,0 +1,78 @@ +// Memoize the mermaid module import +let mermaidPromise = null; + +export default function Diagram({ component }) { + // Import mermaid.js module (memoized) + if (!mermaidPromise) { + mermaidPromise = import('mermaid'); + } + mermaidPromise + .then(({ default: mermaid }) => { + // Configure mermaid with InfluxData theming + mermaid.initialize({ + startOnLoad: false, // We'll manually call run() + theme: document.body.classList.contains('dark-theme') + ? 
'dark' + : 'default', + themeVariables: { + fontFamily: 'Proxima Nova', + fontSize: '16px', + lineColor: '#22ADF6', + primaryColor: '#22ADF6', + primaryTextColor: '#545454', + secondaryColor: '#05CE78', + tertiaryColor: '#f4f5f5', + }, + securityLevel: 'loose', // Required for interactive diagrams + logLevel: 'error', + }); + + // Process the specific diagram component + try { + mermaid.run({ nodes: [component] }); + } catch (error) { + console.error('Mermaid diagram rendering error:', error); + } + + // Store reference to mermaid for theme switching + if (!window.mermaidInstances) { + window.mermaidInstances = new Map(); + } + window.mermaidInstances.set(component, mermaid); + }) + .catch((error) => { + console.error('Failed to load Mermaid library:', error); + }); + + // Listen for theme changes to refresh diagrams + const observer = new MutationObserver((mutations) => { + mutations.forEach((mutation) => { + if ( + mutation.attributeName === 'class' && + document.body.classList.contains('dark-theme') !== window.isDarkTheme + ) { + window.isDarkTheme = document.body.classList.contains('dark-theme'); + + // Reload this specific diagram with new theme + if (window.mermaidInstances?.has(component)) { + const mermaid = window.mermaidInstances.get(component); + mermaid.initialize({ + theme: window.isDarkTheme ? 
'dark' : 'default', + }); + mermaid.run({ nodes: [component] }); + } + } + }); + }); + + // Watch for theme changes on body element + observer.observe(document.body, { attributes: true }); + + // Return cleanup function to be called when component is destroyed + return () => { + observer.disconnect(); + if (window.mermaidInstances?.has(component)) { + window.mermaidInstances.delete(component); + } + }; +} diff --git a/assets/js/components/doc-search.js b/assets/js/components/doc-search.js new file mode 100644 index 000000000..52e1b1f65 --- /dev/null +++ b/assets/js/components/doc-search.js @@ -0,0 +1,180 @@ +/** + * DocSearch component for InfluxData documentation + * Handles asynchronous loading and initialization of Algolia DocSearch + */ +const debug = false; // Set to true for debugging output + +export default function DocSearch({ component }) { + // Store configuration from component data attributes + const config = { + apiKey: component.getAttribute('data-api-key'), + appId: component.getAttribute('data-app-id'), + indexName: component.getAttribute('data-index-name'), + inputSelector: component.getAttribute('data-input-selector'), + searchTag: component.getAttribute('data-search-tag'), + includeFlux: component.getAttribute('data-include-flux') === 'true', + includeResources: + component.getAttribute('data-include-resources') === 'true', + debug: component.getAttribute('data-debug') === 'true', + }; + + // Initialize global object to track DocSearch state + window.InfluxDocs = window.InfluxDocs || {}; + window.InfluxDocs.search = { + initialized: false, + options: config, + }; + + // Load DocSearch asynchronously + function loadDocSearch() { + if (debug) { + console.log('Loading DocSearch script...'); + } + const script = document.createElement('script'); + script.src = + 'https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.js'; + script.async = true; + script.onload = initializeDocSearch; + document.body.appendChild(script); + } + + // 
Initialize DocSearch after script loads + function initializeDocSearch() { + if (debug) { + console.log('Initializing DocSearch...'); + } + const multiVersion = ['influxdb']; + + // Use object-based lookups instead of conditionals for version and product names + // These can be replaced with data from productData in the future + + // Version display name mappings + const versionDisplayNames = { + cloud: 'Cloud (TSM)', + core: 'Core', + enterprise: 'Enterprise', + 'cloud-serverless': 'Cloud Serverless', + 'cloud-dedicated': 'Cloud Dedicated', + clustered: 'Clustered', + explorer: 'Explorer', + }; + + // Product display name mappings + const productDisplayNames = { + influxdb: 'InfluxDB', + influxdb3: 'InfluxDB 3', + explorer: 'InfluxDB 3 Explorer', + enterprise_influxdb: 'InfluxDB Enterprise', + flux: 'Flux', + telegraf: 'Telegraf', + chronograf: 'Chronograf', + kapacitor: 'Kapacitor', + platform: 'InfluxData Platform', + resources: 'Additional Resources', + }; + + // Initialize DocSearch with configuration + window.docsearch({ + apiKey: config.apiKey, + appId: config.appId, + indexName: config.indexName, + inputSelector: config.inputSelector, + debug: config.debug, + transformData: function (hits) { + // Format version using object lookup instead of if-else chain + function fmtVersion(version, productKey) { + if (version == null) { + return ''; + } else if (versionDisplayNames[version]) { + return versionDisplayNames[version]; + } else if (multiVersion.includes(productKey)) { + return version; + } else { + return ''; + } + } + + hits.map((hit) => { + const pathData = new URL(hit.url).pathname + .split('/') + .filter((n) => n); + const product = productDisplayNames[pathData[0]] || pathData[0]; + const version = fmtVersion(pathData[1], pathData[0]); + + hit.product = product; + hit.version = version; + hit.hierarchy.lvl0 = + hit.hierarchy.lvl0 + + ` ${product} ${version}`; + hit._highlightResult.hierarchy.lvl0.value = + hit._highlightResult.hierarchy.lvl0.value + + ` 
${product} ${version}`; + }); + return hits; + }, + algoliaOptions: { + hitsPerPage: 10, + facetFilters: buildFacetFilters(config), + }, + autocompleteOptions: { + templates: { + header: + '
Search all InfluxData content ', + empty: + '

Not finding what you\'re looking for?

Search all InfluxData content
', + }, + }, + }); + + // Mark DocSearch as initialized + window.InfluxDocs.search.initialized = true; + + // Dispatch event for other components to know DocSearch is ready + window.dispatchEvent(new CustomEvent('docsearch-initialized')); + } + + /** + * Helper function to build facet filters based on config + * - Uses nested arrays for AND conditions + * - Includes space after colon in filter expressions + */ + function buildFacetFilters(config) { + if (!config.searchTag) { + return ['latest:true']; + } else if (config.includeFlux) { + // Return a nested array to match original template structure + // Note the space after each colon + return [ + [ + 'searchTag: ' + config.searchTag, + 'flux:true', + 'resources: ' + config.includeResources, + ], + ]; + } else { + // Return a nested array to match original template structure + // Note the space after each colon + return [ + [ + 'searchTag: ' + config.searchTag, + 'resources: ' + config.includeResources, + ], + ]; + } + } + + // Load DocSearch when page is idle or after a slight delay + if ('requestIdleCallback' in window) { + requestIdleCallback(loadDocSearch); + } else { + setTimeout(loadDocSearch, 500); + } + + // Return cleanup function + return function cleanup() { + // Clean up any event listeners if needed + if (debug) { + console.log('DocSearch component cleanup'); + } + }; +} diff --git a/assets/js/components/sidebar-search.js b/assets/js/components/sidebar-search.js new file mode 100644 index 000000000..f3d09fbe4 --- /dev/null +++ b/assets/js/components/sidebar-search.js @@ -0,0 +1,6 @@ +import SearchInteractions from '../utils/search-interactions.js'; + +export default function SidebarSearch({ component }) { + const searchInput = component.querySelector('.sidebar--search-field'); + SearchInteractions({ searchInput }); +} diff --git a/assets/js/custom-timestamps.js b/assets/js/custom-timestamps.js index c9e32838b..3cde0a6ad 100644 --- a/assets/js/custom-timestamps.js +++ b/assets/js/custom-timestamps.js @@ 
-1,7 +1,7 @@ import $ from 'jquery'; import { Datepicker } from 'vanillajs-datepicker'; import { toggleModal } from './modals.js'; -import * as localStorage from './local-storage.js'; +import * as localStorage from './services/local-storage.js'; // Placeholder start date used in InfluxDB custom timestamps const defaultStartDate = '2022-01-01'; @@ -53,65 +53,65 @@ function timeToUnixSeconds(time) { return unixSeconds; } - // Default time values in getting started sample data - const defaultTimes = [ - { - rfc3339: `${defaultStartDate}T08:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T08:00:00Z`), - }, // 1641024000 - { - rfc3339: `${defaultStartDate}T09:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T09:00:00Z`), - }, // 1641027600 - { - rfc3339: `${defaultStartDate}T10:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T10:00:00Z`), - }, // 1641031200 - { - rfc3339: `${defaultStartDate}T11:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T11:00:00Z`), - }, // 1641034800 - { - rfc3339: `${defaultStartDate}T12:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T12:00:00Z`), - }, // 1641038400 - { - rfc3339: `${defaultStartDate}T13:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T13:00:00Z`), - }, // 1641042000 - { - rfc3339: `${defaultStartDate}T14:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T14:00:00Z`), - }, // 1641045600 - { - rfc3339: `${defaultStartDate}T15:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T15:00:00Z`), - }, // 1641049200 - { - rfc3339: `${defaultStartDate}T16:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T16:00:00Z`), - }, // 1641052800 - { - rfc3339: `${defaultStartDate}T17:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T17:00:00Z`), - }, // 1641056400 - { - rfc3339: `${defaultStartDate}T18:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T18:00:00Z`), - }, // 1641060000 - { - rfc3339: `${defaultStartDate}T19:00:00Z`, - unix: 
timeToUnixSeconds(`${defaultStartDate}T19:00:00Z`), - }, // 1641063600 - { - rfc3339: `${defaultStartDate}T20:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T20:00:00Z`), - }, // 1641067200 - ]; +// Default time values in getting started sample data +const defaultTimes = [ + { + rfc3339: `${defaultStartDate}T08:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T08:00:00Z`), + }, // 1641024000 + { + rfc3339: `${defaultStartDate}T09:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T09:00:00Z`), + }, // 1641027600 + { + rfc3339: `${defaultStartDate}T10:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T10:00:00Z`), + }, // 1641031200 + { + rfc3339: `${defaultStartDate}T11:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T11:00:00Z`), + }, // 1641034800 + { + rfc3339: `${defaultStartDate}T12:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T12:00:00Z`), + }, // 1641038400 + { + rfc3339: `${defaultStartDate}T13:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T13:00:00Z`), + }, // 1641042000 + { + rfc3339: `${defaultStartDate}T14:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T14:00:00Z`), + }, // 1641045600 + { + rfc3339: `${defaultStartDate}T15:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T15:00:00Z`), + }, // 1641049200 + { + rfc3339: `${defaultStartDate}T16:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T16:00:00Z`), + }, // 1641052800 + { + rfc3339: `${defaultStartDate}T17:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T17:00:00Z`), + }, // 1641056400 + { + rfc3339: `${defaultStartDate}T18:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T18:00:00Z`), + }, // 1641060000 + { + rfc3339: `${defaultStartDate}T19:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T19:00:00Z`), + }, // 1641063600 + { + rfc3339: `${defaultStartDate}T20:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T20:00:00Z`), + }, // 1641067200 +]; -function updateTimestamps (newStartDate, 
seedTimes=defaultTimes) { +function updateTimestamps(newStartDate, seedTimes = defaultTimes) { // Update the times array with replacement times - const times = seedTimes.map(x => { + const times = seedTimes.map((x) => { var newStartTimestamp = x.rfc3339.replace(/^.*T/, newStartDate + 'T'); return { @@ -178,7 +178,7 @@ function updateTimestamps (newStartDate, seedTimes=defaultTimes) { /////////////////////// MODAL INTERACTIONS / DATE PICKER /////////////////////// -function CustomTimeTrigger({component}) { +function CustomTimeTrigger({ component }) { const $component = $(component); $component .find('a[data-action="open"]:first') @@ -212,7 +212,7 @@ function CustomTimeTrigger({component}) { if (newDate != undefined) { newDate = formatDate(newDate); - + // Update the last updated timestamps with the new date // and reassign the updated times. updatedTimes = updateTimestamps(newDate, updatedTimes); diff --git a/assets/js/datetime.js b/assets/js/datetime.js index ec0f8ee2b..7c0261416 100644 --- a/assets/js/datetime.js +++ b/assets/js/datetime.js @@ -1,30 +1,54 @@ -const monthNames = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]; -var date = new Date() -var currentTimestamp = date.toISOString().replace(/^(.*)(\.\d+)(Z)/, '$1$3') // 2023-01-01T12:34:56Z -var currentTime = date.toISOString().replace(/(^.*T)(.*)(Z)/, '$2') + '084216' // 12:34:56.000084216 +import $ from 'jquery'; -function currentDate(offset=0, trimTime=false) { - outputDate = new Date(date) - outputDate.setDate(outputDate.getDate() + offset) +var date = new Date(); +var currentTimestamp = date.toISOString().replace(/^(.*)(\.\d+)(Z)/, '$1$3'); // 2023-01-01T12:34:56Z + +// Microsecond offset appended to the current time string for formatting purposes +const MICROSECOND_OFFSET = '084216'; + +var currentTime = + date.toISOString().replace(/(^.*T)(.*)(Z)/, '$2') + MICROSECOND_OFFSET; // 12:34:56.000084216 +function currentDate(offset 
= 0, trimTime = false) { + let outputDate = new Date(date); + outputDate.setDate(outputDate.getDate() + offset); if (trimTime) { - return outputDate.toISOString().replace(/T.*$/, '') // 2023-01-01 + return outputDate.toISOString().replace(/T.*$/, ''); // 2023-01-01 } else { - return outputDate.toISOString().replace(/T.*$/, 'T00:00:00Z') // 2023-01-01T00:00:00Z + return outputDate.toISOString().replace(/T.*$/, 'T00:00:00Z'); // 2023-01-01T00:00:00Z } } function enterpriseEOLDate() { - var inTwoYears = date.setFullYear(date.getFullYear() + 2) - earliestEOL = new Date(inTwoYears) - return `${monthNames[earliestEOL.getMonth()]} ${earliestEOL.getDate()}, ${earliestEOL.getFullYear()}` + const monthNames = [ + 'January', + 'February', + 'March', + 'April', + 'May', + 'June', + 'July', + 'August', + 'September', + 'October', + 'November', + 'December', + ]; + var inTwoYears = new Date(date); + inTwoYears.setFullYear(inTwoYears.getFullYear() + 2); + let earliestEOL = new Date(inTwoYears); + return `${monthNames[earliestEOL.getMonth()]} ${earliestEOL.getDate()}, ${earliestEOL.getFullYear()}`; } -$('span.current-timestamp').text(currentTimestamp) -$('span.current-time').text(currentTime) -$('span.enterprise-eol-date').text(enterpriseEOLDate) -$('span.current-date').each(function() { - var dayOffset = parseInt($(this).attr("offset")) - var trimTime = $(this).attr("trim-time") === "true" - $(this).text(currentDate(dayOffset, trimTime)) -}) +function initialize() { + $('span.current-timestamp').text(currentTimestamp); + $('span.current-time').text(currentTime); + $('span.enterprise-eol-date').text(enterpriseEOLDate()); + $('span.current-date').each(function () { + var dayOffset = parseInt($(this).attr('offset')); + var trimTime = $(this).attr('trim-time') === 'true'; + $(this).text(currentDate(dayOffset, trimTime)); + }); +} + +export { initialize }; diff --git a/assets/js/feature-callouts.js b/assets/js/feature-callouts.js index 253b09b13..a3ad28d94 100644 --- 
a/assets/js/feature-callouts.js +++ b/assets/js/feature-callouts.js @@ -2,37 +2,24 @@ This feature is designed to callout new features added to the documentation CSS is required for the callout bubble to determine look and position, but the element must have the `callout` class and a unique id. - Callouts are treated as notifications and use the notification cookie API in - assets/js/cookies.js. + Callouts are treated as notifications and use the LocalStorage notification API. */ +import $ from 'jquery'; +import * as LocalStorageAPI from './services/local-storage.js'; + // Get notification ID -function getCalloutID (el) { +function getCalloutID(el) { return $(el).attr('id'); } -// Hide a callout and update the cookie with the viewed callout -function hideCallout (calloutID) { - if (!window.LocalStorageAPI.notificationIsRead(calloutID)) { - window.LocalStorageAPI.setNotificationAsRead(calloutID, 'callout'); - $(`#${calloutID}`).fadeOut(200); +// Show the url feature callouts on page load +export default function FeatureCallout({ component }) { + const calloutID = getCalloutID($(component)); + + if (!LocalStorageAPI.notificationIsRead(calloutID, 'callout')) { + $(`#${calloutID}.feature-callout`) + .fadeIn(300) + .removeClass('start-position'); } } - -// Show the url feature callouts on page load -$(document).ready(function () { - $('.feature-callout').each(function () { - const calloutID = getCalloutID($(this)); - - if (!window.LocalStorageAPI.notificationIsRead(calloutID, 'callout')) { - $(`#${calloutID}.feature-callout`) - .fadeIn(300) - .removeClass('start-position'); - } - }); -}); - -// Hide the InfluxDB URL selector callout -// $('button.url-trigger, #influxdb-url-selector .close').click(function () { -// hideCallout('influxdb-url-selector'); -// }); diff --git a/assets/js/flux-group-keys.js b/assets/js/flux-group-keys.js index 80ab46b70..60ed99b0b 100644 --- a/assets/js/flux-group-keys.js +++ b/assets/js/flux-group-keys.js @@ -1,49 +1,148 @@ -var tablesElement 
= $("#flux-group-keys-demo #grouped-tables") +import $ from 'jquery'; // Sample data let data = [ [ - { _time: "2021-01-01T00:00:00Z", _measurement: "example", loc: "rm1", sensorID: "A123", _field: "temp", _value: 110.3 }, - { _time: "2021-01-01T00:01:00Z", _measurement: "example", loc: "rm1", sensorID: "A123", _field: "temp", _value: 112.5 }, - { _time: "2021-01-01T00:02:00Z", _measurement: "example", loc: "rm1", sensorID: "A123", _field: "temp", _value: 111.9 } + { + _time: '2021-01-01T00:00:00Z', + _measurement: 'example', + loc: 'rm1', + sensorID: 'A123', + _field: 'temp', + _value: 110.3, + }, + { + _time: '2021-01-01T00:01:00Z', + _measurement: 'example', + loc: 'rm1', + sensorID: 'A123', + _field: 'temp', + _value: 112.5, + }, + { + _time: '2021-01-01T00:02:00Z', + _measurement: 'example', + loc: 'rm1', + sensorID: 'A123', + _field: 'temp', + _value: 111.9, + }, ], [ - { _time: "2021-01-01T00:00:00Z", _measurement: "example", loc: "rm1", sensorID: "A123", _field: "hum", _value: 73.4 }, - { _time: "2021-01-01T00:01:00Z", _measurement: "example", loc: "rm1", sensorID: "A123", _field: "hum", _value: 73.7 }, - { _time: "2021-01-01T00:02:00Z", _measurement: "example", loc: "rm1", sensorID: "A123", _field: "hum", _value: 75.1 } + { + _time: '2021-01-01T00:00:00Z', + _measurement: 'example', + loc: 'rm1', + sensorID: 'A123', + _field: 'hum', + _value: 73.4, + }, + { + _time: '2021-01-01T00:01:00Z', + _measurement: 'example', + loc: 'rm1', + sensorID: 'A123', + _field: 'hum', + _value: 73.7, + }, + { + _time: '2021-01-01T00:02:00Z', + _measurement: 'example', + loc: 'rm1', + sensorID: 'A123', + _field: 'hum', + _value: 75.1, + }, ], [ - { _time: "2021-01-01T00:00:00Z", _measurement: "example", loc: "rm2", sensorID: "B456", _field: "temp", _value: 108.2 }, - { _time: "2021-01-01T00:01:00Z", _measurement: "example", loc: "rm2", sensorID: "B456", _field: "temp", _value: 108.5 }, - { _time: "2021-01-01T00:02:00Z", _measurement: "example", loc: "rm2", sensorID: "B456", 
_field: "temp", _value: 109.6 } + { + _time: '2021-01-01T00:00:00Z', + _measurement: 'example', + loc: 'rm2', + sensorID: 'B456', + _field: 'temp', + _value: 108.2, + }, + { + _time: '2021-01-01T00:01:00Z', + _measurement: 'example', + loc: 'rm2', + sensorID: 'B456', + _field: 'temp', + _value: 108.5, + }, + { + _time: '2021-01-01T00:02:00Z', + _measurement: 'example', + loc: 'rm2', + sensorID: 'B456', + _field: 'temp', + _value: 109.6, + }, ], [ - { _time: "2021-01-01T00:00:00Z", _measurement: "example", loc: "rm2", sensorID: "B456", _field: "hum", _value: 71.8 }, - { _time: "2021-01-01T00:01:00Z", _measurement: "example", loc: "rm2", sensorID: "B456", _field: "hum", _value: 72.3 }, - { _time: "2021-01-01T00:02:00Z", _measurement: "example", loc: "rm2", sensorID: "B456", _field: "hum", _value: 72.1 } - ] -] + { + _time: '2021-01-01T00:00:00Z', + _measurement: 'example', + loc: 'rm2', + sensorID: 'B456', + _field: 'hum', + _value: 71.8, + }, + { + _time: '2021-01-01T00:01:00Z', + _measurement: 'example', + loc: 'rm2', + sensorID: 'B456', + _field: 'hum', + _value: 72.3, + }, + { + _time: '2021-01-01T00:02:00Z', + _measurement: 'example', + loc: 'rm2', + sensorID: 'B456', + _field: 'hum', + _value: 72.1, + }, + ], +]; // Default group key -let groupKey = ["_measurement", "loc", "sensorID", "_field"] +let groupKey = ['_measurement', 'loc', 'sensorID', '_field']; + +export default function FluxGroupKeysDemo({ component }) { + $('.column-list label').click(function () { + toggleCheckbox($(this)); + groupKey = getChecked(component); + groupData(); + buildGroupExample(component); + }); + + // Group and render tables on load + groupData(); +} // Build a table group (group key and table) using an array of objects function buildTable(inputData) { - // Build the group key string function wrapString(column, value) { - var stringColumns = ["_measurement", "loc", "sensorID", "_field"] + var stringColumns = ['_measurement', 'loc', 'sensorID', '_field']; if 
(stringColumns.includes(column)) { - return '"' + value + '"' + return '"' + value + '"'; } else { - return value + return value; } } - var groupKeyString = "Group key instance = [" + (groupKey.map(column => column + ": " + wrapString(column, (inputData[0])[column])) ).join(", ") + "]"; - var groupKeyLabel = document.createElement("p"); - groupKeyLabel.className = "table-group-key" - groupKeyLabel.innerHTML = groupKeyString - + var groupKeyString = + 'Group key instance = [' + + groupKey + .map((column) => column + ': ' + wrapString(column, inputData[0][column])) + .join(', ') + + ']'; + var groupKeyLabel = document.createElement('p'); + groupKeyLabel.className = 'table-group-key'; + groupKeyLabel.innerHTML = groupKeyString; // Extract column headers var columns = []; @@ -54,56 +153,57 @@ function buildTable(inputData) { } } } - + // Create the table element - var table = document.createElement("table"); - + const table = document.createElement('table'); + // Create the table header for (let i = 0; i < columns.length; i++) { var header = table.createTHead(); - var th = document.createElement("th"); + var th = document.createElement('th'); th.innerHTML = columns[i]; if (groupKey.includes(columns[i])) { - th.className = "grouped-by"; + th.className = 'grouped-by'; } header.appendChild(th); } // Add inputData to the HTML table for (let i = 0; i < inputData.length; i++) { - tr = table.insertRow(-1); + let tr = table.insertRow(-1); for (let j = 0; j < columns.length; j++) { var td = tr.insertCell(-1); td.innerHTML = inputData[i][columns[j]]; // Highlight the value if column is part of the group key if (groupKey.includes(columns[j])) { - td.className = "grouped-by"; + td.className = 'grouped-by'; } } } // Create a table group with group key and table - var tableGroup = document.createElement("div"); - tableGroup.innerHTML += groupKeyLabel.outerHTML + table.outerHTML + var tableGroup = document.createElement('div'); + tableGroup.innerHTML += groupKeyLabel.outerHTML + 
table.outerHTML; - return tableGroup + return tableGroup; } // Clear and rebuild all HTML tables function buildTables(data) { - existingTables = tablesElement[0] + let tablesElement = $('#flux-group-keys-demo #grouped-tables'); + let existingTables = tablesElement[0]; while (existingTables.firstChild) { existingTables.removeChild(existingTables.firstChild); } for (let i = 0; i < data.length; i++) { - var table = buildTable(data[i]) + var table = buildTable(data[i]); tablesElement.append(table); } } // Group data based on the group key and output new tables function groupData() { - let groupedData = data.flat() + let groupedData = data.flat(); function groupBy(array, f) { var groups = {}; @@ -114,20 +214,19 @@ function groupData() { }); return Object.keys(groups).map(function (group) { return groups[group]; - }) + }); } groupedData = groupBy(groupedData, function (r) { - return groupKey.map(v => r[v]); + return groupKey.map((v) => r[v]); }); buildTables(groupedData); } -// Get selected column names -var checkboxes = $("input[type=checkbox]"); - -function getChecked() { +function getChecked(component) { + // Get selected column names + var checkboxes = $(component).find('input[type=checkbox]'); var checked = []; for (var i = 0; i < checkboxes.length; i++) { var checkbox = checkboxes[i]; @@ -141,17 +240,12 @@ function toggleCheckbox(element) { } // Build example group function -function buildGroupExample() { - var columnCollection = getChecked().map(i => '"' + i + '"').join(", ") - $("pre#group-by-example")[0].innerHTML = "data\n |> group(columns: [" + columnCollection + "])"; +function buildGroupExample(component) { + var columnCollection = getChecked(component) + .map((i) => '"' + i + '"') + .join(', '); + $('pre#group-by-example')[0].innerHTML = + "data\n |> group(columns: [" + + columnCollection + + '])'; } - -$(".column-list label").click(function () { - toggleCheckbox($(this)) - groupKey = getChecked(); - groupData(); - buildGroupExample(); -}); - -// Group and 
render tables on load -groupData() diff --git a/assets/js/home-interactions.js b/assets/js/home-interactions.js deleted file mode 100644 index a90df14cd..000000000 --- a/assets/js/home-interactions.js +++ /dev/null @@ -1,22 +0,0 @@ -$('.exp-btn').click(function() { - var targetBtnElement = $(this).parent() - $('.exp-btn > p', targetBtnElement).fadeOut(100); - setTimeout(function() { - $('.exp-btn-links', targetBtnElement).fadeIn(200) - $('.exp-btn', targetBtnElement).addClass('open'); - $('.close-btn', targetBtnElement).fadeIn(200); - }, 100); -}) - -$('.close-btn').click(function() { - var targetBtnElement = $(this).parent().parent() - $('.exp-btn-links', targetBtnElement).fadeOut(100) - $('.exp-btn', targetBtnElement).removeClass('open'); - $(this).fadeOut(100); - setTimeout(function() { - $('p', targetBtnElement).fadeIn(100); - }, 100); -}) - -/////////////////////////////// EXPANDING BUTTONS ////////////////////////////// - diff --git a/assets/js/index.js b/assets/js/index.js deleted file mode 100644 index f63ad8b5d..000000000 --- a/assets/js/index.js +++ /dev/null @@ -1 +0,0 @@ -export * from './main.js'; diff --git a/assets/js/influxdb-url.js b/assets/js/influxdb-url.js index bed47eb94..e0f5d34d8 100644 --- a/assets/js/influxdb-url.js +++ b/assets/js/influxdb-url.js @@ -3,7 +3,6 @@ ///////////////////////// INFLUXDB URL PREFERENCE ///////////////////////////// //////////////////////////////////////////////////////////////////////////////// */ -import * as pageParams from '@params'; import { DEFAULT_STORAGE_URLS, getPreference, @@ -12,15 +11,18 @@ import { removeInfluxDBUrl, getInfluxDBUrl, getInfluxDBUrls, -} from './local-storage.js'; +} from './services/local-storage.js'; import $ from 'jquery'; import { context as PRODUCT_CONTEXT, referrerHost } from './page-context.js'; +import { influxdbUrls } from './services/influxdb-urls.js'; import { delay } from './helpers.js'; import { toggleModal } from './modals.js'; let CLOUD_URLS = []; -if (pageParams && 
pageParams.influxdb_urls) { - CLOUD_URLS = Object.values(pageParams.influxdb_urls.cloud.providers).flatMap((provider) => provider.regions?.map((region) => region.url)); +if (influxdbUrls?.cloud) { + CLOUD_URLS = Object.values(influxdbUrls.cloud.providers).flatMap((provider) => + provider.regions?.map((region) => region.url) + ); } export { CLOUD_URLS }; @@ -28,7 +30,7 @@ export function InfluxDBUrl() { const UNIQUE_URL_PRODUCTS = ['dedicated', 'clustered']; const IS_UNIQUE_URL_PRODUCT = UNIQUE_URL_PRODUCTS.includes(PRODUCT_CONTEXT); - // Add actual cloud URLs as needed + // Add actual cloud URLs as needed const elementSelector = '.article--content pre:not(.preserve)'; ///////////////////// Stored preference management /////////////////////// @@ -118,11 +120,12 @@ export function InfluxDBUrl() { }); } - // Retrieve the currently selected URLs from the urls local storage object. - function getUrls() { - const { cloud, oss, core, enterprise, serverless, dedicated, clustered } = getInfluxDBUrls(); - return { oss, cloud, core, enterprise, serverless, dedicated, clustered }; -} + // Retrieve the currently selected URLs from the urls local storage object. + function getUrls() { + const { cloud, oss, core, enterprise, serverless, dedicated, clustered } = + getInfluxDBUrls(); + return { oss, cloud, core, enterprise, serverless, dedicated, clustered }; + } // Retrieve the previously selected URLs from the from the urls local storage object. // This is used to update URLs whenever you switch between browser tabs. 
@@ -289,15 +292,17 @@ export function InfluxDBUrl() { } // Append the URL selector button to each codeblock containing a placeholder URL - function appendUrlSelector(urls={ - cloud: '', - oss: '', - core: '', - enterprise: '', - serverless: '', - dedicated: '', - clustered: '', - }) { + function appendUrlSelector( + urls = { + cloud: '', + oss: '', + core: '', + enterprise: '', + serverless: '', + dedicated: '', + clustered: '', + } + ) { const appendToUrls = Object.values(urls); const getBtnText = (context) => { @@ -315,7 +320,7 @@ export function InfluxDBUrl() { return contextText[context]; }; - appendToUrls.forEach(function (url) { + appendToUrls.forEach(function (url) { $(elementSelector).each(function () { var code = $(this).html(); if (code.includes(url)) { @@ -330,20 +335,32 @@ export function InfluxDBUrl() { }); } -//////////////////////////////////////////////////////////////////////////// -////////////////// Initialize InfluxDB URL interactions //////////////////// -//////////////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////////////////////////// + ////////////////// Initialize InfluxDB URL interactions //////////////////// + //////////////////////////////////////////////////////////////////////////// // Add the preserve tag to code blocks that shouldn't be updated addPreserve(); - const { cloud, oss, core, enterprise, serverless, dedicated, clustered } = DEFAULT_STORAGE_URLS; + const { cloud, oss, core, enterprise, serverless, dedicated, clustered } = + DEFAULT_STORAGE_URLS; // Append URL selector buttons to code blocks - appendUrlSelector({ cloud, oss, core, enterprise, serverless, dedicated, clustered }); + appendUrlSelector({ + cloud, + oss, + core, + enterprise, + serverless, + dedicated, + clustered, + }); // Update URLs on load - updateUrls({ cloud, oss, core, enterprise, serverless, dedicated, clustered }, getUrls()); + updateUrls( + { cloud, oss, core, enterprise, 
serverless, dedicated, clustered }, + getUrls() + ); // Set active radio button on page load setRadioButtons(getUrls()); diff --git a/assets/js/keybindings.js b/assets/js/keybindings.js index 6c8f2fcbe..50ee3a683 100644 --- a/assets/js/keybindings.js +++ b/assets/js/keybindings.js @@ -1,41 +1,58 @@ -// Dynamically update keybindings or hotkeys -function getPlatform() { - if (/Mac/.test(navigator.platform)) { - return "osx" - } else if (/Win/.test(navigator.platform)) { - return "win" - } else if (/Linux/.test(navigator.platform)) { - return "linux" - } else { - return "other" - } +import { getPlatform } from './utils/user-agent-platform.js'; +import $ from 'jquery'; + +/** + * Adds OS-specific class to component + * @param {string} osClass - OS-specific class to add + * @param {Object} options - Component options + * @param {jQuery} options.$component - jQuery element reference + */ +function addOSClass(osClass, { $component }) { + $component.addClass(osClass); } -const platform = getPlatform() +/** + * Updates keybinding display based on detected platform + * @param {Object} options - Component options + * @param {jQuery} options.$component - jQuery element reference + * @param {string} options.platform - Detected platform + */ +function updateKeyBindings({ $component, platform }) { + const osx = $component.data('osx'); + const linux = $component.data('linux'); + const win = $component.data('win'); -function addOSClass(osClass) { - $('.keybinding').addClass(osClass) -} + let keybind; -function updateKeyBindings() { - $('.keybinding').each(function() { - var osx = $(this).data("osx") - var linux = $(this).data("linux") - var win = $(this).data("win") - - if (platform === "other") { - if (win != linux) { - var keybind = '' + osx + ' for macOS, ' + linux + ' for Linux, and ' + win + ' for Windows'; - } else { - var keybind = '' + linux + ' for Linux and Windows and ' + osx + ' for macOS'; - } + if (platform === 'other') { + if (win !== linux) { + keybind = + `${osx} 
for macOS, ` + + `${linux} for Linux, ` + + `and ${win} for Windows`; } else { - var keybind = '' + $(this).data(platform) + '' + keybind = + `${linux} for Linux and Windows and ` + + `${osx} for macOS`; } + } else { + keybind = `${$component.data(platform)}`; + } - $(this).html(keybind) - }) + $component.html(keybind); } -addOSClass(platform) -updateKeyBindings() +/** + * Initialize and render platform-specific keybindings + * @param {Object} options - Component options + * @param {HTMLElement} options.component - DOM element + * @returns {void} + */ +export default function KeyBinding({ component }) { + // Initialize keybindings + const platform = getPlatform(); + const $component = $(component); + + addOSClass(platform, { $component }); + updateKeyBindings({ $component, platform }); +} diff --git a/assets/js/list-filters.js b/assets/js/list-filters.js index 7b008dcb6..28c818507 100644 --- a/assets/js/list-filters.js +++ b/assets/js/list-filters.js @@ -1,11 +1,15 @@ +import $ from 'jquery'; + // Count tag elements function countTag(tag) { - return $(".visible[data-tags*='" + tag + "']").length + return $(".visible[data-tags*='" + tag + "']").length; } -function getFilterCounts() { - $('#list-filters label').each(function() { - var tagName = $('input', this).attr('name').replace(/[\W/]+/, "-"); +function getFilterCounts($labels) { + $labels.each(function () { + var tagName = $('input', this) + .attr('name') + .replace(/[\W/]+/, '-'); var tagCount = countTag(tagName); $(this).attr('data-count', '(' + tagCount + ')'); if (tagCount <= 0) { @@ -13,38 +17,58 @@ function getFilterCounts() { } else { $(this).fadeTo(400, 1.0); } - }) + }); } -// Get initial filter count on page load -getFilterCounts() +/** TODO: Include the data source value in the as an additional attribute + * in the HTML and pass it into the component, which would let us use selectors + * for only the source items and let us have more than one + * list filter component per page without conflicts */ 
+export default function ListFilters({ component }) { + const $component = $(component); + const $labels = $component.find('label'); + const $inputs = $component.find('input'); -$("#list-filters input").click(function() { + getFilterCounts($labels); - // List of tags to hide - var tagArray = $("#list-filters input:checkbox:checked").map(function(){ - return $(this).attr('name').replace(/[\W]+/, "-"); - }).get(); + $inputs.click(function () { + // List of tags to hide + var tagArray = $component + .find('input:checkbox:checked') + .map(function () { + return $(this).attr('name').replace(/[\W]+/, '-'); + }) + .get(); - // List of tags to restore - var restoreArray = $("#list-filters input:checkbox:not(:checked)").map(function(){ - return $(this).attr('name').replace(/[\W]+/, "-"); - }).get(); + // List of tags to restore + var restoreArray = $component + .find('input:checkbox:not(:checked)') + .map(function () { + return $(this).attr('name').replace(/[\W]+/, '-'); + }) + .get(); - // Actions for filter select - if ( $(this).is(':checked') ) { - $.each( tagArray, function( index, value ) { - $(".filter-item.visible:not([data-tags~='" + value + "'])").removeClass('visible').fadeOut() - }) - } else { - $.each( restoreArray, function( index, value ) { - $(".filter-item:not(.visible)[data-tags~='" + value + "']").addClass('visible').fadeIn() - }) - $.each( tagArray, function( index, value ) { - $(".filter-item.visible:not([data-tags~='" + value + "'])").removeClass('visible').hide() - }) - } + // Actions for filter select + if ($(this).is(':checked')) { + $.each(tagArray, function (index, value) { + $(".filter-item.visible:not([data-tags~='" + value + "'])") + .removeClass('visible') + .fadeOut(); + }); + } else { + $.each(restoreArray, function (index, value) { + $(".filter-item:not(.visible)[data-tags~='" + value + "']") + .addClass('visible') + .fadeIn(); + }); + $.each(tagArray, function (index, value) { + $(".filter-item.visible:not([data-tags~='" + value + "'])") + 
.removeClass('visible') + .hide(); + }); + } - // Refresh filter count - getFilterCounts() -}); + // Refresh filter count + getFilterCounts($labels); + }); +} diff --git a/assets/js/main.js b/assets/js/main.js index 5c2289720..ca99dff48 100644 --- a/assets/js/main.js +++ b/assets/js/main.js @@ -1,7 +1,7 @@ // assets/js/main.js -// If you need to pass parameters from the calling Hugo page, you can import them here like so: -// import * as pageParams from '@params'; +// Import dependencies that we still need to load in the global scope +import $ from 'jquery'; /** Import modules that are not components. * TODO: Refactor these into single-purpose component modules. @@ -9,9 +9,10 @@ import * as apiLibs from './api-libs.js'; import * as codeControls from './code-controls.js'; import * as contentInteractions from './content-interactions.js'; +import * as datetime from './datetime.js'; import { delay } from './helpers.js'; import { InfluxDBUrl } from './influxdb-url.js'; -import * as localStorage from './local-storage.js'; +import * as localStorage from './services/local-storage.js'; import * as modals from './modals.js'; import * as notifications from './notifications.js'; import * as pageContext from './page-context.js'; @@ -29,8 +30,17 @@ import * as v3Wayfinding from './v3-wayfinding.js'; import AskAITrigger from './ask-ai-trigger.js'; import CodePlaceholder from './code-placeholders.js'; import { CustomTimeTrigger } from './custom-timestamps.js'; +import Diagram from './components/diagram.js'; +import DocSearch from './components/doc-search.js'; +import FeatureCallout from './feature-callouts.js'; +import FluxGroupKeysDemo from './flux-group-keys.js'; import FluxInfluxDBVersionsTrigger from './flux-influxdb-versions.js'; +import KeyBinding from './keybindings.js'; +import ListFilters from './list-filters.js'; +import ProductSelector from './version-selector.js'; +import ReleaseToc from './release-toc.js'; import { SearchButton } from './search-button.js'; +import 
SidebarSearch from './components/sidebar-search.js'; import { SidebarToggle } from './sidebar-toggle.js'; import Theme from './theme.js'; import ThemeSwitch from './theme-switch.js'; @@ -49,11 +59,20 @@ const componentRegistry = { 'ask-ai-trigger': AskAITrigger, 'code-placeholder': CodePlaceholder, 'custom-time-trigger': CustomTimeTrigger, + diagram: Diagram, + 'doc-search': DocSearch, + 'feature-callout': FeatureCallout, + 'flux-group-keys-demo': FluxGroupKeysDemo, 'flux-influxdb-versions-trigger': FluxInfluxDBVersionsTrigger, + keybinding: KeyBinding, + 'list-filters': ListFilters, + 'product-selector': ProductSelector, + 'release-toc': ReleaseToc, 'search-button': SearchButton, + 'sidebar-search': SidebarSearch, 'sidebar-toggle': SidebarToggle, - 'theme': Theme, - 'theme-switch': ThemeSwitch + theme: Theme, + 'theme-switch': ThemeSwitch, }; /** @@ -71,7 +90,12 @@ function initGlobals() { window.influxdatadocs.pageContext = pageContext; window.influxdatadocs.toggleModal = modals.toggleModal; window.influxdatadocs.componentRegistry = componentRegistry; - + + // Re-export jQuery to global namespace for legacy scripts + if (typeof window.jQuery === 'undefined') { + window.jQuery = window.$ = $; + } + return window.influxdatadocs; } @@ -81,32 +105,35 @@ function initGlobals() { */ function initComponents(globals) { const components = document.querySelectorAll('[data-component]'); - + components.forEach((component) => { const componentName = component.getAttribute('data-component'); const ComponentConstructor = componentRegistry[componentName]; - + if (ComponentConstructor) { // Initialize the component and store its instance in the global namespace try { const instance = ComponentConstructor({ component }); globals[componentName] = ComponentConstructor; - + // Optionally store component instances for future reference if (!globals.instances) { globals.instances = {}; } - + if (!globals.instances[componentName]) { globals.instances[componentName] = []; } - + 
globals.instances[componentName].push({ element: component, - instance + instance, }); } catch (error) { - console.error(`Error initializing component "${componentName}":`, error); + console.error( + `Error initializing component "${componentName}":`, + error + ); } } else { console.warn(`Unknown component: "${componentName}"`); @@ -122,6 +149,7 @@ function initModules() { apiLibs.initialize(); codeControls.initialize(); contentInteractions.initialize(); + datetime.initialize(); InfluxDBUrl(); notifications.initialize(); pageFeedback.initialize(); @@ -135,10 +163,10 @@ function initModules() { function init() { // Initialize global namespace and expose core modules const globals = initGlobals(); - + // Initialize non-component UI modules initModules(); - + // Initialize components from registry initComponents(globals); } @@ -147,4 +175,4 @@ function init() { document.addEventListener('DOMContentLoaded', init); // Export public API -export { initGlobals, componentRegistry }; \ No newline at end of file +export { initGlobals, componentRegistry }; diff --git a/assets/js/page-context.js b/assets/js/page-context.js index 4903e9a14..6779fbca2 100644 --- a/assets/js/page-context.js +++ b/assets/js/page-context.js @@ -1,34 +1,80 @@ /** This module retrieves browser context information and site data for the * current page, version, and product. 
*/ -import { products, influxdb_urls } from '@params'; - -const safeProducts = products || {}; -const safeUrls = influxdb_urls || {}; +import { products } from './services/influxdata-products.js'; +import { influxdbUrls } from './services/influxdb-urls.js'; function getCurrentProductData() { const path = window.location.pathname; const mappings = [ - { pattern: /\/influxdb\/cloud\//, product: safeProducts.cloud, urls: safeUrls.influxdb_cloud }, - { pattern: /\/influxdb3\/core/, product: safeProducts.influxdb3_core, urls: safeUrls.core }, - { pattern: /\/influxdb3\/enterprise/, product: safeProducts.influxdb3_enterprise, urls: safeUrls.enterprise }, - { pattern: /\/influxdb3\/cloud-serverless/, product: safeProducts.influxdb3_cloud_serverless, urls: safeUrls.cloud }, - { pattern: /\/influxdb3\/cloud-dedicated/, product: safeProducts.influxdb3_cloud_dedicated, urls: safeUrls.dedicated }, - { pattern: /\/influxdb3\/clustered/, product: safeProducts.influxdb3_clustered, urls: safeUrls.clustered }, - { pattern: /\/enterprise_v1\//, product: safeProducts.enterprise_influxdb, urls: safeUrls.oss }, - { pattern: /\/influxdb.*v1\//, product: safeProducts.influxdb, urls: safeUrls.oss }, - { pattern: /\/influxdb.*v2\//, product: safeProducts.influxdb, urls: safeUrls.oss }, - { pattern: /\/kapacitor\//, product: safeProducts.kapacitor, urls: safeUrls.oss }, - { pattern: /\/telegraf\//, product: safeProducts.telegraf, urls: safeUrls.oss }, - { pattern: /\/chronograf\//, product: safeProducts.chronograf, urls: safeUrls.oss }, - { pattern: /\/flux\//, product: safeProducts.flux, urls: safeUrls.oss }, + { + pattern: /\/influxdb\/cloud\//, + product: products.cloud, + urls: influxdbUrls.influxdb_cloud, + }, + { + pattern: /\/influxdb3\/core/, + product: products.influxdb3_core, + urls: influxdbUrls.core, + }, + { + pattern: /\/influxdb3\/enterprise/, + product: products.influxdb3_enterprise, + urls: influxdbUrls.enterprise, + }, + { + pattern: /\/influxdb3\/cloud-serverless/, + 
product: products.influxdb3_cloud_serverless, + urls: influxdbUrls.cloud, + }, + { + pattern: /\/influxdb3\/cloud-dedicated/, + product: products.influxdb3_cloud_dedicated, + urls: influxdbUrls.dedicated, + }, + { + pattern: /\/influxdb3\/clustered/, + product: products.influxdb3_clustered, + urls: influxdbUrls.clustered, + }, + { + pattern: /\/enterprise_v1\//, + product: products.enterprise_influxdb, + urls: influxdbUrls.oss, + }, + { + pattern: /\/influxdb.*v1\//, + product: products.influxdb, + urls: influxdbUrls.oss, + }, + { + pattern: /\/influxdb.*v2\//, + product: products.influxdb, + urls: influxdbUrls.oss, + }, + { + pattern: /\/kapacitor\//, + product: products.kapacitor, + urls: influxdbUrls.oss, + }, + { + pattern: /\/telegraf\//, + product: products.telegraf, + urls: influxdbUrls.oss, + }, + { + pattern: /\/chronograf\//, + product: products.chronograf, + urls: influxdbUrls.oss, + }, + { pattern: /\/flux\//, product: products.flux, urls: influxdbUrls.oss }, ]; for (const { pattern, product, urls } of mappings) { if (pattern.test(path)) { - return { - product: product || 'unknown', - urls: urls || {} + return { + product: product || 'unknown', + urls: urls || {}, }; } } @@ -36,7 +82,8 @@ function getCurrentProductData() { return { product: 'other', urls: {} }; } -// Return the page context (cloud, serverless, oss/enterprise, dedicated, clustered, other) +// Return the page context +// (cloud, serverless, oss/enterprise, dedicated, clustered, other) function getContext() { if (/\/influxdb\/cloud\//.test(window.location.pathname)) { return 'cloud'; @@ -78,8 +125,12 @@ const context = getContext(), protocol = location.protocol, referrer = document.referrer === '' ? 'direct' : document.referrer, referrerHost = getReferrerHost(), - // TODO: Verify this still does what we want since the addition of InfluxDB 3 naming and the Core and Enterprise versions. - version = (/^v\d/.test(pathArr[1]) || pathArr[1]?.includes('cloud') ? 
pathArr[1].replace(/^v/, '') : "n/a") + // TODO: Verify this works since the addition of InfluxDB 3 naming + // and the Core and Enterprise versions. + version = + /^v\d/.test(pathArr[1]) || pathArr[1]?.includes('cloud') + ? pathArr[1].replace(/^v/, '') + : 'n/a'; export { context, @@ -92,4 +143,4 @@ export { referrer, referrerHost, version, -}; \ No newline at end of file +}; diff --git a/assets/js/release-toc.js b/assets/js/release-toc.js index c27a9deaf..9e02ec5c4 100644 --- a/assets/js/release-toc.js +++ b/assets/js/release-toc.js @@ -1,26 +1,67 @@ /////////////////////////// Table of Contents Script /////////////////////////// /* - * This script is used to generate a table of contents for the - * release notes pages. -*/ + * This script is used to generate a table of contents for the + * release notes pages. + */ +export default function ReleaseToc({ component }) { + // Get all h2 elements that are not checkpoint-releases + const releases = Array.from(document.querySelectorAll('h2')).filter( + (el) => !el.id.match(/checkpoint-releases/) + ); -// Get all h2 elements that are not checkpoint-releases -const releases = Array.from(document.querySelectorAll('h2')).filter( - el => !el.id.match(/checkpoint-releases/) -); + // Extract data about each release from the array of releases + const releaseData = releases.map((el) => ({ + name: el.textContent, + id: el.id, + class: el.getAttribute('class'), + date: el.getAttribute('date'), + })); -// Extract data about each release from the array of releases -const releaseData = releases.map(el => ({ - name: el.textContent, - id: el.id, - class: el.getAttribute('class'), - date: el.getAttribute('date') -})); + // Build the release table of contents + const releaseTocUl = component.querySelector('#release-toc ul'); + releaseData.forEach((release) => { + releaseTocUl.appendChild(getReleaseItem(release)); + }); + + /* + * This script is used to expand the release notes table of contents by the + * number specified in the `show` 
attribute of `ul.release-list`. + * Once all the release items are visible, the "Show More" button is hidden. + */ + const showMoreBtn = component.querySelector('.show-more'); + if (showMoreBtn) { + showMoreBtn.addEventListener('click', function () { + const itemHeight = 1.885; // Item height in rem + const releaseNum = releaseData.length; + const maxHeight = releaseNum * itemHeight; + const releaseList = document.getElementById('release-list'); + const releaseIncrement = Number(releaseList.getAttribute('show')); + const currentHeightMatch = releaseList.style.height.match(/\d+\.?\d+/); + const currentHeight = currentHeightMatch + ? Number(currentHeightMatch[0]) + : 0; + const potentialHeight = currentHeight + releaseIncrement * itemHeight; + const newHeight = + potentialHeight > maxHeight ? maxHeight : potentialHeight; + + releaseList.style.height = `${newHeight}rem`; + + if (newHeight >= maxHeight) { + // Simple fade out + showMoreBtn.style.transition = 'opacity 0.1s'; + showMoreBtn.style.opacity = 0; + setTimeout(() => { + showMoreBtn.style.display = 'none'; + }, 100); + } + }); + } +} // Use release data to generate a list item for each release function getReleaseItem(releaseData) { - const li = document.createElement("li"); + const li = document.createElement('li'); if (releaseData.class !== null) { li.className = releaseData.class; } @@ -28,42 +69,3 @@ function getReleaseItem(releaseData) { li.setAttribute('date', releaseData.date); return li; } - -// Build the release table of contents -const releaseTocUl = document.querySelector('#release-toc ul'); -releaseData.forEach(release => { - releaseTocUl.appendChild(getReleaseItem(release)); -}); - -/* - * This script is used to expand the release notes table of contents by the - * number specified in the `show` attribute of `ul.release-list`. - * Once all the release items are visible, the "Show More" button is hidden. 
-*/ -const showMoreBtn = document.querySelector('#release-toc .show-more'); -if (showMoreBtn) { - showMoreBtn.addEventListener('click', function () { - const itemHeight = 1.885; // Item height in rem - const releaseNum = releaseData.length; - const maxHeight = releaseNum * itemHeight; - const releaseList = document.getElementById('release-list'); - const releaseIncrement = Number(releaseList.getAttribute('show')); - const currentHeightMatch = releaseList.style.height.match(/\d+\.?\d+/); - const currentHeight = currentHeightMatch - ? Number(currentHeightMatch[0]) - : 0; - const potentialHeight = currentHeight + releaseIncrement * itemHeight; - const newHeight = potentialHeight > maxHeight ? maxHeight : potentialHeight; - - releaseList.style.height = `${newHeight}rem`; - - if (newHeight >= maxHeight) { - // Simple fade out - showMoreBtn.style.transition = 'opacity 0.1s'; - showMoreBtn.style.opacity = 0; - setTimeout(() => { - showMoreBtn.style.display = 'none'; - }, 100); - } - }); -} diff --git a/assets/js/search-interactions.js b/assets/js/search-interactions.js deleted file mode 100644 index 4f8fdd8ac..000000000 --- a/assets/js/search-interactions.js +++ /dev/null @@ -1,10 +0,0 @@ -// Fade content wrapper when focusing on search input -$('#algolia-search-input').focus(function() { - $('.content-wrapper').fadeTo(300, .35); -}) - -// Hide search dropdown when leaving search input -$('#algolia-search-input').blur(function() { - $('.content-wrapper').fadeTo(200, 1); - $('.ds-dropdown-menu').hide(); -}) diff --git a/assets/js/services/influxdata-products.js b/assets/js/services/influxdata-products.js new file mode 100644 index 000000000..eecd8aa89 --- /dev/null +++ b/assets/js/services/influxdata-products.js @@ -0,0 +1,3 @@ +import { products as productsParam } from '@params'; + +export const products = productsParam || {}; diff --git a/assets/js/services/influxdb-urls.js b/assets/js/services/influxdb-urls.js new file mode 100644 index 000000000..1d31ff67f --- 
/dev/null +++ b/assets/js/services/influxdb-urls.js @@ -0,0 +1,3 @@ +import { influxdb_urls as influxdbUrlsParam } from '@params'; + +export const influxdbUrls = influxdbUrlsParam || {}; diff --git a/assets/js/local-storage.js b/assets/js/services/local-storage.js similarity index 93% rename from assets/js/local-storage.js rename to assets/js/services/local-storage.js index 103685f4d..8efccde12 100644 --- a/assets/js/local-storage.js +++ b/assets/js/services/local-storage.js @@ -10,7 +10,8 @@ - messages: Messages (data/notifications.yaml) that have been seen (array) - callouts: Feature callouts that have been seen (array) */ -import * as pageParams from '@params'; + +import { influxdbUrls } from './influxdb-urls.js'; // Prefix for all InfluxData docs local storage const storagePrefix = 'influxdata_docs_'; @@ -82,14 +83,12 @@ function getPreferences() { //////////// MANAGE INFLUXDATA DOCS URLS IN LOCAL STORAGE ////////////////////// //////////////////////////////////////////////////////////////////////////////// - const defaultUrls = {}; -// Guard against pageParams being null/undefined and safely access nested properties -if (pageParams && pageParams.influxdb_urls) { - Object.entries(pageParams.influxdb_urls).forEach(([product, {providers}]) => { - defaultUrls[product] = providers.filter(provider => provider.name === 'Default')[0]?.regions[0]?.url; - }); -} +Object.entries(influxdbUrls).forEach(([product, { providers }]) => { + defaultUrls[product] = + providers.filter((provider) => provider.name === 'Default')[0]?.regions[0] + ?.url || 'https://cloud2.influxdata.com'; +}); export const DEFAULT_STORAGE_URLS = { oss: defaultUrls.oss, @@ -177,7 +176,10 @@ const defaultNotificationsObj = { function getNotifications() { // Initialize notifications data if it doesn't already exist if (localStorage.getItem(notificationStorageKey) === null) { - initializeStorageItem('notifications', JSON.stringify(defaultNotificationsObj)); + initializeStorageItem( + 'notifications', + 
JSON.stringify(defaultNotificationsObj) + ); } // Retrieve and parse the notifications data as JSON @@ -221,7 +223,10 @@ function setNotificationAsRead(notificationID, notificationType) { readNotifications.push(notificationID); notificationsObj[notificationType + 's'] = readNotifications; - localStorage.setItem(notificationStorageKey, JSON.stringify(notificationsObj)); + localStorage.setItem( + notificationStorageKey, + JSON.stringify(notificationsObj) + ); } // Export functions as a module and make the file backwards compatible for non-module environments until all remaining dependent scripts are ported to modules diff --git a/assets/js/sidebar-toggle.js b/assets/js/sidebar-toggle.js index 4db64db79..49af74008 100644 --- a/assets/js/sidebar-toggle.js +++ b/assets/js/sidebar-toggle.js @@ -3,7 +3,7 @@ http://www.thesitewizard.com/javascripts/change-style-sheets.shtml */ -import * as localStorage from './local-storage.js'; +import * as localStorage from './services/local-storage.js'; // *** TO BE CUSTOMISED *** var sidebar_state_preference_name = 'sidebar_state'; diff --git a/assets/js/theme-switch.js b/assets/js/theme-switch.js index 4c97c3108..c7de8552d 100644 --- a/assets/js/theme-switch.js +++ b/assets/js/theme-switch.js @@ -1,20 +1,21 @@ import Theme from './theme.js'; export default function ThemeSwitch({ component }) { - if ( component == undefined) { + if (component === undefined) { component = document; } - component.querySelectorAll(`.theme-switch-light`).forEach((button) => { - button.addEventListener('click', function(event) { + + component.querySelectorAll('.theme-switch-light').forEach((button) => { + button.addEventListener('click', function (event) { event.preventDefault(); - Theme({ style: 'light-theme' }); + Theme({ component, style: 'light-theme' }); }); }); - component.querySelectorAll(`.theme-switch-dark`).forEach((button) => { - button.addEventListener('click', function(event) { + component.querySelectorAll('.theme-switch-dark').forEach((button) 
=> { + button.addEventListener('click', function (event) { event.preventDefault(); - Theme({ style: 'dark-theme' }); + Theme({ component, style: 'dark-theme' }); }); }); } diff --git a/assets/js/theme.js b/assets/js/theme.js index 92a6c190e..8588d44a9 100644 --- a/assets/js/theme.js +++ b/assets/js/theme.js @@ -1,4 +1,4 @@ -import { getPreference, setPreference } from './local-storage.js'; +import { getPreference, setPreference } from './services/local-storage.js'; const PROPS = { style_preference_name: 'theme', @@ -6,19 +6,22 @@ const PROPS = { style_domain: 'docs.influxdata.com', }; -function getPreferredTheme () { +function getPreferredTheme() { return `${getPreference(PROPS.style_preference_name)}-theme`; } function switchStyle({ styles_element, css_title }) { // Disable all other theme stylesheets - styles_element.querySelectorAll('link[rel*="stylesheet"][title*="theme"]') - .forEach(function (link) { - link.disabled = true; - }); + styles_element + .querySelectorAll('link[rel*="stylesheet"][title*="theme"]') + .forEach(function (link) { + link.disabled = true; + }); // Enable the stylesheet with the specified title - const link = styles_element.querySelector(`link[rel*="stylesheet"][title="${css_title}"]`); + const link = styles_element.querySelector( + `link[rel*="stylesheet"][title="${css_title}"]` + ); link && (link.disabled = false); setPreference(PROPS.style_preference_name, css_title.replace(/-theme/, '')); @@ -38,5 +41,4 @@ export default function Theme({ component, style }) { if (component.dataset?.themeCallback === 'setVisibility') { setVisibility(component); } - } diff --git a/assets/js/utils/debug-helpers.js b/assets/js/utils/debug-helpers.js new file mode 100644 index 000000000..08433e2cf --- /dev/null +++ b/assets/js/utils/debug-helpers.js @@ -0,0 +1,38 @@ +/** + * Helper functions for debugging without source maps + * Example usage: + * In your code, you can use these functions like this: + * ```javascript + * import { debugLog, debugBreak, 
debugInspect } from './debug-helpers.js'; + * + * const data = debugInspect(someData, 'Data'); + * debugLog('Processing data', 'myFunction'); + * + * function processData() { + * // Add a breakpoint that works with DevTools + * debugBreak(); + * + * // Your existing code... + * } + * ``` + * + * @fileoverview DEVELOPMENT USE ONLY - Functions should not be committed to production + */ + +/* eslint-disable no-debugger */ +/* eslint-disable-next-line */ +// NOTE: These functions are detected by ESLint rules to prevent committing debug code + +export function debugLog(message, context = '') { + const contextStr = context ? `[${context}]` : ''; + console.log(`DEBUG${contextStr}: ${message}`); +} + +export function debugBreak() { + debugger; +} + +export function debugInspect(value, label = 'Inspect') { + console.log(`DEBUG[${label}]:`, value); + return value; +} diff --git a/assets/js/utils/search-interactions.js b/assets/js/utils/search-interactions.js new file mode 100644 index 000000000..6a73b2535 --- /dev/null +++ b/assets/js/utils/search-interactions.js @@ -0,0 +1,107 @@ +/** + * Manages search interactions for DocSearch integration + * Uses MutationObserver to watch for dropdown creation + */ +export default function SearchInteractions({ searchInput }) { + const contentWrapper = document.querySelector('.content-wrapper'); + let observer = null; + let dropdownObserver = null; + let dropdownMenu = null; + const debug = false; // Set to true for debugging logs + + // Fade content wrapper when focusing on search input + function handleFocus() { + contentWrapper.style.opacity = '0.35'; + contentWrapper.style.transition = 'opacity 300ms'; + } + + // Hide search dropdown when leaving search input + function handleBlur(event) { + // Only process blur if not clicking within dropdown + const relatedTarget = event.relatedTarget; + if ( + relatedTarget && + (relatedTarget.closest('.algolia-autocomplete') || + relatedTarget.closest('.ds-dropdown-menu')) + ) { + return; + } + + 
contentWrapper.style.opacity = '1'; + contentWrapper.style.transition = 'opacity 200ms'; + + // Hide dropdown if it exists + if (dropdownMenu) { + dropdownMenu.style.display = 'none'; + } + } + + // Add event listeners + searchInput.addEventListener('focus', handleFocus); + searchInput.addEventListener('blur', handleBlur); + + // Use MutationObserver to detect when dropdown is added to the DOM + observer = new MutationObserver((mutations) => { + for (const mutation of mutations) { + if (mutation.type === 'childList') { + const newDropdown = document.querySelector( + '.ds-dropdown-menu:not([data-monitored])' + ); + if (newDropdown) { + // Save reference to dropdown + dropdownMenu = newDropdown; + newDropdown.setAttribute('data-monitored', 'true'); + + // Monitor dropdown removal/display changes + dropdownObserver = new MutationObserver((dropdownMutations) => { + for (const dropdownMutation of dropdownMutations) { + if (debug) { + if ( + dropdownMutation.type === 'attributes' && + dropdownMutation.attributeName === 'style' + ) { + console.log( + 'Dropdown style changed:', + dropdownMenu.style.display + ); + } + } + } + }); + + // Observe changes to dropdown attributes (like style) + dropdownObserver.observe(dropdownMenu, { + attributes: true, + attributeFilter: ['style'], + }); + + // Add event listeners to keep dropdown open when interacted with + dropdownMenu.addEventListener('mousedown', (e) => { + // Prevent blur on searchInput when clicking in dropdown + e.preventDefault(); + }); + } + } + } + }); + + // Start observing the document body for dropdown creation + observer.observe(document.body, { + childList: true, + subtree: true, + }); + + // Return cleanup function + return function cleanup() { + searchInput.removeEventListener('focus', handleFocus); + searchInput.removeEventListener('blur', handleBlur); + + if (observer) { + observer.disconnect(); + } + + if (dropdownObserver) { + dropdownObserver.disconnect(); + } + }; +} diff --git 
a/assets/js/utils/user-agent-platform.js b/assets/js/utils/user-agent-platform.js new file mode 100644 index 000000000..803f1bdf9 --- /dev/null +++ b/assets/js/utils/user-agent-platform.js @@ -0,0 +1,35 @@ +/** + * Platform detection utility functions + * Provides methods for detecting user's operating system + */ + +/** + * Detects user's operating system using modern techniques + * Falls back to userAgent parsing when newer APIs aren't available + * @returns {string} Operating system identifier ("osx", "win", "linux", or "other") + */ +export function getPlatform() { + // Try to use modern User-Agent Client Hints API first (Chrome 89+, Edge 89+) + if (navigator.userAgentData && navigator.userAgentData.platform) { + const platform = navigator.userAgentData.platform.toLowerCase(); + + if (platform.includes('mac')) return 'osx'; + if (platform.includes('win')) return 'win'; + if (platform.includes('linux')) return 'linux'; + } + + // Fall back to userAgent string parsing + const userAgent = navigator.userAgent.toLowerCase(); + + if ( + userAgent.includes('mac') || + userAgent.includes('iphone') || + userAgent.includes('ipad') + ) + return 'osx'; + if (userAgent.includes('win')) return 'win'; + if (userAgent.includes('linux') || userAgent.includes('android')) + return 'linux'; + + return 'other'; +} diff --git a/assets/js/v3-wayfinding.js b/assets/js/v3-wayfinding.js index b50c58f7f..761a19044 100644 --- a/assets/js/v3-wayfinding.js +++ b/assets/js/v3-wayfinding.js @@ -1,6 +1,14 @@ import { CLOUD_URLS } from './influxdb-url.js'; -import * as localStorage from './local-storage.js'; -import { context, host, hostname, path, protocol, referrer, referrerHost } from './page-context.js'; +import * as localStorage from './services/local-storage.js'; +import { + context, + host, + hostname, + path, + protocol, + referrer, + referrerHost, +} from './page-context.js'; /** * Builds a referrer whitelist array that includes the current page host and all @@ -69,8 +77,6 @@ function 
setWayfindingInputState() { } function submitWayfindingData(engine, action) { - - // Build lp using page data and engine data const lp = `ioxwayfinding,host=${hostname},path=${path},referrer=${referrer},engine=${engine} action="${action}"`; @@ -81,10 +87,7 @@ function submitWayfindingData(engine, action) { 'https://j32dswat7l.execute-api.us-east-1.amazonaws.com/prod/wayfinding' ); xhr.setRequestHeader('X-Requested-With', 'XMLHttpRequest'); - xhr.setRequestHeader( - 'Access-Control-Allow-Origin', - `${protocol}//${host}` - ); + xhr.setRequestHeader('Access-Control-Allow-Origin', `${protocol}//${host}`); xhr.setRequestHeader('Content-Type', 'text/plain; charset=utf-8'); xhr.setRequestHeader('Accept', 'application/json'); xhr.send(lp); diff --git a/assets/js/version-selector.js b/assets/js/version-selector.js index 51fa52c53..7d9161c87 100644 --- a/assets/js/version-selector.js +++ b/assets/js/version-selector.js @@ -1,19 +1,21 @@ -// Select the product dropdown and dropdown items -const productDropdown = document.querySelector("#product-dropdown"); -const dropdownItems = document.querySelector("#dropdown-items"); +export default function ProductSelector({ component }) { + // Select the product dropdown and dropdown items + const productDropdown = component.querySelector('#product-dropdown'); + const dropdownItems = component.querySelector('#dropdown-items'); -// Expand the menu on click -if (productDropdown) { - productDropdown.addEventListener("click", function() { - productDropdown.classList.toggle("open"); - dropdownItems.classList.toggle("open"); + // Expand the menu on click + if (productDropdown) { + productDropdown.addEventListener('click', function () { + productDropdown.classList.toggle('open'); + dropdownItems.classList.toggle('open'); + }); + } + + // Close the dropdown by clicking anywhere else + document.addEventListener('click', function (e) { + // Check if the click was outside of the '.product-list' container + if (!e.target.closest('.product-list')) 
{ + dropdownItems.classList.remove('open'); + } }); } - -// Close the dropdown by clicking anywhere else -document.addEventListener("click", function(e) { - // Check if the click was outside of the '.product-list' container - if (!e.target.closest('.product-list')) { - dropdownItems.classList.remove("open"); - } -}); diff --git a/assets/styles/layouts/_datetime.scss b/assets/styles/layouts/_datetime.scss new file mode 100644 index 000000000..dc8f20bdf --- /dev/null +++ b/assets/styles/layouts/_datetime.scss @@ -0,0 +1,18 @@ +/* + Datetime Components + ---------------------------------------------- +*/ + +.current-timestamp, +.current-date, +.current-time, +.enterprise-eol-date { + color: $current-timestamp-color; + display: inline-block; + font-family: $proxima; + white-space: nowrap; +} + +.nowrap { + white-space: nowrap; +} \ No newline at end of file diff --git a/assets/styles/layouts/article/_blocks.scss b/assets/styles/layouts/article/_blocks.scss index 62b205491..c7250749d 100644 --- a/assets/styles/layouts/article/_blocks.scss +++ b/assets/styles/layouts/article/_blocks.scss @@ -97,4 +97,4 @@ blockquote { "blocks/important", "blocks/warning", "blocks/caution", - "blocks/beta"; + "blocks/special-state"; diff --git a/assets/styles/layouts/article/_diagrams.scss b/assets/styles/layouts/article/_diagrams.scss index 4e3c1694e..f6c3e1b07 100644 --- a/assets/styles/layouts/article/_diagrams.scss +++ b/assets/styles/layouts/article/_diagrams.scss @@ -16,6 +16,10 @@ background: $article-code-bg !important; font-size: .85em; font-weight: $medium; + + p { + background: $article-bg !important; + } } .node { diff --git a/assets/styles/layouts/article/_pagination-btns.scss b/assets/styles/layouts/article/_pagination-btns.scss index 7f44860f7..c069bc8ae 100644 --- a/assets/styles/layouts/article/_pagination-btns.scss +++ b/assets/styles/layouts/article/_pagination-btns.scss @@ -34,5 +34,10 @@ vertical-align: middle; } } + + // Remove max-width when only one button is 
present + &:only-child { + max-width: none; + } } } diff --git a/assets/styles/layouts/article/blocks/_beta.scss b/assets/styles/layouts/article/blocks/_special-state.scss similarity index 98% rename from assets/styles/layouts/article/blocks/_beta.scss rename to assets/styles/layouts/article/blocks/_special-state.scss index b3ab3a70c..0717952cd 100644 --- a/assets/styles/layouts/article/blocks/_beta.scss +++ b/assets/styles/layouts/article/blocks/_special-state.scss @@ -1,10 +1,10 @@ -.block.beta { +.block.special-state { @include gradient($grad-burningDusk); padding: 4px; border: none; border-radius: 25px !important; - .beta-content { + .state-content { background: $article-bg; border-radius: 21px; padding: calc(1.65rem - 4px) calc(2rem - 4px) calc(.1rem + 4px) calc(2rem - 4px); diff --git a/assets/styles/styles-default.scss b/assets/styles/styles-default.scss index 1e8b162f9..5fd3eed2d 100644 --- a/assets/styles/styles-default.scss +++ b/assets/styles/styles-default.scss @@ -23,6 +23,7 @@ "layouts/syntax-highlighting", "layouts/algolia-search-overrides", "layouts/landing", + "layouts/datetime", "layouts/error-page", "layouts/footer-widgets", "layouts/modals", diff --git a/assets/styles/themes/_theme-dark.scss b/assets/styles/themes/_theme-dark.scss index b46051152..800740cf1 100644 --- a/assets/styles/themes/_theme-dark.scss +++ b/assets/styles/themes/_theme-dark.scss @@ -203,6 +203,12 @@ $article-btn-text-hover: $g20-white; $article-nav-icon-bg: $g5-pepper; $article-nav-acct-bg: $g3-castle; +// Datetime shortcode colors +$current-timestamp-color: $g15-platinum; +$current-date-color: $g15-platinum; +$current-time-color: $g15-platinum; +$enterprise-eol-date-color: $g15-platinum; + // Error Page Colors $error-page-btn: $b-pool; $error-page-btn-text: $g20-white; diff --git a/assets/styles/themes/_theme-light.scss b/assets/styles/themes/_theme-light.scss index c19e91ab2..eb9e530f3 100644 --- a/assets/styles/themes/_theme-light.scss +++ 
b/assets/styles/themes/_theme-light.scss @@ -203,6 +203,12 @@ $article-btn-text-hover: $g20-white !default; $article-nav-icon-bg: $g6-smoke !default; $article-nav-acct-bg: $g5-pepper !default; +// Datetime Colors +$current-timestamp-color: $article-text !default; +$current-date-color: $article-text !default; +$current-time-color: $article-text !default; +$enterprise-eol-date-color: $article-text !default; + // Error Page Colors $error-page-btn: $b-pool !default; $error-page-btn-text: $g20-white !default; diff --git a/build-scripts/build-copilot-instructions.js b/build-scripts/build-copilot-instructions.js index 0d089e2c1..1e537dc74 100644 --- a/build-scripts/build-copilot-instructions.js +++ b/build-scripts/build-copilot-instructions.js @@ -23,6 +23,7 @@ export { buildContributingInstructions }; /** Build instructions from CONTRIBUTING.md * This script reads CONTRIBUTING.md, formats it appropriately, * and saves it to .github/instructions/contributing.instructions.md + * Includes optimization to reduce file size for better performance */ function buildContributingInstructions() { // Paths @@ -41,16 +42,19 @@ function buildContributingInstructions() { // Read the CONTRIBUTING.md file let content = fs.readFileSync(contributingPath, 'utf8'); + // Optimize content by removing less critical sections for Copilot + content = optimizeContentForContext(content); + // Format the content for Copilot instructions with applyTo attribute content = `--- applyTo: "content/**/*.md, layouts/**/*.html" --- -# GitHub Copilot Instructions for InfluxData Documentation +# Contributing instructions for InfluxData Documentation ## Purpose and scope -GitHub Copilot should help document InfluxData products +Help document InfluxData products by creating clear, accurate technical content with proper code examples, frontmatter, shortcodes, and formatting. 
@@ -59,7 +63,17 @@ ${content}`; // Write the formatted content to the instructions file fs.writeFileSync(instructionsPath, content); - console.log(`✅ Generated Copilot instructions at ${instructionsPath}`); + const fileSize = fs.statSync(instructionsPath).size; + const sizeInKB = (fileSize / 1024).toFixed(1); + console.log( + `✅ Generated instructions at ${instructionsPath} (${sizeInKB}KB)` + ); + + if (fileSize > 40000) { + console.warn( + `⚠️ Instructions file is large (${sizeInKB}KB > 40KB) and may impact performance` + ); + } // Add the file to git if it has changed try { @@ -74,3 +88,58 @@ ${content}`; console.warn('⚠️ Could not add instructions file to git:', error.message); } } + +/** + * Optimize content for Copilot by removing or condensing less critical sections + * while preserving essential documentation guidance + */ +function optimizeContentForContext(content) { + // Remove or condense sections that are less relevant for context assistance + const sectionsToRemove = [ + // Installation and setup sections (less relevant for writing docs) + /### Install project dependencies[\s\S]*?(?=\n##|\n###|$)/g, + /### Install Node\.js dependencies[\s\S]*?(?=\n##|\n###|$)/g, + /### Install Docker[\s\S]*?(?=\n##|\n###|$)/g, + /#### Build the test dependency image[\s\S]*?(?=\n##|\n###|$)/g, + /### Install Visual Studio Code extensions[\s\S]*?(?=\n##|\n###|$)/g, + /### Run the documentation locally[\s\S]*?(?=\n##|\n###|$)/g, + + // Testing and CI/CD sections (important but can be condensed) + /### Set up test scripts and credentials[\s\S]*?(?=\n##|\n###|$)/g, + /#### Test shell and python code blocks[\s\S]*?(?=\n##|\n###|$)/g, + /#### Troubleshoot tests[\s\S]*?(?=\n##|\n###|$)/g, + /### Pytest collected 0 items[\s\S]*?(?=\n##|\n###|$)/g, + + // Long code examples that can be referenced elsewhere + /```[\s\S]{500,}?```/g, + + // Repetitive examples + /#### Example[\s\S]*?(?=\n####|\n###|\n##|$)/g, + ]; + + // Remove identified sections + 
sectionsToRemove.forEach((regex) => { + content = content.replace(regex, ''); + }); + + // Condense whitespace + content = content.replace(/\n{3,}/g, '\n\n'); + + // Remove HTML comments + content = content.replace(/<!--[\s\S]*?-->/g, ''); + + // Shorten repetitive content + content = content.replace(/(\{%[^%]+%\})[\s\S]*?\1/g, (match) => { + // If it's a long repeated pattern, show it once with a note + if (match.length > 200) { + const firstOccurrence = match.split('\n\n')[0]; + return ( + firstOccurrence + + '\n\n[Similar patterns apply - see full CONTRIBUTING.md for complete examples]' + ); + } + return match; + }); + + return content; +} diff --git a/compose.yaml b/compose.yaml index fe0293615..52c68a41a 100644 --- a/compose.yaml +++ b/compose.yaml @@ -303,14 +303,47 @@ services: container_name: influxdb3-core image: influxdb:3-core ports: - - 8181:8181 + - 8282:8181 command: - influxdb3 - serve - - --node-id=sensors_node0 + - --node-id=node0 - --log-filter=debug - --object-store=file - - --data-dir=/var/lib/influxdb3 + - --data-dir=/var/lib/influxdb3/data + - --plugin-dir=/var/lib/influxdb3/plugins + volumes: + - type: bind + source: test/.influxdb3/core/data + target: /var/lib/influxdb3/data + - type: bind + source: test/.influxdb3/core/plugins + target: /var/lib/influxdb3/plugins + influxdb3-enterprise: + container_name: influxdb3-enterprise + image: influxdb:3-enterprise + ports: + - 8181:8181 + # Change the INFLUXDB3_LICENSE_EMAIL environment variable to your email address. You can also set it in a `.env` file in the same directory as this compose file. Docker Compose automatically loads the .env file. + # The license email option is only used the first time you run the container; you can't change the license email after the first run. + # The server stores the license in the data directory in the object store and the license is associated with the cluster ID and email. 
+ command: + - influxdb3 + - serve + - --node-id=node0 + - --cluster-id=cluster0 + - --log-filter=debug + - --object-store=file + - --data-dir=/var/lib/influxdb3/data + - --plugin-dir=/var/lib/influxdb3/plugins + - --license-email=${INFLUXDB3_LICENSE_EMAIL} + volumes: + - type: bind + source: test/.influxdb3/enterprise/data + target: /var/lib/influxdb3/data + - type: bind + source: test/.influxdb3/enterprise/plugins + target: /var/lib/influxdb3/plugins telegraf-pytest: container_name: telegraf-pytest image: influxdata/docs-pytest diff --git a/config/_default/config.yml b/config/_default/config.yml deleted file mode 100644 index 917d78e2d..000000000 --- a/config/_default/config.yml +++ /dev/null @@ -1,2 +0,0 @@ -import: - - hugo.yml \ No newline at end of file diff --git a/hugo.yml b/config/_default/hugo.yml similarity index 66% rename from hugo.yml rename to config/_default/hugo.yml index 909917486..b98cf11f7 100644 --- a/hugo.yml +++ b/config/_default/hugo.yml @@ -1,4 +1,4 @@ -baseURL: 'https://docs.influxdata.com/' +baseURL: https://docs.influxdata.com/ languageCode: en-us title: InfluxDB Documentation @@ -49,21 +49,52 @@ privacy: youtube: disable: false privacyEnhanced: true + outputFormats: json: mediaType: application/json baseName: pages isPlainText: true +# Asset processing configuration for development build: # Ensure Hugo correctly processes JavaScript modules jsConfig: nodeEnv: "development" +# Development asset processing + writeStats: false + useResourceCacheWhen: "fallback" + noJSConfigInAssets: false + +# Asset processing configuration +assetDir: "assets" module: mounts: - source: assets target: assets - - source: node_modules - target: assets/node_modules \ No newline at end of file + target: assets/node_modules + +# Environment parameters +params: + env: development + environment: development + +# Configure the server for development +server: + port: 1313 + baseURL: 'http://localhost:1313/' + watchChanges: true + disableLiveReload: false + +# Ignore 
specific warning logs +ignoreLogs: + - warning-goldmark-raw-html + +# Disable minification for development +minify: + disableJS: true + disableCSS: true + disableHTML: true + minifyOutput: false diff --git a/config/production/config.yml b/config/production/config.yml new file mode 100644 index 000000000..da574daff --- /dev/null +++ b/config/production/config.yml @@ -0,0 +1,40 @@ +# Production overrides for CI/CD builds +baseURL: 'https://docs.influxdata.com/' + +# Production environment parameters +params: + env: production + environment: production + +# Enable minification for production +minify: + disableJS: false + disableCSS: false + disableHTML: false + minifyOutput: true + +# Production asset processing +build: + writeStats: false + useResourceCacheWhen: "fallback" + buildOptions: + sourcemap: false + target: "es2015" + +# Asset processing configuration +assetDir: "assets" + +# Mount assets for production +module: + mounts: + - source: assets + target: assets + - source: node_modules + target: assets/node_modules + +# Disable development server settings +server: {} + +# Suppress the warning mentioned in the error +ignoreLogs: + - 'warning-goldmark-raw-html' \ No newline at end of file diff --git a/config/production/hugo.yml b/config/production/hugo.yml new file mode 100644 index 000000000..bd5911c96 --- /dev/null +++ b/config/production/hugo.yml @@ -0,0 +1,17 @@ +build: + writeStats: false + useResourceCacheWhen: "fallback" + buildOptions: + sourcemap: false + target: "es2015" +minify: + disableJS: false + disableCSS: false + disableHTML: false + minifyOutput: true +params: + env: production + environment: production +server: { + disableLiveReload: true +} \ No newline at end of file diff --git a/config/staging/hugo.yml b/config/staging/hugo.yml new file mode 100644 index 000000000..7d22ffb17 --- /dev/null +++ b/config/staging/hugo.yml @@ -0,0 +1,19 @@ +baseURL: https://test2.docs.influxdata.com/ +build: + writeStats: false + useResourceCacheWhen: "fallback" 
+ buildOptions: + sourcemap: false + target: "es2015" +minify: + disableJS: false + disableCSS: false + disableHTML: false + minifyOutput: true +params: + env: staging + environment: staging +server: { + disableLiveReload: true +} + \ No newline at end of file diff --git a/config/testing/config.yml b/config/testing/config.yml deleted file mode 100644 index f403c8347..000000000 --- a/config/testing/config.yml +++ /dev/null @@ -1,20 +0,0 @@ -baseURL: 'http://localhost:1315/' - -server: - port: 1315 - -# Override settings for testing -buildFuture: true - -# Configure what content is built in testing env -params: - environment: testing - buildTestContent: true - -# Keep your shared content exclusions -ignoreFiles: - - "content/shared/.*" - -# Ignore specific warning logs -ignoreLogs: - - warning-goldmark-raw-html \ No newline at end of file diff --git a/content/enterprise_influxdb/v1/administration/monitor/logs.md b/content/enterprise_influxdb/v1/administration/monitor/logs.md index 0efdb8947..871de19ab 100644 --- a/content/enterprise_influxdb/v1/administration/monitor/logs.md +++ b/content/enterprise_influxdb/v1/administration/monitor/logs.md @@ -120,13 +120,13 @@ You can view the file [here](https://github.com/influxdb/influxdb/blob/master/sc InfluxDB 1.5 introduces the option to log HTTP request traffic separately from the other InfluxDB log output. When HTTP request logging is enabled, the HTTP logs are intermingled by default with internal InfluxDB logging. By redirecting the HTTP request log entries to a separate file, both log files are easier to read, monitor, and debug. -See [Redirecting HTTP request logging](/enterprise_influxdb/v1/administration/logs/#redirecting-http-access-logging) in the InfluxDB OSS documentation. +For more information, see the [InfluxDB OSS v1 HTTP access logging documentation](/influxdb/v1/administration/logs/#http-access-logging). 
## Structured logging With InfluxDB 1.5, structured logging is supported and enable machine-readable and more developer-friendly log output formats. The two new structured log formats, `logfmt` and `json`, provide easier filtering and searching with external tools and simplifies integration of InfluxDB logs with Splunk, Papertrail, Elasticsearch, and other third party tools. -See [Structured logging](/enterprise_influxdb/v1/administration/logs/#structured-logging) in the InfluxDB OSS documentation. +For more information, see the [InfluxDB OSS v1 structured logging documentation](/influxdb/v1/administration/logs/#structured-logging). ## Tracing diff --git a/content/enterprise_influxdb/v1/introduction/installation/_index.md b/content/enterprise_influxdb/v1/introduction/installation/_index.md index 1998a5f8d..772703fbf 100644 --- a/content/enterprise_influxdb/v1/introduction/installation/_index.md +++ b/content/enterprise_influxdb/v1/introduction/installation/_index.md @@ -11,6 +11,10 @@ menu: name: Install weight: 103 parent: Introduction +related: + - /enterprise_influxdb/v1/introduction/installation/docker/ + - /enterprise_influxdb/v1/introduction/installation/single-server/ + - /enterprise_influxdb/v1/introduction/installation/fips-compliant/ --- Complete the following steps to install an InfluxDB Enterprise cluster in your own environment: @@ -19,8 +23,4 @@ Complete the following steps to install an InfluxDB Enterprise cluster in your o 2. [Install InfluxDB data nodes](/enterprise_influxdb/v1/introduction/installation/data_node_installation/) 3. [Install Chronograf](/enterprise_influxdb/v1/introduction/installation/chrono_install/) -{{< influxdbu title="Installing InfluxDB Enterprise" summary="Learn about InfluxDB architecture and how to install InfluxDB Enterprise with step-by-step instructions." 
action="Take the course" link="https://university.influxdata.com/courses/installing-influxdb-enterprise-tutorial/" >}} - -#### Other installation options -- [Install InfluxDB Enterprise on a single server](/enterprise_influxdb/v1/introduction/installation/single-server/) -- [Federal Information Processing Standards (FIPS)-compliant InfluxDB Enterprise](/enterprise_influxdb/v1/introduction/installation/fips-compliant/) \ No newline at end of file +{{< influxdbu title="Installing InfluxDB Enterprise" summary="Learn about InfluxDB architecture and how to install InfluxDB Enterprise with step-by-step instructions." action="Take the course" link="https://university.influxdata.com/courses/installing-influxdb-enterprise-tutorial/" >}} \ No newline at end of file diff --git a/content/enterprise_influxdb/v1/introduction/installation/data_node_installation.md b/content/enterprise_influxdb/v1/introduction/installation/data_node_installation.md index 57771f105..1648448cb 100644 --- a/content/enterprise_influxdb/v1/introduction/installation/data_node_installation.md +++ b/content/enterprise_influxdb/v1/introduction/installation/data_node_installation.md @@ -327,7 +327,7 @@ influxdb 2706 0.2 7.0 571008 35376 ? Sl 15:37 0:16 /usr/bin/influx ``` If you do not see the expected output, the process is either not launching or is exiting prematurely. -Check the [logs](/enterprise_influxdb/v1/administration/logs/) +Check the [logs](/enterprise_influxdb/v1/administration/monitor/logs/) for error messages and verify the previous setup steps are complete. If you see the expected output, repeat for the remaining data nodes. @@ -395,6 +395,10 @@ to the cluster. {{% /expand %}} {{< /expand-wrapper >}} +## Docker installation + +For Docker-based installations, see [Install and run InfluxDB v1 Enterprise with Docker](/enterprise_influxdb/v1/introduction/installation/docker/) for complete instructions on setting up data nodes using Docker images. 
+ ## Next steps Once your data nodes are part of your cluster, do the following: diff --git a/content/enterprise_influxdb/v1/introduction/installation/docker/_index.md b/content/enterprise_influxdb/v1/introduction/installation/docker/_index.md new file mode 100644 index 000000000..5f3507b81 --- /dev/null +++ b/content/enterprise_influxdb/v1/introduction/installation/docker/_index.md @@ -0,0 +1,238 @@ +--- +title: Install and run InfluxDB v1 Enterprise with Docker +description: Install and run InfluxDB v1 Enterprise using Docker images for meta nodes and data nodes. +menu: + enterprise_influxdb_v1: + name: Install with Docker + weight: 30 + parent: Install +related: + - /enterprise_influxdb/v1/introduction/installation/docker/docker-troubleshooting/ +--- + +InfluxDB v1 Enterprise provides Docker images for both meta nodes and data nodes to simplify cluster deployment and management. +Using Docker allows you to quickly set up and run InfluxDB Enterprise clusters with consistent configurations. + +> [!Important] +> #### Enterprise license required +> You must have a valid license to run InfluxDB Enterprise. +> Contact InfluxData for licensing information or obtain a 14-day demo license via the [InfluxDB Enterprise portal](https://portal.influxdata.com/users/new). + +## Docker image variants + +InfluxDB Enterprise provides two specialized Docker images: + +- **`influxdb:meta`**: Enterprise meta node package for clustering +- **`influxdb:data`**: Enterprise data node package for clustering + +## Requirements + +- [Docker](https://docs.docker.com/get-docker/) installed and running +- Valid [InfluxData license key](#enterprise-license-required) +- Network connectivity between nodes +- At least 3 meta nodes (odd number recommended) +- At least 2 data nodes + +## Set up an InfluxDB Enterprise cluster with Docker + +### 1. 
Create a Docker network + +Create a custom Docker network to allow communication between meta and data nodes: + +```bash +docker network create influxdb +``` + +### 2. Start meta nodes + +Start three meta nodes using the `influxdb:meta` image. +Each meta node requires a unique hostname and the Enterprise license key: + +```bash +# Start first meta node +docker run -d \ + --name=influxdb-meta-0 \ + --network=influxdb \ + -h influxdb-meta-0 \ + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key \ + influxdb:meta + +# Start second meta node +docker run -d \ + --name=influxdb-meta-1 \ + --network=influxdb \ + -h influxdb-meta-1 \ + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key \ + influxdb:meta + +# Start third meta node +docker run -d \ + --name=influxdb-meta-2 \ + --network=influxdb \ + -h influxdb-meta-2 \ + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key \ + influxdb:meta +``` + +### 3. Configure meta nodes to know each other + +From the first meta node, add the other meta nodes to the cluster: + +```bash +# Add the second meta node +docker exec influxdb-meta-0 \ + influxd-ctl add-meta influxdb-meta-1:8091 + +# Add the third meta node +docker exec influxdb-meta-0 \ + influxd-ctl add-meta influxdb-meta-2:8091 +``` + +### 4. Start data nodes + +Start two or more data nodes using the `influxdb:data` image: + +```bash +# Start first data node +docker run -d \ + --name=influxdb-data-0 \ + --network=influxdb \ + -h influxdb-data-0 \ + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key \ + influxdb:data + +# Start second data node +docker run -d \ + --name=influxdb-data-1 \ + --network=influxdb \ + -h influxdb-data-1 \ + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key \ + influxdb:data +``` + +### 5. 
Add data nodes to the cluster + +From the first meta node, register each data node with the cluster: + +```bash +# Add first data node +docker exec influxdb-meta-0 \ + influxd-ctl add-data influxdb-data-0:8088 + +# Add second data node +docker exec influxdb-meta-0 \ + influxd-ctl add-data influxdb-data-1:8088 +``` + +### 6. Verify the cluster + +Check that all nodes are properly added to the cluster: + +```bash +docker exec influxdb-meta-0 influxd-ctl show +``` + +Expected output: +``` +Data Nodes +========== +ID TCP Address Version +4 influxdb-data-0:8088 1.x.x-cX.X.X +5 influxdb-data-1:8088 1.x.x-cX.X.X + +Meta Nodes +========== +TCP Address Version +influxdb-meta-0:8091 1.x.x-cX.X.X +influxdb-meta-1:8091 1.x.x-cX.X.X +influxdb-meta-2:8091 1.x.x-cX.X.X +``` + +## Configuration options + +### Using environment variables + +You can configure {{% product-name %}} using environment variables with the format `INFLUXDB_
_`. + +Common environment variables: +- `INFLUXDB_REPORTING_DISABLED=true` +- `INFLUXDB_META_DIR=/path/to/metadir` +- `INFLUXDB_ENTERPRISE_REGISTRATION_ENABLED=true` +- `INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key` + +For all available environment variables, see how to [Configure Enterprise](/enterprise_influxdb/v1/administration/configure/). + +### Using configuration files + +You can also mount custom configuration files: + +```bash +# Mount custom meta configuration +docker run -d \ + --name=influxdb-meta-0 \ + --network=influxdb \ + -h influxdb-meta-0 \ + -v /path/to/influxdb-meta.conf:/etc/influxdb/influxdb-meta.conf \ + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key \ + influxdb:meta + +# Mount custom data configuration +docker run -d \ + --name=influxdb-data-0 \ + --network=influxdb \ + -h influxdb-data-0 \ + -v /path/to/influxdb.conf:/etc/influxdb/influxdb.conf \ + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key \ + influxdb:data +``` + +## Exposing ports + +To access your InfluxDB Enterprise cluster from outside Docker, expose the necessary ports: + +```bash +# Data node with HTTP API port exposed +docker run -d \ + --name=influxdb-data-0 \ + --network=influxdb \ + -h influxdb-data-0 \ + -p 8086:8086 \ + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key \ + influxdb:data +``` + +## Persistent data storage + +To persist data beyond container lifecycles, mount volumes: + +```bash +# Meta node with persistent storage +docker run -d \ + --name=influxdb-meta-0 \ + --network=influxdb \ + -h influxdb-meta-0 \ + -v influxdb-meta-0-data:/var/lib/influxdb \ + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key \ + influxdb:meta + +# Data node with persistent storage +docker run -d \ + --name=influxdb-data-0 \ + --network=influxdb \ + -h influxdb-data-0 \ + -v influxdb-data-0-data:/var/lib/influxdb \ + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key \ + influxdb:data +``` + +## Next steps + +Once your InfluxDB Enterprise cluster is running: + +1. 
[Set up authentication and authorization](/enterprise_influxdb/v1/administration/configure/security/authentication/) for your cluster. +2. [Enable TLS encryption](/enterprise_influxdb/v1/guides/enable-tls/) for secure communication. +3. [Install and set up Chronograf](/enterprise_influxdb/v1/introduction/installation/chrono_install) for cluster management and visualization. +4. Configure your load balancer to send client traffic to data nodes. For more information, see [Data node installation](/enterprise_influxdb/v1/introduction/installation/data_node_installation/). +5. [Monitor your cluster](/enterprise_influxdb/v1/administration/monitor/) for performance and reliability. +6. [Write data with the InfluxDB API](/enterprise_influxdb/v1/guides/write_data/). +7. [Query data with the InfluxDB API](/enterprise_influxdb/v1/guides/query_data/). diff --git a/content/enterprise_influxdb/v1/introduction/installation/docker/docker-troubleshooting.md b/content/enterprise_influxdb/v1/introduction/installation/docker/docker-troubleshooting.md new file mode 100644 index 000000000..959a84ea4 --- /dev/null +++ b/content/enterprise_influxdb/v1/introduction/installation/docker/docker-troubleshooting.md @@ -0,0 +1,226 @@ +--- +title: Docker troubleshooting for InfluxDB v1 Enterprise +description: Common Docker-specific issues and solutions for InfluxDB v1 Enterprise deployments. +menu: + enterprise_influxdb_v1: + name: Docker troubleshooting + weight: 35 + parent: Install with Docker +related: + - /enterprise_influxdb/v1/introduction/installation/docker/ + - /enterprise_influxdb/v1/troubleshooting/ + - /enterprise_influxdb/v1/administration/monitor/logs/ +--- + +This guide covers common Docker-specific issues and solutions when running InfluxDB v1 Enterprise in containers. + +## Common Docker issues + +### License key issues + +#### Problem: Container fails to start with license error + +**Symptoms:** +``` +license key verification failed +``` + +**Solution:** +1. 
Verify your license key is valid and not expired +2. Ensure the license key environment variable is set correctly: + ```bash + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-actual-license-key + ``` +3. If nodes cannot reach `portal.influxdata.com`, use a license file instead: + ```bash + -v /path/to/license.json:/etc/influxdb/license.json + -e INFLUXDB_ENTERPRISE_LICENSE_PATH=/etc/influxdb/license.json + ``` + +### Network connectivity issues + +#### Problem: Nodes cannot communicate with each other + +**Symptoms:** +- Meta nodes fail to join cluster +- Data nodes cannot connect to meta nodes +- `influxd-ctl show` shows missing nodes + +**Solution:** +1. Ensure all containers are on the same Docker network: + ```bash + docker network create influxdb + # Add --network=influxdb to all container runs + ``` +2. Use container hostnames consistently: + ```bash + # Use hostname (-h) that matches container name + -h influxdb-meta-0 --name=influxdb-meta-0 + ``` +3. Verify network connectivity between containers: + ```bash + docker exec influxdb-meta-0 ping influxdb-meta-1 + ``` + +#### Problem: Cannot access InfluxDB from host machine + +**Symptoms:** +- Connection refused when trying to connect to InfluxDB API +- Client tools cannot reach the database + +**Solution:** +Expose the HTTP API port (8086) when starting data nodes: +```bash +docker run -d \ + --name=influxdb-data-0 \ + --network=influxdb \ + -h influxdb-data-0 \ + -p 8086:8086 \ + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key \ + influxdb:data +``` + +### Configuration issues + +#### Problem: Custom configuration not being applied + +**Symptoms:** +- Environment variables ignored +- Configuration file changes not taking effect + +**Solution:** +1. For environment variables, use the correct format `INFLUXDB_$SECTION_$NAME`: + ```bash + # Correct + -e INFLUXDB_REPORTING_DISABLED=true + -e INFLUXDB_META_DIR=/custom/meta/dir + + # Incorrect + -e REPORTING_DISABLED=true + ``` + +2. 
For configuration files, ensure proper mounting: + ```bash + # Mount config file correctly + -v /host/path/influxdb.conf:/etc/influxdb/influxdb.conf + ``` + +3. Verify file permissions on mounted configuration files: + ```bash + # Config files should be readable by influxdb user (uid 1000) + chown 1000:1000 /host/path/influxdb.conf + chmod 644 /host/path/influxdb.conf + ``` + +### Data persistence issues + +#### Problem: Data lost when container restarts + +**Symptoms:** +- Databases and data disappear after container restart +- Cluster state not preserved + +**Solution:** +Mount data directories as volumes: +```bash +# For meta nodes +-v influxdb-meta-0-data:/var/lib/influxdb + +# For data nodes +-v influxdb-data-0-data:/var/lib/influxdb +``` + +### Resource and performance issues + +#### Problem: Containers running out of memory + +**Symptoms:** +- Containers being killed by Docker +- OOMKilled status in `docker ps` + +**Solution:** +1. Increase memory limits: + ```bash + --memory=4g --memory-swap=8g + ``` + +2. Monitor memory usage: + ```bash + docker stats influxdb-data-0 + ``` + +3. Optimize InfluxDB configuration for available resources. + +#### Problem: Poor performance in containerized environment + +**Solution:** +1. Ensure adequate CPU and memory allocation +2. Use appropriate Docker storage drivers +3. 
Consider host networking for high-throughput scenarios: + ```bash + --network=host + ``` + +## Debugging commands + +### Check container logs +```bash +# View container logs +docker logs influxdb-meta-0 +docker logs influxdb-data-0 + +# Follow logs in real-time +docker logs -f influxdb-meta-0 +``` + +### Verify cluster status +```bash +# Check cluster status from any meta node +docker exec influxdb-meta-0 influxd-ctl show + +# Check individual node status +docker exec influxdb-meta-0 influxd-ctl show-shards +``` + +### Network troubleshooting +```bash +# Test connectivity between containers +docker exec influxdb-meta-0 ping influxdb-data-0 +docker exec influxdb-meta-0 telnet influxdb-data-0 8088 + +# Check which ports are listening +docker exec influxdb-meta-0 netstat -tlnp +``` + +### Configuration verification +```bash +# Check effective configuration +docker exec influxdb-meta-0 cat /etc/influxdb/influxdb-meta.conf +docker exec influxdb-data-0 cat /etc/influxdb/influxdb.conf + +# Verify environment variables +docker exec influxdb-meta-0 env | grep INFLUXDB +``` + +## Best practices for Docker deployments + +1. **Use specific image tags** instead of `latest` for production deployments +2. **Implement health checks** to monitor container status +3. **Use Docker Compose** for complex multi-container setups +4. **Mount volumes** for data persistence +5. **Set resource limits** to prevent resource exhaustion +6. **Use secrets management** for license keys in production +7. **Implement proper logging** and monitoring +8. **Regular backups** of data volumes + +## Getting additional help + +If you continue to experience issues: + +1. Check the [general troubleshooting guide](/enterprise_influxdb/v1/troubleshooting/) +2. Review [InfluxDB Enterprise logs](/enterprise_influxdb/v1/administration/monitor/logs/) +3. 
Contact [InfluxData support](https://support.influxdata.com/) with: + - Docker version and configuration + - Container logs + - Cluster status output + - Network configuration details diff --git a/content/enterprise_influxdb/v1/introduction/installation/meta_node_installation.md b/content/enterprise_influxdb/v1/introduction/installation/meta_node_installation.md index 36ec1fe85..3f5c752ac 100644 --- a/content/enterprise_influxdb/v1/introduction/installation/meta_node_installation.md +++ b/content/enterprise_influxdb/v1/introduction/installation/meta_node_installation.md @@ -365,6 +365,10 @@ the cluster._ {{% /expand %}} {{< /expand-wrapper >}} +## Docker installation + +For Docker-based installations, see [Install and run InfluxDB v1 Enterprise with Docker](/enterprise_influxdb/v1/introduction/installation/docker/) for complete instructions on setting up meta nodes using Docker images. + After your meta nodes are part of your cluster, [install data nodes](/enterprise_influxdb/v1/introduction/installation/data_node_installation/). diff --git a/content/enterprise_influxdb/v1/introduction/installation/single-server.md b/content/enterprise_influxdb/v1/introduction/installation/single-server.md index 3ebb48701..a440400e2 100644 --- a/content/enterprise_influxdb/v1/introduction/installation/single-server.md +++ b/content/enterprise_influxdb/v1/introduction/installation/single-server.md @@ -475,7 +475,7 @@ sudo systemctl start influxdb ``` If you do not see the expected output, the process is either not launching or is exiting prematurely. - Check the [logs](/enterprise_influxdb/v1/administration/logs/) + Check the [logs](/enterprise_influxdb/v1/administration/monitor/logs/) for error messages and verify the previous setup steps are complete. 5. **Use `influxd-ctl` to add the data process to the InfluxDB Enterprise "cluster"**: @@ -542,9 +542,7 @@ For Chronograf installation instructions, see [Install Chronograf](/chronograf/v1/introduction/installation/). 
## Next steps -- Add more users if necessary. - See [Manage users and permissions](/enterprise_influxdb/v1/administration/manage/users-and-permissions/) - for more information. -- [Enable TLS](/enterprise_influxdb/v1/guides/enable-tls/). -- [Write data with the InfluxDB API](/enterprise_influxdb/v1/guides/write_data/). -- [Query data with the InfluxDB API](/enterprise_influxdb/v1/guides/query_data/). +- For information about adding users, see [Manage users and permissions](/enterprise_influxdb/v1/administration/manage/users-and-permissions/) +- [Enable TLS](/enterprise_influxdb/v1/guides/enable-tls/) +- [Write data with the InfluxDB API](/enterprise_influxdb/v1/guides/write_data/) +- [Query data with the InfluxDB API](/enterprise_influxdb/v1/guides/query_data/) diff --git a/content/enterprise_influxdb/v1/tools/influxd-ctl/backup.md b/content/enterprise_influxdb/v1/tools/influxd-ctl/backup.md index 5b73c0325..21f4146b7 100644 --- a/content/enterprise_influxdb/v1/tools/influxd-ctl/backup.md +++ b/content/enterprise_influxdb/v1/tools/influxd-ctl/backup.md @@ -81,7 +81,7 @@ influxd-ctl backup /path/to/backup-dir ### Perform a full backup ```sh -influxd-ctl backup -full /path/to/backup-dir +influxd-ctl backup -strategy full /path/to/backup-dir ``` ### Estimate the size of a backup diff --git a/content/example.md b/content/example.md index 5bcd782aa..44800941f 100644 --- a/content/example.md +++ b/content/example.md @@ -1267,3 +1267,106 @@ This is small tab 2.4 content. {{% /tab-content %}} {{< /tabs-wrapper >}} + +## Group key demo + +Used to demonstrate Flux group keys + +{{< tabs-wrapper >}} +{{% tabs "small" %}} +[Input](#) +[Output](#) +Click to view output +{{% /tabs %}} +{{% tab-content %}} + +The following data is output from the last `filter()` and piped forward into `group()`: + +> [!Note] +> `_start` and `_stop` columns have been omitted. 
+ +{{% flux/group-key "[_measurement=home, room=Kitchen, _field=hum]" true %}} + +| _time | _measurement | room | _field | _value | +| :------------------- | :----------- | :---------- | :----- | :----- | +| 2022-01-01T08:00:00Z | home | Kitchen | hum | 35.9 | +| 2022-01-01T09:00:00Z | home | Kitchen | hum | 36.2 | +| 2022-01-01T10:00:00Z | home | Kitchen | hum | 36.1 | + +{{% flux/group-key "[_measurement=home, room=Living Room, _field=hum]" true %}} + +| _time | _measurement | room | _field | _value | +| :------------------- | :----------- | :---------- | :----- | :----- | +| 2022-01-01T08:00:00Z | home | Living Room | hum | 35.9 | +| 2022-01-01T09:00:00Z | home | Living Room | hum | 35.9 | +| 2022-01-01T10:00:00Z | home | Living Room | hum | 36 | + +{{% flux/group-key "[_measurement=home, room=Kitchen, _field=temp]" true %}} + +| _time | _measurement | room | _field | _value | +| :------------------- | :----------- | :---------- | :----- | :----- | +| 2022-01-01T08:00:00Z | home | Kitchen | temp | 21 | +| 2022-01-01T09:00:00Z | home | Kitchen | temp | 23 | +| 2022-01-01T10:00:00Z | home | Kitchen | temp | 22.7 | + +{{% flux/group-key "[_measurement=home, room=Living Room, _field=temp]" true %}} + +| _time | _measurement | room | _field | _value | +| :------------------- | :----------- | :---------- | :----- | :----- | +| 2022-01-01T08:00:00Z | home | Living Room | temp | 21.1 | +| 2022-01-01T09:00:00Z | home | Living Room | temp | 21.4 | +| 2022-01-01T10:00:00Z | home | Living Room | temp | 21.8 | + +{{% /tab-content %}} +{{% tab-content %}} + +When grouped by `_field`, all rows with the `temp` field will be in one table +and all the rows with the `hum` field will be in another. +`_measurement` and `room` columns no longer affect how rows are grouped. + +{{% note %}} +`_start` and `_stop` columns have been omitted. 
+{{% /note %}} + +{{% flux/group-key "[_field=hum]" true %}} + +| _time | _measurement | room | _field | _value | +| :------------------- | :----------- | :---------- | :----- | :----- | +| 2022-01-01T08:00:00Z | home | Kitchen | hum | 35.9 | +| 2022-01-01T09:00:00Z | home | Kitchen | hum | 36.2 | +| 2022-01-01T10:00:00Z | home | Kitchen | hum | 36.1 | +| 2022-01-01T08:00:00Z | home | Living Room | hum | 35.9 | +| 2022-01-01T09:00:00Z | home | Living Room | hum | 35.9 | +| 2022-01-01T10:00:00Z | home | Living Room | hum | 36 | + +{{% flux/group-key "[_field=temp]" true %}} + +| _time | _measurement | room | _field | _value | +| :------------------- | :----------- | :---------- | :----- | :----- | +| 2022-01-01T08:00:00Z | home | Kitchen | temp | 21 | +| 2022-01-01T09:00:00Z | home | Kitchen | temp | 23 | +| 2022-01-01T10:00:00Z | home | Kitchen | temp | 22.7 | +| 2022-01-01T08:00:00Z | home | Living Room | temp | 21.1 | +| 2022-01-01T09:00:00Z | home | Living Room | temp | 21.4 | +| 2022-01-01T10:00:00Z | home | Living Room | temp | 21.8 | + +{{% /tab-content %}} +{{< /tabs-wrapper >}} + +## datetime/current-timestamp shortcode + +### Default usage + +{{< datetime/current-timestamp >}} + +### Format YYYY-MM-DD HH:mm:ss + +{{< datetime/current-timestamp format="YYYY-MM-DD HH:mm:ss" >}} + +### Format with UTC timezone + +{{< datetime/current-timestamp format="YYYY-MM-DD HH:mm:ss" timezone="UTC" >}} + +### Format with America/New_York timezone + +{{< datetime/current-timestamp format="YYYY-MM-DD HH:mm:ss" timezone="America/New_York" >}} diff --git a/content/influxdb/v1/introduction/get-started.md b/content/influxdb/v1/introduction/get-started/_index.md similarity index 73% rename from content/influxdb/v1/introduction/get-started.md rename to content/influxdb/v1/introduction/get-started/_index.md index 50c21b6e8..75cdd2c6a 100644 --- a/content/influxdb/v1/introduction/get-started.md +++ b/content/influxdb/v1/introduction/get-started/_index.md @@ -1,19 +1,9 @@ --- 
title: Get started with InfluxDB OSS -description: Get started with InfluxDB OSS. -# v2.0 alias below routes old external links here temporarily. +description: Get started with InfluxDB OSS. Learn how to create databases, write data, and query your time series data. aliases: - /influxdb/v1/introduction/getting_started/ - /influxdb/v1/introduction/getting-started/ - - /influxdb/v2/introduction/getting-started/ - - /influxdb/v2/introduction/getting-started/ - - /influxdb/v2/introduction/getting_started/ - - /influxdb/v2/introduction/getting_started/ - - /influxdb/v2/introduction/getting_started/ - - /influxdb/v2/introduction/getting_started/ - - /influxdb/v2/introduction/getting_started/ - - /influxdb/v2/introduction/getting-started/ - menu: influxdb_v1: name: Get started with InfluxDB @@ -23,21 +13,29 @@ alt_links: v2: /influxdb/v2/get-started/ --- -With InfluxDB open source (OSS) [installed](/influxdb/v1/introduction/installation), you're ready to start doing some awesome things. -In this section we'll use the `influx` [command line interface](/influxdb/v1/tools/shell/) (CLI), which is included in all -InfluxDB packages and is a lightweight and simple way to interact with the database. -The CLI communicates with InfluxDB directly by making requests to the InfluxDB API over port `8086` by default. +With InfluxDB open source (OSS) [installed](/influxdb/v1/introduction/installation), you're ready to start working with time series data. +This guide uses the `influx` [command line interface](/influxdb/v1/tools/shell/) (CLI), which is included with InfluxDB +and provides direct access to the database. +The CLI communicates with InfluxDB through the HTTP API on port `8086`. -> **Note:** The database can also be used by making raw HTTP requests. -See [Writing Data](/influxdb/v1/guides/writing_data/) and [Querying Data](/influxdb/v1/guides/querying_data/) -for examples with the `curl` application. 
+> [!Tip] +> **Docker users**: Access the CLI from your container using: +> ```bash +> docker exec -it influx +> ``` + +> [!Note] +> #### Directly access the API +> You can also interact with InfluxDB using the HTTP API directly. +> See [Writing Data](/influxdb/v1/guides/writing_data/) and [Querying Data](/influxdb/v1/guides/querying_data/) for examples using `curl`. ## Creating a database -If you've installed InfluxDB locally, the `influx` command should be available via the command line. -Executing `influx` will start the CLI and automatically connect to the local InfluxDB instance -(assuming you have already started the server with `service influxdb start` or by running `influxd` directly). -The output should look like this: +After installing InfluxDB locally, the `influx` command is available from your terminal. +Running `influx` starts the CLI and connects to your local InfluxDB instance +(ensure InfluxDB is running with `service influxdb start` or `influxd`). +To start the CLI and connect to the local InfluxDB instance, run the following command. +The [`-precision` argument](/influxdb/v1/tools/shell/#influx-arguments) specifies the format and precision of any returned timestamps. ```bash $ influx -precision rfc3339 @@ -46,15 +44,12 @@ InfluxDB shell {{< latest-patch >}} > ``` -> **Notes:** -> -* The InfluxDB API runs on port `8086` by default. -Therefore, `influx` will connect to port `8086` and `localhost` by default. -If you need to alter these defaults, run `influx --help`. -* The [`-precision` argument](/influxdb/v1/tools/shell/#influx-arguments) specifies the format/precision of any returned timestamps. -In the example above, `rfc3339` tells InfluxDB to return timestamps in [RFC3339 format](https://www.ietf.org/rfc/rfc3339.txt) (`YYYY-MM-DDTHH:MM:SS.nnnnnnnnnZ`). +The `influx` CLI connects to port `localhost:8086` (the default). 
+The timestamp precision `rfc3339` tells InfluxDB to return timestamps in [RFC3339 format](https://www.ietf.org/rfc/rfc3339.txt) (`YYYY-MM-DDTHH:MM:SS.nnnnnnnnnZ`). -The command line is now ready to take input in the form of the Influx Query Language (a.k.a InfluxQL) statements. +To view available options for customizing CLI connection parameters or other settings, run `influx --help` in your terminal. + +The command line is ready to take input in the form of the Influx Query Language (InfluxQL) statements. To exit the InfluxQL shell, type `exit` and hit return. A fresh install of InfluxDB has no databases (apart from the system `_internal`), @@ -75,7 +70,6 @@ Throughout this guide, we'll use the database name `mydb`: > **Note:** After hitting enter, a new prompt appears and nothing else is displayed. In the CLI, this means the statement was executed and there were no errors to display. There will always be an error displayed if something went wrong. -No news is good news! Now that the `mydb` database is created, we'll use the `SHOW DATABASES` statement to display all existing databases: @@ -204,6 +198,30 @@ including support for Go-style regex. For example: > SELECT * FROM "cpu_load_short" WHERE "value" > 0.9 ``` +## Using the HTTP API + +You can also interact with InfluxDB using HTTP requests with tools like `curl`: + +### Create a database +```bash +curl -G http://localhost:8086/query --data-urlencode "q=CREATE DATABASE mydb" +``` + +### Write data +```bash +curl -i -XPOST 'http://localhost:8086/write?db=mydb' \ + --data-binary 'cpu,host=serverA,region=us_west value=0.64' +``` + +### Query data +```bash +curl -G 'http://localhost:8086/query?pretty=true' \ + --data-urlencode "db=mydb" \ + --data-urlencode "q=SELECT * FROM cpu" +``` + +## Next steps + This is all you need to know to write data into InfluxDB and query it back. To learn more about the InfluxDB write protocol, check out the guide on [Writing Data](/influxdb/v1/guides/writing_data/). 
diff --git a/content/influxdb/v1/introduction/install.md b/content/influxdb/v1/introduction/install.md index 1458e5758..dce3660f4 100644 --- a/content/influxdb/v1/introduction/install.md +++ b/content/influxdb/v1/introduction/install.md @@ -24,6 +24,7 @@ By default, InfluxDB uses the following network ports: - TCP port `8086` is available for client-server communication using the InfluxDB API. - TCP port `8088` is available for the RPC service to perform back up and restore operations. +- TCP port `2003` is available for the Graphite protocol (when enabled). In addition to the ports above, InfluxDB also offers multiple plugins that may require [custom ports](/influxdb/v1/administration/ports/). @@ -51,10 +52,11 @@ you may want to check out our [SLES & openSUSE](#) [FreeBSD/PC-BSD](#) [macOS](#) +[Docker](#) {{% /tabs %}} {{% tab-content %}} For instructions on how to install the Debian package from a file, -please see the +see the [downloads page](https://influxdata.com/downloads/). Debian and Ubuntu users can install the latest stable version of InfluxDB using the @@ -194,6 +196,28 @@ InfluxDB v{{< latest-patch version="1.8" >}} (git: unknown unknown) {{% /note %}} +{{% /tab-content %}} + +{{% tab-content %}} + +Use Docker to run InfluxDB v1 in a container. + +For comprehensive Docker installation instructions, configuration options, and initialization features, see: + +**[Install and run with Docker ›](/influxdb/v1/introduction/install/docker/)** + +Quick start: + +```bash +# Pull the latest InfluxDB v1.x image +docker pull influxdb:{{< latest-patch version="1" >}} + +# Start InfluxDB with persistent storage +docker run -p 8086:8086 \ + -v $PWD/data:/var/lib/influxdb \ + influxdb:{{< latest-patch version="1" >}} +``` + {{% /tab-content %}} {{< /tabs-wrapper >}} @@ -274,6 +298,12 @@ For example: InfluxDB first checks for the `-config` option and then for the environment variable. 
+### Configuring InfluxDB with Docker + +For detailed Docker configuration instructions including environment variables, configuration files, initialization options, and examples, see: + +**[Install and run with Docker ›](/influxdb/v1/introduction/install/docker/)** + See the [Configuration](/influxdb/v1/administration/config/) documentation for more information. ### Data and WAL directory permissions diff --git a/content/influxdb/v1/introduction/install/docker.md b/content/influxdb/v1/introduction/install/docker.md new file mode 100644 index 000000000..80fe2eebe --- /dev/null +++ b/content/influxdb/v1/introduction/install/docker.md @@ -0,0 +1,157 @@ +--- +title: Install and run InfluxDB using Docker +description: > + Install and run InfluxDB OSS v1.x using Docker. Configure and operate InfluxDB in a Docker container. +menu: + influxdb_v1: + name: Use Docker + weight: 60 + parent: Install InfluxDB +related: + - /influxdb/v1/introduction/install/, Install InfluxDB OSS v1 + - /influxdb/v1/introduction/get-started/, Get started with InfluxDB OSS v1 + - /influxdb/v1/administration/authentication_and_authorization/, Authentication and authorization in InfluxDB OSS v1 + - /influxdb/v1/guides/write_data/, Write data to InfluxDB OSS v1 + - /influxdb/v1/guides/query_data/, Query data in InfluxDB OSS v1 + - /influxdb/v1/administration/config/, Configure InfluxDB OSS v1 +alt_links: + core: /influxdb3/core/install/ + v2: /influxdb/v2/install/use-docker-compose/ +--- + +Install and run InfluxDB OSS v1.x using Docker containers. +This guide covers Docker installation, configuration, and initialization options. 
+ +## Install and run InfluxDB + +### Pull the InfluxDB v1.x image + +```bash +docker pull influxdb:{{< latest-patch version="1" >}} +``` + +### Start InfluxDB + +Start a basic InfluxDB container with persistent storage: + +```bash +docker run -p 8086:8086 \ + -v $PWD/data:/var/lib/influxdb \ + influxdb:{{< latest-patch version="1" >}} +``` + +InfluxDB is now running and available at http://localhost:8086. + +## Configure InfluxDB + +### Using environment variables + +Configure InfluxDB settings using environment variables: + +```bash +docker run -p 8086:8086 \ + -v $PWD/data:/var/lib/influxdb \ + -e INFLUXDB_REPORTING_DISABLED=true \ + -e INFLUXDB_HTTP_AUTH_ENABLED=true \ + -e INFLUXDB_HTTP_LOG_ENABLED=true \ + influxdb:{{< latest-patch version="1" >}} +``` + +### Using a configuration file + +Generate a default configuration file: + +```bash +docker run --rm influxdb:{{< latest-patch version="1" >}} influxd config > influxdb.conf +``` + +Start InfluxDB with your custom configuration: + +```bash +docker run -p 8086:8086 \ + -v $PWD/influxdb.conf:/etc/influxdb/influxdb.conf:ro \ + -v $PWD/data:/var/lib/influxdb \ + influxdb:{{< latest-patch version="1" >}} +``` + +## Initialize InfluxDB + +### Automatic initialization (for development) + +> [!Warning] +> Automatic initialization with InfluxDB v1 is not recommended for production. +> Use this approach only for development and testing. 
+ +Automatically create a database and admin user on first startup: + +```bash +docker run -p 8086:8086 \ + -v $PWD/data:/var/lib/influxdb \ + -e INFLUXDB_DB=mydb \ + -e INFLUXDB_HTTP_AUTH_ENABLED=true \ + -e INFLUXDB_ADMIN_USER=admin \ + -e INFLUXDB_ADMIN_PASSWORD=supersecretpassword \ + influxdb:{{< latest-patch version="1" >}} +``` + +Environment variables for user creation: +- `INFLUXDB_USER`: Create a user with no privileges +- `INFLUXDB_USER_PASSWORD`: Password for the user +- `INFLUXDB_READ_USER`: Create a user who can read from `INFLUXDB_DB` +- `INFLUXDB_READ_USER_PASSWORD`: Password for the read user +- `INFLUXDB_WRITE_USER`: Create a user who can write to `INFLUXDB_DB` +- `INFLUXDB_WRITE_USER_PASSWORD`: Password for the write user + +### Custom initialization scripts + +InfluxDB v1.x Docker containers support custom initialization scripts for testing scenarios: + +Create an initialization script (`init-scripts/setup.iql`): + +```sql +CREATE DATABASE sensors; +CREATE DATABASE logs; + +CREATE USER "telegraf" WITH PASSWORD 'secret123'; +GRANT WRITE ON "sensors" TO "telegraf"; + +CREATE USER "grafana" WITH PASSWORD 'secret456'; +GRANT READ ON "sensors" TO "grafana"; +GRANT READ ON "logs" TO "grafana"; + +CREATE RETENTION POLICY "one_week" ON "sensors" DURATION 1w REPLICATION 1 DEFAULT; +``` + +Run with initialization scripts: + +```bash +docker run -p 8086:8086 \ + -v $PWD/data:/var/lib/influxdb \ + -v $PWD/init-scripts:/docker-entrypoint-initdb.d \ + influxdb:{{< latest-patch version="1" >}} +``` + +Supported script types: +- Shell scripts (`.sh`) +- InfluxDB query language files (`.iql`) + +> [!Important] +> Initialization scripts only run on first startup when the data directory is empty. +> Scripts execute in alphabetical order based on filename. + +## Access the InfluxDB CLI + +To access the InfluxDB command line interface from within the Docker container: + +```bash +docker exec -it influx +``` + +Replace `` with your InfluxDB container name or ID. 
+ +## Next steps + +Once you have InfluxDB running in Docker, see the [Get started guide](/influxdb/v1/introduction/get-started/) to: +- Create databases +- Write and query data +- Learn InfluxQL basics \ No newline at end of file diff --git a/content/influxdb/v2/api-guide/_index.md b/content/influxdb/v2/api-guide/_index.md index e9218d9d8..3a1520b5f 100644 --- a/content/influxdb/v2/api-guide/_index.md +++ b/content/influxdb/v2/api-guide/_index.md @@ -34,8 +34,8 @@ and visit the `/docs` endpoint in a browser ([localhost:8086/docs](http://localh ## InfluxDB v1 compatibility API documentation -The InfluxDB v2 API includes [InfluxDB 1.x compatibility endpoints](/influxdb/v2/reference/api/influxdb-1x/) +The InfluxDB v2 API includes [InfluxDB v1 compatibility endpoints and authentication](/influxdb/v2/api-guide/influxdb-1x/) that work with InfluxDB 1.x client libraries and third-party integrations like [Grafana](https://grafana.com) and others. -View full v1 compatibility API documentation +View full v1 compatibility API documentation diff --git a/content/influxdb/v2/api-guide/api_intro.md b/content/influxdb/v2/api-guide/api_intro.md index bc12a32b4..aa559566d 100644 --- a/content/influxdb/v2/api-guide/api_intro.md +++ b/content/influxdb/v2/api-guide/api_intro.md @@ -14,4 +14,5 @@ source: /shared/influxdb-v2/api-guide/api_intro.md --- +// SOURCE content/shared/influxdb-v2/api-guide/api_intro.md +--> diff --git a/content/influxdb/v2/api-guide/influxdb-1x/_index.md b/content/influxdb/v2/api-guide/influxdb-1x/_index.md index 021dbb523..50cca1b79 100644 --- a/content/influxdb/v2/api-guide/influxdb-1x/_index.md +++ b/content/influxdb/v2/api-guide/influxdb-1x/_index.md @@ -18,4 +18,5 @@ source: /shared/influxdb-v2/api-guide/influxdb-1x/_index.md --- +// SOURCE content/shared/influxdb-v2/api-guide/influxdb-1x/_index.md +--> diff --git a/content/influxdb/v2/api-guide/influxdb-1x/query.md b/content/influxdb/v2/api-guide/influxdb-1x/query.md index 284ebd6e2..b236d74e0 100644 --- 
a/content/influxdb/v2/api-guide/influxdb-1x/query.md +++ b/content/influxdb/v2/api-guide/influxdb-1x/query.md @@ -14,17 +14,15 @@ list_code_example: | GET http://localhost:8086/query related: + - /influxdb/v2/query-data/execute-queries/influx-api/ - /influxdb/v2/query-data/influxql aliases: - /influxdb/v2/reference/api/influxdb-1x/query/ --- The `/query` 1.x compatibility endpoint queries InfluxDB {{< current-version >}} using **InfluxQL**. -Use the `GET` request method to query data from the `/query` endpoint. +Send an InfluxQL query in an HTTP `GET` or `POST` request to query data from the `/query` endpoint. -
-GET http://localhost:8086/query
-
The `/query` compatibility endpoint uses the **database** and **retention policy** specified in the query request to map the request to an InfluxDB bucket. @@ -32,31 +30,32 @@ For more information, see [Database and retention policy mapping](/influxdb/v2/r {{% show-in "cloud,cloud-serverless" %}} -{{% note %}} -If you have an existing bucket that doesn't follow the **database/retention-policy** naming convention, -you **must** [manually create a database and retention policy mapping](/influxdb/v2/query-data/influxql/dbrp/#create-dbrp-mappings) -to query that bucket with the `/query` compatibility API. -{{% /note %}} +> [!Note] +> If you have an existing bucket that doesn't follow the **database/retention-policy** naming convention, +> you **must** [manually create a database and retention policy mapping](/influxdb/v2/query-data/influxql/dbrp/#create-dbrp-mappings) +> to query that bucket with the `/query` compatibility API. {{% /show-in %}} ## Authentication Use one of the following authentication methods: -* **token authentication** -* **basic authentication with username and password** -* **query string authentication with username and password** -_For more information, see [Authentication](/influxdb/v2/reference/api/influxdb-1x/#authentication)._ +- the 2.x `Authorization: Token` scheme in the header +- the v1-compatible `u` and `p` query string parameters +- the v1-compatible `Basic` authentication scheme in the header + +For more information, see [Authentication for the 1.x compatibility API](/influxdb/v2/api-guide/influxdb-1x/). ## Query string parameters ### u (Optional) The 1.x **username** to authenticate the request. +If you provide an API token as the password, `u` is required, but can be any value. _See [query string authentication](/influxdb/v2/reference/api/influxdb-1x/#query-string-authentication)._ ### p -(Optional) The 1.x **password** to authenticate the request. +(Optional) The 1.x **password** or the 2.x API token to authenticate the request. 
_See [query string authentication](/influxdb/v2/reference/api/influxdb-1x/#query-string-authentication)._ ### db @@ -94,61 +93,65 @@ The following precisions are available: - [Return query results with millisecond Unix timestamps](#return-query-results-with-millisecond-unix-timestamps) - [Execute InfluxQL queries from a file](#execute-influxql-queries-from-a-file) -{{% code-placeholders "API_TOKEN" %}} +{{% code-placeholders "INFLUX_USERNAME|INFLUX_PASSWORD_OR_TOKEN|API_TOKEN" %}} ##### Query using basic authentication +The following example: + +- sends a `GET` request to the `/query` endpoint +- uses the `Authorization` header with the `Basic` scheme (compatible with InfluxDB 1.x) to provide username and password credentials +- uses the default retention policy for the database + {{% show-in "v2" %}} - -{{< code-tabs-wrapper >}} -{{% code-tabs %}} -[curl](#curl) -[Node.js](#nodejs) -{{% /code-tabs %}} -{{% code-tab-content %}} - ```sh -{{% get-shared-text "api/v1-compat/auth/oss/basic-auth.sh" %}} -``` -{{% /code-tab-content %}} -{{% code-tab-content %}} -```js -{{% get-shared-text "api/v1-compat/auth/oss/basic-auth.js" %}} -``` -{{% /code-tab-content %}} -{{< /code-tabs-wrapper >}} +############################################################################## +# Use Basic authentication with an +# InfluxDB v1-compatible username and password +# to query the InfluxDB 1.x compatibility API. +# +# INFLUX_USERNAME: your v1-compatible username. +# INFLUX_PASSWORD_OR_TOKEN: your API token or v1-compatible password. 
+############################################################################## +curl --get "http://{{< influxdb/host >}}/query" \ + --user "INFLUX_USERNAME":"INFLUX_PASSWORD_OR_TOKEN" \ + --data-urlencode "db=BUCKET_NAME" \ + --data-urlencode "q=SELECT * FROM cpu_usage" +``` {{% /show-in %}} {{% show-in "cloud,cloud-serverless" %}} -{{< code-tabs-wrapper >}} -{{% code-tabs %}} -[curl](#curl) -[Node.js](#nodejs) -{{% /code-tabs %}} -{{% code-tab-content %}} - ```sh {{% get-shared-text "api/v1-compat/auth/cloud/basic-auth.sh" %}} ``` -{{% /code-tab-content %}} -{{% code-tab-content %}} -```js -{{% get-shared-text "api/v1-compat/auth/cloud/basic-auth.js" %}} -``` -{{% /code-tab-content %}} -{{< /code-tabs-wrapper >}} - {{% /show-in %}} +##### Query using an HTTP POST request + +```bash +curl \ + --request POST \ + "http://{{< influxdb/host >}}/query?db=DATABASE_NAME&rp=RETENTION_POLICY" \ + --user "INFLUX_USERNAME":"INFLUX_PASSWORD_OR_TOKEN" \ + --header "Content-type: application/vnd.influxql" \ + --data "SELECT * FROM cpu_usage WHERE time > now() - 1h" +``` + ##### Query a non-default retention policy +The following example: + +- sends a `GET` request to the `/query` endpoint +- uses the `Authorization` header with the `Token` scheme (compatible with InfluxDB 2.x) to provide the API token +- queries a custom retention policy mapped for the database + ```sh -curl --get http://localhost:8086/query \ +curl --get http://{{< influxdb/host >}}/query \ --header "Authorization: Token API_TOKEN" \ - --data-urlencode "db=mydb" \ - --data-urlencode "rp=customrp" \ + --data-urlencode "db=DATABASE_NAME" \ + --data-urlencode "rp=RETENTION_POLICY_NAME" \ --data-urlencode "q=SELECT used_percent FROM mem WHERE host=host1" ``` ##### Execute multiple queries ```sh -curl --get http://localhost:8086/query \ +curl --get http://{{< influxdb/host >}}/query \ --header "Authorization: Token API_TOKEN" \ - --data-urlencode "db=mydb" \ + --data-urlencode "db=DATABASE_NAME" \ --data-urlencode 
"q=SELECT * FROM mem WHERE host=host1;SELECT mean(used_percent) FROM mem WHERE host=host1 GROUP BY time(10m)" ``` ##### Return query results with millisecond Unix timestamps ```sh -curl --get http://localhost:8086/query \ +curl --get http://{{< influxdb/host >}}/query \ --header "Authorization: Token API_TOKEN" \ - --data-urlencode "db=mydb" \ - --data-urlencode "rp=myrp" \ + --data-urlencode "db=DATABASE_NAME" \ + --data-urlencode "rp=RETENTION_POLICY_NAME" \ --data-urlencode "q=SELECT used_percent FROM mem WHERE host=host1" \ --data-urlencode "epoch=ms" ``` ##### Execute InfluxQL queries from a file ```sh -curl --get http://localhost:8086/query \ +curl --get http://{{< influxdb/host >}}/query \ --header "Authorization: Token API_TOKEN" \ - --data-urlencode "db=mydb" \ - --data-urlencode "q@path/to/influxql.txt" \ - --data-urlencode "async=true" + --data-urlencode "db=DATABASE_NAME" \ + --data-urlencode "q@path/to/influxql.txt" ``` +##### Return a gzip-compressed response +```sh +curl --get http://{{< influxdb/host >}}/query \ + --header 'Accept-Encoding: gzip' \ + --header "Authorization: Token API_TOKEN" \ + --data-urlencode "db=DATABASE_NAME" \ + --data-urlencode "q=SELECT used_percent FROM mem WHERE host=host1" +``` {{% /code-placeholders %}} Replace the following: - {{% code-placeholder-key %}}`API_TOKEN`{{% /code-placeholder-key %}}: your InfluxDB [API token](/influxdb/v2/admin/tokens/) +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query. +In InfluxDB 2.x, databases and retention policies map to [buckets](/influxdb/v2/admin/buckets/). +- {{% code-placeholder-key %}}`RETENTION_POLICY_NAME`{{% /code-placeholder-key %}}: the name of the retention policy to query. +In InfluxDB 2.x, databases and retention policies map to [buckets](/influxdb/v2/admin/buckets/). 
+ +_For more information about the database and retention policy mapping, see [Database and retention policy mapping](/influxdb/v2/reference/api/influxdb-1x/dbrp)._ \ No newline at end of file diff --git a/content/influxdb/v2/api-guide/tutorials/_index.md b/content/influxdb/v2/api-guide/tutorials/_index.md index b9fb91f28..a30a77c85 100644 --- a/content/influxdb/v2/api-guide/tutorials/_index.md +++ b/content/influxdb/v2/api-guide/tutorials/_index.md @@ -12,4 +12,5 @@ source: /shared/influxdb-v2/api-guide/tutorials/_index.md --- +// SOURCE content/shared/influxdb-v2/api-guide/tutorials/_index.md +--> diff --git a/content/influxdb/v2/install/upgrade/v1-to-v2/automatic-upgrade.md b/content/influxdb/v2/install/upgrade/v1-to-v2/automatic-upgrade.md index 6dc3fea9b..e505d6e96 100644 --- a/content/influxdb/v2/install/upgrade/v1-to-v2/automatic-upgrade.md +++ b/content/influxdb/v2/install/upgrade/v1-to-v2/automatic-upgrade.md @@ -328,6 +328,6 @@ which requires authentication. **For these external clients to work with InfluxDB {{< current-version >}}:** -1. [Manually create a 1.x-compatible authorization](/influxdb/v2/upgrade/v1-to-v2/manual-upgrade/#create-a-1x-compatible-authorization). +1. [Manually create a v1-compatible authorization](/influxdb/v2/upgrade/v1-to-v2/manual-upgrade/#create-a-1x-compatible-authorization). 2. Update the client configuration to use the username and password associated - with your 1.x-compatible authorization. + with your v1-compatible authorization. 
diff --git a/content/influxdb/v2/install/upgrade/v1-to-v2/manual-upgrade.md b/content/influxdb/v2/install/upgrade/v1-to-v2/manual-upgrade.md index dbab1a0d1..ff973238f 100644 --- a/content/influxdb/v2/install/upgrade/v1-to-v2/manual-upgrade.md +++ b/content/influxdb/v2/install/upgrade/v1-to-v2/manual-upgrade.md @@ -3,7 +3,7 @@ title: Manually upgrade from InfluxDB 1.x to 2.7 list_title: Manually upgrade from 1.x to 2.7 description: > To manually upgrade from InfluxDB 1.x to InfluxDB 2.7, migrate data, create - 1.x-compatible authorizations, and create database and retention policy + v1-compatible authorizations, and create database and retention policy (DBRP) mappings. menu: influxdb_v2: diff --git a/content/influxdb/v2/install/use-docker-compose.md b/content/influxdb/v2/install/use-docker-compose.md index 0bc1b9de3..79b204218 100644 --- a/content/influxdb/v2/install/use-docker-compose.md +++ b/content/influxdb/v2/install/use-docker-compose.md @@ -13,6 +13,8 @@ related: - /influxdb/v2/reference/cli/influx/config/ - /influxdb/v2/reference/cli/influx/ - /influxdb/v2/admin/tokens/ +alt_links: + v1: /influxdb/v1/introduction/install/docker/ --- Use Docker Compose to install and set up InfluxDB v2, the time series platform diff --git a/content/influxdb/v2/query-data/execute-queries/influx-api.md b/content/influxdb/v2/query-data/execute-queries/influx-api.md index d088dc2b6..cff681793 100644 --- a/content/influxdb/v2/query-data/execute-queries/influx-api.md +++ b/content/influxdb/v2/query-data/execute-queries/influx-api.md @@ -11,4 +11,5 @@ source: /shared/influxdb-v2/query-data/execute-queries/influx-api.md --- +// SOURCE content/shared/influxdb-v2/query-data/execute-queries/influx-api.md +--> diff --git a/content/influxdb/v2/query-data/influxql/_index.md b/content/influxdb/v2/query-data/influxql/_index.md index a4ca6434b..4e7e6b229 100644 --- a/content/influxdb/v2/query-data/influxql/_index.md +++ b/content/influxdb/v2/query-data/influxql/_index.md @@ -1,8 +1,8 @@ --- 
title: Query data with InfluxQL description: > - Use the [InfluxDB 1.x `/query` compatibility endpoint](/influxdb/v2/reference/api/influxdb-1x/query) - to query data in InfluxDB Cloud and InfluxDB OSS 2.4 with **InfluxQL**. + Use the InfluxDB v1 `/query` compatibility endpoint + to query data in InfluxDB v2 using InfluxQL. weight: 102 influxdb/v2/tags: [influxql, query] menu: diff --git a/content/influxdb/v2/write-data/developer-tools/api.md b/content/influxdb/v2/write-data/developer-tools/api.md index 85b09d4d1..519657369 100644 --- a/content/influxdb/v2/write-data/developer-tools/api.md +++ b/content/influxdb/v2/write-data/developer-tools/api.md @@ -26,7 +26,7 @@ The URL in the examples depends on the version and location of your InfluxDB {{< {{< code-tabs-wrapper >}} {{% code-tabs %}} -[Curl](#curl) +[cURL](#curl) [Node.js](#nodejs) {{% /code-tabs %}} {{% code-tab-content %}} diff --git a/content/influxdb/v2/write-data/troubleshoot.md b/content/influxdb/v2/write-data/troubleshoot.md index 81cbc2e58..3d4fba878 100644 --- a/content/influxdb/v2/write-data/troubleshoot.md +++ b/content/influxdb/v2/write-data/troubleshoot.md @@ -10,7 +10,8 @@ menu: parent: Write data influxdb/v2/tags: [write, line protocol, errors] related: - - /influxdb/v2/api/#tag/Write, InfluxDB API /write endpoint + - /influxdb/v2/api/v2/#operation/PostLegacyWrite, InfluxDB API /write endpoint + - /influxdb/v2/api/v2/#operation/PostWrite, InfluxDB API /api/v2/write endpoint - /influxdb/v2/reference/internals - /influxdb/v2/reference/cli/influx/write source: /shared/influxdb-v2/write-data/troubleshoot.md diff --git a/content/influxdb3/cloud-dedicated/admin/databases/delete.md b/content/influxdb3/cloud-dedicated/admin/databases/delete.md index fa59968db..4ae04e121 100644 --- a/content/influxdb3/cloud-dedicated/admin/databases/delete.md +++ b/content/influxdb3/cloud-dedicated/admin/databases/delete.md @@ -33,17 +33,19 @@ or the [Management HTTP API](/influxdb3/cloud-dedicated/api/management/) to 
delete a database from your {{< product-name omit=" Clustered" >}} cluster. > [!Warning] -> -> #### Deleting a database cannot be undone -> -> Once a database is deleted, data stored in that database cannot be recovered. -> > #### Wait before writing to a new database with the same name > > After deleting a database from your {{% product-name omit=" Clustered" %}} > cluster, you can reuse the name to create a new database, but **wait two to > three minutes** after deleting the previous database before writing to the new > database to allow write caches to clear. +> +> #### Tokens still grant access to databases with the same name +> +> [Database tokens](/influxdb3/cloud-dedicated/admin/tokens/database/) are associated to +> databases by name. If you create a new database with the same name, tokens +> that granted access to the deleted database will also grant access to the new +> database. {{< tabs-wrapper >}} {{% tabs %}} diff --git a/content/influxdb3/cloud-dedicated/admin/databases/rename.md b/content/influxdb3/cloud-dedicated/admin/databases/rename.md new file mode 100644 index 000000000..788ab030d --- /dev/null +++ b/content/influxdb3/cloud-dedicated/admin/databases/rename.md @@ -0,0 +1,58 @@ +--- +title: Rename a database +description: > + Use the [`influxctl database rename` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/rename/) + to rename a database in your {{< product-name omit=" Cluster" >}} cluster. +menu: + influxdb3_cloud_dedicated: + parent: Manage databases +weight: 202 +list_code_example: | + ##### CLI + ```sh + influxctl database rename + ``` +related: + - /influxdb3/cloud-dedicated/reference/cli/influxctl/database/rename/ + - /influxdb3/cloud-dedicated/admin/tokens/database/create/ +--- + +Use the [`influxctl database rename` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/rename/) +to rename a database in your {{< product-name omit=" Cluster" >}} cluster. 
+ +> [!Note] +> Renaming a database does not change the database ID, modify data in the database, +> or update [database tokens](/influxdb3/cloud-dedicated/admin/tokens/database/). +> After renaming a database, any existing database tokens will stop working and you +> must create new tokens with permissions for the renamed database. + +## Rename a database using the influxctl CLI + +{{% code-placeholders "DATABASE_NAME|NEW_DATABASE_NAME" %}} +```sh +influxctl database rename DATABASE_NAME NEW_DATABASE_NAME +``` +{{% /code-placeholders %}} + +Replace the following: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Current name of the database to rename +- {{% code-placeholder-key %}}`NEW_DATABASE_NAME`{{% /code-placeholder-key %}}: New name for the database + +## Update database tokens after renaming + +After renaming a database, existing database tokens will no longer work because +they reference the old database name. Do the following: + +1. [Create new database tokens](/influxdb3/cloud-dedicated/admin/tokens/database/create/) + with permissions for the renamed database. +2. Update your applications and clients to use the new tokens. +3. [Delete the old database tokens](/influxdb3/cloud-dedicated/admin/tokens/database/delete/) + that reference the old database name. + +{{% note %}} +#### Renamed database retains its ID + +The database ID remains the same after renaming. When you list databases, +you'll see the new name associated with the original database ID. 
+{{% /note %}} diff --git a/content/influxdb3/cloud-dedicated/admin/databases/undelete.md b/content/influxdb3/cloud-dedicated/admin/databases/undelete.md new file mode 100644 index 000000000..093cb3f8e --- /dev/null +++ b/content/influxdb3/cloud-dedicated/admin/databases/undelete.md @@ -0,0 +1,70 @@ +--- +title: Undelete a database +description: > + Use the [`influxctl database undelete` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/undelete/) + to restore a previously deleted database in your {{< product-name omit=" Cluster" >}} cluster. +menu: + influxdb3_cloud_dedicated: + parent: Manage databases +weight: 204 +list_code_example: | + ```sh + influxctl database undelete + ``` +related: + - /influxdb3/cloud-dedicated/reference/cli/influxctl/database/undelete/ + - /influxdb3/cloud-dedicated/admin/databases/delete/ + - /influxdb3/cloud-dedicated/admin/tokens/database/create/ +--- + +Use the [`influxctl database undelete` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/undelete/) +to restore a previously deleted database in your {{< product-name omit=" Cluster" >}} cluster. + +> [!Important] +> To undelete a database: +> +> - The database name must match the name of the deleted database. +> - A new database with the same name cannot already exist. +> - You must have appropriate permissions to manage databases. + +When you undelete a database, it is restored with the same retention period, +table limits, and column limits as when it was deleted. + +> [!Warning] +> Databases can only be undeleted for +> {{% show-in "cloud-dedicated" %}}approximately 14 days{{% /show-in %}}{{% show-in "clustered" %}}a configurable "hard-delete" grace period{{% /show-in %}} +> after they are deleted. +> After this grace period, all Parquet files associated with the deleted database +> are permanently removed and the database cannot be undeleted. 
+ +## Undelete a database using the influxctl CLI + +{{% code-placeholders "DATABASE_NAME" %}} +```sh +influxctl database undelete DATABASE_NAME +``` +{{% /code-placeholders %}} + +Replace the following: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: + Name of the deleted database to restore + +## Recreate tokens for the database + +After successfully undeleting a database: + +1. **Verify the database was restored** by [listing all databases](/influxdb3/cloud-dedicated/admin/databases/list/). +2. **If you previously deleted tokens associated with the deleted database, create new database tokens** + - Any tokens that existed before deletion are not restored. + [Create new database tokens](/influxdb3/cloud-dedicated/admin/tokens/database/create/) + with appropriate permissions for the restored database. +3. **Update your applications** to use the new database tokens. + +{{% note %}} +#### Undeleted databases retain their original configuration + +When a database is undeleted, it retains the same database ID, retention period, +and table/column limits it had before deletion. However, database tokens are not +restored and must be recreated. +{{% /note %}} diff --git a/content/influxdb3/cloud-dedicated/admin/tables/delete.md b/content/influxdb3/cloud-dedicated/admin/tables/delete.md new file mode 100644 index 000000000..48b2d45c1 --- /dev/null +++ b/content/influxdb3/cloud-dedicated/admin/tables/delete.md @@ -0,0 +1,53 @@ +--- +title: Delete a table +description: > + Use the Admin UI or the [`influxctl table delete` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/table/delete/) + to delete a table from a database in your {{< product-name omit=" Cluster" >}} cluster. 
+menu: + influxdb3_cloud_dedicated: + parent: Manage tables +weight: 203 +list_code_example: | + ```sh + influxctl table delete + ``` +related: + - /influxdb3/cloud-dedicated/reference/cli/influxctl/table/delete/ +--- + +Use the Admin UI or the [`influxctl table delete` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/table/delete/) +to delete a table from a database in your {{< product-name omit=" Cluster" >}} cluster. + +> [!Warning] +> Deleting a table is irreversible. Once a table is deleted, all data stored in +> that table is permanently removed and cannot be recovered. + +Provide the following arguments: + +- **Database name**: Name of the database that contains the table to delete +- **Table name**: Name of the table to delete + +{{% code-placeholders "DATABASE_NAME|TABLE_NAME" %}} +```sh +influxctl table delete DATABASE_NAME TABLE_NAME +``` +{{% /code-placeholders %}} + +Replace the following: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Name of the database that contains the table to delete +- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: Name of the table to delete + +When prompted, enter `y` to confirm the deletion. + +{{% note %}} +#### Wait before reusing a deleted table name + +After deleting a table, wait a few minutes before attempting to create a new +table with the same name to ensure the deletion process has fully completed. + +{{% product-name %}} creates tables implicitly using table names specified in +line protocol written to the databases. To prevent the deleted table from being +immediately recreated by incoming write requests, pause all write requests to +the table before deleting it. 
+{{% /note %}} diff --git a/content/influxdb3/cloud-dedicated/admin/tables/list.md b/content/influxdb3/cloud-dedicated/admin/tables/list.md index 5533845e0..e900c5b24 100644 --- a/content/influxdb3/cloud-dedicated/admin/tables/list.md +++ b/content/influxdb3/cloud-dedicated/admin/tables/list.md @@ -1,7 +1,8 @@ --- title: List tables description: > - Use the Admin UI, the [`SHOW TABLES` SQL statement](/influxdb3/cloud-dedicated/query-data/sql/explore-schema/#list-measurements-in-a-database), + Use the Admin UI, the [`influxctl table list` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/table/list/), + the [`SHOW TABLES` SQL statement](/influxdb3/cloud-dedicated/query-data/sql/explore-schema/#list-measurements-in-a-database), or the [`SHOW MEASUREMENTS` InfluxQL statement](/influxdb3/cloud-dedicated/query-data/influxql/explore-schema/#list-measurements-in-a-database) to list tables in a database. menu: @@ -9,23 +10,30 @@ menu: parent: Manage tables weight: 201 list_code_example: | - ###### SQL + ##### CLI + ```sh + influxctl table list + ``` + + ##### SQL ```sql SHOW TABLES ``` - ###### InfluxQL + ##### InfluxQL ```sql SHOW MEASUREMENTS ``` related: + - /influxdb3/cloud-dedicated/reference/cli/influxctl/table/list/ - /influxdb3/cloud-dedicated/query-data/sql/explore-schema/ - /influxdb3/cloud-dedicated/query-data/influxql/explore-schema/ --- -Use the Admin UI, the [`SHOW TABLES` SQL statement](/influxdb3/cloud-dedicated/query-data/sql/explore-schema/#list-measurements-in-a-database), +Use the Admin UI, the [`influxctl table list` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/table/list/), +the [`SHOW TABLES` SQL statement](/influxdb3/cloud-dedicated/query-data/sql/explore-schema/#list-measurements-in-a-database), or the [`SHOW MEASUREMENTS` InfluxQL statement](/influxdb3/cloud-dedicated/query-data/influxql/explore-schema/#list-measurements-in-a-database) to list tables in a database. @@ -36,9 +44,11 @@ to list tables in a database. 
{{% tabs %}} [Admin UI](#admin-ui) [influxctl](#influxctl) +[SQL & InfluxQL](#sql--influxql) {{% /tabs %}} {{% tab-content %}} + The InfluxDB Cloud Dedicated administrative UI includes a portal for managing tables. You can view the list of tables associated with a database and their details, including: @@ -47,48 +57,94 @@ their details, including: - Table ID - Table size (in bytes) -1. To access the {{< product-name >}} Admin UI, visit the following URL in your browser: +1. To access the {{< product-name >}} Admin UI, visit the following URL in your browser: -
-   https://console.influxdata.com
-   
-2. Use the credentials provided by InfluxData to log into the Admin UI. - If you don't have login credentials, [contact InfluxData support](https://support.influxdata.com). +
+    https://console.influxdata.com
+    
- After you log in, the Account Management portal displays [account information](/influxdb3/cloud-dedicated/admin/account/) - and lists all clusters associated with your account. -3. In the cluster list, find the cluster that contains the database and table. You can **Search** for clusters by name or ID to filter the list and use the sort button and column headers to sort the list. -4. Click the cluster row to view the list of databases associated with the cluster. -5. In the database list, find the database that contains the table. You can **Search** for databases by name or ID to filter the list and use the sort button and column headers to sort the list. -6. Click the database row to view the list of tables associated with the database. -7. The table list displays the following table details: - - Name - - Table ID - - Table size (in bytes) -8. You can **Search** for tables by name or ID to filter the list and use the sort button and column headers to sort the list. +2. Use the credentials provided by InfluxData to log into the Admin UI. + If you don't have login credentials, [contact InfluxData support](https://support.influxdata.com). -You can **Search** for databases by name or ID to filter the list and use the sort button and column headers to sort the list. + After you log in, the Account Management portal displays [account information](/influxdb3/cloud-dedicated/admin/account/) + and lists all clusters associated with your account. +3. In the cluster list, find the cluster that contains the database and table. + You can **Search** for clusters by name or ID to filter the list and use the sort button and column headers to sort the list. +4. Click the cluster row to view the list of databases associated with the cluster. +5. In the database list, find the database that contains the table. + You can **Search** for databases by name or ID to filter the list and use + the sort button and column headers to sort the list. +6. 
Click the database row to view the list of tables associated with the database. +7. The table list displays the following table details: + + - Name + - Table ID + - Table size (in bytes) + +8. You can **Search** for tables by name or ID to filter the list and use the + sort button and column headers to sort the list. + +You can **Search** for databases by name or ID to filter the list and use the +sort button and column headers to sort the list. + + {{% /tab-content %}} {{% tab-content %}} - -###### SQL + + +Use the [`influxctl table list` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/table/list/) +to list all tables in a database in your {{< product-name omit=" Cluster" >}} cluster. + +{{% code-placeholders "DATABASE_NAME" %}} + +```bash +influxctl table list DATABASE_NAME +``` +{{% /code-placeholders %}} + +Replace the following: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: + Name of the database containing the tables to list + +### Output formats + +The `influxctl table list` command supports the following output formats: + +- `table` (default): Human-readable table format +- `json`: JSON format for programmatic use + +Use the `--format` flag to specify the output format: + +{{% code-placeholders "DATABASE_NAME" %}} +```sh +influxctl table list --format json DATABASE_NAME +``` +{{% /code-placeholders %}} + + +{{% /tab-content %}} +{{% tab-content %}} + + +## List tables with the influxctl query command + +To list tables using SQL or InfluxQL, use the `influxctl query` command to pass +the appropriate statement. + +### SQL ```sql SHOW TABLES ``` -###### InfluxQL +### InfluxQL ```sql SHOW MEASUREMENTS ``` -## List tables with the influxctl CLI - -To list tables using the `influxctl` CLI, use the `influxctl query` command to pass -the `SHOW TABLES` SQL statement. 
- Provide the following with your command: - **Database token**: [Database token](/influxdb3/cloud-dedicated/admin/tokens/#database-tokens) @@ -98,17 +154,29 @@ Provide the following with your command: - **Database name**: Name of the database to query. Uses the `database` setting from the [`influxctl` connection profile](/influxdb3/cloud-dedicated/reference/cli/influxctl/#configure-connection-profiles) or the `--database` command flag. -- **SQL query**: SQL query with the `SHOW TABLES` statement. +- **SQL query**: SQL query with the `SHOW TABLES` statement or InfluxQL query with the `SHOW MEASUREMENTS` statement. {{% code-placeholders "DATABASE_(TOKEN|NAME)" %}} -```sh +##### SQL + +```bash influxctl query \ --token DATABASE_TOKEN \ --database DATABASE_NAME \ "SHOW TABLES" ``` +##### InfluxQL + +```bash +influxctl query \ + --token DATABASE_TOKEN \ + --database DATABASE_NAME \ + --language influxql \ + "SHOW MEASUREMENTS" +``` + {{% /code-placeholders %}} Replace the following: @@ -118,5 +186,6 @@ Replace the following: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Name of the database to query + {{% /tab-content %}} -{{< /tabs-wrapper >}} \ No newline at end of file +{{< /tabs-wrapper >}} diff --git a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/_index.md b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/_index.md index ee894dca1..af137db40 100644 --- a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/_index.md +++ b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/_index.md @@ -2,34 +2,12 @@ title: influxctl database description: > The `influxctl database` command and its subcommands manage databases in an - InfluxDB Cloud Dedicated cluster. + {{% product-name omit=" Clustered" %}} cluster. 
menu: influxdb3_cloud_dedicated: parent: influxctl weight: 201 +source: /shared/influxctl/database/_index.md --- -The `influxctl database` command and its subcommands manage databases in an -InfluxDB Cloud Dedicated cluster. - -## Usage - -```sh -influxctl database [subcommand] [flags] -``` - -## Subcommands - -| Subcommand | Description | -| :--------------------------------------------------------------------------- | :------------------ | -| [create](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/create/) | Create a database | -| [delete](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/delete/) | Delete a database | -| [list](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/list/) | List databases | -| [update](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/update/) | Update a database | -| help, h | Output command help | - -## Flags - -| Flag | | Description | -| :--- | :------- | :------------------ | -| `-h` | `--help` | Output command help | + diff --git a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/create.md b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/create.md index 69e573cc0..d383c5c1f 100644 --- a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/create.md +++ b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/create.md @@ -1,8 +1,8 @@ --- title: influxctl database create description: > - The `influxctl database create` command creates a new database in an InfluxDB - Cloud Dedicated cluster. + The `influxctl database create` command creates a new database in an + {{% product-name omit=" Clustered" %}} cluster. 
menu: influxdb3_cloud_dedicated: parent: influxctl database @@ -10,173 +10,7 @@ weight: 301 related: - /influxdb3/cloud-dedicated/admin/custom-partitions/define-custom-partitions/ - /influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/ +source: /shared/influxctl/database/create.md --- -The `influxctl database create` command creates a new database with a specified -retention period in an {{< product-name omit=" Clustered" >}} cluster. - -The retention period defines the maximum age of data retained in the database, -based on the timestamp of the data. -The retention period value is a time duration value made up of a numeric value -plus a duration unit. For example, `30d` means 30 days. -A zero duration retention period is infinite and data will not expire. -The retention period value cannot be negative or contain whitespace. - -{{< flex >}} -{{% flex-content "half" %}} - -##### Valid durations units include - -- **m**: minute -- **h**: hour -- **d**: day -- **w**: week -- **mo**: month -- **y**: year - -{{% /flex-content %}} -{{% flex-content "half" %}} - -##### Example retention period values - -- `0d`: infinite/none -- `3d`: 3 days -- `6w`: 6 weeks -- `1mo`: 1 month (30 days) -- `1y`: 1 year -- `30d30d`: 60 days -- `2.5d`: 60 hours - -{{% /flex-content %}} -{{< /flex >}} - -#### Custom partitioning - -You can override the default partition template (`%Y-%m-%d`) of the database -with the `--template-tag`, `--template-tag-bucket`, and `--template-timeformat` -flags when you create the database. -Provide a time format using [Rust strftime](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#time-part-templates), partition by specific tag, or partition tag values -into a specified number of "buckets." -Each of these can be used as part of the partition template. -Be sure to follow [partitioning best practices](/influxdb3/cloud-dedicated/admin/custom-partitions/best-practices/). 
- -> [!Note] -> #### Always provide a time format when using custom partitioning -> -> If defining a custom partition template for your database with any of the -> `--template-*` flags, always include the `--template-timeformat` flag with a -> time format to use in your partition template. -> Otherwise, InfluxDB omits time from the partition template and won't compact partitions. - -> [!Warning] -> #### Wait before writing to a new database with the same name as a deleted database -> -> After deleting a database from your {{% product-name omit=" Clustered" %}} -> cluster, you can reuse the name to create a new database, but **wait two to -> three minutes** after deleting the previous database before writing to the new -> database to allow write caches to clear. - -## Usage - - - - -```sh -influxctl database create [flags] -``` - -## Arguments - -| Argument | Description | -| :---------------- | :--------------------- | -| **DATABASE_NAME** | InfluxDB database name | - -## Flags - -| Flag | | Description | -| :--- | :---------------------- | :--------------------------------------------------------------------------------------------------------------------------------------- | -| | `--retention-period` | [Database retention period ](/influxdb3/cloud-dedicated/admin/databases/#retention-periods)(default is `0s`, infinite) | -| | `--max-tables` | [Maximum tables per database](/influxdb3/cloud-dedicated/admin/databases/#table-limit) (default is 500, `0` uses default) | -| | `--max-columns` | [Maximum columns per table](/influxdb3/cloud-dedicated/admin/databases/#column-limit) (default is 250, `0` uses default) | -| | `--template-tag` | Tag to add to partition template (can include multiple of this flag) | -| | `--template-tag-bucket` | Tag and number of buckets to partition tag values into separated by a comma--for example: `tag1,100` (can include multiple of this flag) | -| | `--template-timeformat` | Timestamp format for partition template (default is `%Y-%m-%d`) | 
-| `-h` | `--help` | Output command help | - -{{% caption %}} -_Also see [`influxctl` global flags](/influxdb3/cloud-dedicated/reference/cli/influxctl/#global-flags)._ -{{% /caption %}} - -## Examples - -- [Create a database with an infinite retention period](#create-a-database-with-an-infinite-retention-period) -- [Create a database with a 30-day retention period](#create-a-database-with-a-30-day-retention-period) -- [Create a database with non-default table and column limits](#create-a-database-with-non-default-table-and-column-limits) -- [Create a database with a custom partition template](#create-a-database-with-a-custom-partition-template) - -### Create a database with an infinite retention period - - - - -```sh -influxctl database create mydb -``` - -### Create a database with a 30-day retention period - - - - -```sh -influxctl database create \ - --retention-period 30d \ - mydb -``` - -### Create a database with non-default table and column limits - - - - -```sh -influxctl database create \ - --max-tables 200 \ - --max-columns 150 \ - mydb -``` - -### Create a database with a custom partition template - -The following example creates a new `mydb` database and applies a partition -template that partitions by two tags (`room` and `sensor-type`) and by day using -the time format `%Y-%m-%d`: - - - - -```sh -influxctl database create \ - --template-tag room \ - --template-tag sensor-type \ - --template-tag-bucket customerID,1000 \ - --template-timeformat '%Y-%m-%d' \ - mydb -``` - -_For more information about custom partitioning, see -[Manage data partitioning](/influxdb3/cloud-dedicated/admin/custom-partitions/)._ - -{{% expand "View command updates" %}} - -#### v2.7.0 {date="2024-03-26"} - -- Introduce the `--template-tag-bucket` flag to group tag values into buckets - and partition by each tag bucket. 
- -#### v2.5.0 {date="2024-03-04"} - -- Introduce the `--template-tag` and `--template-timeformat` flags that define - a custom partition template for a database. - -{{% /expand %}} + diff --git a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/delete.md b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/delete.md index 23c76b6dc..f12ee7ccc 100644 --- a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/delete.md +++ b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/delete.md @@ -1,71 +1,13 @@ --- title: influxctl database delete description: > - The `influxctl database delete` command deletes a database from an InfluxDB - Cloud Dedicated cluster. + The `influxctl database delete` command deletes a database from an + {{% product-name omit=" Clustered" %}} cluster. menu: influxdb3_cloud_dedicated: parent: influxctl database weight: 301 +source: /shared/influxctl/database/delete.md --- -The `influxctl database delete` command deletes a database from an -{{< product-name omit=" Clustered" >}} cluster. - -## Usage - - - - -```sh -influxctl database delete [command options] [--force] [...] -``` - -> [!Warning] -> #### Cannot be undone -> -> Deleting a database is a destructive action that cannot be undone. -> -> #### Wait before writing to a new database with the same name -> -> After deleting a database from your {{% product-name omit=" Clustered" %}} -> cluster, you can reuse the name to create a new database, but **wait two to -> three minutes** after deleting the previous database before writing to the new -> database to allow write caches to clear. 
- -## Arguments - -| Argument | Description | -| :---------------- | :----------------------------- | -| **DATABASE_NAME** | Name of the database to delete | - -## Flags - -| Flag | | Description | -| :--- | :-------- | :---------------------------------------------------------- | -| | `--force` | Do not prompt for confirmation to delete (default is false) | -| `-h` | `--help` | Output command help | - -{{% caption %}} -_Also see [`influxctl` global flags](/influxdb3/cloud-dedicated/reference/cli/influxctl/#global-flags)._ -{{% /caption %}} - -## Examples - -##### Delete a database named "mydb" - - - - -```sh -influxctl database delete mydb -``` - -##### Delete multiple databases - - - - -```sh -influxctl database delete mydb1 mydb2 -``` + diff --git a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/list.md b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/list.md index f3f4b3466..9a938b214 100644 --- a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/list.md +++ b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/list.md @@ -1,34 +1,13 @@ --- title: influxctl database list description: > - The `influxctl database list` command lists all databases in an InfluxDB Cloud - Dedicated cluster. + The `influxctl database list` command lists all databases in an + {{% product-name omit=" Clustered" %}} cluster. menu: influxdb3_cloud_dedicated: parent: influxctl database weight: 301 +source: /shared/influxctl/database/list.md --- -The `influxctl database list` command lists all databases in an InfluxDB Cloud -Dedicated cluster. - -The `--format` flag lets you print the output in other formats. -The `json` format is available for programmatic parsing by other tooling. -Default: `table`. 
- -## Usage - -```sh -influxctl database list [--format=table|json] -``` - -## Flags - -| Flag | | Description | -| :--- | :--------- | :-------------------------------------------- | -| | `--format` | Output format (`table` _(default)_ or `json`) | -| `-h` | `--help` | Output command help | - -{{% caption %}} -_Also see [`influxctl` global flags](/influxdb3/cloud-dedicated/reference/cli/influxctl/#global-flags)._ -{{% /caption %}} + diff --git a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/rename.md b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/rename.md new file mode 100644 index 000000000..ede2644e3 --- /dev/null +++ b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/rename.md @@ -0,0 +1,14 @@ +--- +title: influxctl database rename +description: > + The `influxctl database rename` command renames a database in an + {{% product-name omit=" Clustered" %}} cluster. +menu: + influxdb3_cloud_dedicated: + parent: influxctl database +weight: 301 +metadata: [influxctl 2.10.2+] +source: /shared/influxctl/database/rename.md +--- + + diff --git a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/undelete.md b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/undelete.md new file mode 100644 index 000000000..7e8624d67 --- /dev/null +++ b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/undelete.md @@ -0,0 +1,14 @@ +--- +title: influxctl database undelete +description: > + The `influxctl database undelete` command undeletes a previously deleted + database in an {{% product-name omit=" Clustered" %}} cluster. 
+menu: + influxdb3_cloud_dedicated: + parent: influxctl database +weight: 301 +metadata: [influxctl 2.10.2+] +source: /shared/influxctl/database/undelete.md +--- + + diff --git a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/update.md b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/update.md index 7e127c01e..d1f48b42b 100644 --- a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/update.md +++ b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/database/update.md @@ -7,86 +7,7 @@ menu: influxdb3_cloud_dedicated: parent: influxctl database weight: 301 +source: /shared/influxctl/database/update.md --- -The `influxctl database update` command updates a database's retention period, -table (measurement), or column limits in InfluxDB. - -## Usage - - - -```sh -influxctl database update [flags] -``` - -## Arguments - -| Argument | Description | -| :---------------- | :----------------------------- | -| **DATABASE_NAME** | Name of the database to update | - -## Flags - -| Flag | | Description | -| :--- | :------------------- | :----------------------------------------------------------- | -| | `--retention-period` | [Database retention period ](/influxdb3/cloud-dedicated/admin/databases/#retention-periods)(default is `0s` or infinite) | -| | `--max-tables` | [Maximum tables per database](/influxdb3/cloud-dedicated/admin/databases/#table-limit) (default is 500, 0 uses default) | -| | `--max-columns` | [Maximum columns per table](/influxdb3/cloud-dedicated/admin/databases/#column-limit) (default is 250, 0 uses default) | -| `-h` | `--help` | Output command help | - -{{% caption %}} -_Also see [`influxctl` global flags](/influxdb3/cloud-dedicated/reference/cli/influxctl/#global-flags)._ -{{% /caption %}} - -## Examples - -- [Update a database's retention period](#update-a-databases-retention-period) -- [Update a database's table limit](#update-a-databases-table-limit) -- [Update a database's column 
limit](#update-a-databases-column-limit) - -### Update a database's retention period - -```sh -influxctl database update --retention-period 1mo mydb -``` - -{{< flex >}} -{{% flex-content "half" %}} - -##### Valid durations units - -- `m`: minute -- `h`: hour -- `d`: day -- `w`: week -- `mo`: month -- `y`: year - -{{% /flex-content %}} -{{% flex-content "half" %}} - -##### Example retention period values - -- `0d`: infinite/none -- `3d`: 3 days -- `6w`: 6 weeks -- `1mo`: 1 month (30 days) -- `1y`: 1 year -- `30d30d`: 60 days -- `2.5d`: 60 hours - -{{% /flex-content %}} -{{< /flex >}} - -### Update a database's table limit - -```sh -influxctl database update --max-tables 300 mydb -``` - -### Update a database's column limit - -```sh -influxctl database update --max-columns 200 mydb -``` + diff --git a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/_index.md b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/_index.md index bcab7f10b..a5761b73f 100644 --- a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/_index.md +++ b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/_index.md @@ -9,25 +9,7 @@ menu: weight: 201 cascade: metadata: [influxctl 2.5.0+] +source: /shared/influxctl/table/_index.md --- -The `influxctl table` command and its subcommands manage tables in an -InfluxDB Cloud Dedicated cluster. 
- -## Usage - -```sh -influxctl table [subcommand] [flags] -``` - -## Subcommands - -| Subcommand | Description | -| :------------------------------------------------------------------------ | :------------- | -| [create](/influxdb3/cloud-dedicated/reference/cli/influxctl/table/create/) | Create a table | - -## Flags - -| Flag | | Description | -| :--- | :------- | :------------------ | -| `-h` | `--help` | Output command help | + diff --git a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/create.md b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/create.md index 8367e4926..1476a8c43 100644 --- a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/create.md +++ b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/create.md @@ -9,101 +9,7 @@ weight: 301 related: - /influxdb3/cloud-dedicated/admin/custom-partitions/define-custom-partitions/ - /influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/ +source: /shared/influxctl/table/create.md --- -The `influxctl table create` command creates a new table in the specified -database in an {{< product-name omit=" Clustered" >}} cluster. - -#### Custom partitioning - -You can override the default partition template (the partition template of the target database) -with the `--template-tag`, `--template-tag-bucket`, and `--template-timeformat` -flags when you create the table. -Provide a time format using [Rust strftime](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#time-part-templates), partition by specific tag, or partition tag values -into a specified number of "buckets." -Each of these can be used as part of the partition template. -Be sure to follow [partitioning best practices](/influxdb3/cloud-dedicated/admin/custom-partitions/best-practices/). 
- -> [!Note] -> #### Always provide a time format when using custom partitioning -> -> If defining a custom partition template for your table with any of the -> `--template-*` flags, always include the `--template-timeformat` flag with a -> time format to use in your partition template. -> Otherwise, InfluxDB omits time from the partition template and won't compact partitions. - -## Usage - -```sh -influxctl table create [flags] -``` - -## Arguments - -| Argument | Description | -| :---------------- | :-------------------------- | -| **DATABASE_NAME** | Name of the target database | -| **TABLE_NAME** | Table name | - -## Flags - -| Flag | | Description | -| :--- | :---------------------- | :--------------------------------------------------------------------------------------------------------------------------------------- | -| | `--template-tag` | Tag to add to partition template (can include multiple of this flag) | -| | `--template-tag-bucket` | Tag and number of buckets to partition tag values into separated by a comma--for example: `tag1,100` (can include multiple of this flag) | -| | `--template-timeformat` | Timestamp format for partition template | -| `-h` | `--help` | Output command help | - -{{% caption %}} -_Also see [`influxctl` global flags](/influxdb3/cloud-dedicated/reference/cli/influxctl/#global-flags)._ -{{% /caption %}} - -## Examples - -- [Create a table](#create-a-table) -- [Create a table with a custom partition template](#create-a-table-with-a-custom-partition-template) - -In the following examples, replace: - -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: - The name of the database to create the table in. -- {{% code-placeholder-key %}}`TABLE_NAME` {{% /code-placeholder-key %}}: - The name of table to create. 
- -### Create a table - -{{% code-placeholders "(DATABASE|TABLE)_NAME" %}} -```sh -influxctl table create DATABASE_NAME TABLE_NAME -``` -{{% /code-placeholders %}} - -### Create a table with a custom partition template - -The following example creates a new table and applies a partition -template that partitions by two tags (`room` and `sensor-type`) and by day using -the time format `%Y-%m-%d`: - -{{% code-placeholders "(DATABASE|TABLE)_NAME" %}} -```sh -influxctl table create \ - --template-tag room \ - --template-tag sensor-type \ - --template-tag-bucket customerID,1000 \ - --template-timeformat '%Y-%m-%d' \ - DATABASE_NAME \ - TABLE_NAME -``` -{{% /code-placeholders %}} - -_For more information about custom partitioning, see -[Manage data partitioning](/influxdb3/cloud-dedicated/admin/custom-partitions/)._ - -{{% expand "View command updates" %}} - -#### v2.7.0 {date="2024-03-26"} - -- Introduce the `--template-tag-bucket` flag to group tag values into buckets - and partition by each tag bucket. - -{{% /expand %}} + diff --git a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/delete.md b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/delete.md new file mode 100644 index 000000000..b47ace280 --- /dev/null +++ b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/delete.md @@ -0,0 +1,15 @@ +--- +title: influxctl table delete +description: > + The `influxctl table delete` command deletes a specified table from a database. 
+menu: + influxdb3_cloud_dedicated: + parent: influxctl table +weight: 301 +related: + - /influxdb3/cloud-dedicated/admin/tables/delete/ +metadata: [influxctl 2.10.2+] +source: /shared/influxctl/table/delete.md +--- + + diff --git a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/iceberg/_index.md b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/iceberg/_index.md new file mode 100644 index 000000000..5e9475aad --- /dev/null +++ b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/iceberg/_index.md @@ -0,0 +1,15 @@ +--- +title: influxctl table iceberg +description: > + The `influxctl table iceberg` command and its subcommands enable or disable + Iceberg-compatible exports for a table in an InfluxDB Cloud Dedicated cluster. +menu: + influxdb3_cloud_dedicated: + parent: influxctl table +weight: 301 +cascade: + metadata: [influxctl 2.10.2+] +source: /shared/influxctl/table/iceberg/_index.md +--- + + diff --git a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/iceberg/disable.md b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/iceberg/disable.md new file mode 100644 index 000000000..b67192c2b --- /dev/null +++ b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/iceberg/disable.md @@ -0,0 +1,13 @@ +--- +title: influxctl table iceberg disable +description: > + The `influxctl table iceberg disable` command disables Iceberg-compatible exports + for a table in an InfluxDB Cloud Dedicated cluster. 
+menu: + influxdb3_cloud_dedicated: + parent: influxctl table iceberg +weight: 401 +source: /shared/influxctl/table/iceberg/disable.md +--- + + diff --git a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/iceberg/enable.md b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/iceberg/enable.md new file mode 100644 index 000000000..5f02ba002 --- /dev/null +++ b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/iceberg/enable.md @@ -0,0 +1,13 @@ +--- +title: influxctl table iceberg enable +description: > + The `influxctl table iceberg enable` command enables Iceberg-compatible exports + for a table in an InfluxDB Cloud Dedicated cluster. +menu: + influxdb3_cloud_dedicated: + parent: influxctl table iceberg +weight: 401 +source: /shared/influxctl/table/iceberg/enable.md +--- + + diff --git a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/list.md b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/list.md new file mode 100644 index 000000000..de83b1432 --- /dev/null +++ b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/table/list.md @@ -0,0 +1,15 @@ +--- +title: influxctl table list +description: > + The `influxctl table list` command lists all tables in the specified database. +menu: + influxdb3_cloud_dedicated: + parent: influxctl table +weight: 301 +related: + - /influxdb3/cloud-dedicated/admin/tables/list/ +metadata: [influxctl 2.10.2+] +source: /shared/influxctl/table/list.md +--- + + diff --git a/content/influxdb3/clustered/admin/databases/delete.md b/content/influxdb3/clustered/admin/databases/delete.md index 9f39e8fd5..6b64122bf 100644 --- a/content/influxdb3/clustered/admin/databases/delete.md +++ b/content/influxdb3/clustered/admin/databases/delete.md @@ -33,16 +33,19 @@ influxctl database delete DATABASE_NAME {{% /code-placeholders %}} > [!Warning] -> #### Deleting a database cannot be undone -> -> Once a database is deleted, data stored in that database cannot be recovered. 
-> > #### Wait before writing to a new database with the same name > > After deleting a database from your {{% product-name omit=" Clustered" %}} > cluster, you can reuse the name to create a new database, but **wait two to > three minutes** after deleting the previous database before writing to the new > database to allow write caches to clear. +> +> #### Tokens still grant access to databases with the same name +> +> [Database tokens](/influxdb3/clustered/admin/tokens/database/) are associated to +> databases by name. If you create a new database with the same name, tokens +> that granted access to the deleted database will also grant access to the new +> database. > > #### Never directly modify the Catalog > diff --git a/content/influxdb3/clustered/admin/databases/rename.md b/content/influxdb3/clustered/admin/databases/rename.md new file mode 100644 index 000000000..e83d16cdb --- /dev/null +++ b/content/influxdb3/clustered/admin/databases/rename.md @@ -0,0 +1,58 @@ +--- +title: Rename a database +description: > + Use the [`influxctl database rename` command](/influxdb3/clustered/reference/cli/influxctl/database/rename/) + to rename a database in your {{< product-name omit=" Cluster" >}} cluster. +menu: + influxdb3_clustered: + parent: Manage databases +weight: 202 +list_code_example: | + ##### CLI + ```sh + influxctl database rename + ``` +related: + - /influxdb3/clustered/reference/cli/influxctl/database/rename/ + - /influxdb3/clustered/admin/tokens/database/create/ +--- + +Use the [`influxctl database rename` command](/influxdb3/clustered/reference/cli/influxctl/database/rename/) +to rename a database in your {{< product-name omit=" Cluster" >}} cluster. + +> [!Note] +> Renaming a database does not change the database ID, modify data in the database, +> or update [database tokens](/influxdb3/clustered/admin/tokens/database/). 
+> After renaming a database, any existing database tokens will stop working and you +> must create new tokens with permissions for the renamed database. + +## Rename a database using the influxctl CLI + +{{% code-placeholders "DATABASE_NAME|NEW_DATABASE_NAME" %}} +```sh +influxctl database rename DATABASE_NAME NEW_DATABASE_NAME +``` +{{% /code-placeholders %}} + +Replace the following: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Current name of the database to rename +- {{% code-placeholder-key %}}`NEW_DATABASE_NAME`{{% /code-placeholder-key %}}: New name for the database + +## Update database tokens after renaming + +After renaming a database, existing database tokens will no longer work because +they reference the old database name. Do the following: + +1. [Create new database tokens](/influxdb3/clustered/admin/tokens/database/create/) + with permissions for the renamed database. +2. Update your applications and clients to use the new tokens. +3. [Delete the old database tokens](/influxdb3/clustered/admin/tokens/database/delete/) + that reference the old database name. + +{{% note %}} +#### Renamed database retains its ID + +The database ID remains the same after renaming. When you list databases, +you'll see the new name associated with the original database ID. +{{% /note %}} diff --git a/content/influxdb3/clustered/admin/databases/undelete.md b/content/influxdb3/clustered/admin/databases/undelete.md new file mode 100644 index 000000000..5b2e4562b --- /dev/null +++ b/content/influxdb3/clustered/admin/databases/undelete.md @@ -0,0 +1,70 @@ +--- +title: Undelete a database +description: > + Use the [`influxctl database undelete` command](/influxdb3/clustered/reference/cli/influxctl/database/undelete/) + to restore a previously deleted database in your {{< product-name omit=" Cluster" >}} cluster. 
+menu: + influxdb3_clustered: + parent: Manage databases +weight: 204 +list_code_example: | + ```sh + influxctl database undelete + ``` +related: + - /influxdb3/clustered/reference/cli/influxctl/database/undelete/ + - /influxdb3/clustered/admin/databases/delete/ + - /influxdb3/clustered/admin/tokens/database/create/ +--- + +Use the [`influxctl database undelete` command](/influxdb3/clustered/reference/cli/influxctl/database/undelete/) +to restore a previously deleted database in your {{< product-name omit=" Cluster" >}} cluster. + +> [!Important] +> To undelete a database: +> +> - The database name must match the name of the deleted database. +> - A new database with the same name cannot already exist. +> - You must have appropriate permissions to manage databases. + +When you undelete a database, it is restored with the same retention period, +table limits, and column limits as when it was deleted. + +> [!Warning] +> Databases can only be undeleted for +> {{% show-in "cloud-dedicated" %}}approximately 14 days{{% /show-in %}}{{% show-in "clustered" %}}a configurable "hard-delete" grace period{{% /show-in %}} +> after they are deleted. +> After this grace period, all Parquet files associated with the deleted database +> are permanently removed and the database cannot be undeleted. + +## Undelete a database using the influxctl CLI + +{{% code-placeholders "DATABASE_NAME" %}} +```sh +influxctl database undelete DATABASE_NAME +``` +{{% /code-placeholders %}} + +Replace the following: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: + Name of the deleted database to restore + +## Recreate tokens for the database + +After successfully undeleting a database: + +1. **Verify the database was restored** by [listing all databases](/influxdb3/clustered/admin/databases/list/). +2. **If you previously deleted tokens associated with the deleted database, create new database tokens** + - Any tokens that existed before deletion are not restored. 
+ [Create new database tokens](/influxdb3/clustered/admin/tokens/database/create/) + with appropriate permissions for the restored database. +3. **Update your applications** to use the new database tokens. + +{{% note %}} +#### Undeleted databases retain their original configuration + +When a database is undeleted, it retains the same database ID, retention period, +and table/column limits it had before deletion. However, database tokens are not +restored and must be recreated. +{{% /note %}} diff --git a/content/influxdb3/clustered/admin/tables/delete.md b/content/influxdb3/clustered/admin/tables/delete.md new file mode 100644 index 000000000..010fcb5e6 --- /dev/null +++ b/content/influxdb3/clustered/admin/tables/delete.md @@ -0,0 +1,52 @@ +--- +title: Delete a table +description: > + Use the Admin UI or the [`influxctl table delete` command](/influxdb3/clustered/reference/cli/influxctl/table/delete/) + to delete a table from a database in your {{< product-name omit=" Cluster" >}} cluster. +menu: + influxdb3_clustered: + parent: Manage tables +weight: 203 +list_code_example: | + ```sh + influxctl table delete + ``` +related: + - /influxdb3/clustered/reference/cli/influxctl/table/delete/ +--- + +Use the Admin UI or the [`influxctl table delete` command](/influxdb3/clustered/reference/cli/influxctl/table/delete/) +to delete a table from a database in your {{< product-name omit=" Cluster" >}} cluster. + +> [!Warning] +> Deleting a table is irreversible. Once a table is deleted, all data stored in +> that table is permanently removed and cannot be recovered. 
+ +Provide the following arguments: + +- **Database name**: Name of the database that contains the table to delete +- **Table name**: Name of the table to delete + +{{% code-placeholders "DATABASE_NAME|TABLE_NAME" %}} +```sh +influxctl table delete DATABASE_NAME TABLE_NAME +``` +{{% /code-placeholders %}} + +Replace the following: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Name of the database that contains the table to delete +- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: Name of the table to delete + +When prompted, enter `y` to confirm the deletion. + +> [!Note] +> #### Wait before reusing a deleted table name +> +> After deleting a table, wait a few minutes before attempting to create a new +> table with the same name to ensure the deletion process has fully completed. +> +> {{% product-name %}} creates tables implicitly using table names specified in +> line protocol written to the databases. To prevent the deleted table from being +> immediately recreated by incoming write requests, pause all write requests to +> the table before deleting it. diff --git a/content/influxdb3/clustered/admin/tables/list.md b/content/influxdb3/clustered/admin/tables/list.md index 228155434..7f604a1a3 100644 --- a/content/influxdb3/clustered/admin/tables/list.md +++ b/content/influxdb3/clustered/admin/tables/list.md @@ -1,7 +1,8 @@ --- title: List tables description: > - Use the [`SHOW TABLES` SQL statement](/influxdb3/clustered/query-data/sql/explore-schema/#list-measurements-in-a-database) + Use the [`influxctl table list` command](/influxdb3/clustered/reference/cli/influxctl/table/list/), + the [`SHOW TABLES` SQL statement](/influxdb3/clustered/query-data/sql/explore-schema/#list-measurements-in-a-database), or the [`SHOW MEASUREMENTS` InfluxQL statement](/influxdb3/clustered/query-data/influxql/explore-schema/#list-measurements-in-a-database) to list tables in a database. 
menu: @@ -9,49 +10,96 @@ menu: parent: Manage tables weight: 201 list_code_example: | - ###### SQL + ##### CLI + ```sh + influxctl table list + ``` + + ##### SQL ```sql SHOW TABLES ``` - ###### InfluxQL + ##### InfluxQL ```sql SHOW MEASUREMENTS ``` related: + - /influxdb3/clustered/reference/cli/influxctl/table/list/ - /influxdb3/clustered/query-data/sql/explore-schema/ - /influxdb3/clustered/query-data/influxql/explore-schema/ --- -Use the [`SHOW TABLES` SQL statement](/influxdb3/clustered/query-data/sql/explore-schema/#list-measurements-in-a-database) +Use the [`influxctl table list` command](/influxdb3/clustered/reference/cli/influxctl/table/list/), +the [`SHOW TABLES` SQL statement](/influxdb3/clustered/query-data/sql/explore-schema/#list-measurements-in-a-database), or the [`SHOW MEASUREMENTS` InfluxQL statement](/influxdb3/clustered/query-data/influxql/explore-schema/#list-measurements-in-a-database) to list tables in a database. > [!Note] > With {{< product-name >}}, tables and measurements are synonymous. -###### SQL +{{< tabs-wrapper >}} +{{% tabs %}} +[influxctl](#influxctl) +[SQL & InfluxQL](#sql--influxql) +{{% /tabs %}} +{{% tab-content %}} + + +Use the [`influxctl table list` command](/influxdb3/clustered/reference/cli/influxctl/table/list/) +to list all tables in a database in your {{< product-name omit=" Cluster" >}}. 
+ +{{% code-placeholders "DATABASE_NAME" %}} + +```bash +influxctl table list DATABASE_NAME +``` +{{% /code-placeholders %}} + +Replace the following: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: + Name of the database containing the tables to list + +### Output formats + +The `influxctl table list` command supports the following output formats: + +- `table` (default): Human-readable table format +- `json`: JSON format for programmatic use + +Use the `--format` flag to specify the output format: + +{{% code-placeholders "DATABASE_NAME" %}} +```sh +influxctl table list --format json DATABASE_NAME +``` +{{% /code-placeholders %}} + + +{{% /tab-content %}} +{{% tab-content %}} + + +## List tables with the influxctl query command + +To list tables using SQL or InfluxQL, use the `influxctl query` command to pass +the appropriate statement. + +### SQL ```sql SHOW TABLES ``` -###### InfluxQL +### InfluxQL ```sql SHOW MEASUREMENTS ``` -## List tables with the influxctl CLI - -To list tables using the `influxctl` CLI, use the `influxctl query` command to pass -the `SHOW TABLES` SQL statement. - -> [!Note] -> The `influxctl query` command only supports SQL queries; not InfluxQL. - Provide the following with your command: - **Database token**: a [database token](/influxdb3/clustered/admin/tokens/#database-tokens) @@ -61,17 +109,29 @@ Provide the following with your command: - **Database name**: Name of the database to query. Uses the `database` setting from the [`influxctl` connection profile](/influxdb3/clustered/reference/cli/influxctl/#configure-connection-profiles) or the `--database` command flag. -- **SQL query**: SQL query with the `SHOW TABLES` statement. +- **SQL query**: SQL query with the `SHOW TABLES` statement or InfluxQL query with the `SHOW MEASUREMENTS` statement. 
{{% code-placeholders "DATABASE_(TOKEN|NAME)" %}} -```sh +##### SQL + +```bash influxctl query \ --token DATABASE_TOKEN \ --database DATABASE_NAME \ "SHOW TABLES" ``` +##### InfluxQL + +```bash +influxctl query \ + --token DATABASE_TOKEN \ + --database DATABASE_NAME \ + --language influxql \ + "SHOW MEASUREMENTS" +``` + {{% /code-placeholders %}} Replace the following: @@ -81,3 +141,11 @@ Replace the following: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Name of the database to query +> [!Note] +> The `influxctl query` command uses SQL by default. +> To query with InfluxQL, include the `--language influxql` flag, as shown in the example above. + + +{{% /tab-content %}} +{{< /tabs-wrapper >}} + diff --git a/content/influxdb3/clustered/install/secure-cluster/auth.md b/content/influxdb3/clustered/install/secure-cluster/auth.md index ef59b0a68..57bb7a9a4 100644 --- a/content/influxdb3/clustered/install/secure-cluster/auth.md +++ b/content/influxdb3/clustered/install/secure-cluster/auth.md @@ -29,9 +29,8 @@ database tokens (which provide read and write access to databases).
- [Configure influxctl](#configure-influxctl) - [Test your authorization flow](#test-your-authorization-flow) -InfluxData has tested with the following identity providers, but any provider -that [meets the requirements](#identity-provider-requirements) -should work: +InfluxData has tested with and supports the following identity providers, but any +provider that [meets the requirements](#identity-provider-requirements) should work: - [Microsoft Entra ID _(formerly Azure Active Directory)_](https://www.microsoft.com/en-us/security/business/microsoft-entra) - [Keycloak](https://www.keycloak.org/) diff --git a/content/influxdb3/clustered/reference/cli/influxctl/database/_index.md b/content/influxdb3/clustered/reference/cli/influxctl/database/_index.md index 7ff42e7dd..3b42a31a4 100644 --- a/content/influxdb3/clustered/reference/cli/influxctl/database/_index.md +++ b/content/influxdb3/clustered/reference/cli/influxctl/database/_index.md @@ -2,34 +2,12 @@ title: influxctl database description: > The `influxctl database` command and its subcommands manage databases in an - InfluxDB cluster. + {{% product-name omit=" Clustered" %}} cluster. menu: influxdb3_clustered: parent: influxctl weight: 201 +source: /shared/influxctl/database/_index.md --- -The `influxctl database` command and its subcommands manage databases in an -InfluxDB cluster. 
- -## Usage - -```sh -influxctl database [subcommand] [flags] -``` - -## Subcommands - -| Subcommand | Description | -| :--------------------------------------------------------------------------- | :------------------ | -| [create](/influxdb3/clustered/reference/cli/influxctl/database/create/) | Create a database | -| [delete](/influxdb3/clustered/reference/cli/influxctl/database/delete/) | Delete a database | -| [list](/influxdb3/clustered/reference/cli/influxctl/database/list/) | List databases | -| [update](/influxdb3/clustered/reference/cli/influxctl/database/update/) | Update a database | -| help, h | Output command help | - -## Flags - -| Flag | | Description | -| :--- | :------- | :------------------ | -| `-h` | `--help` | Output command help | + diff --git a/content/influxdb3/clustered/reference/cli/influxctl/database/create.md b/content/influxdb3/clustered/reference/cli/influxctl/database/create.md index 7a71ec00d..7bb2031c0 100644 --- a/content/influxdb3/clustered/reference/cli/influxctl/database/create.md +++ b/content/influxdb3/clustered/reference/cli/influxctl/database/create.md @@ -1,7 +1,8 @@ --- title: influxctl database create description: > - The `influxctl database create` command creates a new database in an InfluxDB cluster. + The `influxctl database create` command creates a new database in an + {{% product-name omit=" Clustered" %}} cluster. menu: influxdb3_clustered: parent: influxctl database @@ -9,173 +10,7 @@ weight: 301 related: - /influxdb3/clustered/admin/custom-partitions/define-custom-partitions/ - /influxdb3/clustered/admin/custom-partitions/partition-templates/ +source: /shared/influxctl/database/create.md --- -The `influxctl database create` command creates a new database with a specified -retention period in an {{< product-name omit=" Clustered" >}} cluster. - -The retention period defines the maximum age of data retained in the database, -based on the timestamp of the data. 
-The retention period value is a time duration value made up of a numeric value -plus a duration unit. For example, `30d` means 30 days. -A zero duration retention period is infinite and data will not expire. -The retention period value cannot be negative or contain whitespace. - -{{< flex >}} -{{% flex-content "half" %}} - -##### Valid durations units include - -- **m**: minute -- **h**: hour -- **d**: day -- **w**: week -- **mo**: month -- **y**: year - -{{% /flex-content %}} -{{% flex-content "half" %}} - -##### Example retention period values - -- `0d`: infinite/none -- `3d`: 3 days -- `6w`: 6 weeks -- `1mo`: 1 month (30 days) -- `1y`: 1 year -- `30d30d`: 60 days -- `2.5d`: 60 hours - -{{% /flex-content %}} -{{< /flex >}} - -#### Custom partitioning - -You can override the default partition template (`%Y-%m-%d`) of the database -with the `--template-tag`, `--template-tag-bucket`, and `--template-timeformat` -flags when you create the database. -Provide a time format using [Rust strftime](/influxdb3/clustered/admin/custom-partitions/partition-templates/#time-part-templates), partition by specific tag, or partition tag values -into a specified number of "buckets." -Each of these can be used as part of the partition template. -Be sure to follow [partitioning best practices](/influxdb3/clustered/admin/custom-partitions/best-practices/). - -> [!Note] -> #### Always provide a time format when using custom partitioning -> -> If defining a custom partition template for your database with any of the -> `--template-*` flags, always include the `--template-timeformat` flag with a -> time format to use in your partition template. -> Otherwise, InfluxDB omits time from the partition template and won't compact partitions. 
- -> [!Warning] -> #### Wait before writing to a new database with the same name as a deleted database -> -> After deleting a database from your {{% product-name omit=" Clustered" %}} -> cluster, you can reuse the name to create a new database, but **wait two to -> three minutes** after deleting the previous database before writing to the new -> database to allow write caches to clear. - -## Usage - - - - -```sh -influxctl database create [flags] -``` - -## Arguments - -| Argument | Description | -| :---------------- | :--------------------- | -| **DATABASE_NAME** | InfluxDB database name | - -## Flags - -| Flag | | Description | -| :--- | :---------------------- | :--------------------------------------------------------------------------------------------------------------------------------------- | -| | `--retention-period` | [Database retention period ](/influxdb3/clustered/admin/databases/#retention-periods)(default is `0s`, infinite) | -| | `--max-tables` | [Maximum tables per database](/influxdb3/clustered/admin/databases/#table-limit) (default is 500, `0` uses default) | -| | `--max-columns` | [Maximum columns per table](/influxdb3/clustered/admin/databases/#column-limit) (default is 250, `0` uses default) | -| | `--template-tag` | Tag to add to partition template (can include multiple of this flag) | -| | `--template-tag-bucket` | Tag and number of buckets to partition tag values into separated by a comma--for example: `tag1,100` (can include multiple of this flag) | -| | `--template-timeformat` | Timestamp format for partition template (default is `%Y-%m-%d`) | -| `-h` | `--help` | Output command help | - -{{% caption %}} -_Also see [`influxctl` global flags](/influxdb3/clustered/reference/cli/influxctl/#global-flags)._ -{{% /caption %}} - -## Examples - -- [Create a database with an infinite retention period](#create-a-database-with-an-infinite-retention-period) -- [Create a database with a 30-day retention 
period](#create-a-database-with-a-30-day-retention-period) -- [Create a database with non-default table and column limits](#create-a-database-with-non-default-table-and-column-limits) -- [Create a database with a custom partition template](#create-a-database-with-a-custom-partition-template) - -### Create a database with an infinite retention period - - - - -```sh -influxctl database create mydb -``` - -### Create a database with a 30-day retention period - - - - -```sh -influxctl database create \ - --retention-period 30d \ - mydb -``` - -### Create a database with non-default table and column limits - - - - -```sh -influxctl database create \ - --max-tables 200 \ - --max-columns 150 \ - mydb -``` - -### Create a database with a custom partition template - -The following example creates a new `mydb` database and applies a partition -template that partitions by two tags (`room` and `sensor-type`) and by day using -the time format `%Y-%m-%d`: - - - - -```sh -influxctl database create \ - --template-tag room \ - --template-tag sensor-type \ - --template-tag-bucket customerID,1000 \ - --template-timeformat '%Y-%m-%d' \ - mydb -``` - -_For more information about custom partitioning, see -[Manage data partitioning](/influxdb3/clustered/admin/custom-partitions/)._ - -{{% expand "View command updates" %}} - -#### v2.7.0 {date="2024-03-26"} - -- Introduce the `--template-tag-bucket` flag to group tag values into buckets - and partition by each tag bucket. - -#### v2.5.0 {date="2024-03-04"} - -- Introduce the `--template-tag` and `--template-timeformat` flags that define - a custom partition template for a database. 
- -{{% /expand %}} + diff --git a/content/influxdb3/clustered/reference/cli/influxctl/database/delete.md b/content/influxdb3/clustered/reference/cli/influxctl/database/delete.md index 990a0b662..70bd0e33e 100644 --- a/content/influxdb3/clustered/reference/cli/influxctl/database/delete.md +++ b/content/influxdb3/clustered/reference/cli/influxctl/database/delete.md @@ -7,65 +7,7 @@ menu: influxdb3_clustered: parent: influxctl database weight: 301 +source: /shared/influxctl/database/delete.md --- -The `influxctl database delete` command deletes a database from an -{{< product-name omit=" Clustered" >}} cluster. - -## Usage - - - - -```sh -influxctl database delete [command options] [--force] [...] -``` - -> [!Warning] -> #### Cannot be undone -> -> Deleting a database is a destructive action that cannot be undone. -> -> #### Wait before writing to a new database with the same name -> -> After deleting a database from your {{% product-name omit=" Clustered" %}} -> cluster, you can reuse the name to create a new database, but **wait two to -> three minutes** after deleting the previous database before writing to the new -> database to allow write caches to clear. 
- -## Arguments - -| Argument | Description | -| :---------------- | :----------------------------- | -| **DATABASE_NAME** | Name of the database to delete | - -## Flags - -| Flag | | Description | -| :--- | :-------- | :---------------------------------------------------------- | -| | `--force` | Do not prompt for confirmation to delete (default is false) | -| `-h` | `--help` | Output command help | - -{{% caption %}} -_Also see [`influxctl` global flags](/influxdb3/clustered/reference/cli/influxctl/#global-flags)._ -{{% /caption %}} - -## Examples - -##### Delete a database named "mydb" - - - - -```sh -influxctl database delete mydb -``` - -##### Delete multiple databases - - - - -```sh -influxctl database delete mydb1 mydb2 -``` + diff --git a/content/influxdb3/clustered/reference/cli/influxctl/database/list.md b/content/influxdb3/clustered/reference/cli/influxctl/database/list.md index 1bb620217..d8c7241d4 100644 --- a/content/influxdb3/clustered/reference/cli/influxctl/database/list.md +++ b/content/influxdb3/clustered/reference/cli/influxctl/database/list.md @@ -1,34 +1,13 @@ --- title: influxctl database list description: > - The `influxctl database list` command lists all databases in an InfluxDB Cloud - Dedicated cluster. + The `influxctl database list` command lists all databases in an + {{% product-name omit=" Clustered" %}} cluster. menu: influxdb3_clustered: parent: influxctl database weight: 301 +source: /shared/influxctl/database/list.md --- -The `influxctl database list` command lists all databases in an InfluxDB Cloud -Dedicated cluster. - -The `--format` flag lets you print the output in other formats. -The `json` format is available for programmatic parsing by other tooling. -Default: `table`. 
- -## Usage - -```sh -influxctl database list [--format=table|json] -``` - -## Flags - -| Flag | | Description | -| :--- | :--------- | :-------------------------------------------- | -| | `--format` | Output format (`table` _(default)_ or `json`) | -| `-h` | `--help` | Output command help | - -{{% caption %}} -_Also see [`influxctl` global flags](/influxdb3/clustered/reference/cli/influxctl/#global-flags)._ -{{% /caption %}} + diff --git a/content/influxdb3/clustered/reference/cli/influxctl/database/rename.md b/content/influxdb3/clustered/reference/cli/influxctl/database/rename.md new file mode 100644 index 000000000..36f21f978 --- /dev/null +++ b/content/influxdb3/clustered/reference/cli/influxctl/database/rename.md @@ -0,0 +1,14 @@ +--- +title: influxctl database rename +description: > + The `influxctl database rename` command renames a database in an + {{% product-name omit=" Clustered" %}} cluster. +menu: + influxdb3_clustered: + parent: influxctl database +weight: 301 +metadata: [influxctl 2.10.2+] +source: /shared/influxctl/database/rename.md +--- + + diff --git a/content/influxdb3/clustered/reference/cli/influxctl/database/undelete.md b/content/influxdb3/clustered/reference/cli/influxctl/database/undelete.md new file mode 100644 index 000000000..3ac70ed62 --- /dev/null +++ b/content/influxdb3/clustered/reference/cli/influxctl/database/undelete.md @@ -0,0 +1,14 @@ +--- +title: influxctl database undelete +description: > + The `influxctl database undelete` command undeletes a previously deleted + database in an {{% product-name omit=" Clustered" %}} cluster. 
+menu: + influxdb3_clustered: + parent: influxctl database +weight: 301 +metadata: [influxctl 2.10.2+] +source: /shared/influxctl/database/undelete.md +--- + + diff --git a/content/influxdb3/clustered/reference/cli/influxctl/database/update.md b/content/influxdb3/clustered/reference/cli/influxctl/database/update.md index 13f2884d6..2a04432ce 100644 --- a/content/influxdb3/clustered/reference/cli/influxctl/database/update.md +++ b/content/influxdb3/clustered/reference/cli/influxctl/database/update.md @@ -7,86 +7,7 @@ menu: influxdb3_clustered: parent: influxctl database weight: 301 +source: /shared/influxctl/database/update.md --- -The `influxctl database update` command updates a database's retention period, -table (measurement), or column limits in InfluxDB. - -## Usage - - - -```sh -influxctl database update [flags] -``` - -## Arguments - -| Argument | Description | -| :---------------- | :----------------------------- | -| **DATABASE_NAME** | Name of the database to update | - -## Flags - -| Flag | | Description | -| :--- | :------------------- | :----------------------------------------------------------- | -| | `--retention-period` | [Database retention period ](/influxdb3/clustered/admin/databases/#retention-periods)(default is `0s` or infinite) | -| | `--max-tables` | [Maximum tables per database](/influxdb3/clustered/admin/databases/#table-limit) (default is 500, `0` uses default) | -| | `--max-columns` | [Maximum columns per table](/influxdb3/clustered/admin/databases/#column-limit) (default is 250, `0` uses default) | -| `-h` | `--help` | Output command help | - -{{% caption %}} -_Also see [`influxctl` global flags](/influxdb3/clustered/reference/cli/influxctl/#global-flags)._ -{{% /caption %}} - -## Examples - -- [Update a database's retention period](#update-a-databases-retention-period) -- [Update a database's table limit](#update-a-databases-table-limit) -- [Update a database's column limit](#update-a-databases-column-limit) - -### Update a database's 
retention period - -```sh -influxctl database update --retention-period 1mo mydb -``` - -{{< flex >}} -{{% flex-content "half" %}} - -##### Valid durations units - -- `m`: minute -- `h`: hour -- `d`: day -- `w`: week -- `mo`: month -- `y`: year - -{{% /flex-content %}} -{{% flex-content "half" %}} - -##### Example retention period values - -- `0d`: infinite/none -- `3d`: 3 days -- `6w`: 6 weeks -- `1mo`: 1 month (30 days) -- `1y`: 1 year -- `30d30d`: 60 days -- `2.5d`: 60 hours - -{{% /flex-content %}} -{{< /flex >}} - -### Update a database's table limit - -```sh -influxctl database update --max-tables 300 mydb -``` - -### Update a database's column limit - -```sh -influxctl database update --max-columns 200 mydb -``` + diff --git a/content/influxdb3/clustered/reference/cli/influxctl/table/_index.md b/content/influxdb3/clustered/reference/cli/influxctl/table/_index.md index 359ded2ad..8490436ed 100644 --- a/content/influxdb3/clustered/reference/cli/influxctl/table/_index.md +++ b/content/influxdb3/clustered/reference/cli/influxctl/table/_index.md @@ -1,31 +1,15 @@ --- title: influxctl table description: > - The `influxctl table` command and its subcommands manage tables in an InfluxDB cluster. + The `influxctl table` command and its subcommands manage tables in an + {{% product-name omit=" Clustered" %}} cluster. menu: influxdb3_clustered: parent: influxctl weight: 201 cascade: metadata: [influxctl 2.5.0+] +source: /shared/influxctl/table/_index.md --- -The `influxctl table` command and its subcommands manage tables in an InfluxDB cluster. 
- -## Usage - -```sh -influxctl table [subcommand] [flags] -``` - -## Subcommands - -| Subcommand | Description | -| :------------------------------------------------------------------ | :------------- | -| [create](/influxdb3/clustered/reference/cli/influxctl/table/create/) | Create a table | - -## Flags - -| Flag | | Description | -| :--- | :------- | :------------------ | -| `-h` | `--help` | Output command help | + diff --git a/content/influxdb3/clustered/reference/cli/influxctl/table/create.md b/content/influxdb3/clustered/reference/cli/influxctl/table/create.md index 31bc72495..e70e01ff8 100644 --- a/content/influxdb3/clustered/reference/cli/influxctl/table/create.md +++ b/content/influxdb3/clustered/reference/cli/influxctl/table/create.md @@ -9,101 +9,7 @@ weight: 301 related: - /influxdb3/clustered/admin/custom-partitions/define-custom-partitions/ - /influxdb3/clustered/admin/custom-partitions/partition-templates/ +source: /shared/influxctl/table/create.md --- -The `influxctl table create` command creates a new table in the specified -database in an {{< product-name omit=" Clustered" >}} cluster. - -#### Custom partitioning - -You can override the default partition template (the partition template of the target database) -with the `--template-tag`, `--template-tag-bucket`, and `--template-timeformat` -flags when you create the table. -Provide a time format using [Rust strftime](/influxdb3/clustered/admin/custom-partitions/partition-templates/#time-part-templates), partition by specific tag, or partition tag values -into a specified number of "buckets." -Each of these can be used as part of the partition template. -Be sure to follow [partitioning best practices](/influxdb3/clustered/admin/custom-partitions/best-practices/). 
- -> [!Note] -> #### Always provide a time format when using custom partitioning -> -> If defining a custom partition template for your table with any of the -> `--template-*` flags, always include the `--template-timeformat` flag with a -> time format to use in your partition template. -> Otherwise, InfluxDB omits time from the partition template and won't compact partitions. - -## Usage - -```sh -influxctl table create [flags] -``` - -## Arguments - -| Argument | Description | -| :---------------- | :-------------------------- | -| **DATABASE_NAME** | Name of the target database | -| **TABLE_NAME** | Table name | - -## Flags - -| Flag | | Description | -| :--- | :---------------------- | :--------------------------------------------------------------------------------------------------------------------------------------- | -| | `--template-tag` | Tag to add to partition template (can include multiple of this flag) | -| | `--template-tag-bucket` | Tag and number of buckets to partition tag values into separated by a comma--for example: `tag1,100` (can include multiple of this flag) | -| | `--template-timeformat` | Timestamp format for partition template | -| `-h` | `--help` | Output command help | - -{{% caption %}} -_Also see [`influxctl` global flags](/influxdb3/clustered/reference/cli/influxctl/#global-flags)._ -{{% /caption %}} - -## Examples - -- [Create a table](#create-a-table) -- [Create a table with a custom partition template](#create-a-table-with-a-custom-partition-template) - -In the following examples, replace: - -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: - The name of the database to create the table in. -- {{% code-placeholder-key %}}`TABLE_NAME` {{% /code-placeholder-key %}}: - The name of table to create. 
- -### Create a table - -{{% code-placeholders "(DATABASE|TABLE)_NAME" %}} -```sh -influxctl table create DATABASE_NAME TABLE_NAME -``` -{{% /code-placeholders %}} - -### Create a table with a custom partition template - -The following example creates a new table and applies a partition -template that partitions by two tags (`room` and `sensor-type`) and by day using -the time format `%Y-%m-%d`: - -{{% code-placeholders "(DATABASE|TABLE)_NAME" %}} -```sh -influxctl table create \ - --template-tag room \ - --template-tag sensor-type \ - --template-tag-bucket customerID,1000 \ - --template-timeformat '%Y-%m-%d' \ - DATABASE_NAME \ - TABLE_NAME -``` -{{% /code-placeholders %}} - -_For more information about custom partitioning, see -[Manage data partitioning](/influxdb3/clustered/admin/custom-partitions/)._ - -{{% expand "View command updates" %}} - -#### v2.7.0 {date="2024-03-26"} - -- Introduce the `--template-tag-bucket` flag to group tag values into buckets - and partition by each tag bucket. - -{{% /expand %}} + diff --git a/content/influxdb3/clustered/reference/cli/influxctl/table/delete.md b/content/influxdb3/clustered/reference/cli/influxctl/table/delete.md new file mode 100644 index 000000000..be2858639 --- /dev/null +++ b/content/influxdb3/clustered/reference/cli/influxctl/table/delete.md @@ -0,0 +1,15 @@ +--- +title: influxctl table delete +description: > + The `influxctl table delete` command deletes a specified table from a database. 
+menu: + influxdb3_clustered: + parent: influxctl table +weight: 301 +related: + - /influxdb3/clustered/admin/tables/delete/ +metadata: [influxctl 2.10.2+] +source: /shared/influxctl/table/delete.md +--- + + diff --git a/content/influxdb3/clustered/reference/cli/influxctl/table/iceberg/_index.md b/content/influxdb3/clustered/reference/cli/influxctl/table/iceberg/_index.md new file mode 100644 index 000000000..905c30a40 --- /dev/null +++ b/content/influxdb3/clustered/reference/cli/influxctl/table/iceberg/_index.md @@ -0,0 +1,15 @@ +--- +title: influxctl table iceberg +description: > + The `influxctl table iceberg` command and its subcommands enable or disable + Iceberg-compatible exports for a table in an {{% product-name omit=" Clustered" %}} cluster. +menu: + influxdb3_clustered: + parent: influxctl table +weight: 301 +cascade: + metadata: [influxctl 2.10.2+] +source: /shared/influxctl/table/iceberg/_index.md +--- + + diff --git a/content/influxdb3/clustered/reference/cli/influxctl/table/iceberg/disable.md b/content/influxdb3/clustered/reference/cli/influxctl/table/iceberg/disable.md new file mode 100644 index 000000000..1d2f7664e --- /dev/null +++ b/content/influxdb3/clustered/reference/cli/influxctl/table/iceberg/disable.md @@ -0,0 +1,13 @@ +--- +title: influxctl table iceberg disable +description: > + The `influxctl table iceberg disable` command disables Iceberg-compatible exports + for a table in an {{% product-name omit=" Clustered" %}} cluster. 
+menu: + influxdb3_clustered: + parent: influxctl table iceberg +weight: 401 +source: /shared/influxctl/table/iceberg/disable.md +--- + + diff --git a/content/influxdb3/clustered/reference/cli/influxctl/table/iceberg/enable.md b/content/influxdb3/clustered/reference/cli/influxctl/table/iceberg/enable.md new file mode 100644 index 000000000..e7ce6c021 --- /dev/null +++ b/content/influxdb3/clustered/reference/cli/influxctl/table/iceberg/enable.md @@ -0,0 +1,13 @@ +--- +title: influxctl table iceberg enable +description: > + The `influxctl table iceberg enable` command enables Iceberg-compatible exports + for a table in an {{% product-name omit=" Clustered" %}} cluster. +menu: + influxdb3_clustered: + parent: influxctl table iceberg +weight: 401 +source: /shared/influxctl/table/iceberg/enable.md +--- + + diff --git a/content/influxdb3/clustered/reference/cli/influxctl/table/list.md b/content/influxdb3/clustered/reference/cli/influxctl/table/list.md new file mode 100644 index 000000000..59484772f --- /dev/null +++ b/content/influxdb3/clustered/reference/cli/influxctl/table/list.md @@ -0,0 +1,15 @@ +--- +title: influxctl table list +description: > + The `influxctl table list` command lists all tables in the specified database. +menu: + influxdb3_clustered: + parent: influxctl table +weight: 301 +related: + - /influxdb3/clustered/admin/tables/list/ +metadata: [influxctl 2.10.2+] +source: /shared/influxctl/table/list.md +--- + + diff --git a/content/influxdb3/clustered/reference/release-notes/clustered.md b/content/influxdb3/clustered/reference/release-notes/clustered.md index 7c0d688fe..b7d3c3973 100644 --- a/content/influxdb3/clustered/reference/release-notes/clustered.md +++ b/content/influxdb3/clustered/reference/release-notes/clustered.md @@ -21,10 +21,118 @@ weight: 201 > Checkpoint releases are only made when absolutely necessary and are clearly > identified below with the icon. 
+{{< expand-wrapper >}} +{{% expand "Download release artifacts manually" %}} + +To download a bundle of release artifacts for a specific version of +InfluxDB Clustered: + +1. [install `crane`](https://github.com/google/go-containerregistry/tree/main/cmd/crane#installation) + and [`jq`](https://jqlang.org/download/). +2. Ensure your InfluxData pull secret is in the `/tmp/influxdbsecret` directory + on your local machine. This secret was provided to you by InfluxData to + authorize the use of InfluxDB Clustered images. +3. Run the following shell script: + +{{% code-placeholders "RELEASE_VERSION" %}} + +```bash +INFLUXDB_RELEASE="RELEASE_VERSION" +IMAGE="us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:$INFLUXDB_RELEASE" +DOCKER_CFG="/tmp/influxdbsecret" + +DIGEST=$(DOCKER_CONFIG="$DOCKER_CFG" crane manifest "$IMAGE" | jq -r '.layers[1].digest') + +DOCKER_CONFIG="$DOCKER_CFG" \ +crane blob "$IMAGE@$DIGEST" | tar -xvzf - -C ./ +``` +{{% /code-placeholders %}} + +_Replace {{% code-placeholder-key %}}`RELEASE_VERSION`{{% /code-placeholder-key %}} +with the InfluxDB Clustered release version you want to download artifacts for._ + +The script creates an `influxdb-3.0-clustered` directory in the current working +directory. This new directory contains artifacts associated with the specified release. 
+ +{{% /expand %}} +{{< /expand-wrapper >}} + {{< release-toc >}} --- +## 20250618-1758428 {date="2025-06-18"} + +### Quickstart + +```yaml +spec: + package: + image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20250618-1758428 +``` + +#### Release artifacts +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20250618-1758428/app-instance-schema.json) +- [example-customer.yml](/downloads/clustered-release-artifacts/20250618-1758428/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + +### Bug Fixes +- Update Grafana to `12.0.1-security-01` to address CVE-2025-3415, CVE-2025-4123, and CVE-2025-3580. + +### Changes + +#### Database Engine + +- Update DataFusion to `45` and Apache Arrow to `54`. + +--- + +## 20250613-1754010 {date="2025-06-11"} + +### Quickstart + +```yaml +spec: + package: + image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20250613-1754010 +``` + +#### Release artifacts + +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20250613-1754010/app-instance-schema.json) +- [example-customer.yml](/downloads/clustered-release-artifacts/20250613-1754010/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + +### Bug Fixes + +- Remove default CPU and memory limits for the Catalog service and Prometheus. +- Add time formatting checks to reject invalid custom partitioning requests. +- Ensure that an incorrect backup is not created when `pg_dump` errs during data snapshot backups. + +### Changes + +#### Deployment + +- Add support for Prometheus v3 when using the observability feature. +- Refresh dependencies to address security vulnerabilities and improve stability. 
+ +#### Configuration + +- Change the default of `INFLUXDB_IOX_CREATE_CATALOG_BACKUP_INTERVAL` from `1h` + to `4h`. +- Introduce the following environment variables to help in cases where the + object store is large enough that the garbage collector cannot keep up + when cleaning obsolete objects: + + - `INFLUXDB_IOX_GC_PRIMARY_OBJECTSTORE_PARTITIONS` + - `INFLUXDB_IOX_GC_SECONDARY_OBJECTSTORE_PARTITIONS` + + > [!Note] + > Increasing these settings will add load to the object store and should not + > be modified unnecessarily. + +--- + ## 20250508-1719206 {date="2025-05-08"} ### Quickstart @@ -35,6 +143,12 @@ spec: image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20250508-1719206 ``` +#### Release artifacts + +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20250508-1719206/app-instance-schema.json) +- [example-customer.yml](/downloads/clustered-release-artifacts/20250508-1719206/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + ### Changes #### Deployment @@ -59,6 +173,12 @@ spec: image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20250212-1570743 ``` +#### Release artifacts + +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20250212-1570743/app-instance-schema.json) +- [example-customer.yml](/downloads/clustered-release-artifacts/20250212-1570743/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + ### Bug Fixes This release fixes a bug in the 20241217-1494922 release where the default @@ -88,6 +208,12 @@ spec: image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20241217-1494922 ``` +#### Release artifacts + +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20241217-1494922/app-instance-schema.json) +- 
[example-customer.yml](/downloads/clustered-release-artifacts/20241217-1494922/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + ### Bug Fixes This fixes a bug present in release [20241024-1354148](#20241024-1354148), in @@ -112,7 +238,7 @@ DSN before connecting. --- -## 20241024-1354148 {date="2024-10-24" .checkpoint} +## 20241024-1354148 {date="2024-10-24" .checkpoint} ### Quickstart @@ -122,6 +248,12 @@ spec: image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20241022-1346953 ``` +#### Release artifacts + +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20241024-1354148/app-instance-schema.json) +- [example-customer.yml](/downloads/clustered-release-artifacts/20241024-1354148/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + ### Known Bugs ### `core` service DSN parsing errors @@ -318,6 +450,12 @@ spec: image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20240819-1176644 ``` +#### Release artifacts + +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20240819-1176644/app-instance-schema.json) +- [example-customer.yml](/downloads/clustered-release-artifacts/20240819-1176644/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + ### Highlights #### `admin` section is no longer required @@ -397,6 +535,12 @@ spec: image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20240717-1117630 ``` +#### Release artifacts + +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20240717-1117630/app-instance-schema.json) +- [example-customer.yml](/downloads/clustered-release-artifacts/20240717-1117630/example-customer.yml) +- [InfluxDB Clustered 
README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + ### Highlights #### Experimental license enforcement @@ -508,6 +652,12 @@ spec: image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20240605-1035562 ``` +#### Release artifacts + +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20240605-1035562/app-instance-schema.json) +- [example-customer.yml](/downloads/clustered-release-artifacts/20240605-1035562/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + ### Highlights Multiple improvements to compaction, pruning, and performance of concurrent queries. @@ -574,6 +724,12 @@ spec: image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20240430-976585 ``` +#### Release artifacts + +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20240430-976585/app-instance-schema.json) +- [example-customer.yml](/downloads/clustered-release-artifacts/20240430-976585/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + ### Highlights - Added configuration settings for an optional Prometheus `ServiceMonitor` @@ -605,6 +761,12 @@ spec: image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20240418-955990 ``` +#### Release artifacts + +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20240418-955990/app-instance-schema.json) +- [example-customer.yml](/downloads/clustered-release-artifacts/20240418-955990/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + ### Highlights #### Minimum `influxctl` version @@ -642,9 +804,15 @@ version of `influxctl` prior to v2.8.0. 
```yaml spec: package: - image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20240325-920726 + image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20240326-922145 ``` +#### Release artifacts + +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20240326-922145/app-instance-schema.json) +- [example-customer.yml](/downloads/clustered-release-artifacts/20240326-922145/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + ### Highlights #### Lower defaults for garbage collection @@ -696,6 +864,12 @@ spec: image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20240227-883344 ``` +#### Release artifacts + +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20240227-883344/app-instance-schema.json) +- [example-customer.yml](/downloads/clustered-release-artifacts/20240227-883344/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + ### Changes #### Deployment @@ -724,6 +898,12 @@ spec: image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20240214-863513 ``` +#### Release artifacts + +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20240214-863513/app-instance-schema.json) +- [example-customer.yml](/downloads/clustered-release-artifacts/20240214-863513/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + ### Highlights #### Grafana dashboards by default @@ -783,6 +963,12 @@ spec: image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20240111-824437 ``` +#### Release artifacts + +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20240111-824437/app-instance-schema.json) +- 
[example-customer.yml](/downloads/clustered-release-artifacts/20240111-824437/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + ### Highlights #### Ingress improvements @@ -845,6 +1031,12 @@ spec: image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20231213-791734 ``` +#### Release artifacts + +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20231213-791734/app-instance-schema.json) +- [example-customer.yml](/downloads/clustered-release-artifacts/20231213-791734/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + ### Highlights #### Labels/annotations @@ -885,6 +1077,12 @@ spec: image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20231117-750011 ``` +#### Release artifacts + +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20231117-750011/app-instance-schema.json) +- [example-customer.yml](/downloads/clustered-release-artifacts/20231117-750011/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + ### Highlights > ![Important] @@ -910,6 +1108,12 @@ spec: image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20231115-746129 ``` +#### Release artifacts + +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20231115-746129/app-instance-schema.json) +- [example-customer.yml](/downloads/clustered-release-artifacts/20231115-746129/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + ### Highlights #### Ingress templating @@ -1022,6 +1226,12 @@ spec: image: 
us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20231024-711448 ``` +#### Release artifacts + +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20231024-711448/app-instance-schema.json) +- [example-customer.yml](/downloads/clustered-release-artifacts/20231024-711448/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + ### Highlights #### Additional `AppInstance` parameters @@ -1083,6 +1293,12 @@ spec: image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20231004-666907 ``` +#### Release artifacts + +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20231004-666907/app-instance-schema.json) +- [example-customer.yml](/downloads/clustered-release-artifacts/20231004-666907/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + ### Highlights #### Object store custom certificates @@ -1150,6 +1366,12 @@ spec: image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20230922-650371 ``` +#### Release artifacts + +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20230922-650371/app-instance-schema.json) +- [example-customer.yml](/downloads/clustered-release-artifacts/20230922-650371/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + ### Highlights #### Configuration simplification diff --git a/content/influxdb3/core/admin/databases/_index.md b/content/influxdb3/core/admin/databases/_index.md index ddbfb0a16..c46711854 100644 --- a/content/influxdb3/core/admin/databases/_index.md +++ b/content/influxdb3/core/admin/databases/_index.md @@ -12,6 +12,7 @@ influxdb3/core/tags: [databases] related: - /influxdb3/core/write-data/best-practices/schema-design/ - 
/influxdb3/core/reference/cli/influxdb3/ + - /influxdb3/explorer/manage-databases/ alt_links: cloud: /influxdb/cloud/admin/buckets/ cloud_dedicated: /influxdb3/cloud-dedicated/admin/databases/ diff --git a/content/influxdb3/core/admin/databases/create.md b/content/influxdb3/core/admin/databases/create.md index a05fe548f..99ccd12c5 100644 --- a/content/influxdb3/core/admin/databases/create.md +++ b/content/influxdb3/core/admin/databases/create.md @@ -17,6 +17,7 @@ list_code_example: | {{% /code-placeholders %}} related: - /influxdb3/core/reference/cli/influxdb3/create/database/ + - /influxdb3/explorer/manage-databases/ source: /shared/influxdb3-admin/databases/create.md --- diff --git a/content/influxdb3/core/admin/databases/delete.md b/content/influxdb3/core/admin/databases/delete.md index 96750c65e..69bb8aaea 100644 --- a/content/influxdb3/core/admin/databases/delete.md +++ b/content/influxdb3/core/admin/databases/delete.md @@ -16,6 +16,7 @@ list_code_example: | {{% /code-placeholders %}} related: - /influxdb3/core/reference/cli/influxdb3/delete/database/ + - /influxdb3/explorer/manage-databases/ source: /shared/influxdb3-admin/databases/delete.md --- diff --git a/content/influxdb3/core/admin/databases/list.md b/content/influxdb3/core/admin/databases/list.md index e416c267b..4be161e41 100644 --- a/content/influxdb3/core/admin/databases/list.md +++ b/content/influxdb3/core/admin/databases/list.md @@ -13,6 +13,7 @@ list_code_example: | ``` related: - /influxdb3/core/reference/cli/influxdb3/show/databases/ + - /influxdb3/explorer/manage-databases/ source: /shared/influxdb3-admin/databases/list.md --- diff --git a/content/influxdb3/core/get-started/process.md b/content/influxdb3/core/get-started/process.md new file mode 100644 index 000000000..5fb233f0e --- /dev/null +++ b/content/influxdb3/core/get-started/process.md @@ -0,0 +1,27 @@ +--- +title: Process data in {{% product-name %}} +seotitle: Process data | Get started with {{% product-name %}} +description: > + 
Learn how to use the {{% product-name %}} Processing Engine to process data and + perform various tasks like downsampling, alerting, forecasting, data + normalization, and more. +menu: + influxdb3_core: + name: Process data + identifier: gs-process-data + parent: Get started +weight: 104 +aliases: + - /influxdb3/core/get-started/process-data/ + - /influxdb3/core/get-started/processing-engine/ +related: + - /influxdb3/core/plugins/ + - /influxdb3/core/reference/cli/influxdb3/create/plugin/ + - /influxdb3/core/reference/cli/influxdb3/create/trigger/ +source: /shared/influxdb3-get-started/processing-engine.md +--- + + diff --git a/content/influxdb3/core/get-started/query.md b/content/influxdb3/core/get-started/query.md new file mode 100644 index 000000000..87c56ad48 --- /dev/null +++ b/content/influxdb3/core/get-started/query.md @@ -0,0 +1,24 @@ +--- +title: Query data in {{% product-name %}} +seotitle: Query data | Get started with {{% product-name %}} +description: > + Learn how to get started querying data in {{% product-name %}} using native + SQL or InfluxQL with the `influxdb3` CLI and other tools. +menu: + influxdb3_core: + name: Query data + identifier: gs-query-data + parent: Get started +weight: 103 +related: + - /influxdb3/core/query-data/ + - /influxdb3/core/reference/sql/ + - https://datafusion.apache.org/user-guide/sql/index.html, Apache DataFusion SQL reference + - /influxdb3/core/reference/influxql/ +source: /shared/influxdb3-get-started/query.md +--- + + diff --git a/content/influxdb3/core/get-started/setup.md b/content/influxdb3/core/get-started/setup.md new file mode 100644 index 000000000..6b4b1a395 --- /dev/null +++ b/content/influxdb3/core/get-started/setup.md @@ -0,0 +1,21 @@ +--- +title: Set up {{% product-name %}} +seotitle: Set up InfluxDB | Get started with {{% product-name %}} +description: > + Install, configure, and set up authorization for {{% product-name %}}. 
+menu: + influxdb3_core: + name: Set up Core + parent: Get started +weight: 3 +related: + - /influxdb3/core/install/ + - /influxdb3/core/admin/tokens/ + - /influxdb3/core/reference/config-options/ +source: /shared/influxdb3-get-started/setup.md +--- + + diff --git a/content/influxdb3/core/get-started/write.md b/content/influxdb3/core/get-started/write.md new file mode 100644 index 000000000..5133b17fe --- /dev/null +++ b/content/influxdb3/core/get-started/write.md @@ -0,0 +1,22 @@ +--- +title: Write data to {{% product-name %}} +seotitle: Write data | Get started with {{% product-name %}} +description: > + Learn how to write time series data to {{% product-name %}} using the + `influxdb3` CLI and _line protocol_, an efficient, human-readable write syntax. +menu: + influxdb3_core: + name: Write data + identifier: gs-write-data + parent: Get started +weight: 102 +related: + - /influxdb3/core/write-data/ + - /influxdb3/core/reference/line-protocol/ +source: /shared/influxdb3-get-started/write.md +--- + + diff --git a/content/influxdb3/core/install.md b/content/influxdb3/core/install.md index a423ff48c..d5f092485 100644 --- a/content/influxdb3/core/install.md +++ b/content/influxdb3/core/install.md @@ -6,205 +6,9 @@ menu: name: Install InfluxDB 3 Core weight: 2 influxdb3/core/tags: [install] +source: /shared/influxdb3/install.md alt_links: v1: /influxdb/v1/introduction/install/ --- -- [System Requirements](#system-requirements) -- [Quick install](#quick-install) -- [Download {{% product-name %}} binaries](#download-influxdb-3-{{< product-key >}}-binaries) -- [Docker image](#docker-image) - -## System Requirements - -#### Operating system - -{{< product-name >}} runs on **Linux**, **macOS**, and **Windows**. - -#### Object storage - -A key feature of InfluxDB 3 is its use of object storage to store time series -data in Apache Parquet format. You can choose to store these files on your local -file system. 
Performance on your local filesystem will likely be better, but -object storage has the advantage of not running out of space and being accessible -by other systems over the network. {{< product-name >}} natively supports Amazon S3, -Azure Blob Storage, and Google Cloud Storage. -You can also use many local object storage implementations that provide an -S3-compatible API, such as [Minio](https://min.io/). - -## Quick install - -Use the InfluxDB 3 quick install script to install {{< product-name >}} on -**Linux** and **macOS**. - -> [!Important] -> If using Windows, [download the {{% product-name %}} Windows binary](?t=Windows#download-influxdb-3-{{< product-key >}}-binaries). - -1. Use the following command to download and install the appropriate - {{< product-name >}} package on your local machine: - - ```bash - curl -O https://www.influxdata.com/d/install_influxdb3.sh \ - && sh install_influxdb3.sh - ``` - -2. Verify that installation completed successfully: - - ```bash - influxdb3 --version - ``` - -> [!Note] -> -> #### influxdb3 not found -> -> If your system can't locate your `influxdb3` binary, `source` your -> current shell configuration file (`.bashrc`, `.zshrc`, etc.). 
-> -> {{< code-tabs-wrapper >}} -{{% code-tabs %}} -[.bashrc](#) -[.zshrc](#) -{{% /code-tabs %}} -{{% code-tab-content %}} -```bash -source ~/.bashrc -``` -{{% /code-tab-content %}} -{{% code-tab-content %}} - -```bash -source ~/.zshrc -``` -{{% /code-tab-content %}} -{{< /code-tabs-wrapper >}} - -## Download {{% product-name %}} binaries - -{{< tabs-wrapper >}} -{{% tabs %}} -[Linux](#) -[macOS](#) -[Windows](#) -{{% /tabs %}} -{{% tab-content %}} - - - -- [{{< product-name >}} • Linux (AMD64, x86_64) • GNU](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_amd64.tar.gz) - • - [sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_amd64.tar.gz.sha256) - -- [{{< product-name >}} • Linux (ARM64, AArch64) • GNU](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_arm64.tar.gz) - • - [sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_arm64.tar.gz.sha256) - - - -{{% /tab-content %}} -{{% tab-content %}} - - - -- [{{< product-name >}} • macOS (Silicon, ARM64)](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_darwin_arm64.tar.gz) - • - [sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_darwin_arm64.tar.gz.sha256) - -> [!Note] -> macOS Intel builds are coming soon. 
- - - -{{% /tab-content %}} -{{% tab-content %}} - - - -- [{{< product-name >}} • Windows (AMD64, x86_64)](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}-windows_amd64.zip) - • - [sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}-windows_amd64.zip.sha256) - - - -{{% /tab-content %}} -{{< /tabs-wrapper >}} - -## Docker image - -Use the `influxdb3-{{< product-key >}}` Docker image to deploy {{< product-name >}} in a -Docker container. -The image is available for x86_64 (AMD64) and ARM64 architectures. - -### Use Docker CLI - - -```bash -docker pull influxdb:3-{{< product-key >}} -``` - -Docker automatically pulls the appropriate image for your system architecture. - -To specify the system architecture, use platform-specific tags--for example: - -```bash -# For x86_64/AMD64 -docker pull \ ---platform linux/amd64 \ -influxdb:3-{{< product-key >}} -``` - -```bash -# For ARM64 -docker pull \ ---platform linux/arm64 \ -influxdb:3-{{< product-key >}} -``` - -> [!Note] -> The {{% product-name %}} Docker image exposes port `8181`, the `influxdb3` server default for HTTP connections. -> To map the exposed port to a different port when running a container, see the Docker guide for [Publishing and exposing ports](https://docs.docker.com/get-started/docker-concepts/running-containers/publishing-ports/). - -### Use Docker Compose - -1. Open `compose.yaml` for editing and add a `services` entry for {{% product-name %}}--for example: - - ```yaml - # compose.yaml - services: - influxdb3-{{< product-key >}}: - container_name: influxdb3-{{< product-key >}} - image: influxdb:3-{{< product-key >}} - ports: - - 8181:8181 - command: - - influxdb3 - - serve - - --node-id=node0 - - --object-store=file - - --data-dir=/var/lib/influxdb3 - ``` - -2. Use the Docker Compose CLI to start the server. 
- - Optional: to make sure you have the latest version of the image before you - start the server, run `docker compose pull`. - - - ```bash - docker compose pull && docker compose run influxdb3-{{< product-key >}} - ``` - -> [!Note] -> #### Stopping an InfluxDB 3 container -> -> To stop a running InfluxDB 3 container, find and terminate the process--for example: -> -> -> ```bash -> ps -ef | grep influxdb3 -> kill -9 -> ``` -> -> Currently, a bug prevents using {{< keybind all="Ctrl+c" >}} in the terminal to stop an InfluxDB 3 container. - -{{< page-nav next="/influxdb3/core/get-started/" nextText="Get started with InfluxDB 3 Core" >}} + diff --git a/content/influxdb3/core/reference/cli/influxdb3/_index.md b/content/influxdb3/core/reference/cli/influxdb3/_index.md index 221e4e654..831597dc6 100644 --- a/content/influxdb3/core/reference/cli/influxdb3/_index.md +++ b/content/influxdb3/core/reference/cli/influxdb3/_index.md @@ -32,6 +32,7 @@ influxdb3 [GLOBAL-OPTIONS] [COMMAND] | [serve](/influxdb3/core/reference/cli/influxdb3/serve/) | Run the {{% product-name %}} server | | [show](/influxdb3/core/reference/cli/influxdb3/show/) | List resources | | [test](/influxdb3/core/reference/cli/influxdb3/test/) | Test plugins | +| [update](/influxdb3/core/reference/cli/influxdb3/update/) | Update resources | | [write](/influxdb3/core/reference/cli/influxdb3/write/) | Write to {{% product-name %}} | ## Global options diff --git a/content/influxdb3/core/reference/cli/influxdb3/create/file_index.md b/content/influxdb3/core/reference/cli/influxdb3/create/file_index.md deleted file mode 100644 index a24aaebbc..000000000 --- a/content/influxdb3/core/reference/cli/influxdb3/create/file_index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: influxdb3 create file_index -description: > - The `influxdb3 create file_index` command creates a new file index for a - database or table. 
-menu: - influxdb3_core: - parent: influxdb3 create - name: influxdb3 create file_index -weight: 400 -source: /shared/influxdb3-cli/create/file_index.md ---- - - diff --git a/content/influxdb3/core/reference/cli/influxdb3/delete/file_index.md b/content/influxdb3/core/reference/cli/influxdb3/delete/file_index.md deleted file mode 100644 index c60fb90f1..000000000 --- a/content/influxdb3/core/reference/cli/influxdb3/delete/file_index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: influxdb3 delete file_index -description: > - The `influxdb3 delete file_index` command deletes a file index for a - database or table. -menu: - influxdb3_core: - parent: influxdb3 delete - name: influxdb3 delete file_index -weight: 400 -source: /shared/influxdb3-cli/delete/file_index.md ---- - - diff --git a/content/influxdb3/core/reference/cli/influxdb3/serve.md b/content/influxdb3/core/reference/cli/influxdb3/serve.md index a769cb652..c6001d47b 100644 --- a/content/influxdb3/core/reference/cli/influxdb3/serve.md +++ b/content/influxdb3/core/reference/cli/influxdb3/serve.md @@ -24,8 +24,8 @@ influxdb3 serve [OPTIONS] --node-id ## Required parameters - **node-id**: A unique identifier for your server instance. Must be unique for any hosts sharing the same object store. -- **object-store**: Determines where time series data is stored. _Default is `memory`_. -- **data-dir**: Path for local file storage (required when using `--object-store file`). +- **object-store**: Determines where time series data is stored. +- Other object store parameters depending on the selected `object-store` type. > [!NOTE] > `--node-id` supports alphanumeric strings with optional hyphens. 
diff --git a/content/influxdb3/core/reference/cli/influxdb3/test/schedule_plugin.md b/content/influxdb3/core/reference/cli/influxdb3/test/schedule_plugin.md new file mode 100644 index 000000000..d866d2854 --- /dev/null +++ b/content/influxdb3/core/reference/cli/influxdb3/test/schedule_plugin.md @@ -0,0 +1,15 @@ +--- +title: influxdb3 test schedule_plugin +description: > + The `influxdb3 test schedule_plugin` command tests a schedule plugin file without needing to create a trigger. +menu: + influxdb3_core: + parent: influxdb3 test + name: influxdb3 test schedule_plugin +weight: 401 +source: /shared/influxdb3-cli/test/schedule_plugin.md +--- + + diff --git a/content/influxdb3/core/reference/cli/influxdb3/update/_index.md b/content/influxdb3/core/reference/cli/influxdb3/update/_index.md new file mode 100644 index 000000000..0cc4e0846 --- /dev/null +++ b/content/influxdb3/core/reference/cli/influxdb3/update/_index.md @@ -0,0 +1,15 @@ +--- +title: influxdb3 update +description: > + The `influxdb3 update` command updates resources such as databases. +menu: + influxdb3_core: + parent: influxdb3 + name: influxdb3 update +weight: 300 +source: /shared/influxdb3-cli/update/_index.md +--- + + \ No newline at end of file diff --git a/content/influxdb3/core/reference/cli/influxdb3/update/database.md b/content/influxdb3/core/reference/cli/influxdb3/update/database.md new file mode 100644 index 000000000..754f70d38 --- /dev/null +++ b/content/influxdb3/core/reference/cli/influxdb3/update/database.md @@ -0,0 +1,15 @@ +--- +title: influxdb3 update database +description: > + The `influxdb3 update database` command updates an existing database. 
+menu: + influxdb3_core: + parent: influxdb3 update + name: influxdb3 update database +weight: 400 +source: /shared/influxdb3-cli/update/database/_index.md +--- + + \ No newline at end of file diff --git a/content/influxdb3/core/reference/config-options.md b/content/influxdb3/core/reference/config-options.md index 7dfee8828..7e3f92bfc 100644 --- a/content/influxdb3/core/reference/config-options.md +++ b/content/influxdb3/core/reference/config-options.md @@ -144,7 +144,7 @@ influxdb3 serve Specifies which object storage to use to store Parquet files. This option supports the following values: -- `memory` _(default)_ +- `memory` - `memory-throttled` - `file` - `s3` @@ -171,7 +171,7 @@ Required when using the `file` [object store](#object-store). #### node-id Specifies the node identifier used as a prefix in all object store file paths. -This should be unique for any hosts sharing the same object store +Use a unique node identifier for each host sharing the same object store configuration--for example, the same bucket. | influxdb3 serve option | Environment variable | @@ -186,7 +186,7 @@ Limits the number of Parquet files a query can access. **Default:** `432` -With the default `432` setting and the default [`gen1-duration`](#`gen1-duration`) +With the default `432` setting and the default [`gen1-duration`](#gen1-duration) setting of 10 minutes, queries can access up to a 72 hours of data, but potentially less depending on whether all data for a given 10 minute block of time was ingested during the same period. 
diff --git a/content/influxdb3/core/write-data/api-client-libraries.md b/content/influxdb3/core/write-data/client-libraries.md similarity index 52% rename from content/influxdb3/core/write-data/api-client-libraries.md rename to content/influxdb3/core/write-data/client-libraries.md index fc44f2e06..2dcd72f32 100644 --- a/content/influxdb3/core/write-data/api-client-libraries.md +++ b/content/influxdb3/core/write-data/client-libraries.md @@ -1,21 +1,21 @@ --- -title: Use the HTTP API and client libraries to write data +title: Use InfluxDB client libraries to write data description: > - Use the `/api/v3/write_lp` HTTP API endpoint and InfluxDB API clients to write points as line protocol data to {{% product-name %}}. + Use InfluxDB API clients to write points as line protocol data to {{% product-name %}}. menu: influxdb3_core: - name: Use the API and client libraries + name: Use client libraries parent: Write data - identifier: write-api-client-libs + identifier: write-client-libs weight: 100 aliases: - - /influxdb3/core/write-data/client-libraries/ + - /influxdb3/core/write-data/api-client-libraries/ related: - /influxdb3/core/reference/syntax/line-protocol/ - /influxdb3/core/get-started/write/ - /influxdb3/core/reference/client-libraries/v3/ - /influxdb3/core/api/v3/#operation/PostWriteLP, /api/v3/write_lp endpoint -source: /shared/influxdb3-write-guides/api-client-libraries.md +source: /shared/influxdb3-write-guides/client-libraries.md --- diff --git a/content/influxdb3/core/write-data/compatibility-apis.md b/content/influxdb3/core/write-data/http-api/compatibility-apis.md similarity index 75% rename from content/influxdb3/core/write-data/compatibility-apis.md rename to content/influxdb3/core/write-data/http-api/compatibility-apis.md index bf3631118..901d0f900 100644 --- a/content/influxdb3/core/write-data/compatibility-apis.md +++ b/content/influxdb3/core/write-data/http-api/compatibility-apis.md @@ -6,21 +6,21 @@ description: > menu: influxdb3_core: name: Use v1 
and v2 compatibility APIs - parent: Write data - identifier: write-compatibility-client-libs -weight: 101 + parent: write-http-api +weight: 202 aliases: - /influxdb3/core/write-data/client-libraries/ + - /influxdb3/core/write-data/compatibility-apis/ related: - /influxdb3/core/reference/syntax/line-protocol/ - /influxdb3/core/get-started/write/ - /influxdb3/core/reference/client-libraries/v2/ - /influxdb3/core/api/v3/#operation/PostV2Write, /api/v2/write (v2-compatible) endpoint - /influxdb3/core/api/v3/#operation/PostV1Write, /write (v1-compatible) endpoint -source: /shared/influxdb3-write-guides/compatibility-apis.md +source: /shared/influxdb3-write-guides/http-api/compatibility-apis.md --- \ No newline at end of file diff --git a/content/influxdb3/core/write-data/http-api/v3-write-lp.md b/content/influxdb3/core/write-data/http-api/v3-write-lp.md new file mode 100644 index 000000000..fd7fa11c1 --- /dev/null +++ b/content/influxdb3/core/write-data/http-api/v3-write-lp.md @@ -0,0 +1,20 @@ +--- +title: Use the v3 write API to write data +description: > + Use the `/api/v3/write_lp` HTTP API endpoint to write data to {{% product-name %}}. 
+menu: + influxdb3_core: + name: Use the v3 write API + parent: write-http-api +weight: 201 +related: + - /influxdb3/core/reference/syntax/line-protocol/ + - /influxdb3/core/get-started/write/ + - /influxdb3/core/api/v3/#operation/PostWriteLP, /api/v3/write_lp endpoint +source: /shared/influxdb3-write-guides/http-api/v3-write-lp.md +--- + + diff --git a/content/influxdb3/enterprise/admin/databases/_index.md b/content/influxdb3/enterprise/admin/databases/_index.md index f5165023b..c93183766 100644 --- a/content/influxdb3/enterprise/admin/databases/_index.md +++ b/content/influxdb3/enterprise/admin/databases/_index.md @@ -12,6 +12,7 @@ influxdb3/enterprise/tags: [databases] related: - /influxdb3/enterprise/write-data/best-practices/schema-design/ - /influxdb3/enterprise/reference/cli/influxdb3/ + - /influxdb3/explorer/manage-databases/ alt_links: cloud: /influxdb/cloud/admin/buckets/ cloud_dedicated: /influxdb3/cloud-dedicated/admin/databases/ diff --git a/content/influxdb3/enterprise/admin/databases/create.md b/content/influxdb3/enterprise/admin/databases/create.md index bccfacff2..14f56c821 100644 --- a/content/influxdb3/enterprise/admin/databases/create.md +++ b/content/influxdb3/enterprise/admin/databases/create.md @@ -17,6 +17,7 @@ list_code_example: | {{% /code-placeholders %}} related: - /influxdb3/enterprise/reference/cli/influxdb3/create/database/ + - /influxdb3/explorer/manage-databases/ source: /shared/influxdb3-admin/databases/create.md --- diff --git a/content/influxdb3/enterprise/admin/databases/delete.md b/content/influxdb3/enterprise/admin/databases/delete.md index 395ab611a..9ae9b963e 100644 --- a/content/influxdb3/enterprise/admin/databases/delete.md +++ b/content/influxdb3/enterprise/admin/databases/delete.md @@ -16,6 +16,7 @@ list_code_example: | {{% /code-placeholders %}} related: - /influxdb3/enterprise/reference/cli/influxdb3/delete/database/ + - /influxdb3/explorer/manage-databases/ source: /shared/influxdb3-admin/databases/delete.md --- diff 
--git a/content/influxdb3/enterprise/admin/databases/list.md b/content/influxdb3/enterprise/admin/databases/list.md index c579a9816..e9d5cd374 100644 --- a/content/influxdb3/enterprise/admin/databases/list.md +++ b/content/influxdb3/enterprise/admin/databases/list.md @@ -13,6 +13,7 @@ list_code_example: | ``` related: - /influxdb3/enterprise/reference/cli/influxdb3/show/databases/ + - /influxdb3/explorer/manage-databases/ source: /shared/influxdb3-admin/databases/list.md --- diff --git a/content/influxdb3/enterprise/admin/file-index/_index.md b/content/influxdb3/enterprise/admin/file-index/_index.md new file mode 100644 index 000000000..65c94c642 --- /dev/null +++ b/content/influxdb3/enterprise/admin/file-index/_index.md @@ -0,0 +1,51 @@ +--- +title: Manage file indexes +seotitle: Manage file indexes in {{< product-name >}} +description: > + Customize the indexing strategy of a database or table in {{% product-name %}} + to optimize the performance of single-series queries. +menu: + influxdb3_enterprise: + parent: Administer InfluxDB +weight: 106 +influxdb3/enterprise/tags: [indexing] +--- + +{{% product-name %}} lets you customize how your data is indexed to help +optimize query performance for your specific workload, especially workloads that +include single-series queries. Indexes help the InfluxDB query engine quickly +identify the physical location of files that contain the queried data. + +By default, InfluxDB indexes on the primary key—`time` and tag columns. However, +if your schema includes tags that you don't specifically use when querying, you +can define a custom indexing strategy to only index on `time` and columns +important to your query workload. 
+ +For example, if your schema includes the following columns: + +- country +- state_province +- county +- city +- postal_code + +And in your query workload, you only query based on country, state or province, +and city, you can create a custom file indexing strategy that only indexes on +`time` and these specific columns. This makes your index more efficient and +improves the performance of your single-series queries. + +> [!Note] +> File indexes can use any string column, including both tags and fields. + +- [Indexing life cycle](#indexing-life-cycle) +- [Create a custom file index](#create-a-custom-file-index) +- [Delete a custom file index](#delete-a-custom-file-index) + +## Indexing life cycle + +{{% product-name %}} builds indexes as it compacts data. Compaction is the +process that organizes and optimizes Parquet files in storage and occurs in +multiple phases or generations. Generation 1 (gen1) data is un-compacted and +is not indexed. Generation 2 (gen2) data and beyond is all indexed. + +{{< children hlevel="h2" >}} diff --git a/content/influxdb3/enterprise/admin/file-index/create.md b/content/influxdb3/enterprise/admin/file-index/create.md new file mode 100644 index 000000000..ea6891f2b --- /dev/null +++ b/content/influxdb3/enterprise/admin/file-index/create.md @@ -0,0 +1,62 @@ +--- +title: Create a custom file index +seotitle: Create a custom file index in {{< product-name >}} +description: > + Use the [`influxdb3 create file_index` command](/influxdb3/enterprise/reference/cli/influxdb3/create/file_index/) + to create a custom file indexing strategy for a database or a table. 
+menu: + influxdb3_enterprise: + parent: Manage file indexes +weight: 106 +influxdb3/enterprise/tags: [indexing] +related: + - /influxdb3/enterprise/reference/cli/influxdb3/create/file_index/ +list_code_example: | + + ```bash + influxdb3 create file_index \ + --database example-db \ + --token 00xoXX0xXXx0000XxxxXx0Xx0xx0 \ + --table wind_data \ + country,city + ``` +--- + +Use the [`influxdb3 create file_index` command](/influxdb3/enterprise/reference/cli/influxdb3/create/file_index/) +to create a custom file indexing strategy for a database or table. + +Provide the following: + +- **Token** (`--token`): _({{< req >}})_ Your {{% token-link "admin" %}}. + You can also use the `INFLUXDB3_AUTH_TOKEN` environment variable to specify + the token. +- **Database** (`-d`, `--database`): _({{< req >}})_ The name of the database to + apply the index to. You can also use the `INFLUXDB3_DATABASE_NAME` + environment variable to specify the database. +- **Table** (`-t`, `--table`): The name of the table to apply the index to. + If no table is specified, the indexing strategy applies to all tables in the + specified database. +- **Columns**: _({{< req >}})_ A comma-separated list of string columns to + index on. These are typically tag columns but can also be string fields. 
+ +{{% code-placeholders "AUTH_TOKEN|DATABASE|TABLE|COLUMNS" %}} + +```bash +influxdb3 create file_index \ + --token AUTH_TOKEN \ + --database DATABASE_NAME \ + --table TABLE_NAME \ + COLUMNS +``` +{{% /code-placeholders %}} + +Replace the following placeholders with your values: + +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: + your {{% token-link "admin" %}} +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: + the name of the database to create the file index in +- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: + the name of the table to create the file index in +- {{% code-placeholder-key %}}`COLUMNS`{{% /code-placeholder-key %}}: + a comma-separated list of columns to index on--for example: `host,application` diff --git a/content/influxdb3/enterprise/admin/file-index/delete.md b/content/influxdb3/enterprise/admin/file-index/delete.md new file mode 100644 index 000000000..98bfe4bfa --- /dev/null +++ b/content/influxdb3/enterprise/admin/file-index/delete.md @@ -0,0 +1,58 @@ +--- +title: Delete a custom file index +seotitle: Delete a custom file index in {{< product-name >}} +description: > + Use the [`influxdb3 delete file_index` command](/influxdb3/enterprise/reference/cli/influxdb3/delete/file_index/) + to delete a custom file indexing strategy from a database or a table and revert + to the default indexing strategy. 
+menu: + influxdb3_enterprise: + parent: Manage file indexes +weight: 106 +influxdb3/enterprise/tags: [indexing] +related: + - /influxdb3/enterprise/reference/cli/influxdb3/delete/file_index/ +list_code_example: | + + ```bash + influxdb3 delete file_index \ + --database example-db \ + --token 00xoXX0xXXx0000XxxxXx0Xx0xx0 \ + --table wind_data + ``` +--- + +Use the [`influxdb3 delete file_index` command](/influxdb3/enterprise/reference/cli/influxdb3/delete/file_index/) +to delete a custom file indexing strategy from a database or a table and revert +to the default indexing strategy. + +Provide the following: + +- **Token** (`--token`): _({{< req >}})_ Your {{% token-link "admin" %}}. + You can also use the `INFLUXDB3_AUTH_TOKEN` environment variable to specify + the token. +- **Database** (`-d`, `--database`): _({{< req >}})_ The name of the database to + remove the custom index from. You can also use the `INFLUXDB3_DATABASE_NAME` + environment variable to specify the database. +- **Table** (`-t`, `--table`): The name of the table to remove the custom index from. + If no table is specified, the custom indexing strategy is removed from all + tables in the specified database. 
+ +{{% code-placeholders "AUTH_TOKEN|DATABASE|TABLE|COLUMNS" %}} + +```bash +influxdb3 delete file_index \ + --token AUTH_TOKEN \ + --database DATABASE_NAME \ + --table TABLE_NAME +``` +{{% /code-placeholders %}} + +Replace the following placeholders with your values: + +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: + your {{% token-link "admin" %}} +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: + the name of the database to remove the custom file index from +- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: + the name of the table to remove the custom file index from diff --git a/content/influxdb3/enterprise/admin/license.md b/content/influxdb3/enterprise/admin/license.md index cb1f1bf37..62727201e 100644 --- a/content/influxdb3/enterprise/admin/license.md +++ b/content/influxdb3/enterprise/admin/license.md @@ -73,22 +73,49 @@ physical and virtual CPU cores. ## Activate a license -Each {{< product-name >}} license must be activated, but the process of activating -the license depends on the license type: +Each {{< product-name >}} license must be activated when you start the server, +but the process of activating the license depends on the license type: - [Activate a trial or at-home license](#activate-a-trial-or-at-home-license) - [Activate a commercial license](#activate-a-commercial-license) ### Activate a trial or at-home license -When starting the {{< product-name >}} server, it asks what type of -license you would like to use. -Select `trial` or `home` and provide your -email address. -The server auto-generates and stores your license. +1. Use the [`influxdb3 serve` command](/influxdb3/enterprise/reference/cli/influxdb3/serve/) to start the server. + If the server doesn't find a license file or email address, the server prompts you + to enter your email address. 
+ If you're [activating a trial or home license with Docker](#activate-a-trial-or-home-license-with-docker), include options to [skip the email prompt](#skip-the-email-prompt). +2. The server prompts you to select a license type. Select `trial` or `home`. +3. In the verification email from {{% product-name %}}, + click the button to verify your email address. +After you verify your email address, {{% product-name %}} auto-generates a +license (associated with your cluster and email address) and stores the license +file in your object store. The license file is a JWT file that contains the license information. +> [!Important] +> #### Activate a trial or home license with Docker +> +> If you're starting a new {{% product-name %}} server in a Docker container, you must +> use one of the methods to [skip the email prompt](#skip-the-email-prompt). +> This ensures that the container can generate the license file after you +> verify your email address. +> See the [Docker Compose example](?t=Docker+compose#start-with-license-email-and-compose). + +#### Skip the email prompt + +To skip the email prompt when starting the server, you can provide your email +address using one of the following methods: + +- Use the [`--license-email`](/influxdb3/enterprise/reference/config-options/#license-email) option with the `influxdb3 serve` command +- Set the `INFLUXDB3_ENTERPRISE_LICENSE_EMAIL` environment variable + +If the server finds a valid license file in your object store, it ignores the +license email option. + +See examples to [start the server with your license email](#start-the-server-with-your-license-email). + #### Use an existing trial or at-home license When you activate a trial or at-home license, InfluxDB registers your email @@ -153,12 +180,13 @@ existing license if it's still valid. environment variable 7. 
If no license is found, the server won't start -#### Example: Start the {{% product-name %}} server with your license email: +### Start the server with your license email {{< code-tabs-wrapper >}} {{% code-tabs %}} [influxdb3 options](#) [Environment variables](#) +[Docker compose](#start-with-license-email-and-compose) {{% /code-tabs %}} {{% code-tab-content %}} @@ -185,9 +213,47 @@ influxdb3 serve \ ``` {{% /code-tab-content %}} +{{% code-tab-content %}} + +{{% code-placeholders "${EMAIL_ADDRESS}" %}} +```yaml +# compose.yaml +name: data-crunching-stack +services: + influxdb3-enterprise: + container_name: influxdb3-enterprise + image: influxdb:3-enterprise + ports: + - 8181:8181 + # In the following command, replace INFLUXDB3_LICENSE_EMAIL with your email address. + # Alternatively, pass the `INFLUXDB3_LICENSE_EMAIL` environment variable or + # store the email address in a compose CLI .env file. + command: + - influxdb3 + - serve + - --node-id=node0 + - --cluster-id=cluster0 + - --object-store=file + - --data-dir=/var/lib/influxdb3 + - --plugin-dir=/var/lib/influxdb3/plugins + environment: + - INFLUXDB3_LICENSE_EMAIL=${EMAIL_ADDRESS} + volumes: + - type: bind + source: ~/.influxdb3/data + target: /var/lib/influxdb3 + - type: bind + source: ~/.influxdb3/plugins + target: /var/lib/influxdb3/plugins +``` +{{% /code-placeholders %}} +Replace {{% code-placeholder-key %}}`${EMAIL_ADDRESS}`{{% /code-placeholder-key %}} with your email address +or a variable from your Compose `.env` file. 
+ +{{% /code-tab-content %}} {{< /code-tabs-wrapper >}} -#### Example: Start the {{% product-name %}} server with your license file: +### Start the server with your license file {{< code-tabs-wrapper >}} {{% code-tabs %}} diff --git a/content/influxdb3/enterprise/admin/tokens/_index.md b/content/influxdb3/enterprise/admin/tokens/_index.md index 6c9a079ea..6eb899717 100644 --- a/content/influxdb3/enterprise/admin/tokens/_index.md +++ b/content/influxdb3/enterprise/admin/tokens/_index.md @@ -6,6 +6,8 @@ menu: influxdb3_enterprise: parent: Administer InfluxDB weight: 202 +related: + - /influxdb3/explorer/manage-tokens/ source: /shared/influxdb3-admin/tokens/_index.md --- diff --git a/content/influxdb3/enterprise/admin/tokens/admin/_index.md b/content/influxdb3/enterprise/admin/tokens/admin/_index.md index d870c5e91..b0745f541 100644 --- a/content/influxdb3/enterprise/admin/tokens/admin/_index.md +++ b/content/influxdb3/enterprise/admin/tokens/admin/_index.md @@ -10,6 +10,9 @@ menu: parent: Manage tokens name: Admin tokens weight: 101 +cascade: + related: + - /influxdb3/explorer/manage-tokens/ influxdb3/enterprise/tags: [tokens] source: /shared/influxdb3-admin/tokens/admin/_index.md --- diff --git a/content/influxdb3/enterprise/get-started/multi-server.md b/content/influxdb3/enterprise/get-started/multi-server.md new file mode 100644 index 000000000..56da9360d --- /dev/null +++ b/content/influxdb3/enterprise/get-started/multi-server.md @@ -0,0 +1,528 @@ +--- +title: Create a multi-node cluster +seotitle: Create a multi-node InfluxDB 3 Enterprise cluster +description: > + Create a multi-node InfluxDB 3 Enterprise cluster for high availability, + performance, read replicas, and more to meet the specific needs of your use case. 
+menu: + influxdb3_enterprise: + name: Create a multi-node cluster + parent: Get started + identifier: gs-multi-node-cluster +weight: 102 +influxdb3/enterprise/tags: [cluster, multi-node, multi-server] +--- + +Create a multi-node {{% product-name %}} cluster for high availability, performance, and workload isolation. +Configure nodes with specific _modes_ (ingest, query, process, compact) to optimize for your use case. + +## Prerequisites + +- Shared object store +- Network connectivity between nodes + +## Basic multi-node setup + + +```bash +## NODE 1 compacts stored data + +# Example variables +# node-id: 'host01' +# cluster-id: 'cluster01' +# bucket: 'influxdb-3-enterprise-storage' + +influxdb3 serve \ + --node-id host01 \ + --cluster-id cluster01 \ + --mode ingest,query,compact \ + --object-store s3 \ + --bucket influxdb-3-enterprise-storage \ + --http-bind {{< influxdb/host >}} \ + --aws-access-key-id \ + --aws-secret-access-key +``` + + +```bash +## NODE 2 handles writes and queries + +# Example variables +# node-id: 'host02' +# cluster-id: 'cluster01' +# bucket: 'influxdb-3-enterprise-storage' + +influxdb3 serve \ + --node-id host02 \ + --cluster-id cluster01 \ + --mode ingest,query \ + --object-store s3 \ + --bucket influxdb-3-enterprise-storage \ + --http-bind localhost:8282 \ + --aws-access-key-id AWS_ACCESS_KEY_ID \ + --aws-secret-access-key AWS_SECRET_ACCESS_KEY +``` + +Learn how to set up a multi-node cluster for different use cases, including high availability, read replicas, processing data, and workload isolation. 
+ +- [Create an object store](#create-an-object-store) +- [Connect to your object store](#connect-to-your-object-store) +- [Server modes](#server-modes) +- [Cluster configuration examples](#cluster-configuration-examples) +- [Writing and querying in multi-node clusters](#writing-and-querying-in-multi-node-clusters) + +## Create an object store + +With the {{% product-name %}} diskless architecture, all data is stored in a common object store. +In a multi-node cluster, you connect all nodes to the same object store. + +Enterprise supports the following object stores: + +- AWS S3 (or S3-compatible) +- Azure Blob Storage +- Google Cloud Storage + +> [!Note] +> Refer to your object storage provider's documentation for +> setting up an object store. + +## Connect to your object store + +When starting your {{% product-name %}} node, include provider-specific options for connecting to your object store--for example: + +{{< tabs-wrapper >}} +{{% tabs %}} +[S3 or S3-compatible](#) +[Azure Blob Storage](#) +[Google Cloud Storage](#) +{{% /tabs %}} +{{% tab-content %}} + + +To use an AWS S3 or S3-compatible object store, provide the following options +with your `influxdb3 serve` command: + +- `--object-store`: `s3` +- `--bucket`: Your AWS S3 bucket name +- `--aws-access-key-id`: Your AWS access key ID + _(can also be defined using the `AWS_ACCESS_KEY_ID` environment variable)_ +- `--aws-secret-access-key`: Your AWS secret access key + _(can also be defined using the `AWS_SECRET_ACCESS_KEY` environment variable)_ + +{{% code-placeholders "AWS_(BUCKET_NAME|ACCESS_KEY_ID|SECRET_ACCESS_KEY)" %}} + +```bash +influxdb3 serve \ + # ... 
+  --object-store s3 \
+  --bucket AWS_BUCKET_NAME \
+  --aws-access-key-id AWS_ACCESS_KEY_ID \
+  --aws-secret-access-key AWS_SECRET_ACCESS_KEY
+```
+{{% /code-placeholders %}}
+
+_For information about other S3-specific settings, see
+[Configuration options - AWS](/influxdb3/enterprise/reference/config-options/#aws)._
+
+
+{{% /tab-content %}}
+{{% tab-content %}}
+
+
+To use Azure Blob Storage as your object store, provide the following options
+with your `influxdb3 serve` command:
+
+- `--object-store`: `azure`
+- `--bucket`: Your Azure Blob Storage container name
+- `--azure-storage-account`: Your Azure Blob Storage storage account name
+  _(can also be defined using the `AZURE_STORAGE_ACCOUNT` environment variable)_
+- `--azure-storage-access-key`: Your Azure Blob Storage access key
+  _(can also be defined using the `AZURE_STORAGE_ACCESS_KEY` environment variable)_
+
+{{% code-placeholders "AZURE_(CONTAINER_NAME|STORAGE_ACCOUNT|STORAGE_ACCESS_KEY)" %}}
+
+```bash
+influxdb3 serve \
+  # ...
+  --object-store azure \
+  --bucket AZURE_CONTAINER_NAME \
+  --azure-storage-account AZURE_STORAGE_ACCOUNT \
+  --azure-storage-access-key AZURE_STORAGE_ACCESS_KEY
+```
+{{% /code-placeholders %}}
+
+
+{{% /tab-content %}}
+{{% tab-content %}}
+
+
+To use Google Cloud Storage as your object store, provide the following options
+with your `influxdb3 serve` command:
+
+- `--object-store`: `google`
+- `--bucket`: Your Google Cloud Storage bucket name
+- `--google-service-account`: The path to your Google credentials JSON file
+  _(can also be defined using the `GOOGLE_SERVICE_ACCOUNT` environment variable)_
+
+{{% code-placeholders "GOOGLE_(BUCKET_NAME|SERVICE_ACCOUNT)" %}}
+
+```bash
+influxdb3 serve \
+  # ...
+ --object-store google \ + --bucket GOOGLE_BUCKET_NAME \ + --google-service-account GOOGLE_SERVICE_ACCOUNT +``` +{{% /code-placeholders %}} + + +{{% /tab-content %}} +{{< /tabs-wrapper >}} + +## Server modes + +{{% product-name %}} _modes_ determine what subprocesses the Enterprise node runs. +These subprocesses fulfill required tasks including data ingestion, query +processing, compaction, and running the processing engine. + +The `influxdb3 serve --mode` option defines what subprocesses a node runs. +Each node can run in one _or more_ of the following modes: + +- **all** _(default)_: Runs all necessary subprocesses. +- **ingest**: Runs the data ingestion subprocess to handle writes. +- **query**: Runs the query processing subprocess to handle queries. +- **process**: Runs the processing engine subprocess to trigger and execute plugins. +- **compact**: Runs the compactor subprocess to optimize data in object storage. + + > [!Important] + > Only _one_ node in your cluster can run in `compact` mode. + +### Server mode examples + +#### Configure a node to only handle write requests + +```bash +influxdb3 serve \ + # ... + --mode ingest +``` + +#### Configure a node to only run the Compactor + +```bash +influxdb3 serve \ + # ... + --mode compact +``` + +#### Configure a node to handle queries and run the processing engine + +```bash +influxdb3 serve \ + # ... + --mode query,process +``` + +## Cluster configuration examples + +- [High availability cluster](#high-availability-cluster) +- [High availability with a dedicated Compactor](#high-availability-with-a-dedicated-compactor) +- [High availability with read replicas and a dedicated Compactor](#high-availability-with-read-replicas-and-a-dedicated-compactor) + +### High availability cluster + +A minimum of two nodes are required for basic high availability (HA), with both +nodes reading and writing data. 
+ +{{< img-hd src="/img/influxdb/influxdb-3-enterprise-high-availability.png" alt="Basic high availability setup" />}} + +In a basic HA setup: + +- Two nodes both write data to the same object store and both handle queries +- Node 1 and Node 2 are _read replicas_ that read from each other’s object store directories +- One of the nodes is designated as the Compactor node + +> [!Note] +> Only one node can be designated as the Compactor. +> Compacted data is meant for a single writer, and many readers. + +The following examples show how to configure and start two nodes for a basic HA +setup. + +- _Node 1_ is for compaction +- _Node 2_ is for ingest and query + + +```bash +## NODE 1 + +# Example variables +# node-id: 'host01' +# cluster-id: 'cluster01' +# bucket: 'influxdb-3-enterprise-storage' + +influxdb3 serve \ + --node-id host01 \ + --cluster-id cluster01 \ + --mode ingest,query,compact \ + --object-store s3 \ + --bucket influxdb-3-enterprise-storage \ + --http-bind {{< influxdb/host >}} \ + --aws-access-key-id \ + --aws-secret-access-key +``` + + +```bash +## NODE 2 + +# Example variables +# node-id: 'host02' +# cluster-id: 'cluster01' +# bucket: 'influxdb-3-enterprise-storage' + +influxdb3 serve \ + --node-id host02 \ + --cluster-id cluster01 \ + --mode ingest,query \ + --object-store s3 \ + --bucket influxdb-3-enterprise-storage \ + --http-bind localhost:8282 \ + --aws-access-key-id AWS_ACCESS_KEY_ID \ + --aws-secret-access-key AWS_SECRET_ACCESS_KEY +``` + +After the nodes have started, querying either node returns data for both nodes, +and _NODE 1_ runs compaction. +To add nodes to this setup, start more read replicas with the same cluster ID. + +### High availability with a dedicated Compactor + +Data compaction in {{% product-name %}} is one of the more computationally +demanding operations. +To ensure stable performance in ingest and query nodes, set up a +compactor-only node to isolate the compaction workload. 
+ +{{< img-hd src="/img/influxdb/influxdb-3-enterprise-dedicated-compactor.png" alt="Dedicated Compactor setup" />}} + +The following examples sets up high availability with a dedicated Compactor node: + +1. Start two read-write nodes as read replicas, similar to the previous example. + + + ```bash + ## NODE 1 — Writer/Reader Node #1 + + # Example variables + # node-id: 'host01' + # cluster-id: 'cluster01' + # bucket: 'influxdb-3-enterprise-storage' + + influxdb3 serve \ + --node-id host01 \ + --cluster-id cluster01 \ + --mode ingest,query \ + --object-store s3 \ + --bucket influxdb-3-enterprise-storage \ + --http-bind {{< influxdb/host >}} \ + --aws-access-key-id \ + --aws-secret-access-key + ``` + + + ```bash + ## NODE 2 — Writer/Reader Node #2 + + # Example variables + # node-id: 'host02' + # cluster-id: 'cluster01' + # bucket: 'influxdb-3-enterprise-storage' + + influxdb3 serve \ + --node-id host02 \ + --cluster-id cluster01 \ + --mode ingest,query \ + --object-store s3 \ + --bucket influxdb-3-enterprise-storage \ + --http-bind localhost:8282 \ + --aws-access-key-id \ + --aws-secret-access-key + ``` + +2. Start the dedicated compactor node with the `--mode=compact` option to ensure the node **only** runs compaction. + + ```bash + ## NODE 3 — Compactor Node + + # Example variables + # node-id: 'host03' + # cluster-id: 'cluster01' + # bucket: 'influxdb-3-enterprise-storage' + + influxdb3 serve \ + --node-id host03 \ + --cluster-id cluster01 \ + --mode compact \ + --object-store s3 \ + --bucket influxdb-3-enterprise-storage \ + --aws-access-key-id \ + --aws-secret-access-key + ``` + +### High availability with read replicas and a dedicated Compactor + +For a robust and effective setup for managing time-series data, you can run +ingest nodes alongside query nodes and a dedicated Compactor node. + +{{< img-hd src="/img/influxdb/influxdb-3-enterprise-workload-isolation.png" alt="Workload Isolation Setup" />}} + +1. Start ingest nodes with the **`ingest`** mode. 
+ + > [!Note] + > Send all write requests to only your ingest nodes. + + ```bash + ## NODE 1 — Writer Node #1 + + # Example variables + # node-id: 'host01' + # cluster-id: 'cluster01' + # bucket: 'influxdb-3-enterprise-storage' + + influxdb3 serve \ + --node-id host01 \ + --cluster-id cluster01 \ + --mode ingest \ + --object-store s3 \ + --bucket influxdb-3-enterprise-storage \ + --http-bind {{< influxdb/host >}} \ + --aws-access-key-id \ + --aws-secret-access-key + ``` + + + + ```bash + ## NODE 2 — Writer Node #2 + + # Example variables + # node-id: 'host02' + # cluster-id: 'cluster01' + # bucket: 'influxdb-3-enterprise-storage' + + influxdb3 serve \ + --node-id host02 \ + --cluster-id cluster01 \ + --mode ingest \ + --object-store s3 \ + --bucket influxdb-3-enterprise-storage \ + --http-bind localhost:8282 \ + --aws-access-key-id \ + --aws-secret-access-key + ``` + +2. Start the dedicated Compactor node with the `compact` mode. + + ```bash + ## NODE 3 — Compactor Node + + # Example variables + # node-id: 'host03' + # cluster-id: 'cluster01' + # bucket: 'influxdb-3-enterprise-storage' + + influxdb3 serve \ + --node-id host03 \ + --cluster-id cluster01 \ + --mode compact \ + --object-store s3 \ + --bucket influxdb-3-enterprise-storage \ + --aws-access-key-id \ + + ``` + +3. Finally, start the query nodes using the `query` mode. + + > [!Note] + > Send all query requests to only your query nodes. 
+ + ```bash + ## NODE 4 — Read Node #1 + + # Example variables + # node-id: 'host04' + # cluster-id: 'cluster01' + # bucket: 'influxdb-3-enterprise-storage' + + influxdb3 serve \ + --node-id host04 \ + --cluster-id cluster01 \ + --mode query \ + --object-store s3 \ + --bucket influxdb-3-enterprise-storage \ + --http-bind localhost:8383 \ + --aws-access-key-id \ + --aws-secret-access-key + ``` + + ```bash + ## NODE 5 — Read Node #2 + + # Example variables + # node-id: 'host05' + # cluster-id: 'cluster01' + # bucket: 'influxdb-3-enterprise-storage' + + influxdb3 serve \ + --node-id host05 \ + --cluster-id cluster01 \ + --mode query \ + --object-store s3 \ + --bucket influxdb-3-enterprise-storage \ + --http-bind localhost:8484 \ + --aws-access-key-id \ + + ``` + +## Writing and querying in multi-node clusters + +You can use the default port `8181` for any write or query request without +changing any of the commands. + +> [!Note] +> #### Specify hosts for write and query requests +> +> To benefit from this multi-node, isolated architecture: +> +> - Send write requests to a node that you have designated as an ingester. +> - Send query requests to a node that you have designated as a querier. +> +> When running multiple local instances for testing or separate nodes in +> production, specifying the host ensures writes and queries are routed to the +> correct instance. 
+
+{{% code-placeholders "(http://localhost:8585)|AUTH_TOKEN|DATABASE_NAME|QUERY" %}}
+```bash
+# Example querying a specific host
+# HTTP-bound Port: 8585
+influxdb3 query \
+  --host http://localhost:8585 \
+  --token AUTH_TOKEN \
+  --database DATABASE_NAME \
+  "QUERY"
+```
+{{% /code-placeholders %}}
+
+Replace the following placeholders with your values:
+
+- {{% code-placeholder-key %}}`http://localhost:8585`{{% /code-placeholder-key %}}: the host and port of the node to query
+- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}}
+- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query
+- {{% code-placeholder-key %}}`QUERY`{{% /code-placeholder-key %}}: the SQL or InfluxQL query to run against the database
+
+{{% page-nav
+  prev="/influxdb3/enterprise/get-started/setup/"
+  prevText="Set up InfluxDB"
+  next="/influxdb3/enterprise/get-started/write/"
+  nextText="Write data"
+%}}
\ No newline at end of file
diff --git a/content/influxdb3/enterprise/get-started/process.md b/content/influxdb3/enterprise/get-started/process.md
new file mode 100644
index 000000000..5e9622388
--- /dev/null
+++ b/content/influxdb3/enterprise/get-started/process.md
@@ -0,0 +1,27 @@
+---
+title: Process data in {{% product-name %}}
+seotitle: Process data | Get started with {{% product-name %}}
+description: >
+  Learn how to use the {{% product-name %}} Processing Engine to process data and
+  perform various tasks like downsampling, alerting, forecasting, data
+  normalization, and more.
+menu: + influxdb3_enterprise: + name: Process data + identifier: gs-process-data + parent: Get started +weight: 105 +aliases: + - /influxdb3/enterprise/get-started/process-data/ + - /influxdb3/enterprise/get-started/processing-engine/ +related: + - /influxdb3/enterprise/plugins/ + - /influxdb3/enterprise/reference/cli/influxdb3/create/plugin/ + - /influxdb3/enterprise/reference/cli/influxdb3/create/trigger/ +source: /shared/influxdb3-get-started/processing-engine.md +--- + + diff --git a/content/influxdb3/enterprise/get-started/query.md b/content/influxdb3/enterprise/get-started/query.md new file mode 100644 index 000000000..79446ee3d --- /dev/null +++ b/content/influxdb3/enterprise/get-started/query.md @@ -0,0 +1,24 @@ +--- +title: Query data in {{% product-name %}} +seotitle: Query data | Get started with {{% product-name %}} +description: > + Learn how to get started querying data in {{% product-name %}} using native + SQL or InfluxQL with the `influxdb3` CLI and other tools. +menu: + influxdb3_enterprise: + name: Query data + identifier: gs-query-data + parent: Get started +weight: 104 +related: + - /influxdb3/enterprise/query-data/ + - /influxdb3/enterprise/reference/sql/ + - https://datafusion.apache.org/user-guide/sql/index.html, Apache DataFusion SQL reference + - /influxdb3/enterprise/reference/influxql/ +source: /shared/influxdb3-get-started/query.md +--- + + diff --git a/content/influxdb3/enterprise/get-started/setup.md b/content/influxdb3/enterprise/get-started/setup.md new file mode 100644 index 000000000..489b29a5c --- /dev/null +++ b/content/influxdb3/enterprise/get-started/setup.md @@ -0,0 +1,21 @@ +--- +title: Set up {{% product-name %}} +seotitle: Set up InfluxDB | Get started with {{% product-name %}} +description: > + Install, configure, and set up authorization for {{% product-name %}}. 
+menu: + influxdb3_enterprise: + name: Set up Enterprise + parent: Get started +weight: 101 +related: + - /influxdb3/enterprise/install/ + - /influxdb3/enterprise/admin/tokens/ + - /influxdb3/enterprise/reference/config-options/ +source: /shared/influxdb3-get-started/setup.md +--- + + diff --git a/content/influxdb3/enterprise/get-started/write.md b/content/influxdb3/enterprise/get-started/write.md new file mode 100644 index 000000000..255c0a07e --- /dev/null +++ b/content/influxdb3/enterprise/get-started/write.md @@ -0,0 +1,22 @@ +--- +title: Write data to {{% product-name %}} +seotitle: Write data | Get started with {{% product-name %}} +description: > + Learn how to write time series data to {{% product-name %}} using the + `influxdb3` CLI and _line protocol_, an efficient, human-readable write syntax. +menu: + influxdb3_enterprise: + name: Write data + identifier: gs-write-data + parent: Get started +weight: 103 +related: + - /influxdb3/enterprise/write-data/ + - /influxdb3/enterprise/reference/line-protocol/ +source: /shared/influxdb3-get-started/write.md +--- + + diff --git a/content/influxdb3/enterprise/install.md b/content/influxdb3/enterprise/install.md deleted file mode 100644 index 3893d08d1..000000000 --- a/content/influxdb3/enterprise/install.md +++ /dev/null @@ -1,211 +0,0 @@ ---- -title: Install {{< product-name >}} -description: Download and install {{< product-name >}}. -menu: - influxdb3_enterprise: - name: Install InfluxDB 3 Enterprise -weight: 2 -influxdb3/enterprise/tags: [install] -alt_links: - v1: /influxdb/v1/introduction/install/ ---- - -- [System Requirements](#system-requirements) -- [Quick install](#quick-install) -- [Download {{% product-name %}} binaries](#download-influxdb-3-{{< product-key >}}-binaries) -- [Docker image](#docker-image) - -## System Requirements - -#### Operating system - -{{< product-name >}} runs on **Linux**, **macOS**, and **Windows**. 
- -#### Object storage - -A key feature of InfluxDB 3 is its use of object storage to store time series -data in Apache Parquet format. You can choose to store these files on your local -file system. Performance on your local filesystem will likely be better, but -object storage has the advantage of not running out of space and being accessible -by other systems over the network. {{< product-name >}} natively supports Amazon S3, -Azure Blob Storage, and Google Cloud Storage. -You can also use many local object storage implementations that provide an -S3-compatible API, such as [Minio](https://min.io/). - -## Quick install - -Use the InfluxDB 3 quick install script to install {{< product-name >}} on -**Linux** and **macOS**. - -> [!Important] -> If using Windows, [download the {{% product-name %}} Windows binary](?t=Windows#download-influxdb-3-{{< product-key >}}-binaries). - -1. Use the following command to download and install the appropriate - {{< product-name >}} package on your local machine: - - ```bash - curl -O https://www.influxdata.com/d/install_influxdb3.sh \ - && sh install_influxdb3.sh enterprise - ``` - -2. Verify that installation completed successfully: - - ```bash - influxdb3 --version - ``` - -> [!Note] -> -> #### influxdb3 not found -> -> If your system can't locate your `influxdb3` binary, `source` your -> current shell configuration file (`.bashrc`, `.zshrc`, etc.). 
-> -> {{< code-tabs-wrapper >}} -{{% code-tabs %}} -[.bashrc](#) -[.zshrc](#) -{{% /code-tabs %}} -{{% code-tab-content %}} -```bash -source ~/.bashrc -``` -{{% /code-tab-content %}} -{{% code-tab-content %}} - -```bash -source ~/.zshrc -``` -{{% /code-tab-content %}} -{{< /code-tabs-wrapper >}} - -## Download {{% product-name %}} binaries - -{{< tabs-wrapper >}} -{{% tabs %}} -[Linux](#) -[macOS](#) -[Windows](#) -{{% /tabs %}} -{{% tab-content %}} - - - -- [{{< product-name >}} • Linux (AMD64, x86_64) • GNU](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_amd64.tar.gz) - • - [sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_amd64.tar.gz.sha256) - -- [{{< product-name >}} • Linux (ARM64, AArch64) • GNU](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_arm64.tar.gz) - • - [sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_arm64.tar.gz.sha256) - - - -{{% /tab-content %}} -{{% tab-content %}} - - - -- [{{< product-name >}} • macOS (Silicon, ARM64)](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_darwin_arm64.tar.gz) - • - [sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_darwin_arm64.tar.gz.sha256) - -> [!Note] -> macOS Intel builds are coming soon. 
- - - -{{% /tab-content %}} -{{% tab-content %}} - - - -- [{{< product-name >}} • Windows (AMD64, x86_64)](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}-windows_amd64.zip) - • - [sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}-windows_amd64.zip.sha256) - - - -{{% /tab-content %}} -{{< /tabs-wrapper >}} - -## Docker image - -Use the `influxdb:3-{{< product-key >}}` Docker image to deploy {{< product-name >}} in a -Docker container. -The image is available for x86_64 (AMD64) and ARM64 architectures. - -### Use Docker CLI - - -```bash -docker pull influxdb:3-{{< product-key >}} -``` - -Docker automatically pulls the appropriate image for your system architecture. - -To specify the system architecture, use platform-specific tags--for example: - -```bash -# For x86_64/AMD64 -docker pull \ ---platform linux/amd64 \ -influxdb:3-{{< product-key >}} -``` - -```bash -# For ARM64 -docker pull \ ---platform linux/arm64 \ -influxdb:3-{{< product-key >}} -``` - -> [!Note] -> The {{% product-name %}} Docker image exposes port `8181`, the `influxdb3` server default for HTTP connections. -> To map the exposed port to a different port when running a container, see the Docker guide for [Publishing and exposing ports](https://docs.docker.com/get-started/docker-concepts/running-containers/publishing-ports/). - -### Use Docker Compose - -1. Open `compose.yaml` for editing and add a `services` entry for {{% product-name %}}--for example: - - ```yaml - # compose.yaml - services: - influxdb3-{{< product-key >}}: - container_name: influxdb3-{{< product-key >}} - image: influxdb:3-{{< product-key >}} - ports: - - 9999:9999 - command: - - influxdb3 - - serve - - --node-id=node0 - - --cluster-id=cluster0 - - --object-store=file - - --data-dir=/var/lib/influxdb3 - ``` - -2. Use the Docker Compose CLI to start the server. 
- - Optional: to make sure you have the latest version of the image before you - start the server, run `docker compose pull`. - - - ```bash - docker compose pull && docker compose run influxdb3-{{< product-key >}} - ``` - -> [!Note] -> #### Stopping an InfluxDB 3 container -> -> To stop a running InfluxDB 3 container, find and terminate the process--for example: -> -> -> ```bash -> ps -ef | grep influxdb3 -> kill -9 -> ``` -> -> Currently, a bug prevents using {{< keybind all="Ctrl+c" >}} in the terminal to stop an InfluxDB 3 container. - -{{< page-nav next="/influxdb3/enterprise/get-started/" nextText="Get started with InfluxDB 3 Enterprise" >}} diff --git a/content/influxdb3/enterprise/install/_index.md b/content/influxdb3/enterprise/install/_index.md new file mode 100644 index 000000000..ecf3f1d73 --- /dev/null +++ b/content/influxdb3/enterprise/install/_index.md @@ -0,0 +1,14 @@ +--- +title: Install {{< product-name >}} +description: Download and install {{< product-name >}}. +menu: + influxdb3_enterprise: + name: Install InfluxDB 3 Enterprise +weight: 2 +influxdb3/enterprise/tags: [install] +source: /shared/influxdb3/install.md +alt_links: + v1: /influxdb/v1/introduction/install/ +--- + + diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/_index.md b/content/influxdb3/enterprise/reference/cli/influxdb3/_index.md index db57936cb..863318111 100644 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/_index.md +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/_index.md @@ -32,6 +32,7 @@ influxdb3 [GLOBAL-OPTIONS] [COMMAND] | [serve](/influxdb3/enterprise/reference/cli/influxdb3/serve/) | Run the {{% product-name %}} server | | [show](/influxdb3/enterprise/reference/cli/influxdb3/show/) | List resources | | [test](/influxdb3/enterprise/reference/cli/influxdb3/test/) | Test plugins | +| [update](/influxdb3/enterprise/reference/cli/influxdb3/update/) | Update resources | | [write](/influxdb3/enterprise/reference/cli/influxdb3/write/) | 
Write to {{% product-name %}} | ## Global options diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/create/token/_index.md b/content/influxdb3/enterprise/reference/cli/influxdb3/create/token/_index.md index 1411c22bf..5e40829e9 100644 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/create/token/_index.md +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/create/token/_index.md @@ -4,7 +4,7 @@ description: > The `influxdb3 create token` command creates an admin token or a scoped resource token for authenticating and authorizing actions in an {{% product-name %}} instance. menu: influxdb3_enterprise: - parent: influxdb3 + parent: influxdb3 create name: influxdb3 create token weight: 300 source: /shared/influxdb3-cli/create/token/_index.md diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md b/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md index 2b0e70db2..90f3f93c0 100644 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md @@ -27,8 +27,8 @@ influxdb3 serve [OPTIONS] \ - **node-id**: A unique identifier for your server instance. Must be unique for any hosts sharing the same object store. - **cluster-id**: A unique identifier for your cluster. Must be different from any node-id in your cluster. -- **object-store**: Determines where time series data is stored. _Default is `memory`_. -- **data-dir**: Path for local file storage (required when using `--object-store file`). +- **object-store**: Determines where time series data is stored. +- Other object store parameters depending on the selected `object-store` type. > [!NOTE] > `--node-id` and `--cluster-id` support alphanumeric strings with optional hyphens. 
diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/show/license.md b/content/influxdb3/enterprise/reference/cli/influxdb3/show/license.md new file mode 100644 index 000000000..579cfb8e5 --- /dev/null +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/show/license.md @@ -0,0 +1,88 @@ +--- +title: influxdb3 show license +description: > + The `influxdb3 show license` command displays license information for your + InfluxDB 3 Enterprise server. +menu: + influxdb3_enterprise: + parent: influxdb3 show + name: influxdb3 show license +weight: 300 +--- + +The `influxdb3 show license` command displays license information for your {{< product-name >}} instance. + + +## Usage + + + +```bash +influxdb3 show license [OPTIONS] +``` + +## Options + +| Option | | Description | +| :----- | :----------- | :--------------------------------------------------------------------------------------- | +| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) | +| | `--cluster-id` | _({{< req >}})_ Cluster identifier | +| | `--node-id` | _({{< req >}})_ Node identifier | +| | `--object-store` | _({{< req >}})_ Object store type (file, memory, s3, gcs, azure) | +| | `--token` | Authentication token | +| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) | +| `-h` | `--help` | Print help information | +| | `--help-all` | Print detailed help information | + +> [!Note] +> **CLI help documentation bug in v3.2.0** +> +> The `influxdb3 show license --help` output in v3.2.0 does not display the required `--object-store`, `--cluster-id`, and `--node-id` options and related object store configuration options. +> This command requires object store configuration and cluster/node identification to function properly. 
+ +### Additional object store options + +Depending on the `--object-store` type specified, additional configuration options may be required: + +- **S3**: AWS credentials and bucket configuration +- **GCS**: Google Cloud credentials and bucket configuration +- **Azure**: Azure credentials and container configuration +- **File**: Local file system path configuration + +### Option environment variables + +You can use the following environment variables to set command options: + +| Environment Variable | Option | +| :------------------------ | :----------- | +| `INFLUXDB3_HOST_URL` | `--host` | +| `INFLUXDB3_AUTH_TOKEN` | `--token` | + +## Examples + +### Display license information with file object store + +{{% code-placeholders "AUTH_TOKEN|CLUSTER_ID|NODE_ID" %}} + + + +```bash +influxdb3 show license \ + --cluster-id CLUSTER_ID \ + --node-id NODE_ID \ + --object-store file \ + --token AUTH_TOKEN +``` + +{{% /code-placeholders %}} + +In the example above, replace the following: + +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: + Authentication token +- {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: + Your cluster identifier +- {{% code-placeholder-key %}}`NODE_ID`{{% /code-placeholder-key %}}: + Your node identifier + +The command displays information about your Enterprise license, including license type, expiration date, and usage limits. \ No newline at end of file diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/test/schedule_plugin.md b/content/influxdb3/enterprise/reference/cli/influxdb3/test/schedule_plugin.md new file mode 100644 index 000000000..8d5682bf6 --- /dev/null +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/test/schedule_plugin.md @@ -0,0 +1,15 @@ +--- +title: influxdb3 test schedule_plugin +description: > + The `influxdb3 test schedule_plugin` command tests a schedule plugin file without needing to create a trigger. 
+menu: + influxdb3_enterprise: + parent: influxdb3 test + name: influxdb3 test schedule_plugin +weight: 401 +source: /shared/influxdb3-cli/test/schedule_plugin.md +--- + + \ No newline at end of file diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/update/_index.md b/content/influxdb3/enterprise/reference/cli/influxdb3/update/_index.md new file mode 100644 index 000000000..8166b3bbb --- /dev/null +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/update/_index.md @@ -0,0 +1,15 @@ +--- +title: influxdb3 update +description: > + The `influxdb3 update` command updates resources such as databases and tables. +menu: + influxdb3_enterprise: + parent: influxdb3 + name: influxdb3 update +weight: 300 +source: /shared/influxdb3-cli/update/_index.md +--- + + \ No newline at end of file diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/update/database.md b/content/influxdb3/enterprise/reference/cli/influxdb3/update/database.md new file mode 100644 index 000000000..0a8130a62 --- /dev/null +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/update/database.md @@ -0,0 +1,15 @@ +--- +title: influxdb3 update database +description: > + The `influxdb3 update database` command updates an existing database. +menu: + influxdb3_enterprise: + parent: influxdb3 update + name: influxdb3 update database +weight: 400 +source: /shared/influxdb3-cli/update/database/_index.md +--- + + \ No newline at end of file diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/update/table.md b/content/influxdb3/enterprise/reference/cli/influxdb3/update/table.md new file mode 100644 index 000000000..d1a2b0bf2 --- /dev/null +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/update/table.md @@ -0,0 +1,17 @@ +--- +title: influxdb3 update table +description: > + The `influxdb3 update table` command updates an existing table. 
+menu: + influxdb3_enterprise: + parent: influxdb3 update + name: influxdb3 update table +weight: 400 +source: /shared/influxdb3-cli/update/table/_index.md +alt_links: + core: /influxdb3/core/reference/cli/influxdb3/update/ +--- + + \ No newline at end of file diff --git a/content/influxdb3/enterprise/reference/config-options.md b/content/influxdb3/enterprise/reference/config-options.md index 833848ae6..e1d4ef469 100644 --- a/content/influxdb3/enterprise/reference/config-options.md +++ b/content/influxdb3/enterprise/reference/config-options.md @@ -19,20 +19,18 @@ Pass configuration options to the `influxdb serve` server using either command options or environment variables. Command options take precedence over environment variables. -##### Example influxdb3 serve command options +##### Example `influxdb3 serve` command options ```sh influxdb3 serve \ + --node-id node0 \ + --cluster-id cluster0 \ --license-email example@email.com \ --object-store file \ --data-dir ~/.influxdb3 \ - --node-id NODE_ID \ - --cluster-id my-cluster-01 \ - --log-filter info \ - --max-http-request-size 20971520 \ - --aws-allow-http + --log-filter info ``` ##### Example environment variables @@ -43,10 +41,7 @@ influxdb3 serve \ export INFLUXDB3_ENTERPRISE_LICENSE_EMAIL=example@email.com export INFLUXDB3_OBJECT_STORE=file export INFLUXDB3_DB_DIR=~/.influxdb3 -export INFLUXDB3_WRITER_IDENTIFIER_PREFIX=my-host export LOG_FILTER=info -export INFLUXDB3_MAX_HTTP_REQUEST_SIZE=20971520 -export AWS_ALLOW_HTTP=true influxdb3 serve ``` @@ -60,8 +55,13 @@ influxdb3 serve - [license-file](#license-file) - [mode](#mode) - [node-id](#node-id) + - [node-id-from-env](#node-id-from-env) - [object-store](#object-store) - - [query-file-limit](#query-file-limit) + - [tls-key](#tls-key) + - [tls-cert](#tls-cert) + - [tls-minimum-versions](#tls-minimum-version) + - [without-auth](#without-auth) + - [disable-authz](#disable-authz) - [AWS](#aws) - [aws-access-key-id](#aws-access-key-id) - 
[aws-secret-access-key](#aws-secret-access-key) @@ -121,9 +121,6 @@ influxdb3 serve - [wal-snapshot-size](#wal-snapshot-size) - [wal-max-write-buffer-size](#wal-max-write-buffer-size) - [snapshotted-wal-files-to-keep](#snapshotted-wal-files-to-keep) -- [Replication](#replication) - - [read-from-node-ids](#read-from-node-ids) - - [replication-interval](#replication-interval) - [Compaction](#compaction) - [compaction-row-limit](#compaction-row-limit) - [compaction-max-num-files-per-plan](#compaction-max-num-files-per-plan) @@ -135,11 +132,14 @@ influxdb3 serve - [parquet-mem-cache-size](#parquet-mem-cache-size) - [parquet-mem-cache-prune-percentage](#parquet-mem-cache-prune-percentage) - [parquet-mem-cache-prune-interval](#parquet-mem-cache-prune-interval) - - [disable-parquet-mem-cache](#disable-parquet-mem-cache) - [parquet-mem-cache-query-path-duration](#parquet-mem-cache-query-path-duration) + - [disable-parquet-mem-cache](#disable-parquet-mem-cache) - [last-cache-eviction-interval](#last-cache-eviction-interval) + - [last-value-cache-disable-from-history](#last-value-cache-disable-from-history) - [distinct-cache-eviction-interval](#distinct-cache-eviction-interval) -- [Processing engine](#processing-engine) + - [distinct-value-cache-disable-from-history](#distinct-value-cache-disable-from-history) + - [query-file-limit](#query-file-limit) +- [Processing Engine](#processing-engine) - [plugin-dir](#plugin-dir) - [virtual-env-location](#virtual-env-location) - [package-manager](#package-manager) @@ -164,7 +164,7 @@ This value must be different than the [`--node-id`](#node-id) value. | influxdb3 serve option | Environment variable | | :--------------------- | :--------------------------------- | -| `--cluster-id` | `INFLUXDB3_ENTERPRISE_my-cluster-01` | +| `--cluster-id` | `INFLUXDB3_ENTERPRISE_CLUSTER_ID` | --- @@ -235,6 +235,27 @@ configuration--for example, the same bucket. 
| :--------------------- | :--------------------------------- | | `--node-id` | `INFLUXDB3_NODE_IDENTIFIER_PREFIX` | + +#### node-id-from-env + +Specifies the node identifier used as a prefix in all object store file paths. +Takes the name of an environment variable as an argument and uses the value of that environment variable as the node identifier. +This option cannot be used with the `--node-id` option. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------------------- | +| `--node-id-from-env` | `INFLUXDB3_NODE_IDENTIFIER_FROM_ENV` | + +##### Example using --node-id-from-env + +```bash +export DATABASE_NODE=node0 && influxdb3 serve \ + --node-id-from-env DATABASE_NODE \ + --cluster-id cluster0 \ + --object-store file \ + --data-dir ~/.influxdb3/data +``` + --- #### object-store @@ -242,7 +263,7 @@ configuration--for example, the same bucket. Specifies which object storage to use to store Parquet files. This option supports the following values: -- `memory` _(default)_: Effectively no object persistence +- `memory`: Effectively no object persistence - `memory-throttled`: Like `memory` but with latency and throughput that somewhat resembles a cloud object store - `file`: Stores objects in the local filesystem (must also set `--data-dir`) - `s3`: Amazon S3 (must also set `--bucket`, `--aws-access-key-id`, `--aws-secret-access-key`, and possibly `--aws-default-region`) @@ -255,26 +276,49 @@ This option supports the following values: --- -#### query-file-limit +#### tls-key -Limits the number of Parquet files a query can access. -If a query attempts to read more than this limit, InfluxDB returns an error. +The path to a key file for TLS to be enabled. 
-**Default:** `432` +| influxdb3 serve option | Environment variable | +| :--------------------- | :--------------------- | +| `--tls-key` | `INFLUXDB3_TLS_KEY` | -You can increase this limit to allow more files to be queried, but be aware of -the following side-effects: +--- -- Degraded query performance for queries that read more Parquet files -- Increased memory usage -- Your system potentially killing the `influxdb3` process due to Out-of-Memory - (OOM) errors -- If using object storage to store data, many GET requests to access the data - (as many as 2 requests per file) +#### tls-cert -| influxdb3 serve option | Environment variable | -| :--------------------- | :--------------------------- | -| `--query-file-limit` | `INFLUXDB3_QUERY_FILE_LIMIT` | +The path to a cert file for TLS to be enabled. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :--------------------- | +| `--tls-cert` | `INFLUXDB3_TLS_CERT` | + +--- + +#### tls-minimum-version + +The minimum version for TLS. +Valid values are `tls-1.2` or `tls-1.3`. +Default is `tls-1.2`. + +| influxdb3 serve option | Environment variable | +| :---------------------- | :----------------------- | +| `--tls-minimum-version` | `INFLUXDB3_TLS_MINIMUM_VERSION` | + +--- + +#### without-auth + +Disables authentication for all server actions (CLI commands and API requests). +The server processes all requests without requiring tokens or authentication. + +--- + +#### disable-authz + +Optionally disable authz by passing in a comma separated list of resources. +Valid values are `health`, `ping`, and `metrics`. --- @@ -935,35 +979,6 @@ they are deleted when the number of snapshotted WAL files exceeds this number. --- -### Replication - -- [read-from-node-ids](#read-from-node-ids) -- [replication-interval](#replication-interval) - -#### read-from-node-ids - -Specifies a comma-separated list of writer identifier prefixes (`node-id`s) to -read WAL files from. 
[env: =] - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------------------ | -| `--read-from-node-ids` | `INFLUXDB3_ENTERPRISE_READ_FROM_WRITER_IDS` | - ---- - -#### replication-interval - -Defines the interval at which each replica specified in the -`read-from-node-ids` option is replicated. - -**Default:** `250ms` - -| influxdb3 serve option | Environment variable | -| :----------------------- | :------------------------------------------ | -| `--replication-interval` | `INFLUXDB3_ENTERPRISE_REPLICATION_INTERVAL` | - ---- - ### Compaction - [compaction-row-limit](#compaction-row-limit) @@ -1114,6 +1129,28 @@ Sets the interval to check if the in-memory Parquet cache needs to be pruned. --- +#### parquet-mem-cache-query-path-duration + +A [duration](/influxdb3/enterprise/reference/glossary/#duration) that specifies +the time window for caching recent Parquet files in memory. Default is `5h`. + +Only files containing data with a timestamp between `now` and `now - duration` +are cached when accessed during queries--for example, with the default `5h` setting: + +- Current time: `2024-06-10 15:00:00` +- Cache window: Last 5 hours (`2024-06-10 10:00:00` to now) + +If a query requests data from `2024-06-09` (old) and `2024-06-10 14:00` (recent): + +- **Cached**: Parquet files with data from `2024-06-10 14:00` (within 5-hour window) +- **Not cached**: Parquet files with data from `2024-06-09` (outside 5-hour window) + +| influxdb3 serve option | Environment variable | +| :---------------------------- | :------------------------------------ | +| `--parquet-mem-cache-query-path-duration` | `INFLUXDB3_PARQUET_MEM_CACHE_QUERY_PATH_DURATION` | + +--- + #### disable-parquet-mem-cache Disables the in-memory Parquet cache. By default, the cache is enabled. @@ -1124,19 +1161,6 @@ Disables the in-memory Parquet cache. By default, the cache is enabled. 
--- -#### parquet-mem-cache-query-path-duration - -Specifies the duration to check if Parquet files pulled in query path -require caching, expressed as a human-readable duration (starting from _now_)--for example: `5h`, `3d`. - -**Default:** `5h` - -| influxdb3 serve option | Environment variable | -| :---------------------------- | :------------------------------------ | -| `--parquet-mem-cache-query-path-duration` | `INFLUXDB3_PARQUET_MEM_CACHE_QUERY_PATH_DURATION` | - ---- - #### last-cache-eviction-interval Specifies the interval to evict expired entries from the Last-N-Value cache, @@ -1150,6 +1174,17 @@ expressed as a human-readable duration--for example: `20s`, `1m`, `1h`. --- +#### last-value-cache-disable-from-history + +Disables populating the last-N-value cache from historical data. +If disabled, the cache is still populated with data from the write-ahead log (WAL). + +| influxdb3 serve option | Environment variable | +| :---------------------------------------- | :------------------------------------------------ | +| `--last-value-cache-disable-from-history` | `INFLUXDB3_LAST_VALUE_CACHE_DISABLE_FROM_HISTORY` | + +--- + #### distinct-cache-eviction-interval Specifies the interval to evict expired entries from the distinct value cache, @@ -1163,7 +1198,28 @@ expressed as a human-readable duration--for example: `20s`, `1m`, `1h`. --- -### Processing engine +#### distinct-value-cache-disable-from-history + +Disables populating the distinct value cache from historical data. +If disabled, the cache is still populated with data from the write-ahead log (WAL). + +| influxdb3 serve option | Environment variable | +| :-------------------------------------------- | :---------------------------------------------------- | +| `--distinct-value-cache-disable-from-history` | `INFLUXDB3_DISTINCT_VALUE_CACHE_DISABLE_FROM_HISTORY` | +--- + +#### query-file-limit + +Limits the number of Parquet files a query can access. 
+If a query attempts to read more than this limit, {{% product-name %}} returns an error. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :--------------------------- | +| `--query-file-limit` | `INFLUXDB3_QUERY_FILE_LIMIT` | + +--- + +### Processing Engine - [plugin-dir](#plugin-dir) - [virtual-env-location](#virtual-env-location) @@ -1186,7 +1242,7 @@ engine uses. | influxdb3 serve option | Environment variable | | :----------------------- | :--------------------- | -| `--virtual-env-location` | `VIRTUAL_ENV_LOCATION` | +| `--virtual-env-location` | `VIRTUAL_ENV` | --- diff --git a/content/influxdb3/enterprise/write-data/api-client-libraries.md b/content/influxdb3/enterprise/write-data/client-libraries.md similarity index 53% rename from content/influxdb3/enterprise/write-data/api-client-libraries.md rename to content/influxdb3/enterprise/write-data/client-libraries.md index 5e51607b5..6b32cc7cb 100644 --- a/content/influxdb3/enterprise/write-data/api-client-libraries.md +++ b/content/influxdb3/enterprise/write-data/client-libraries.md @@ -1,24 +1,24 @@ --- -title: Use the HTTP API and client libraries to write data +title: Use InfluxDB client libraries to write data description: > - Use the `/api/v3/write_lp` HTTP API endpoint and InfluxDB API clients to write points as line protocol data to {{% product-name %}}. + Use InfluxDB API clients to write points as line protocol data to {{% product-name %}}. 
menu: influxdb3_enterprise: - name: Use the API and client libraries + name: Use client libraries parent: Write data - identifier: write-api-client-libs + identifier: write-client-libs weight: 100 aliases: - - /influxdb3/enterprise/write-data/client-libraries/ + - /influxdb3/enterprise/write-data/api-client-libraries/ related: - /influxdb3/enterprise/reference/syntax/line-protocol/ - /influxdb3/enterprise/get-started/write/ - /influxdb3/enterprise/reference/client-libraries/v3/ - /influxdb3/enterprise/api/v3/#operation/PostWriteLP, /api/v3/write_lp endpoint -source: /shared/influxdb3-write-guides/api-client-libraries.md +source: /shared/influxdb3-write-guides/client-libraries.md --- +--> \ No newline at end of file diff --git a/content/influxdb3/enterprise/write-data/compatibility-apis.md b/content/influxdb3/enterprise/write-data/compatibility-apis.md index 40b25bde7..b811d24c0 100644 --- a/content/influxdb3/enterprise/write-data/compatibility-apis.md +++ b/content/influxdb3/enterprise/write-data/compatibility-apis.md @@ -17,10 +17,10 @@ related: - /influxdb3/enterprise/reference/client-libraries/v2/ - /influxdb3/enterprise/api/v3/#operation/PostV2Write, /api/v2/write (v2-compatible) endpoint - /influxdb3/enterprise/api/v3/#operation/PostV1Write, /write (v1-compatible) endpoint -source: /shared/influxdb3-write-guides/compatibility-apis.md +source: /shared/influxdb3-write-guides/http-api/compatibility-apis.md --- \ No newline at end of file diff --git a/content/influxdb3/enterprise/write-data/http-api/_index.md b/content/influxdb3/enterprise/write-data/http-api/_index.md new file mode 100644 index 000000000..da321e75f --- /dev/null +++ b/content/influxdb3/enterprise/write-data/http-api/_index.md @@ -0,0 +1,22 @@ +--- +title: Use the InfluxDB HTTP API to write data +description: > + Use the `/api/v3/write_lp`, `/api/v2/write`, or `/write` HTTP API endpoints + to write data to {{% product-name %}}. 
+menu: + influxdb3_enterprise: + name: Use the HTTP API + parent: Write data + identifier: write-http-api +weight: 100 +related: + - /influxdb3/enterprise/reference/syntax/line-protocol/ + - /influxdb3/enterprise/get-started/write/ + - /influxdb3/enterprise/api/v3/#operation/PostWriteLP, /api/v3/write_lp endpoint +source: /shared/influxdb3-write-guides/http-api/_index.md +--- + + diff --git a/content/influxdb3/enterprise/write-data/http-api/compatibility-apis.md b/content/influxdb3/enterprise/write-data/http-api/compatibility-apis.md new file mode 100644 index 000000000..47f645856 --- /dev/null +++ b/content/influxdb3/enterprise/write-data/http-api/compatibility-apis.md @@ -0,0 +1,26 @@ +--- +title: Use compatibility APIs and client libraries to write data +description: > + Use HTTP API endpoints compatible with InfluxDB v2 and v1 clients to write + points as line protocol data to {{% product-name %}}. +menu: + influxdb3_enterprise: + name: Use v1 and v2 compatibility APIs + parent: write-http-api +weight: 202 +aliases: + - /influxdb3/enterprise/write-data/client-libraries/ + - /influxdb3/enterprise/write-data/compatibility-apis/ +related: + - /influxdb3/enterprise/reference/syntax/line-protocol/ + - /influxdb3/enterprise/get-started/write/ + - /influxdb3/enterprise/reference/client-libraries/v2/ + - /influxdb3/enterprise/api/v3/#operation/PostV2Write, /api/v2/write (v2-compatible) endpoint + - /influxdb3/enterprise/api/v3/#operation/PostV1Write, /write (v1-compatible) endpoint +source: /shared/influxdb3-write-guides/http-api/compatibility-apis.md +--- + + \ No newline at end of file diff --git a/content/influxdb3/enterprise/write-data/http-api/v3-write-lp.md b/content/influxdb3/enterprise/write-data/http-api/v3-write-lp.md new file mode 100644 index 000000000..1feec9880 --- /dev/null +++ b/content/influxdb3/enterprise/write-data/http-api/v3-write-lp.md @@ -0,0 +1,20 @@ +--- +title: Use the v3 write API to write data +description: > + Use the `/api/v3/write_lp` 
HTTP API endpoint to write data to {{% product-name %}}. +menu: + influxdb3_enterprise: + name: Use the v3 write API + parent: write-http-api +weight: 201 +related: + - /influxdb3/enterprise/reference/syntax/line-protocol/ + - /influxdb3/enterprise/get-started/write/ + - /influxdb3/enterprise/api/v3/#operation/PostWriteLP, /api/v3/write_lp endpoint +source: /shared/influxdb3-write-guides/http-api/v3-write-lp.md +--- + + diff --git a/content/influxdb3/explorer/_index.md b/content/influxdb3/explorer/_index.md index 5078f75bb..a0827d43e 100644 --- a/content/influxdb3/explorer/_index.md +++ b/content/influxdb3/explorer/_index.md @@ -33,14 +33,14 @@ Run the Docker image to start InfluxDB 3 Explorer: ```sh # Pull the Docker image -docker pull quay.io/influxdb/influxdb3-explorer:latest +docker pull influxdata/influxdb3-ui:{{% latest-patch %}} # Run the Docker container docker run --detach \ --name influxdb3-explorer \ --publish 8888:80 \ --publish 8889:8888 \ - quay.io/influxdb/influxdb3-explorer:latest \ + influxdata/influxdb3-ui:{{% latest-patch %}} \ --mode=admin ``` diff --git a/content/influxdb3/explorer/get-started.md b/content/influxdb3/explorer/get-started.md new file mode 100644 index 000000000..244191130 --- /dev/null +++ b/content/influxdb3/explorer/get-started.md @@ -0,0 +1,132 @@ +--- +title: Get started with InfluxDB 3 Explorer +description: > + Learn how to use InfluxDB 3 Explorer to connect to InfluxDB 3, write data, and + query data. +menu: + influxdb3_explorer: + name: Get started +weight: 3 +--- + +Get started with {{% product-name %}} by connecting it to an InfluxDB 3 instance, +writing data to InfluxDB, and then querying that data. This guide walks you +through each of those steps. + +1. [Connect to an InfluxDB 3 server](#connect-to-an-influxdb-3-server) +2. [Write data to InfluxDB](#write-data-to-influxdb) +3. 
[Query data from InfluxDB](#query-data-from-influxdb) + +> [!Note] +> This guide assumes you have already [installed {{% product-name %}}](/influxdb3/explorer/install/). + +## Connect to an InfluxDB 3 server + +InfluxDB 3 Explorer supports the following InfluxDB 3 products: + +- [InfluxDB 3 Core](/influxdb3/core/) +- [InfluxDB 3 Enterprise](/influxdb3/enterprise/) + +1. Navigate to **Configure** > **Servers**. +2. Click **+ Connect Your First Server**. +3. Provide the following InfluxDB 3 server information: + + - **Server name**: A name to identify the InfluxDB 3 server you're connecting to. + - **Server URL**: The URL used to connect to your InfluxDB 3 server. + - Select the protocol to use (http or https). + - Provide the host and, if necessary, the port. + - **Token**: The authorization token to use to connect to your InfluxDB 3 server. + We recommend using an InfluxDB 3 _admin_ token. + + > [!Important] + > #### Token permissions may limit Explorer functionality + > + > The permissions associated with the provided token determine what + > databases you can access using this server configuration. InfluxDB 3 + > tokens with limited permissions may not be able to use some Explorer + > functionality. + +4. Click **Add Server**. + +InfluxDB 3 Explorer attempts to verify the connection. If successful, Explorer +saves the server configuration and selects it as the active server. + +> [!Note] +> If you already have data in your InfluxDB 3 instance, skip to +> [Query data from InfluxDB](#query-data-from-influxdb). + +## Write data to InfluxDB + +{{% product-name %}} lets you write data to InfluxDB 3 and provides multiple +options. For this getting started guide, use Explorer to write one of the +sample data sets to InfluxDB: + +1. Navigate to **Write Data** > **Sample/Dev Data**. +2. Select any of the available sample data sets. +3. Click **Write Sample Data**. + +{{% product-name %}} creates a new database and then writes the sample data to +the database. 
+ +### Other write methods + +{{% product-name %}} provides other ways to write data to InfluxDB, including +the following: + +- Line protocol +- CSV or JSON +- InfluxDB 3 client libraries +- Telegraf + +## Query data from InfluxDB + +To use {{% product-name %}} to query data from InfluxDB 3, navigate to +**Query Data** > **Data Explorer**. + +The _Data Explorer_ lets you explore the +schema of your database and automatically builds SQL queries by either +selecting columns in the _Schema Browser_ or by using _Natural Language_ with +the {{% product-name %}} OpenAI integration. + +For this getting started guide, use the Schema Browser to build a SQL query +that returns data from the newly written sample data set. + +1. On the **Data Explorer** in the **Schema** column, select the database you + want to query from the database dropdown menu. + Once selected, all tables in the database appear. +2. Click the name of the table you want to query to expand and view all the + columns in that table. +3. Select each column you want to query. + As you select columns in the Schema Browser, Explorer generates and updates + an SQL query in the _Query pane_. +4. Use the time range dropdown menu above the Query pane to select a time range + to query. You can select one of the predefined relative time ranges, or you + can select _Custom Range_ to define an absolute time range to query. +5. With columns and a time range selected, click **Run Query**. + +{{% product-name %}} runs the query and returns the results in the _Results pane_. +The Results pane provides three view options: + +- **Table** _(default)_: Displays raw query results in paginated table format. +- **Line**: Displays query results in a line graph. +- **Bar**: Displays query results in a bar graph. + +> [!Tip] +> SQL query results may not be ordered by `time` and Line and Bar graph +> visualizations may behave unexpectedly. 
To order results by time: +> +> - Ensure that you query the `time` column +> - Update the query to include an `ORDER BY time` clause. + +Congratulations! You have successfully used {{% product-name %}} to connect to, +write data to, and query data from an InfluxDB 3 instance. + +## Video walkthrough + +{{< youtube "zW2Hi1Ki4Eo" >}} + + diff --git a/content/influxdb3/explorer/get-started/_index.md b/content/influxdb3/explorer/get-started/_index.md deleted file mode 100644 index e15e4a3c4..000000000 --- a/content/influxdb3/explorer/get-started/_index.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Get started using InfluxDB 3 Explorer -description: Follow steps to get started using InfluxDB 3 Explorer. -menu: - influxdb3_explorer: - name: Get started -weight: 3 ---- - -Follow steps to get started using InfluxDB 3 Explorer. - -{{< youtube "zW2Hi1Ki4Eo" >}} - - diff --git a/content/influxdb3/explorer/get-started/connect.md b/content/influxdb3/explorer/get-started/connect.md deleted file mode 100644 index 7d717c024..000000000 --- a/content/influxdb3/explorer/get-started/connect.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Connect to a server -description: - Use InfluxDB 3 Explorer to connect to an InfluxDB 3 server. -menu: - influxdb3_explorer: - parent: Get started -weight: 101 -draft: true ---- - -Use InfluxDB 3 Explorer to connect to an InfluxDB 3 server. \ No newline at end of file diff --git a/content/influxdb3/explorer/install.md b/content/influxdb3/explorer/install.md index a1858d2a2..9478137ec 100644 --- a/content/influxdb3/explorer/install.md +++ b/content/influxdb3/explorer/install.md @@ -33,7 +33,7 @@ Use [Docker](https://docker.com) to install and run **InfluxDB 3 Explorer**. 2. **Pull the {{% product-name %}} Docker image** ```bash - docker pull quay.io/influxdb/influxdb3-explorer:latest + influxdata/influxdb3-ui:{{% latest-patch %}} ``` 3. 
**Create local directories** _(optional)_ @@ -73,7 +73,11 @@ Use [Docker](https://docker.com) to install and run **InfluxDB 3 Explorer**. - `$(pwd)/db:/db:rw` - `$(pwd)/config:/app-root/config:ro` - `$(pwd)/ssl:/etc/nginx/ssl:ro` - - Any of the available [environment variables](#environment-variables) + - Any of the available [environment variables](#environment-variables) + + > [!Note] + > To persist sessions across container restarts, see the detailed instructions + > on setting the [`SESSION_SECRET_KEY` environment variable](#session_secret_key). ```bash docker run --detach \ @@ -83,7 +87,7 @@ Use [Docker](https://docker.com) to install and run **InfluxDB 3 Explorer**. --volume $(pwd)/config:/app-root/config:ro \ --volume $(pwd)/db:/db:rw \ --volume $(pwd)/ssl:/etc/nginx/ssl:ro \ - quay.io/influxdb/influxdb3-explorer:latest \ + influxdata/influxdb3-ui:{{% latest-patch %}} \ --mode=admin ``` @@ -114,6 +118,13 @@ To enable TLS/SSL, mount valid certificate and key files into the container: The nginx web server automatically uses certificate files when they are present in the mounted path. +> [!Note] +> You can use a custom location for certificate and key files. +> Use the [`SSL_CERT_PATH`](#ssl_cert_path) and [`SSL_KEY_PATH`](#ssl_key_path) +> environment variables to identify the custom location. +> Also update the SSL directory volume mount path inside the container. + + --- ## Pre-configure InfluxDB connection settings @@ -191,11 +202,91 @@ If `--mode` is not set, the container defaults to query mode. Use the following environment variables to customize {{% product-name %}} settings in your container. 
-| Variable | Description | Default | -|----------------|--------------------------------------------------|----------------------| -| `DATABASE_URL` | Path to SQLite DB inside container | `/db/sqlite.db` | +- [DATABASE_URL](#database_url) +- [SESSION_SECRET_KEY](#session_secret_key) +- [SSL_CERT_PATH](#ssl_cert_path) +- [SSL_KEY_PATH](#ssl_key_path) ---- +### DATABASE_URL + +Path to SQLite DB inside container. The default is `/db/sqlite.db`. + +{{< expand-wrapper >}} +{{% expand "View `DATABASE_URL` example" %}} + + +```bash +docker run --detach \ + # ... + --volume $(pwd)/db:/custom/db-path:rw \ + --env DATABASE_URL=/custom/db-path/sqlite.db \ + influxdata/influxdb3-ui:{{% latest-patch %}} +``` +{{% /expand %}} +{{< /expand-wrapper >}} + +### SESSION_SECRET_KEY + +Specifies the secret key for session management. If none is provided, Explorer +uses a random 32-byte hex string as the session secret key. + +{{< expand-wrapper >}} +{{% expand "View `SESSION_SECRET_KEY` example" %}} + + +```bash +docker run --detach \ + # ... + --env SESSION_SECRET_KEY=xxX0Xx000xX0XxxxX0Xx000xX0XxX00x \ + influxdata/influxdb3-ui:{{% latest-patch %}} +``` +{{% /expand %}} +{{< /expand-wrapper >}} + +> [!Important] +> #### Always set SESSION_SECRET_KEY in production +> +> When you restart the container, {{% product-name %}} generates a new key if +> not explicitly set. For production use cases, always set the `SESSION_SECRET_KEY` +> environment variable to persist sessions across restarts. + +### SSL_CERT_PATH + +Defines the path to the SSL certificate file inside the container. +Default is `/etc/nginx/ssl/cert.pem`. + +{{< expand-wrapper >}} +{{% expand "View `SSL_CERT_PATH` example" %}} + + +```bash +docker run --detach \ + # ... 
+ --volume $(pwd)/ssl:/custom/ssl:ro \ + --env SSL_CERT_PATH=/custom/ssl/cert.pem \ + influxdata/influxdb3-ui:{{% latest-patch %}} +``` +{{% /expand %}} +{{< /expand-wrapper >}} + +### SSL_KEY_PATH + +Defines the path to the SSL private key file inside the container. +Default is `/etc/nginx/ssl/key.pem`. + +{{< expand-wrapper >}} +{{% expand "View `SSL_KEY_PATH` example" %}} + + +```bash +docker run --detach \ + # ... + --volume $(pwd)/ssl:/custom/ssl:ro \ + --env SSL_KEY_PATH=/custom/ssl/key.pem \ + influxdata/influxdb3-ui:{{% latest-patch %}} +``` +{{% /expand %}} +{{< /expand-wrapper >}} ## Volume Reference @@ -205,8 +296,6 @@ in your container. | `/app-root/config` | JSON config for defaults | `./config` | | `/etc/nginx/ssl` | SSL certs for HTTPS | `./ssl` | ---- - ## Exposed Ports | Port | Protocol | Purpose | diff --git a/content/influxdb3/explorer/manage-databases.md b/content/influxdb3/explorer/manage-databases.md new file mode 100644 index 000000000..08a7c5be5 --- /dev/null +++ b/content/influxdb3/explorer/manage-databases.md @@ -0,0 +1,53 @@ +--- +title: Manage databases with InfluxDB 3 Explorer +seotitle: Manage InfluxDB databases with InfluxDB 3 Explorer +description: > + Use InfluxDB 3 Explorer to manage databases in an InfluxDB 3 instance. +menu: + influxdb3_explorer: + name: Manage databases +weight: 4 +related: + - /influxdb3/core/admin/databases/, Manage databases in InfluxDB 3 Core + - /influxdb3/enterprise/admin/databases/, Manage databases in InfluxDB 3 Enterprise +--- + +{{% product-name %}} lets you manage databases in your InfluxDB 3 Core instance +or InfluxDB 3 Enterprise cluster. + +> [!Important] +> Using {{% product-name %}} to manage a database in InfluxDB 3 requires that +> Explorer is running in [admin mode](/influxdb3/explorer/install/#run-in-query-or-admin-mode) +> and that the token used in the InfluxDB 3 server configuration is an +> [admin token](/influxdb3/enterprise/admin/tokens/admin/). 
+ +To manage databases, navigate to **Manage Databases** in Explorer. +This page provides a list of databases in the connected InfluxDB 3 server that +contains the database name, retention period, and number of tables +(which includes system tables). + +## Create a database + +1. On the **Manage Databases** page, click **+ Create New**. +2. Provide a **Database name**. + _For information about naming restrictions, see + [InfluxDB 3 naming restrictions](/influxdb3/enterprise/admin/databases/create/#database-naming-restrictions)._ +3. _(Optional)_ Specify a **Retention Period** for the database. + This determines how long InfluxDB retains data (based on timestamp) in the + database before expiring and removing the data. If no retention period is + specified, InfluxDB does not expire data in the database. + + Set the following: + + - **Retention Period**: The number of time units to retain data. + - **Unit**: The unit of time to use in the retention period definition. + +4. Click **{{< icon "check" >}} Create Database**. + +## Delete a database + +1. On the **Manage Databases** page, click **{{< icon "trash" >}}**. +2. Confirm that you want to delete the database. + +> [!Caution] +> Deleting a database is a destructive action and cannot be undone. diff --git a/content/influxdb3/explorer/manage-plugins/_index.md b/content/influxdb3/explorer/manage-plugins/_index.md new file mode 100644 index 000000000..4c0fad8c4 --- /dev/null +++ b/content/influxdb3/explorer/manage-plugins/_index.md @@ -0,0 +1,93 @@ +--- +title: Manage InfluxDB 3 plugins with InfluxDB 3 Explorer +description: > + Use InfluxDB 3 Explorer to manage InfluxDB 3 processing engine plugins. 
+menu:
+  influxdb3_explorer:
+    name: Manage InfluxDB plugins
+weight: 5
+cascade:
+  related:
+    - /influxdb3/core/plugins/, InfluxDB 3 Core Processing engine plugins
+    - /influxdb3/enterprise/plugins/, InfluxDB 3 Enterprise Processing engine plugins
+  draft: true
+  prepend: |
+    > [!Warning]
+    > #### {{% product-name %}} plugin management is currently in beta
+    >
+    > The {{% product-name %}} plugin management tools and the Plugin Library are
+    > currently in **beta**.
+---
+
+{{% product-name %}} lets you manage plugins in your InfluxDB 3 instance or cluster.
+[InfluxDB 3 Processing engine plugins](/influxdb3/enterprise/plugins/) let you
+extend your database with custom Python code.
+Use {{% product-name %}} to manage plugins in your InfluxDB 3 instance and
+install prebuilt plugins from the _Plugin Library_.
+
+Each plugin can define one or more _triggers_—rules that
+specify when the plugin should execute. Triggers are typically based on
+conditions such as data arriving in a specific table or matching certain
+criteria.
+
+- **Data writes** - Process and transform data as it enters the database
+- **Scheduled events** - Run code at defined intervals or specific times
+- **HTTP requests** - Expose custom API endpoints that execute your code
+
+When a trigger condition is met, InfluxDB 3 automatically runs the associated
+plugin code. This enables real-time data processing, enrichment, or alerting
+without manual intervention.
+Use the InfluxDB 3 Explorer UI to enable, disable, or configure triggers for each plugin.
+ + + +- [View installed plugins](#view-installed-plugins) + - [Filter installed plugins](#filter-installed-plugins) +- [Enable or disable a plugin](#enable-or-disable-a-plugin) +- [ View Plugin Logs](#view-plugin-logs) +- [Delete a plugin](#delete-a-plugin) +- [Use the Plugin Library](#use-the-plugin-library) + + + +## View installed plugins + +To view plugins installed in your InfluxDB 3 server, navigate to +**Manage Plugins** > **Overview**. + +1. Navigate to the **Manage Plugins** > **Overview** section in the left sidebar. +2. All installed plugins are listed under the _All Plugins_ tab. + +### Filter installed plugins + +To filter installed plugins by state, use the top tabs to filter by: + +- **All Plugins** +- **Running** +- **Stopped** +- **Errors** + +You can also use the **search bar** to filter by plugin name. + +## Enable or disable a plugin + +1. In the plugin list, locate the desired plugin. + + - **If the plugin is currently running (enabled)**, click {{< icon "pause" >}} to disable the plugin. + - **If the plugin is currently stopped (disabled)**, click the {{< icon "play" >}} button to enable the plugin. + +## View plugin logs + +1. In the plugin list, locate the desired plugin. +2. Click **Logs** to view the most recent logs output by the plugin. +3. To view more log entries, click **View More**. +4. To export the logs, click **Export**. + +## Delete a plugin + +1. In the plugin list, locate the desired plugin. +2. Click the **{{< icon "trash" >}} button** to delete the plugin. +3. Confirm that you want to delete the plugin. 
+ +{{< children hlevel="h2" >}} diff --git a/content/influxdb3/explorer/manage-plugins/plugin-library.md b/content/influxdb3/explorer/manage-plugins/plugin-library.md new file mode 100644 index 000000000..e7894a69e --- /dev/null +++ b/content/influxdb3/explorer/manage-plugins/plugin-library.md @@ -0,0 +1,74 @@ +--- +title: Use the Plugin Library +description: > + Use InfluxDB 3 Explorer Plugin Library to view and install pre-built InfluxDB + 3 processing engine plugins. +menu: + influxdb3_explorer: + name: Plugin Library + parent: Manage InfluxDB plugins +weight: 101 +--- + +The _InfluxDB Plugin Library_ is a collection of pre-built InfluxDB 3 plugins that +you can install in your InfluxDB 3 server. To view the Plugin library, navigate +to **Manage Plugins** > **Plugin Library** in the left sidebar. + +## Search the Plugin Library + +To search for plugins in the Plugin library, submit a search term in the search bar. + +## Install a plugin + +1. In the **Plugin Library**, locate the plugin you want to install. +2. Click on the plugin card to open its details. +3. To install a plugin from {{% product-name %}} select **Install Plugin**: +4. Provide the following: + + - **Database**: The name of the InfluxDB 3 database to associate the + plugin with. + - **Trigger Name**: A unique name for the plugin and trigger combination. + - **Trigger Type**: The trigger type. What trigger types are available + depend on the plugin. + + _For more information about InfluxDB 3 plugin triggers, see + [Understand trigger types](/influxdb3/enterprise/plugins/#understand-trigger-types)._ + + Depending on the selected trigger type, provide the following: + + - **Data Writes (All Tables)**: _no additional configuration options_. + - **Data Writes (Single Table)**: + - **Table Name**: The name of the table that, when written to, triggers the plugin to run. 
+ - **Schedule**: + - **Frequency**: When to run the plugin using one of the following patterns: + - `every:`: Run at specified intervals--for example: + `every:15m`. + - `cron:`: Run on a cron schedule--for + example: `cron:0 */12 * * *`. + - **HTTP Endpoint**: + - **API Endpoint**: The API endpoint name to use to trigger the plugin--for + example: `downsample`. To trigger the plugin, you would then send + a request to the `/api/v3/engine/downsample` endpoint of your InfluxDB + server to trigger the plugin. + + - **Advanced Settings** + - **Run Asynchronously**: Execute the plugin asynchronously and do not + wait for a response. + - **Error Behavior**: Specify the action you want the plugin to take + when it encounters an error: + - **Log**: Log the error to your InfluxDB server's logs. + - **Retry**: Retry the plugin execution. + - **Disable**: Disable the plugin. + + - **Arguments**: Specific arguments to pass to the Plugin. + Plugins can have both required and optional arguments. + +5. Click **Deploy** to install the plugin. + +### Other plugin installation options + +{{% product-name %}} also lets you do the following: + +- **Download Code**: Download the plugin code to view it or modify it for your own use. +- **Copy Install Command**: Copy the `influxdb3` CLI command you can use to + manually install the plugin on your InfluxDB 3 server. diff --git a/content/influxdb3/explorer/manage-tokens.md b/content/influxdb3/explorer/manage-tokens.md new file mode 100644 index 000000000..492f21fe0 --- /dev/null +++ b/content/influxdb3/explorer/manage-tokens.md @@ -0,0 +1,104 @@ +--- +title: Manage tokens with InfluxDB 3 Explorer +seotitle: Manage InfluxDB tokens with InfluxDB 3 Explorer +description: > + Use InfluxDB 3 Explorer to manage authorization tokens for an InfluxDB 3 instance. 
+menu:
+  influxdb3_explorer:
+    name: Manage tokens
+weight: 4
+related:
+  - /influxdb3/core/admin/tokens/, Manage tokens in InfluxDB 3 Core
+  - /influxdb3/enterprise/admin/tokens/, Manage tokens in InfluxDB 3 Enterprise
+---
+
+{{% product-name %}} lets you manage authorization tokens for your InfluxDB 3
+Core instance or InfluxDB 3 Enterprise cluster.
+
+> [!Important]
+> Using {{% product-name %}} to manage authorization tokens in InfluxDB 3 requires that
+> Explorer is running in [admin mode](/influxdb3/explorer/install/#run-in-query-or-admin-mode)
+> and that the token used in the InfluxDB 3 server configuration is an
+> [admin token](/influxdb3/enterprise/admin/tokens/admin/).
+
+To manage InfluxDB authorization tokens, navigate to **Manage Tokens** in Explorer.
+This page provides a list of authorization tokens in the connected InfluxDB 3 server.
+
+## Create a token
+
+Use {{% product-name %}} to create an admin token or a resource token
+_(Enterprise only)_ for your InfluxDB 3 instance or cluster.
+
+For more information about InfluxDB tokens, see:
+
+- [Manage tokens in InfluxDB 3 Core](/influxdb3/core/admin/tokens/)
+- [Manage tokens in InfluxDB 3 Enterprise](/influxdb3/enterprise/admin/tokens/)
+
+{{< tabs-wrapper >}}
+{{% tabs %}}
+[Admin Token](#)
+[Resource Token _(Enterprise only)_](#)
+{{% /tabs %}}
+{{% tab-content %}}
+
+
+
+To create an _admin_ token:
+
+1. On the **Manage Tokens** page, click **+ Create New**.
+2. Select **Admin Token** to create an admin token.
+3. Provide a **Token Name**.
+4. Click **Create Admin Token**.
+5. Copy the generated token string and store it in a secure place.
+
+
+
+{{% /tab-content %}}
+{{% tab-content %}}
+
+
+
+To create a _resource_ token with read or write permissions for specific databases:
+
+1. On the **Manage Tokens** page, click **+ Create New**.
+2. 
Select **Database Token** to create a resource token _(InfluxDB 3 Enterprise only)_.
+3. Provide a **Token Name**.
+4. _(Optional)_ Select a **Token Expiry**.
+5. Select **Database Permissions** to assign to the token.
+
+   To grant the token read or write permissions on all databases, select the
+   _Read_ and _Write_ column headings.
+   To grant permissions for specific databases, select the checkboxes next
+   to each respective database name.
+
+6. Copy the generated token string and store it in a secure place.
+
+
+
+{{% /tab-content %}}
+{{< /tabs-wrapper >}}
+
+> [!Note]
+> #### Store tokens in a secure secret store
+>
+> This is the _only_ time you are able to view and copy the raw token string.
+> We recommend storing tokens in a **secure secret store**.
+
+
+## Delete a token
+
+On the **Manage Tokens** page, click the **{{< icon "trash" >}}** button
+on the row of the token you want to delete.
+
+> [!Caution]
+> Deleting a token is a destructive action and cannot be undone.
+> Any clients using the deleted token will no longer be able to access your
+> InfluxDB 3 instance.
+
+> [!Note]
+> #### You cannot delete the `_admin` token
+>
+> When using InfluxDB 3 Enterprise, the first token created in the cluster is
+> named `_admin`. This functions as the "operator" token and cannot be deleted.
diff --git a/content/kapacitor/v1/reference/about_the_project/release-notes.md b/content/kapacitor/v1/reference/about_the_project/release-notes.md
index beb05a02a..567d00928 100644
--- a/content/kapacitor/v1/reference/about_the_project/release-notes.md
+++ b/content/kapacitor/v1/reference/about_the_project/release-notes.md
@@ -9,6 +9,19 @@ aliases:
   - /kapacitor/v1/about_the_project/releasenotes-changelog/
 ---
 
+## v1.8.0 {date="2025-06-26"}
+
+> [!Warning]
+>
+> Python 2-based UDFs are deprecated as of Kapacitor 1.7.7 and are removed in this release. 
If you are using Python 2 +> with your User-Defined Functions (UDFs), upgrade them to be Python 3-compatible **before** installing this version of Kapacitor. +> This required change aligns with modern security practices and ensures your custom functions will continue to work after upgrading. + +### Dependency updates + +- Upgrade Go to 1.23.9. +- Upgrade go-lang `JWT library` to 4.5.2 + ## v1.7.7 {date="2025-05-27"} > [!Warning] diff --git a/content/shared/influxctl/database/_index.md b/content/shared/influxctl/database/_index.md new file mode 100644 index 000000000..6008bfe4f --- /dev/null +++ b/content/shared/influxctl/database/_index.md @@ -0,0 +1,27 @@ + +The `influxctl database` command and its subcommands manage databases in an +{{% product-name omit=" Clustered" %}} cluster. + +## Usage + +```sh +influxctl database [subcommand] [flags] +``` + +## Subcommands + +| Subcommand | Description | +| :------------------------------------------------------------------------ | :------------------ | +| [create](/influxdb3/version/reference/cli/influxctl/database/create/) | Create a database | +| [delete](/influxdb3/version/reference/cli/influxctl/database/delete/) | Delete a database | +| [list](/influxdb3/version/reference/cli/influxctl/database/list/) | List databases | +| [rename](/influxdb3/version/reference/cli/influxctl/database/rename/) | Rename a database | +| [undelete](/influxdb3/version/reference/cli/influxctl/database/undelete/) | Undelete a database | +| [update](/influxdb3/version/reference/cli/influxctl/database/update/) | Update a database | +| help, h | Output command help | + +## Flags + +| Flag | | Description | +| :--- | :------- | :------------------ | +| `-h` | `--help` | Output command help | diff --git a/content/shared/influxctl/database/create.md b/content/shared/influxctl/database/create.md new file mode 100644 index 000000000..ef1563a7b --- /dev/null +++ b/content/shared/influxctl/database/create.md @@ -0,0 +1,169 @@ + +The `influxctl 
database create` command creates a new database with a specified +retention period in an {{< product-name omit=" Clustered" >}} cluster. + +The retention period defines the maximum age of data retained in the database, +based on the timestamp of the data. +The retention period value is a time duration value made up of a numeric value +plus a duration unit. For example, `30d` means 30 days. +A zero duration retention period is infinite and data will not expire. +The retention period value cannot be negative or contain whitespace. + +{{< flex >}} +{{% flex-content "half" %}} + +##### Valid durations units include + +- **m**: minute +- **h**: hour +- **d**: day +- **w**: week +- **mo**: month +- **y**: year + +{{% /flex-content %}} +{{% flex-content "half" %}} + +##### Example retention period values + +- `0d`: infinite/none +- `3d`: 3 days +- `6w`: 6 weeks +- `1mo`: 1 month (30 days) +- `1y`: 1 year +- `30d30d`: 60 days +- `2.5d`: 60 hours + +{{% /flex-content %}} +{{< /flex >}} + +#### Custom partitioning + +You can override the default partition template (`%Y-%m-%d`) of the database +with the `--template-tag`, `--template-tag-bucket`, and `--template-timeformat` +flags when you create the database. +Provide a time format using [Rust strftime](/influxdb3/version/admin/custom-partitions/partition-templates/#time-part-templates), partition by specific tag, or partition tag values +into a specified number of "buckets." +Each of these can be used as part of the partition template. +Be sure to follow [partitioning best practices](/influxdb3/version/admin/custom-partitions/best-practices/). + +> [!Note] +> #### Always provide a time format when using custom partitioning +> +> If defining a custom partition template for your database with any of the +> `--template-*` flags, always include the `--template-timeformat` flag with a +> time format to use in your partition template. +> Otherwise, InfluxDB omits time from the partition template and won't compact partitions. 
+ +> [!Warning] +> #### Wait before writing to a new database with the same name as a deleted database +> +> After deleting a database from your {{% product-name omit=" Clustered" %}} +> cluster, you can reuse the name to create a new database, but **wait two to +> three minutes** after deleting the previous database before writing to the new +> database to allow write caches to clear. + +## Usage + + + + +```sh +influxctl database create [flags] +``` + +## Arguments + +| Argument | Description | +| :---------------- | :--------------------- | +| **DATABASE_NAME** | InfluxDB database name | + +## Flags + +| Flag | | Description | +| :--- | :---------------------- | :--------------------------------------------------------------------------------------------------------------------------------------- | +| | `--retention-period` | [Database retention period ](/influxdb3/version/admin/databases/#retention-periods)(default is `0s`, infinite) | +| | `--max-tables` | [Maximum tables per database](/influxdb3/version/admin/databases/#table-limit) (default is 500, `0` uses default) | +| | `--max-columns` | [Maximum columns per table](/influxdb3/version/admin/databases/#column-limit) (default is 250, `0` uses default) | +| | `--template-tag` | Tag to add to partition template (can include multiple of this flag) | +| | `--template-tag-bucket` | Tag and number of buckets to partition tag values into separated by a comma--for example: `tag1,100` (can include multiple of this flag) | +| | `--template-timeformat` | Timestamp format for partition template (default is `%Y-%m-%d`) | +| `-h` | `--help` | Output command help | + +{{% caption %}} +_Also see [`influxctl` global flags](/influxdb3/version/reference/cli/influxctl/#global-flags)._ +{{% /caption %}} + +## Examples + +- [Create a database with an infinite retention period](#create-a-database-with-an-infinite-retention-period) +- [Create a database with a 30-day retention 
period](#create-a-database-with-a-30-day-retention-period) +- [Create a database with non-default table and column limits](#create-a-database-with-non-default-table-and-column-limits) +- [Create a database with a custom partition template](#create-a-database-with-a-custom-partition-template) + +### Create a database with an infinite retention period + + + + +```sh +influxctl database create mydb +``` + +### Create a database with a 30-day retention period + + + + +```sh +influxctl database create \ + --retention-period 30d \ + mydb +``` + +### Create a database with non-default table and column limits + + + + +```sh +influxctl database create \ + --max-tables 200 \ + --max-columns 150 \ + mydb +``` + +### Create a database with a custom partition template + +The following example creates a new `mydb` database and applies a partition +template that partitions by two tags (`room` and `sensor-type`) and by day using +the time format `%Y-%m-%d`: + + + + +```sh +influxctl database create \ + --template-tag room \ + --template-tag sensor-type \ + --template-tag-bucket customerID,1000 \ + --template-timeformat '%Y-%m-%d' \ + mydb +``` + +_For more information about custom partitioning, see +[Manage data partitioning](/influxdb3/version/admin/custom-partitions/)._ + +{{% expand "View command updates" %}} + +#### v2.7.0 {date="2024-03-26"} + +- Introduce the `--template-tag-bucket` flag to group tag values into buckets + and partition by each tag bucket. + +#### v2.5.0 {date="2024-03-04"} + +- Introduce the `--template-tag` and `--template-timeformat` flags that define + a custom partition template for a database. 
+ +{{% /expand %}} diff --git a/content/shared/influxctl/database/delete.md b/content/shared/influxctl/database/delete.md new file mode 100644 index 000000000..bd9936066 --- /dev/null +++ b/content/shared/influxctl/database/delete.md @@ -0,0 +1,68 @@ + +The `influxctl database delete` command deletes a database from an +{{< product-name omit=" Clustered" >}} cluster. + +## Usage + + + + +```sh +influxctl database delete [command options] [--force] [...] +``` + +> [!Warning] +> #### Cannot be undone +> +> Deleting a database is a destructive action that cannot be undone. +> +> #### Wait before writing to a new database with the same name +> +> After deleting a database from your {{% product-name omit=" Clustered" %}} +> cluster, you can reuse the name to create a new database, but **wait two to +> three minutes** after deleting the previous database before writing to the new +> database to allow write caches to clear. +> +> #### Tokens still grant access to databases with the same name +> +> [Database tokens](/influxdb3/version/admin/tokens/database/) are associated to +> databases by name. If you create a new database with the same name, tokens +> that granted access to the deleted database will also grant access to the new +> database. 
+ +## Arguments + +| Argument | Description | +| :---------------- | :----------------------------- | +| **DATABASE_NAME** | Name of the database to delete | + +## Flags + +| Flag | | Description | +| :--- | :-------- | :---------------------------------------------------------- | +| | `--force` | Do not prompt for confirmation to delete (default is false) | +| `-h` | `--help` | Output command help | + +{{% caption %}} +_Also see [`influxctl` global flags](/influxdb3/version/reference/cli/influxctl/#global-flags)._ +{{% /caption %}} + +## Examples + +##### Delete a database named "mydb" + + + + +```sh +influxctl database delete mydb +``` + +##### Delete multiple databases + + + + +```sh +influxctl database delete mydb1 mydb2 +``` diff --git a/content/shared/influxctl/database/list.md b/content/shared/influxctl/database/list.md new file mode 100644 index 000000000..6315497d5 --- /dev/null +++ b/content/shared/influxctl/database/list.md @@ -0,0 +1,24 @@ + +The `influxctl database list` command lists all databases in an {{% product-name omit=" Clustered" %}} +cluster. + +The `--format` flag lets you print the output in other formats. +The `json` format is available for programmatic parsing by other tooling. +Default: `table`. 
+ +## Usage + +```sh +influxctl database list [--format=table|json] +``` + +## Flags + +| Flag | | Description | +| :--- | :--------- | :-------------------------------------------- | +| | `--format` | Output format (`table` _(default)_ or `json`) | +| `-h` | `--help` | Output command help | + +{{% caption %}} +_Also see [`influxctl` global flags](/influxdb3/version/reference/cli/influxctl/#global-flags)._ +{{% /caption %}} diff --git a/content/shared/influxctl/database/rename.md b/content/shared/influxctl/database/rename.md new file mode 100644 index 000000000..834ac7207 --- /dev/null +++ b/content/shared/influxctl/database/rename.md @@ -0,0 +1,37 @@ + +The `influxctl database rename` command renames a database in an +{{% product-name omit=" Clustered" %}} cluster. +This command does _not_ change the database ID, database properties, or the +data stored in the database. + +> [!Warning] +> #### Renaming a database requires new tokens +> +> [Database tokens](/influxdb3/version/admin/tokens/database/) are associated to +> databases by name. After renaming a database, tokens referencing the old name +> no longer function. You must generate tokens for the new database name. 
+ +## Usage + + + +```bash +influxctl database rename [flags] +``` + +## Arguments + +| Argument | Description | +| :------------------ | :--------------------------- | +| **CURRENT_DB_NAME** | Current name of the database | +| **NEW_DB_NAME** | New name for the database | + +## Flags + +| Flag | | Description | +| :--- | :--------- | :-------------------------------------------- | +| `-h` | `--help` | Output command help | + +{{% caption %}} +_Also see [`influxctl` global flags](/influxdb3/version/reference/cli/influxctl/#global-flags)._ +{{% /caption %}} diff --git a/content/shared/influxctl/database/undelete.md b/content/shared/influxctl/database/undelete.md new file mode 100644 index 000000000..ca539419e --- /dev/null +++ b/content/shared/influxctl/database/undelete.md @@ -0,0 +1,33 @@ + +The `influxctl database undelete` command undeletes a previously deleted +database in an {{% product-name omit=" Clustered" %}} cluster and restores the +database with the same retention period, table, and column limits as when it was +deleted. + +> [!Important] +> The database name must match the name of the deleted database and +> **a new database with the same name cannot exist**. 
+ +## Usage + + + +```bash +influxctl database undelete [flags] +``` + +## Arguments + +| Argument | Description | +| :---------------- | :----------------------------------- | +| **DATABASE_NAME** | The name of the database to undelete | + +## Flags + +| Flag | | Description | +| :--- | :--------- | :-------------------------------------------- | +| `-h` | `--help` | Output command help | + +{{% caption %}} +_Also see [`influxctl` global flags](/influxdb3/version/reference/cli/influxctl/#global-flags)._ +{{% /caption %}} diff --git a/content/shared/influxctl/database/update.md b/content/shared/influxctl/database/update.md new file mode 100644 index 000000000..0612d6341 --- /dev/null +++ b/content/shared/influxctl/database/update.md @@ -0,0 +1,82 @@ + +The `influxctl database update` command updates a database's retention period, +table (measurement), or column limits in InfluxDB. + +## Usage + + + +```sh +influxctl database update [flags] +``` + +## Arguments + +| Argument | Description | +| :---------------- | :----------------------------- | +| **DATABASE_NAME** | Name of the database to update | + +## Flags + +| Flag | | Description | +| :--- | :------------------- | :--------------------------------------------------------------------------------------------------------------- | +| | `--retention-period` | [Database retention period ](/influxdb3/version/admin/databases/#retention-periods)(default is `0s` or infinite) | +| | `--max-tables` | [Maximum tables per database](/influxdb3/version/admin/databases/#table-limit) (default is 500, 0 uses default) | +| | `--max-columns` | [Maximum columns per table](/influxdb3/version/admin/databases/#column-limit) (default is 250, 0 uses default) | +| `-h` | `--help` | Output command help | + +{{% caption %}} +_Also see [`influxctl` global flags](/influxdb3/version/reference/cli/influxctl/#global-flags)._ +{{% /caption %}} + +## Examples + +- [Update a database's retention period](#update-a-databases-retention-period) +- 
[Update a database's table limit](#update-a-databases-table-limit) +- [Update a database's column limit](#update-a-databases-column-limit) + +### Update a database's retention period + +```sh +influxctl database update --retention-period 1mo mydb +``` + +{{< flex >}} +{{% flex-content "half" %}} + +##### Valid durations units + +- `m`: minute +- `h`: hour +- `d`: day +- `w`: week +- `mo`: month +- `y`: year + +{{% /flex-content %}} +{{% flex-content "half" %}} + +##### Example retention period values + +- `0d`: infinite/none +- `3d`: 3 days +- `6w`: 6 weeks +- `1mo`: 1 month (30 days) +- `1y`: 1 year +- `30d30d`: 60 days +- `2.5d`: 60 hours + +{{% /flex-content %}} +{{< /flex >}} + +### Update a database's table limit + +```sh +influxctl database update --max-tables 300 mydb +``` + +### Update a database's column limit + +```sh +influxctl database update --max-columns 200 mydb +``` diff --git a/content/shared/influxctl/release-notes.md b/content/shared/influxctl/release-notes.md index 4e35c655a..288f6b6b9 100644 --- a/content/shared/influxctl/release-notes.md +++ b/content/shared/influxctl/release-notes.md @@ -1,3 +1,35 @@ +## v2.10.2 {date="2025-06-30"} + +### Features + +- Add new table management commands: + - [`influxctl table list`](/influxdb3/version/reference/cli/influxctl/table/list/) + - [`influxctl table delete`](/influxdb3/version/reference/cli/influxctl/table/delete/) + - [`influxctl table iceberg`](/influxdb3/version/reference/cli/influxctl/table/iceberg/) + - [`influxctl table iceberg enable`](/influxdb3/version/reference/cli/influxctl/table/iceberg/enable/) + - [`influxctl table iceberg disable`](/influxdb3/version/reference/cli/influxctl/table/iceberg/disable/) +- Add new database management commands: + - [`influxctl database rename`](/influxdb3/version/reference/cli/influxctl/database/rename/) + - [`influxctl database undelete`](/influxdb3/version/reference/cli/influxctl/database/undelete/) + +### Bug fixes + +- Ensure the `INFLUXCTL_PROFILE` 
environment variable overrides the default + connection profile file path. + +### Dependency updates + +- Update `github.com/apache/arrow-go/v18` from 18.3.0 to 18.3.1. +- Update `github.com/go-git/go-git/v5` from 5.16.0 to 5.16.2. +- Update `github.com/google/go-containerregistry` from 0.20.5 to 0.20.6. +- Update `github.com/urfave/cli/v2` from 2.27.6 to 2.27.7. +- Update `golang.org/x/mod` from 0.24.0 to 0.25.0. +- Update `google.golang.org/grpc` from 1.72.1 to 1.73.0. +- Update Go to 1.24.4. +- Update protobuf files. + +--- + ## v2.10.1 {date="2025-05-30"} ### Features diff --git a/content/shared/influxctl/table/_index.md b/content/shared/influxctl/table/_index.md new file mode 100644 index 000000000..1aa201434 --- /dev/null +++ b/content/shared/influxctl/table/_index.md @@ -0,0 +1,29 @@ + +The `influxctl table` command and its subcommands manage tables in an +{{% product-name omit=" Clustered" %}} cluster. + +## Usage + +```sh +influxctl table [subcommand] [flags] +``` + +## Subcommands + +| Subcommand | Description | +| :------------------------------------------------------------------- | :--------------------------------- | +| [create](/influxdb3/version/reference/cli/influxctl/table/create/) | Create a table | +| [delete](/influxdb3/version/reference/cli/influxctl/table/delete/) | Delete a table | +| [iceberg](/influxdb3/version/reference/cli/influxctl/table/iceberg/) | Manage iceberg exports for a table | +| [list](/influxdb3/version/reference/cli/influxctl/table/list/) | List tables | +| help, h | Output command help | + +## Flags + +| Flag | | Description | +| :--- | :------- | :------------------ | +| `-h` | `--help` | Output command help | + +{{% caption %}} +_Also see [`influxctl` global flags](/influxdb3/version/reference/cli/influxctl/#global-flags)._ +{{% /caption %}} diff --git a/content/shared/influxctl/table/create.md b/content/shared/influxctl/table/create.md new file mode 100644 index 000000000..0278d52d1 --- /dev/null +++ 
b/content/shared/influxctl/table/create.md @@ -0,0 +1,101 @@ + +The `influxctl table create` command creates a new table in the specified +database in an {{< product-name omit=" Clustered" >}} cluster. + +#### Custom partitioning + +You can override the default partition template (the partition template of the target database) +with the `--template-tag`, `--template-tag-bucket`, and `--template-timeformat` +flags when you create the table. +Provide a time format using [Rust strftime](/influxdb3/version/admin/custom-partitions/partition-templates/#time-part-templates), partition by specific tag, or partition tag values +into a specified number of "buckets." +Each of these can be used as part of the partition template. +Be sure to follow [partitioning best practices](/influxdb3/version/admin/custom-partitions/best-practices/). + +> [!Note] +> #### Always provide a time format when using custom partitioning +> +> If defining a custom partition template for your table with any of the +> `--template-*` flags, always include the `--template-timeformat` flag with a +> time format to use in your partition template. +> Otherwise, InfluxDB omits time from the partition template and won't compact partitions. 
+ +## Usage + +```sh +influxctl table create [flags] +``` + +## Arguments + +| Argument | Description | +| :---------------- | :-------------------------- | +| **DATABASE_NAME** | Name of the target database | +| **TABLE_NAME** | Table name | + +## Flags + +| Flag | | Description | +| :--- | :---------------------- | :--------------------------------------------------------------------------------------------------------------------------------------- | +| | `--template-tag` | Tag to add to partition template (can include multiple of this flag) | +| | `--template-tag-bucket` | Tag and number of buckets to partition tag values into separated by a comma--for example: `tag1,100` (can include multiple of this flag) | +| | `--template-timeformat` | Timestamp format for partition template | +| `-h` | `--help` | Output command help | + +{{% caption %}} +_Also see [`influxctl` global flags](/influxdb3/version/reference/cli/influxctl/#global-flags)._ +{{% /caption %}} + +## Examples + +- [Create a table](#create-a-table) +- [Create a table with a custom partition template](#create-a-table-with-a-custom-partition-template) + +In the following examples, replace: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: + The name of the database to create the table in. +- {{% code-placeholder-key %}}`TABLE_NAME` {{% /code-placeholder-key %}}: + The name of table to create. 
+ +### Create a table + +{{% code-placeholders "(DATABASE|TABLE)_NAME" %}} +```sh +influxctl table create DATABASE_NAME TABLE_NAME +``` +{{% /code-placeholders %}} + +### Create a table with a custom partition template + +The following example creates a new table and applies a partition +template that partitions by two tags (`room` and `sensor-type`), buckets +`customerID` values into 1000 buckets, and partitions by day using the time format `%Y-%m-%d`: + +{{% code-placeholders "(DATABASE|TABLE)_NAME" %}} +```sh +influxctl table create \ + --template-tag room \ + --template-tag sensor-type \ + --template-tag-bucket customerID,1000 \ + --template-timeformat '%Y-%m-%d' \ + DATABASE_NAME \ + TABLE_NAME +``` +{{% /code-placeholders %}} + +_For more information about custom partitioning, see +[Manage data partitioning](/influxdb3/version/admin/custom-partitions/)._ + +{{% expand "View command updates" %}} + +#### v2.7.0 {date="2024-03-26"} + +- Introduce the `--template-tag-bucket` flag to group tag values into buckets + and partition by each tag bucket. + +{{% /expand %}} + +{{% caption %}} +_Also see [`influxctl` global flags](/influxdb3/version/reference/cli/influxctl/#global-flags)._ +{{% /caption %}} diff --git a/content/shared/influxctl/table/delete.md b/content/shared/influxctl/table/delete.md new file mode 100644 index 000000000..b57532a49 --- /dev/null +++ b/content/shared/influxctl/table/delete.md @@ -0,0 +1,42 @@ + +The `influxctl table delete` command deletes a specified table from a database. 
+ +## Usage + +```sh +influxctl table delete [flags] +``` + +## Arguments + +| Argument | Description | +| :---------------- | :---------------------- | +| **DATABASE_NAME** | Name of the database | +| **TABLE_NAME** | Name of table to delete | + +## Flags + +| Flag | | Description | +| :--- | :------- | :------------------ | +| `-h` | `--help` | Output command help | + +{{% caption %}} +_Also see [`influxctl` global flags](/influxdb3/version/reference/cli/influxctl/#global-flags)._ +{{% /caption %}} + +## Examples + + + +{{% code-placeholders "(DATABASE|TABLE)_NAME" %}} +```bash +influxctl table delete DATABASE_NAME TABLE_NAME +``` +{{% /code-placeholders %}} + +Replace the following: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: + the name of the database to delete the table from +- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: + the name of the table to delete diff --git a/content/shared/influxctl/table/iceberg/_index.md b/content/shared/influxctl/table/iceberg/_index.md new file mode 100644 index 000000000..b4d38aeb8 --- /dev/null +++ b/content/shared/influxctl/table/iceberg/_index.md @@ -0,0 +1,36 @@ + +The `influxctl table iceberg` command and its subcommands enable or disable +Iceberg-compatible exports for a table in an InfluxDB Cloud Dedicated cluster. + +{{% show-in "clustered" %}} +> [!Warning] +> #### Only available with InfluxDB Cloud Dedicated +> +> Iceberg-compatible exports are currently only available with InfluxDB Cloud +> Dedicated, not InfluxDB Clustered. `influxctl table iceberg` command and its +> subcommands can only be used with InfluxDB Cloud Dedicated. 
+{{% /show-in %}} + +## Usage + +```sh +influxctl table iceberg [subcommand] [flags] +``` + +## Subcommands + +| Subcommand | Description | +| :--------------------------------------------------------------------------- | :---------------------------------- | +| [enable](/influxdb3/version/reference/cli/influxctl/table/iceberg/enable/) | Enable Iceberg exports for a table | +| [disable](/influxdb3/version/reference/cli/influxctl/table/iceberg/disable/) | Disable Iceberg exports for a table | +| help, h | Output command help | + +## Flags + +| Flag | | Description | +| :--- | :------- | :------------------ | +| `-h` | `--help` | Output command help | + +{{% caption %}} +_Also see [`influxctl` global flags](/influxdb3/version/reference/cli/influxctl/#global-flags)._ +{{% /caption %}} diff --git a/content/shared/influxctl/table/iceberg/disable.md b/content/shared/influxctl/table/iceberg/disable.md new file mode 100644 index 000000000..a17cd81e4 --- /dev/null +++ b/content/shared/influxctl/table/iceberg/disable.md @@ -0,0 +1,35 @@ + +The `influxctl table iceberg disable` command disables Iceberg-compatible exports +for a table in an InfluxDB Cloud Dedicated cluster. + +{{% show-in "clustered" %}} +> [!Warning] +> #### Only available with InfluxDB Cloud Dedicated +> +> Iceberg-compatible exports are currently only available with InfluxDB Cloud +> Dedicated, not InfluxDB Clustered. The `influxctl table iceberg` command and +> its subcommands can only be used with InfluxDB Cloud Dedicated. 
+{{% /show-in %}} + +## Usage + +```sh +influxctl table iceberg disable [flags] +``` + +## Arguments + +| Argument | Description | +| :---------------- | :------------------------------------------ | +| **DATABASE_NAME** | Name of the target database | +| **TABLE_NAME** | Name of table to disable Iceberg exports on | + +## Flags + +| Flag | | Description | +| :--- | :------- | :------------------ | +| `-h` | `--help` | Output command help | + +{{% caption %}} +_Also see [`influxctl` global flags](/influxdb3/version/reference/cli/influxctl/#global-flags)._ +{{% /caption %}} diff --git a/content/shared/influxctl/table/iceberg/enable.md b/content/shared/influxctl/table/iceberg/enable.md new file mode 100644 index 000000000..baf4c6b46 --- /dev/null +++ b/content/shared/influxctl/table/iceberg/enable.md @@ -0,0 +1,35 @@ + +The `influxctl table iceberg enable` command enables Iceberg-compatible exports +for a table in an InfluxDB Cloud Dedicated cluster. + +{{% show-in "clustered" %}} +> [!Warning] +> #### Only available with InfluxDB Cloud Dedicated +> +> Iceberg-compatible exports are currently only available with InfluxDB Cloud +> Dedicated, not InfluxDB Clustered. The `influxctl table iceberg` command and +> its subcommands can only be used with InfluxDB Cloud Dedicated. 
+{{% /show-in %}} + +## Usage + +```sh +influxctl table iceberg enable [flags] +``` + +## Arguments + +| Argument | Description | +| :---------------- | :----------------------------------------- | +| **DATABASE_NAME** | Name of the target database | +| **TABLE_NAME** | Name of table to enable Iceberg exports on | + +## Flags + +| Flag | | Description | +| :--- | :------- | :------------------ | +| `-h` | `--help` | Output command help | + +{{% caption %}} +_Also see [`influxctl` global flags](/influxdb3/version/reference/cli/influxctl/#global-flags)._ +{{% /caption %}} diff --git a/content/shared/influxctl/table/list.md b/content/shared/influxctl/table/list.md new file mode 100644 index 000000000..b839ebfb8 --- /dev/null +++ b/content/shared/influxctl/table/list.md @@ -0,0 +1,26 @@ + +The `influxctl table list` command lists all tables in the specified database in +an {{< product-name omit=" Clustered" >}} cluster. + +## Usage + +```sh +influxctl table list [flags] +``` + +## Arguments + +| Argument | Description | +| :---------------- | :-------------------------- | +| **DATABASE_NAME** | Name of the target database | + +## Flags + +| Flag | | Description | +| :--- | :--------- | :-------------------------------------------- | +| | `--format` | Output format (`table` _(default)_ or `json`) | +| `-h` | `--help` | Output command help | + +{{% caption %}} +_Also see [`influxctl` global flags](/influxdb3/version/reference/cli/influxctl/#global-flags)._ +{{% /caption %}} diff --git a/content/shared/influxdb-v2/api-guide/api_intro.md b/content/shared/influxdb-v2/api-guide/api_intro.md index d5cdd1702..58059ad4f 100644 --- a/content/shared/influxdb-v2/api-guide/api_intro.md +++ b/content/shared/influxdb-v2/api-guide/api_intro.md @@ -5,25 +5,28 @@ This section guides you through the most commonly used API methods. For detailed documentation on the entire API, see the [InfluxDB v2 API Reference](/influxdb/version/reference/api/#influxdb-v2-api-documentation). 
-{{% note %}} -If you need to use InfluxDB {{< current-version >}} with **InfluxDB 1.x** API clients and integrations, see the [1.x compatibility API](/influxdb/version/reference/api/#influxdb-v1-compatibility-api-reference-documentation). -{{% /note %}} +> [!Tip] +> #### Use InfluxDB 1.x API clients with {{< current-version >}} +> If you need to use InfluxDB {{< current-version >}} with **InfluxDB 1.x** API clients and integrations, see the [1.x compatibility guide](/influxdb/version/reference/api/influxdb-1x/). ## Bootstrap your application -With most API requests, you'll need to provide a minimum of your InfluxDB URL and Authorization Token (API Token). +With most API requests, you'll need to provide a minimum of your InfluxDB URL and [Authorization Token (API Token)](/influxdb/version/admin/tokens/). -[Install InfluxDB OSS v2.x](/influxdb/version/install/) or upgrade to +[Install InfluxDB OSS v2.x](/influxdb/v2/install/) or upgrade to an [InfluxDB Cloud account](/influxdb/cloud/sign-up). ### Authentication InfluxDB uses [API tokens](/influxdb/version/admin/tokens/) to authorize API requests. +InfluxDB filters API requests and response data based on the permissions associated with the token. -1. Before exploring the API, use the InfluxDB UI to +1. Before exploring the API, use the `influx` CLI or the InfluxDB UI to [create an initial API token](/influxdb/version/admin/tokens/create-token/) for your application. -2. Include your API token in an `Authorization: Token YOUR_API_TOKEN` HTTP header with each request. +1. 
Include your API token in an `Authorization: Token API_TOKEN` HTTP header with each request--for example: + +{{% code-placeholders "API_TOKEN" %}} {{< code-tabs-wrapper >}} {{% code-tabs %}} @@ -31,28 +34,94 @@ InfluxDB uses [API tokens](/influxdb/version/admin/tokens/) to authorize API req [Node.js](#nodejs) {{% /code-tabs %}} {{% code-tab-content %}} -```sh -{{% get-shared-text "api/v2.0/auth/oss/token-auth.sh" %}} +```bash +# Use a token to authorize a GET request to the InfluxDB API. +# List buckets in your organization that the token can read. +curl -X GET "http://{{< influxdb/host >}}/api/v2/buckets" \ + --header 'Accept: application/json' \ + --header 'Authorization: Token API_TOKEN' ``` {{% /code-tab-content %}} {{% code-tab-content %}} ```js -{{% get-shared-text "api/v2.0/auth/oss/token-auth.js" %}} +/** + * Use a token to authorize a GET request to the InfluxDB API. + * List buckets in your organization that the token can read. + */ + +const https = require('https'); + +function listBuckets() { + + const options = { + host: '{{< influxdb/host >}}', + path: "/api/v2/buckets", + headers: { + 'Authorization': 'Token API_TOKEN', + 'Content-type': 'application/json' + }, + }; + + const request = https.get(options, (response) => { + let rawData = ''; + // Append each chunk of the response body. + response.on('data', (chunk) => { rawData += chunk; }); + + response.on('end', () => { + console.log(rawData); + }) + }); + + request.end(); +} ``` {{% /code-tab-content %}} {{< /code-tabs-wrapper >}} -Postman is another popular tool for exploring APIs. See how to [send authenticated requests with Postman](/influxdb/version/tools/postman/#send-authenticated-api-requests-with-postman). +{{% /code-placeholders %}} + +> [!Note] +> Postman is another popular tool for exploring APIs. +> See how to [send authenticated requests with Postman](/influxdb/version/tools/postman/#send-authenticated-api-requests-with-postman). 
## Buckets API -Before writing data you'll need to create a Bucket in InfluxDB. -[Create a bucket](/influxdb/version/admin/buckets/create-bucket/#create-a-bucket-using-the-influxdb-api) using an HTTP request to the InfluxDB API `/buckets` endpoint. +Before writing data you'll need to create a bucket in your InfluxDB instance. +To use the API to create a bucket, send a request to the following endpoint: -```sh -{{% get-shared-text "api/v2.0/buckets/oss/create.sh" %}} +{{% api-endpoint method="POST" endpoint="/api/v2/buckets" api-ref="/influxdb/version/api/v2/#operation/PostBuckets" %}} + +{{% code-placeholders "API_TOKEN|ORG_ID|BUCKET_NAME|RETENTION_PERIOD_SECONDS" %}} + +```bash +curl --request POST \ + "http://localhost:8086/api/v2/buckets" \ + --header "Authorization: Token API_TOKEN" \ + --json '{ + "orgID": "'"ORG_ID"'", + "name": "BUCKET_NAME", + "retentionRules": [ + { + "type": "expire", + "everySeconds": RETENTION_PERIOD_SECONDS, + "shardGroupDurationSeconds": 0 + } + ] + }' ``` +{{% /code-placeholders %}} + +Replace the following placeholders with your values: + +- {{% code-placeholder-key %}}`API_TOKEN`{{% /code-placeholder-key %}} - your [token](/influxdb/version/admin/tokens/). +- {{% code-placeholder-key %}}`ORG_ID`{{% /code-placeholder-key %}} - the ID of the [organization](/influxdb/version/admin/organizations/) that owns the bucket. +- {{% code-placeholder-key %}}`BUCKET_NAME`{{% /code-placeholder-key %}} - the name of the [bucket](/influxdb/version/admin/buckets/) to create. +- Optional: {{% code-placeholder-key %}}`RETENTION_PERIOD_SECONDS`{{% /code-placeholder-key %}} - the [retention period](/influxdb/version/reference/glossary/#retention-period) (in number of seconds) to retain data in the bucket. Default is `0` (infinite retention). + - For example, `31536000` (1 year) or `604800` (7 days). + +For more information, see [Create a bucket](/influxdb/version/admin/buckets/create-bucket/#create-a-bucket-using-the-influxdb-api). 
+ ## Write API [Write data to InfluxDB](/influxdb/version/write-data/developer-tools/api/) using an HTTP request to the InfluxDB API `/api/v2/write` endpoint. diff --git a/content/shared/influxdb-v2/api-guide/influxdb-1x/_index.md b/content/shared/influxdb-v2/api-guide/influxdb-1x/_index.md index fc992830d..003028314 100644 --- a/content/shared/influxdb-v2/api-guide/influxdb-1x/_index.md +++ b/content/shared/influxdb-v2/api-guide/influxdb-1x/_index.md @@ -7,11 +7,16 @@ InfluxDB 1.x client libraries and third-party integrations like [Grafana](https: ## Authentication InfluxDB 1.x compatibility endpoints require all query and write requests to be authenticated with an -[API token](/influxdb/version/admin/tokens/) or 1.x-compatible +[API token](/influxdb/version/admin/tokens/) or v1-compatible credentials. -* [Authenticate with the Token scheme](#authenticate-with-the-token-scheme) -* [Authenticate with a 1.x username and password scheme](#authenticate-with-a-username-and-password-scheme) +> [!Important] +> #### Authenticate with an API token or 1.x-compatible credentials +> You can't use an InfluxDB 2.x username and password to authenticate with the InfluxDB 1.x compatibility API. + +- [Authenticate with the Token scheme](#authenticate-with-the-token-scheme) +- [Authenticate with a 1.x username and password scheme](#authenticate-with-a-username-and-password-scheme) +- [Troubleshoot authentication issues](#troubleshoot-authentication-issues) ### Authenticate with the Token scheme Token authentication requires the following credential: @@ -67,12 +72,12 @@ Username and password schemes require the following credentials: {{% note %}} #### Password or Token -If you have [set a password](/influxdb/version/install/upgrade/v1-to-v2/manual-upgrade/#1x-compatible-authorizations) for the 1.x-compatible username, provide the 1.x-compatible password. 
-If you haven't set a password for the 1.x-compatible username, provide the InfluxDB [authentication token](/influxdb/version/admin/tokens/) as the password. +If you have [set a password](/influxdb/version/install/upgrade/v1-to-v2/manual-upgrade/#1x-compatible-authorizations) for the v1-compatible username, provide the v1-compatible password. +If you haven't set a password for the v1-compatible username, provide the InfluxDB [authentication token](/influxdb/version/admin/tokens/) as the password. {{% /note %}} For more information, see how to create and manage -[1.x-compatible authorizations](/influxdb/version/install/upgrade/v1-to-v2/manual-upgrade/#1x-compatible-authorizations) +[v1-compatible authorizations](/influxdb/version/install/upgrade/v1-to-v2/manual-upgrade/#1x-compatible-authorizations) when manually upgrading from InfluxDB v1 to v2. {{% /show-in %}} @@ -259,7 +264,44 @@ Replace the following: {{% /show-in %}} -##### InfluxQL support +### Troubleshoot authentication issues + +#### Unauthorized when using the initial username and password + +You can't use the InfluxDB 2.x username and password to authenticate with the InfluxDB 1.x compatibility API. 
+For example, given the following Docker Compose configuration: + +```yaml +# Docker compose example + influx2: + image: influxdb:2.4.0 + volumes: + - ./dev/influxdb2:/var/lib/influxdb2 + ports: + - "8086:8086" + environment: + DOCKER_INFLUXDB_INIT_USERNAME: dev + DOCKER_INFLUXDB_INIT_PASSWORD: 12345678 + DOCKER_INFLUXDB_INIT_ORG: com.some + DOCKER_INFLUXDB_INIT_BUCKET: m2_dev + DOCKER_INFLUXDB_INIT_MODE: setup +``` + +The following query using the v1 `/query` endpoint and v2 initial username and password returns an `unauthorized` error: + +```bash +# Using the initial username and password +curl --get "http://localhost:8086/query" \ + --data-urlencode "u=dev" \ + --data-urlencode "p=12345678" \ + --data-urlencode "db=m2_dev" \ + --data-urlencode "q=SELECT * FROM default" +``` + +Instead, [authenticate with a token](#authenticate-with-the-token-scheme) or a [1.x username and password scheme](#authenticate-with-a-username-and-password-scheme). + + +## InfluxQL support The compatibility API supports InfluxQL, with the following caveats: diff --git a/content/shared/influxdb-v2/get-started/query.md b/content/shared/influxdb-v2/get-started/query.md index ccb6ec3e2..0b80ef0da 100644 --- a/content/shared/influxdb-v2/get-started/query.md +++ b/content/shared/influxdb-v2/get-started/query.md @@ -496,7 +496,7 @@ To query data from InfluxDB using InfluxQL and the InfluxDB HTTP API, send a req to the InfluxDB API [`/query` 1.X compatibility endpoint](/influxdb/version/reference/api/influxdb-1x/query/) using the `POST` request method. 
-{{< api-endpoint endpoint="http://localhost:8086/query" method="post" api-ref="/influxdb/version/api/v1-compatibility/#operation/PostQueryV1" >}} +{{< api-endpoint endpoint="http://localhost:8086/query" method="post" api-ref="/influxdb/version/api/v1/#operation/PostQueryV1" >}} Include the following with your request: diff --git a/content/shared/influxdb-v2/monitor-alert/templates/monitor.md b/content/shared/influxdb-v2/monitor-alert/templates/monitor.md index 4b5a2c7eb..9c8e28bdc 100644 --- a/content/shared/influxdb-v2/monitor-alert/templates/monitor.md +++ b/content/shared/influxdb-v2/monitor-alert/templates/monitor.md @@ -17,7 +17,7 @@ Do the following: Before you begin, make sure you have access to the following: - InfluxDB Cloud account ([sign up for free here](https://cloud2.influxdata.com/signup)) -- Command line access to a machine [running InfluxDB OSS 2.x](/influxdb/version/install/) and permissions to install Telegraf on this machine +- Command line access to a machine [running InfluxDB OSS 2.x](/influxdb/v2/install/) and permissions to install Telegraf on this machine - Internet connectivity from the machine running InfluxDB OSS 2.x and Telegraf to InfluxDB Cloud - Sufficient resource availability to install the template (InfluxDB Cloud Free Plan accounts include [resource limits](/influxdb/cloud/account-management/pricing-plans/#resource-limits/influxdb/cloud/account-management/pricing-plans/#resource-limits)) @@ -53,9 +53,9 @@ InfluxDB OSS metrics to an InfluxDB endpoint and a dashboard that visualizes the By default, InfluxDB OSS 2.x has a `/metrics` endpoint available, which exports internal InfluxDB metrics in [Prometheus format](https://prometheus.io/docs/concepts/data_model/). -1. Ensure the `/metrics` endpoint is [enabled](/influxdb/version/reference/config-options/#metrics-disabled). +1. Ensure the `/metrics` endpoint is [enabled](/influxdb/v2/reference/config-options/#metrics-disabled). 
If you've changed the default settings to disable the `/metrics` endpoint, - [re-enable these settings](/influxdb/version/reference/config-options/#metrics-disabled). + [re-enable these settings](/influxdb/v2/reference/config-options/#metrics-disabled). 2. Navigate to the `/metrics` endpoint of your InfluxDB OSS instance to view the InfluxDB OSS system metrics in your browser: ## Set up Telegraf diff --git a/content/shared/influxdb-v2/query-data/execute-queries/influx-api.md b/content/shared/influxdb-v2/query-data/execute-queries/influx-api.md index 80c8cb4e7..5e8c65be1 100644 --- a/content/shared/influxdb-v2/query-data/execute-queries/influx-api.md +++ b/content/shared/influxdb-v2/query-data/execute-queries/influx-api.md @@ -2,63 +2,71 @@ The [InfluxDB v2 API](/influxdb/version/reference/api) provides a programmatic interface for all interactions with InfluxDB. To query InfluxDB {{< current-version >}}, do one of the following: -- Send a Flux query request to the [`/api/v2/query`](/influxdb/version/api/#operation/PostQueryAnalyze) endpoint. -- Send an InfluxQL query request to the [/query 1.x compatibility API](/influxdb/version/reference/api/influxdb-1x/query/). +- [Send a Flux query request](#send-a-flux-query-request) +- [Send an InfluxQL query request](#send-an-influxql-query-request) + +## Send a Flux query request + +Send a Flux query request to the following endpoint: + +{{% api-endpoint method="POST" endpoint="/api/v2/query" api-ref="/influxdb/version/api/#operation/PostQueryAnalyze" %}} In your request, set the following: -- Your organization via the `org` or `orgID` URL parameters. -- `Authorization` header to `Token ` + your API token. -- `Accept` header to `application/csv`. -- `Content-type` header to `application/vnd.flux` (Flux only) or `application/json` (Flux or InfluxQL). -- Query in Flux or InfluxQL with the request's raw data. 
+- Your organization via the `org` or `orgID` URL parameters +- Headers: + - `Authorization: Token ` + - `Accept: application/csv` + - `Content-type: application/vnd.flux` +- Your Flux query text in the request body -{{% note %}} -#### Use gzip to compress the query response - -To compress the query response, set the `Accept-Encoding` header to `gzip`. -This saves network bandwidth, but increases server-side load. - -We recommend only using gzip compression on responses that are larger than 1.4 KB. -If the response is smaller than 1.4 KB, gzip encoding will always return a 1.4 KB -response, despite the uncompressed response size. -1500 bytes (~1.4 KB) is the maximum transmission unit (MTU) size for the public -network and is the largest packet size allowed at the network layer. -{{% /note %}} +> [!Note] +> #### Use gzip to compress a large query response +> +> To compress the query response, set the `Accept-Encoding` header to `gzip`. +> This saves network bandwidth, but increases server-side load. +> +> We recommend only using gzip compression on responses that are larger than 1.4 KB. +> If the response is smaller than 1.4 KB, gzip encoding will always return a 1.4 KB +> response, despite the uncompressed response size. +> 1500 bytes (~1.4 KB) is the maximum transmission unit (MTU) size for the public +> network and is the largest packet size allowed at the network layer. 
#### Flux - Example query request -Below is an example `curl` request that sends a Flux query to InfluxDB {{< current-version >}}: +The following example shows how to use cURL to send a Flux query to InfluxDB {{< current-version >}}: + +{{% code-placeholders "ORG_ID|API_TOKEN|BUCKET_NAME" %}} {{< code-tabs-wrapper >}} {{% code-tabs %}} [Without compression](#) [With compression](#) {{% /code-tabs %}} - {{% code-tab-content %}} ```bash -curl --request POST \ - http://localhost:8086/api/v2/query?orgID=INFLUX_ORG_ID \ - --header 'Authorization: Token INFLUX_TOKEN' \ +curl \ + --request POST \ + http://{{< influxdb/host >}}/api/v2/query?orgID=ORG_ID \ + --header 'Authorization: Token API_TOKEN' \ --header 'Accept: application/csv' \ --header 'Content-type: application/vnd.flux' \ - --data 'from(bucket:"example-bucket") + --data 'from(bucket:"BUCKET_NAME") |> range(start: -12h) |> filter(fn: (r) => r._measurement == "example-measurement") |> aggregateWindow(every: 1h, fn: mean)' ``` {{% /code-tab-content %}} - {{% code-tab-content %}} ```bash -curl --request POST \ - http://localhost:8086/api/v2/query?orgID=INFLUX_ORG_ID \ - --header 'Authorization: Token INFLUX_TOKEN' \ +curl \ + --request POST \ + http://{{< influxdb/host >}}/api/v2/query?orgID=ORG_ID \ + --header 'Authorization: Token API_TOKEN' \ --header 'Accept: application/csv' \ --header 'Content-type: application/vnd.flux' \ --header 'Accept-Encoding: gzip' \ - --data 'from(bucket:"example-bucket") + --data 'from(bucket:"BUCKET_NAME") |> range(start: -12h) |> filter(fn: (r) => r._measurement == "example-measurement") |> aggregateWindow(every: 1h, fn: mean)' @@ -66,9 +74,82 @@ curl --request POST \ {{% /code-tab-content %}} {{< /code-tabs-wrapper >}} +{{% /code-placeholders %}} + +Replace the following with your values: + +- {{% code-placeholder-key %}}`ORG_ID`{{% /code-placeholder-key %}} - the ID of the [organization](/influxdb/version/admin/organizations/) that owns the bucket. 
+- {{% code-placeholder-key %}}`API_TOKEN`{{% /code-placeholder-key %}} - your [token](/influxdb/version/admin/tokens/). +- {{% code-placeholder-key %}}`BUCKET_NAME`{{% /code-placeholder-key %}} - the name of the [bucket](/influxdb/version/admin/buckets/) to query. + +## Send an InfluxQL query request + +To query InfluxDB {{< current-version >}} using the [InfluxQL query language](/influxdb/v2/reference/syntax/influxql/), send a request to the v1-compatible API endpoint: + +{{% api-endpoint method="GET" endpoint="/query" api-ref="/influxdb/v2/api/v2/#operation/GetLegacyQuery" %}} + +{{% api-endpoint method="POST" endpoint="/query" api-ref="/influxdb/v2/api/v2/#operation/PostQueryV1" %}} + +In your request, set the following: + +- [1.x-compatible or 2.x authentication](/influxdb/v2/api-guide/influxdb-1x/#authentication) credentials +- Headers: + - `Accept: application/csv` or `Accept: application/json` + - `Content-type: application/vnd.influxql` +- The database and retention policy mapped to the bucket you want to query +- Your InfluxQL query text + +> [!Note] +> If you have an existing bucket that doesn't follow the **database/retention-policy** naming convention, +> you **must** [manually create a database and retention policy mapping](/influxdb/v2/query-data/influxql/dbrp/#create-dbrp-mappings) +> to query that bucket with the `/query` compatibility API. +> Use the `db` and `rp` query parameters to specify the database and retention policy +> for the bucket you want to query. 
+ #### InfluxQL - Example query request -Below is an example `curl` request that sends an InfluxQL query to InfluxDB {{< current-version >}}: +The following example shows how to use cURL to send an InfluxQL query to InfluxDB {{< current-version >}} using v1-compatible authentication: + +{{% code-placeholders "API_TOKEN|BUCKET_NAME" %}} + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[HTTP POST](#) +[HTTP GET](#) +{{% /code-tabs %}} + +{{% code-tab-content %}} + +```bash +# 1.x compatible POST request using Basic authentication and InfluxQL +curl --request POST \ + "http://{{< influxdb/host >}}/query?db=BUCKET_NAME&p=API_TOKEN&u=ignored" \ + --header "Content-type: application/vnd.influxql" \ + --data "SELECT * FROM home WHERE time > now() - 1h" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} + +```bash +# 1.x compatible GET request using Basic authentication and InfluxQL +curl --get "http://{{< influxdb/host >}}/query" \ + --header "Accept: application/json" \ + --data-urlencode "q=SELECT * FROM home WHERE time > now() - 1h" \ + --data-urlencode "db=BUCKET_NAME" \ + --data-urlencode "u=ignored" \ + --data-urlencode "p=API_TOKEN" +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +{{% /code-placeholders %}} + +Replace the following with your values: + +- {{% code-placeholder-key %}}`API_TOKEN`{{% /code-placeholder-key %}} - your [token](/influxdb/version/admin/tokens/). +- {{% code-placeholder-key %}}`BUCKET_NAME`{{% /code-placeholder-key %}} - the name of the [bucket](/influxdb/version/admin/buckets/) to query. 
+ +{{% code-placeholders "ORG_ID|API_TOKEN|BUCKET_NAME" %}} {{< code-tabs-wrapper >}} {{% code-tabs %}} @@ -78,24 +159,35 @@ Below is an example `curl` request that sends an InfluxQL query to InfluxDB {{< {{% code-tab-content %}} ```bash -curl --request -G http://localhost:8086/query?orgID=INFLUX_ORG_ID&database=MyDB&retention_policy=MyRP \ - --header 'Authorization: Token INFLUX_TOKEN' \ +curl --get "http://{{< influxdb/host >}}/query" \ --header 'Accept: application/csv' \ --header 'Content-type: application/json' \ + --data-urlencode "db=BUCKET_NAME" \ + --data-urlencode "p=API_TOKEN" \ + --data-urlencode "u=ignored" \ --data-urlencode "q=SELECT used_percent FROM example-db.example-rp.example-measurement WHERE host=host1" ``` {{% /code-tab-content %}} - {{% code-tab-content %}} + ```bash -curl --request -G http://localhost:8086/query?orgID=INFLUX_ORG_ID&database=MyDB&retention_policy=MyRP \ - --header 'Authorization: Token INFLUX_TOKEN' \ +curl --get "http://{{< influxdb/host >}}/query" \ --header 'Accept: application/csv' \ --header 'Content-type: application/json' \ --header 'Accept-Encoding: gzip' \ + --data-urlencode "db=BUCKET_NAME" \ + --data-urlencode "p=API_TOKEN" \ + --data-urlencode "u=ignored" \ --data-urlencode "q=SELECT used_percent FROM example-db.example-rp.example-measurement WHERE host=host1" ``` {{% /code-tab-content %}} {{< /code-tabs-wrapper >}} +{{% /code-placeholders %}} + +Replace the following with your values: + +- {{% code-placeholder-key %}}`API_TOKEN`{{% /code-placeholder-key %}} - your [token](/influxdb/version/admin/tokens/). +- {{% code-placeholder-key %}}`BUCKET_NAME`{{% /code-placeholder-key %}} - the name of the [bucket](/influxdb/version/admin/buckets/) to query. + InfluxDB returns the query results in [annotated CSV](/influxdb/version/reference/syntax/annotated-csv/). 
diff --git a/content/shared/influxdb-v2/query-data/influxql/explore-data/where.md b/content/shared/influxdb-v2/query-data/influxql/explore-data/where.md index 8476319f0..c2f5348f6 100644 --- a/content/shared/influxdb-v2/query-data/influxql/explore-data/where.md +++ b/content/shared/influxdb-v2/query-data/influxql/explore-data/where.md @@ -191,7 +191,7 @@ InfluxQL requires single quotes around tag values in the `WHERE` clause. {{% /expand %}} -{{% expand "Select data with specific field key-values and tag key-valuest" %}} +{{% expand "Select data with specific field key-values and tag key-values" %}} ```sql SELECT "water_level" FROM "h2o_feet" WHERE "location" <> 'santa_monica' AND (water_level < -0.59 OR water_level > 9.95) diff --git a/content/shared/influxdb-v2/reference/cli/influx/v1/auth/_index.md b/content/shared/influxdb-v2/reference/cli/influx/v1/auth/_index.md index 7c1996dd6..34b9778be 100644 --- a/content/shared/influxdb-v2/reference/cli/influx/v1/auth/_index.md +++ b/content/shared/influxdb-v2/reference/cli/influx/v1/auth/_index.md @@ -6,7 +6,7 @@ InfluxDB {{< current-version >}} uses [API tokens](/influxdb/version/admin/token The [1.x compatibility API](/influxdb/version/reference/api/influxdb-1x/) lets clients authenticate with InfluxDB {{< current-version >}} using the InfluxDB 1.x convention of username and password. {{% note %}} -1.x-compatible authorizations are separate from the credentials used to log +v1-compatible authorizations are separate from the credentials used to log into the InfluxDB user interface. 
{{% /note %}} diff --git a/content/shared/influxdb-v2/reference/faq.md b/content/shared/influxdb-v2/reference/faq.md index 58123effc..6fa20635b 100644 --- a/content/shared/influxdb-v2/reference/faq.md +++ b/content/shared/influxdb-v2/reference/faq.md @@ -664,8 +664,8 @@ from(bucket: "example-bucket") {{% /show-in %}} Using InfluxQL with InfluxDB {{< current-version >}} is made possible by the -[1.x compatibility API](/influxdb/version/reference/api/influxdb-1x/) which replicates -the `/query` endpoint from InfluxDB 1.x. This allows all InfluxDB 1.x-compatible +[v1 compatibility API](/influxdb/version/reference/api/influxdb-1x/) which replicates +the `/query` endpoint from InfluxDB 1.x. This allows all InfluxDB v1-compatible clients to work with InfluxDB {{< current-version >}}. However, InfluxQL relies on a database and retention policy data model doesn't exist in InfluxDB {{< current-version >}}, but has been replaced by [buckets](/influxdb/version/reference/glossary/#bucket). diff --git a/content/shared/influxdb-v2/write-data/migrate-data/migrate-cloud-to-oss.md b/content/shared/influxdb-v2/write-data/migrate-data/migrate-cloud-to-oss.md index af593aa4f..1189c0db4 100644 --- a/content/shared/influxdb-v2/write-data/migrate-data/migrate-cloud-to-oss.md +++ b/content/shared/influxdb-v2/write-data/migrate-data/migrate-cloud-to-oss.md @@ -22,7 +22,7 @@ each batch to an InfluxDB OSS bucket. ## Set up the migration -1. [Install and set up InfluxDB OSS](/influxdb/version/install/). +1. [Install and set up InfluxDB OSS](/influxdb/v2/install/). 2. **In InfluxDB Cloud**, [create an API token](/influxdb/cloud/admin/tokens/create-token/) with **read access** to the bucket you want to migrate. 
diff --git a/content/shared/influxdb-v2/write-data/replication/_index.md b/content/shared/influxdb-v2/write-data/replication/_index.md index b28081aaa..f0889f366 100644 --- a/content/shared/influxdb-v2/write-data/replication/_index.md +++ b/content/shared/influxdb-v2/write-data/replication/_index.md @@ -1,5 +1,5 @@ -Running [InfluxDB OSS](/influxdb/version/install/) at the edge lets you collect, process, transform, and analyze high-precision data locally. +Running [InfluxDB OSS](/influxdb/v2/install/) at the edge lets you collect, process, transform, and analyze high-precision data locally. **Edge Data Replication** lets you replicate data from distributed edge environments to [InfluxDB Cloud](/influxdb/cloud/sign-up/), aggregating and storing data for long-term management and analysis. {{< youtube qsj_TTpDyf4 >}} diff --git a/content/shared/influxdb3-admin/databases/create.md b/content/shared/influxdb3-admin/databases/create.md index a8cd0f8fe..76f6cc699 100644 --- a/content/shared/influxdb3-admin/databases/create.md +++ b/content/shared/influxdb3-admin/databases/create.md @@ -4,10 +4,7 @@ to create a database in {{< product-name >}}. Provide the following: - Database name _(see [Database naming restrictions](#database-naming-restrictions))_ -- {{< product-name >}} authorization token - - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization token. +- {{< product-name >}} {{% token-link "admin" "admin" %}} diff --git a/content/shared/influxdb3-admin/databases/delete.md b/content/shared/influxdb3-admin/databases/delete.md index 9e3354967..80190b2c1 100644 --- a/content/shared/influxdb3-admin/databases/delete.md +++ b/content/shared/influxdb3-admin/databases/delete.md @@ -11,10 +11,7 @@ to delete a database from {{< product-name >}}. Provide the following: - Name of the database to delete -- {{< product-name >}} authorization token - - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization token. 
+- {{< product-name >}} {{% token-link "admin" "admin" %}} {{% code-placeholders "DATABASE_NAME" %}} ```sh diff --git a/content/shared/influxdb3-admin/databases/list.md b/content/shared/influxdb3-admin/databases/list.md index 109f09d94..1954dcb48 100644 --- a/content/shared/influxdb3-admin/databases/list.md +++ b/content/shared/influxdb3-admin/databases/list.md @@ -6,10 +6,7 @@ Provide the following: - _(Optional)_ [Output format](#output-formats) with the `--format` option - _(Optional)_ [Show deleted databases](list-deleted-databasese) with the `--show-deleted` option - - {{< product-name >}} authorization token with the `-t`, `--token` option - - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization token. + - {{< product-name >}} {{% token-link "admin" "admin" %}} with the `-t`, `--token` option ```sh influxdb3 show databases ``` diff --git a/content/shared/influxdb3-admin/query-system-data/_index.md b/content/shared/influxdb3-admin/query-system-data/_index.md index 508cdc34b..c3ef83994 100644 --- a/content/shared/influxdb3-admin/query-system-data/_index.md +++ b/content/shared/influxdb3-admin/query-system-data/_index.md @@ -93,7 +93,7 @@ that surround field names._ ```bash curl "http://localhost:8181/api/v3/query_sql" \ - --header "Content-Type: application/json" \ + --header "Authorization: Bearer AUTH_TOKEN" \ --json '{ "db": "mydb", "q": "SELECT * FROM information_schema.columns WHERE table_schema = '"'iox'"' AND table_name = '"'system_swap'"'", @@ -120,7 +120,7 @@ To view recently executed queries, query the `queries` system table: ```bash curl "http://localhost:8181/api/v3/query_sql" \ - --header "Content-Type: application/json" \ + --header "Authorization: Bearer AUTH_TOKEN" \ --json '{ "db": "mydb", "q": "SELECT * FROM system.queries LIMIT 2", diff --git a/content/shared/influxdb3-admin/tokens/_index.md b/content/shared/influxdb3-admin/tokens/_index.md index 2ab0675ae..b2591ef98 100644 --- 
a/content/shared/influxdb3-admin/tokens/_index.md +++ b/content/shared/influxdb3-admin/tokens/_index.md @@ -12,7 +12,7 @@ The mechanism for providing your token depends on the client you use to interact {{< tabs-wrapper >}} {{% tabs %}} [influxdb3 CLI](#influxdb3-cli-auth) -[cURL](#curl-auth) +[HTTP API](#http-api-auth) {{% /tabs %}} {{% tab-content %}} @@ -49,6 +49,12 @@ authorization token to all `influxdb3` commands. {{% /tab-content %}} {{% tab-content %}} +To authenticate directly to the HTTP API, you can include your authorization token in the HTTP Authorization header of your request. +The `Authorization: Bearer AUTH_TOKEN` scheme works with all HTTP API endpoints that require authentication. + +The following examples use `curl` to show how to authenticate to the HTTP API. + + {{% code-placeholders "YOUR_AUTH_TOKEN" %}} ```bash # Add your token to the HTTP Authorization header @@ -57,14 +63,46 @@ curl "http://{{< influxdb/host >}}/api/v3/query_sql" \ --data-urlencode "db=DATABASE_NAME" \ --data-urlencode "q=SELECT * FROM 'DATABASE_NAME' WHERE time > now() - INTERVAL '10 minutes'" ``` -{{% /code-placeholders %}} +### Authenticate using v1 and v2 compatibility + +```bash +# Token scheme with v2 /api/v2/write +curl http://localhost:8181/api/v2/write\?bucket\=DATABASE_NAME \ + --header "Authorization: Token YOUR_AUTH_TOKEN" \ + --data-raw "home,room=Kitchen temp=23.5 1622547800" +``` + +```bash +# Basic scheme with v1 /write +# Username is ignored, but required for the request +# Password is your auth token encoded in base64 +curl "http://localhost:8181/write?db=DATABASE_NAME" \ + --user "admin:YOUR_AUTH_TOKEN" \ + --data-raw "home,room=Kitchen temp=23.5 1622547800" +``` + +```bash +# URL auth parameters with v1 /write +# Username is ignored, but required for the request +curl "http://localhost:8181/write?db=DATABASE_NAME&u=admin&p=YOUR_AUTH_TOKEN" \ + --data-raw "home,room=Kitchen temp=23.5 1622547800" +``` +{{% /code-placeholders %}} {{% /tab-content %}} {{< 
/tabs-wrapper >}} Replace the following with your values: - {{% code-placeholder-key %}}`YOUR_AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link %}} -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database you want to query +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the [database](/influxdb3/version/admin/databases) you want to query + +To use tokens with other clients for {{< product-name >}}, +see the client-specific documentation: + +- [InfluxDB 3 Explorer](/influxdb3/explorer/) +- [InfluxDB client libraries](/influxdb3/version/reference/client-libraries/) +- [Telegraf](/telegraf/v1/) +- [Grafana](/influxdb3/version/visualize-data/grafana/) {{< children hlevel="h2" readmore=true hr=true >}} diff --git a/content/shared/influxdb3-cli/create/_index.md b/content/shared/influxdb3-cli/create/_index.md index 3c9fa9ae2..a01afe2a7 100644 --- a/content/shared/influxdb3-cli/create/_index.md +++ b/content/shared/influxdb3-cli/create/_index.md @@ -12,6 +12,7 @@ influxdb3 create ## Subcommands +{{% show-in "enterprise" %}} | Subcommand | Description | | :---------------------------------------------------------------------------------- | :---------------------------------------------- | | [database](/influxdb3/version/reference/cli/influxdb3/create/database/) | Create a new database | @@ -22,6 +23,19 @@ influxdb3 create | [token](/influxdb3/version/reference/cli/influxdb3/create/token/) | Create a new authentication token | | [trigger](/influxdb3/version/reference/cli/influxdb3/create/trigger/) | Create a new trigger for the processing engine | | help | Print command help or the help of a subcommand | +{{% /show-in %}} + +{{% show-in "core" %}} +| Subcommand | Description | +| :---------------------------------------------------------------------------------- | :---------------------------------------------- | +| 
[database](/influxdb3/version/reference/cli/influxdb3/create/database/) | Create a new database | +| [last_cache](/influxdb3/version/reference/cli/influxdb3/create/last_cache/) | Create a new last value cache | +| [distinct_cache](/influxdb3/version/reference/cli/influxdb3/create/distinct_cache/) | Create a new distinct value cache | +| [table](/influxdb3/version/reference/cli/influxdb3/create/table/) | Create a new table in a database | +| [token](/influxdb3/version/reference/cli/influxdb3/create/token/) | Create a new authentication token | +| [trigger](/influxdb3/version/reference/cli/influxdb3/create/trigger/) | Create a new trigger for the processing engine | +| help | Print command help or the help of a subcommand | +{{% /show-in %}} ## Options diff --git a/content/shared/influxdb3-cli/create/database.md b/content/shared/influxdb3-cli/create/database.md index 630546c14..30c9c78eb 100644 --- a/content/shared/influxdb3-cli/create/database.md +++ b/content/shared/influxdb3-cli/create/database.md @@ -19,13 +19,14 @@ You can also set the database name using the `INFLUXDB3_DATABASE_NAME` environme ## Options -| Option | | Description | -| :----- | :----------- | :--------------------------------------------------------------------------------------- | -| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) | -| | `--token` | Authentication token | -| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) | -| `-h` | `--help` | Print help information | -| | `--help-all` | Print detailed help information | +| Option | | Description | +| :----- | :------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------- | +| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) | +| | `--retention-period` | Database [retention 
period](/influxdb3/version/reference/glossary/#retention-period) ([duration](/influxdb3/version/reference/glossary/#duration) value, for example: `30d`, `24h`, `1h`) | +| | `--token` | Authentication token | +| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) | +| `-h` | `--help` | Print help information | +| | `--help-all` | Print detailed help information | ### Option environment variables @@ -69,4 +70,14 @@ Flags override their associated environment variables. influxdb3 create database --token AUTH_TOKEN DATABASE_NAME ``` +### Create a database with a retention period + +Creates a database with a specific retention period. + + + +```bash +influxdb3 create database --retention-period 30d DATABASE_NAME +``` + {{% /code-placeholders %}} diff --git a/content/shared/influxdb3-cli/create/table.md b/content/shared/influxdb3-cli/create/table.md index e3b858970..79c4e9ce9 100644 --- a/content/shared/influxdb3-cli/create/table.md +++ b/content/shared/influxdb3-cli/create/table.md @@ -24,6 +24,7 @@ influxdb3 create table [OPTIONS] \ ## Options +{{% hide-in "enterprise" %}} | Option | | Description | | :----- | :----------- | :--------------------------------------------------------------------------------------- | | `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) | @@ -34,6 +35,22 @@ influxdb3 create table [OPTIONS] \ | | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) | | `-h` | `--help` | Print help information | | | `--help-all` | Print detailed help information | +{{% /hide-in %}} + + +{{% show-in "enterprise" %}} +| Option | | Description | +| :----- | :------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------- | +| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is 
`http://127.0.0.1:8181`) | +| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on | +| | `--retention-period` | [Retention period](/influxdb3/version/reference/glossary/#retention-period) ([duration](/influxdb3/version/reference/glossary/#duration) value, for example: `30d`, `24h`, `1h`) for data in the table| +| | `--token` | _({{< req >}})_ Authentication token | +| | `--tags` | _({{< req >}})_ Comma-separated list of tag columns to include in the table | +| | `--fields` | Comma-separated list of field columns and their types to include in the table | +| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) | +| `-h` | `--help` | Print help information | +| | `--help-all` | Print detailed help information | +{{% /show-in %}} > [!Important] > @@ -90,6 +107,22 @@ influxdb3 create table \ TABLE_NAME ``` +{{% show-in "enterprise" %}} +### Create a table with a retention period + + + +```bash +influxdb3 create table \ + --tags room,sensor_id \ + --fields temp:float64,hum:float64 \ + --retention-period 7d \ + --database DATABASE_NAME \ + --token AUTH_TOKEN \ + TABLE_NAME +``` +{{% /show-in %}} + ### Verification Use the `SHOW TABLES` query to verify that the table was created successfully: @@ -114,7 +147,7 @@ Example output: +---------------+--------------------+----------------------------+------------+ ``` ->[!Note] +> [!Note] > `SHOW TABLES` is an SQL query. It isn't supported in InfluxQL. {{% /code-placeholders %}} diff --git a/content/shared/influxdb3-cli/create/token/admin.md b/content/shared/influxdb3-cli/create/token/admin.md index 84d79b033..6a91594d8 100644 --- a/content/shared/influxdb3-cli/create/token/admin.md +++ b/content/shared/influxdb3-cli/create/token/admin.md @@ -7,7 +7,7 @@ Create an operator token or named admin token. 
influxdb3 create token --admin [OPTIONS] ``` -## Options +## Options {.no-shorthand} | Option | Description | |:-------|:------------| diff --git a/content/shared/influxdb3-cli/create/trigger.md b/content/shared/influxdb3-cli/create/trigger.md index dbb223128..bc4bfd074 100644 --- a/content/shared/influxdb3-cli/create/trigger.md +++ b/content/shared/influxdb3-cli/create/trigger.md @@ -27,7 +27,7 @@ influxdb3 create trigger [OPTIONS] \ | `-d` | `--database` | _({{< req >}})_ Name of the database to operate on | | | `--token` | _({{< req >}})_ Authentication token | | | `--plugin-filename` | _({{< req >}})_ Name of the file, stored in the server's `plugin-dir`, that contains the Python plugin code to run | -| | `--trigger-spec` | Trigger specification--for example `table:` or `all_tables` | +| | `--trigger-spec` | Trigger specification: `table:`, `all_tables`, `every:`, `cron:`, or `request:` | | | `--disabled` | Create the trigger in disabled state | | | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) | | `-h` | `--help` | Print help information | diff --git a/content/shared/influxdb3-cli/delete/_index.md b/content/shared/influxdb3-cli/delete/_index.md index 569b87904..81a47ffc6 100644 --- a/content/shared/influxdb3-cli/delete/_index.md +++ b/content/shared/influxdb3-cli/delete/_index.md @@ -11,16 +11,28 @@ influxdb3 delete ## Subcommands -| Subcommand | Description | -| :----------------------------------------------------------------------------- | :--------------------------------------------- | -| [database](/influxdb3/version/reference/cli/influxdb3/delete/database/) | Delete a database | -| [file_index](/influxdb3/version/reference/cli/influxdb3/delete/file_index/) | Delete a file index for a database or table | -| [last_cache](/influxdb3/version/reference/cli/influxdb3/delete/last_cache/) | Delete a last value cache | +{{% show-in "enterprise" %}} +| Subcommand | Description | +| 
:---------------------------------------------------------------------------------- | :--------------------------------------------- | +| [database](/influxdb3/version/reference/cli/influxdb3/delete/database/) | Delete a database | +| [file_index](/influxdb3/version/reference/cli/influxdb3/delete/file_index/) | Delete a file index for a database or table | +| [last_cache](/influxdb3/version/reference/cli/influxdb3/delete/last_cache/) | Delete a last value cache | | [distinct_cache](/influxdb3/version/reference/cli/influxdb3/delete/distinct_cache/) | Delete a metadata cache | -| [plugin](/influxdb3/version/reference/cli/influxdb3/delete/plugin/) | Delete a processing engine plugin | -| [table](/influxdb3/version/reference/cli/influxdb3/delete/table/) | Delete a table from a database | -| [trigger](/influxdb3/version/reference/cli/influxdb3/delete/trigger/) | Delete a trigger for the processing engine | -| help | Print command help or the help of a subcommand | +| [table](/influxdb3/version/reference/cli/influxdb3/delete/table/) | Delete a table from a database | +| [trigger](/influxdb3/version/reference/cli/influxdb3/delete/trigger/) | Delete a trigger for the processing engine | +| help | Print command help or the help of a subcommand | +{{% /show-in %}} + +{{% show-in "core" %}} +| Subcommand | Description | +| :---------------------------------------------------------------------------------- | :--------------------------------------------- | +| [database](/influxdb3/version/reference/cli/influxdb3/delete/database/) | Delete a database | +| [last_cache](/influxdb3/version/reference/cli/influxdb3/delete/last_cache/) | Delete a last value cache | +| [distinct_cache](/influxdb3/version/reference/cli/influxdb3/delete/distinct_cache/) | Delete a metadata cache | +| [table](/influxdb3/version/reference/cli/influxdb3/delete/table/) | Delete a table from a database | +| [trigger](/influxdb3/version/reference/cli/influxdb3/delete/trigger/) | Delete a trigger for the 
processing engine | +| help | Print command help or the help of a subcommand | +{{% /show-in %}} ## Options diff --git a/content/shared/influxdb3-cli/delete/database.md b/content/shared/influxdb3-cli/delete/database.md index 6d675b414..14e1d4817 100644 --- a/content/shared/influxdb3-cli/delete/database.md +++ b/content/shared/influxdb3-cli/delete/database.md @@ -17,13 +17,14 @@ influxdb3 delete database [OPTIONS] ## Options -| Option | | Description | -| :----- | :----------- | :--------------------------------------------------------------------------------------- | -| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) | -| | `--token` | Authentication token | -| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) | -| `-h` | `--help` | Print help information | -| | `--help-all` | Print detailed help information | +| Option | | Description | +| :----- | :------------ | :--------------------------------------------------------------------------------------- | +| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) | +| | `--hard-delete` | When to hard delete data (never/now/default/timestamp). 
Default behavior is a soft delete that allows recovery | +| | `--token` | Authentication token | +| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) | +| `-h` | `--help` | Print help information | +| | `--help-all` | Print detailed help information | ### Option environment variables @@ -36,8 +37,10 @@ You can use the following environment variables to set command options: ## Examples -- [Delete a database](#delete-a-new-database) -- [Delete a database while specifying the token inline](#delete-a-new-database-while-specifying-the-token-inline) +- [Delete a database](#delete-a-database) +- [Delete a database while specifying the token inline](#delete-a-database-while-specifying-the-token-inline) +- [Hard delete a database immediately](#hard-delete-a-database-immediately) +- [Hard delete a database at a specific time](#hard-delete-a-database-at-a-specific-time) In the examples below, replace the following: @@ -64,4 +67,24 @@ influxdb3 delete database DATABASE_NAME influxdb3 delete database --token AUTH_TOKEN DATABASE_NAME ``` +### Hard delete a database immediately + +Permanently delete a database and all its data immediately without the ability to recover. + + + +```bash +influxdb3 delete database --hard-delete now DATABASE_NAME +``` + +### Hard delete a database at a specific time + +Schedule a database for permanent deletion at a specific timestamp. 
+ + + +```bash +influxdb3 delete database --hard-delete "2024-01-01T00:00:00Z" DATABASE_NAME +``` + {{% /code-placeholders %}} diff --git a/content/shared/influxdb3-cli/delete/table.md b/content/shared/influxdb3-cli/delete/table.md index c15674071..c96178345 100644 --- a/content/shared/influxdb3-cli/delete/table.md +++ b/content/shared/influxdb3-cli/delete/table.md @@ -15,14 +15,15 @@ influxdb3 delete table [OPTIONS] --database ## Options -| Option | | Description | -| :----- | :----------- | :--------------------------------------------------------------------------------------- | -| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) | -| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on | -| | `--token` | _({{< req >}})_ Authentication token | -| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) | -| `-h` | `--help` | Print help information | -| | `--help-all` | Print detailed help information | +| Option | | Description | +| :----- | :------------ | :--------------------------------------------------------------------------------------- | +| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) | +| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on | +| | `--hard-delete` | When to hard delete data (never/now/default/timestamp). Default behavior is a soft delete that allows recovery | +| | `--token` | _({{< req >}})_ Authentication token | +| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) | +| `-h` | `--help` | Print help information | +| | `--help-all` | Print detailed help information | ### Option environment variables @@ -49,9 +50,23 @@ influxdb3 delete table \ TABLE_NAME ``` +### Hard delete a table immediately + +Permanently delete a table and all its data immediately without the ability to recover. 
+ + + +```bash +influxdb3 delete table \ + --database DATABASE_NAME \ + --token AUTH_TOKEN \ + --hard-delete now \ + TABLE_NAME +``` + {{% /code-placeholders %}} -In the example above, replace the following: +Replace the following: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Database name diff --git a/content/shared/influxdb3-cli/show/_index.md b/content/shared/influxdb3-cli/show/_index.md index 4a5bbf935..eea02dcf0 100644 --- a/content/shared/influxdb3-cli/show/_index.md +++ b/content/shared/influxdb3-cli/show/_index.md @@ -14,6 +14,7 @@ influxdb3 show | Subcommand | Description | | :---------------------------------------------------------------------- | :--------------------------------------------- | | [databases](/influxdb3/version/reference/cli/influxdb3/show/databases/) | List database | +{{% show-in "enterprise" %}}| [license](/influxdb3/version/reference/cli/influxdb3/show/license/) | Display license information |{{% /show-in %}} | [system](/influxdb3/version/reference/cli/influxdb3/show/system/) | Display system table data | | [tokens](/influxdb3/version/reference/cli/influxdb3/show/tokens/) | List authentication tokens | | help | Print command help or the help of a subcommand | diff --git a/content/shared/influxdb3-cli/test/schedule_plugin.md b/content/shared/influxdb3-cli/test/schedule_plugin.md new file mode 100644 index 000000000..10aee669a --- /dev/null +++ b/content/shared/influxdb3-cli/test/schedule_plugin.md @@ -0,0 +1,84 @@ + +The `influxdb3 test schedule_plugin` command tests a schedule plugin. Use this command to verify plugin behavior without creating a trigger. + +## Usage + + + +```bash +influxdb3 test schedule_plugin [OPTIONS] --database +``` + +## Arguments + +- **FILENAME**: Path to the plugin file. Use the absolute path or the path relative to the current working directory, such as `/.py`. 
+ +## Options + +| Option | Flag | Description | +| :----- | :-------------------- | :-------------------------------------------------------------------------------------------- | +| `-H` | `--host` | URL of the running {{< product-name >}} server
(default: `http://127.0.0.1:8181`) | +| `-d` | `--database` | _({{< req >}})_ Name of the database you want to test the plugin against | +| | `--token` | _({{< req >}})_ Authentication token | +| | `--input-arguments` | JSON map of key/value pairs to pass as plugin input arguments (for example, `'{"key":"val"}'`)| +| | `--schedule` | Cron schedule to simulate when testing the plugin
(default: `* * * * *`) | +| | `--cache-name` | Optional cache name to associate with the test | +| | `--tls-ca` | Path to a custom TLS certificate authority for self-signed certs | +| `-h` | `--help` | Show basic help information | +| | `--help-all` | Show all available help options | + + +### Option environment variables + +You can use the following environment variables to set command options: + +| Environment Variable | Corresponding Option | +| :------------------------ | :------------------- | +| `INFLUXDB3_HOST_URL` | `--host` | +| `INFLUXDB3_DATABASE_NAME` | `--database` | +| `INFLUXDB3_AUTH_TOKEN` | `--token` | +| `INFLUXDB3_TLS_CA` | `--tls-ca` | + +## Examples + +In the examples below, replace the following: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Your target database +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: Your authentication token +- {{% code-placeholder-key %}}`PLUGIN_DIR`{{% /code-placeholder-key %}}: + the path to the plugin directory you provided when starting the server +- {{% code-placeholder-key %}}`FILENAME`{{% /code-placeholder-key %}}: + Plugin file name + +{{% code-placeholders "(DATABASE|PLUGIN_DIR|FILENAME|AUTH_TOKEN)" %}} + +### Test a schedule plugin + + + +```bash +influxdb3 test schedule_plugin \ + --database DATABASE_NAME \ + --token AUTH_TOKEN \ + PLUGIN_DIR/FILENAME.py +``` + +### Test with input arguments and a custom cron schedule + +You can pass input arguments to your plugin as key-value pairs and specify a custom cron schedule (using Quartz cron syntax with six fields): + + + +```bash +influxdb3 test schedule_plugin \ + --host http://localhost:8182 \ + --database DATABASE_NAME \ + --token AUTH_TOKEN \ + --input-arguments threshold=10,unit=seconds \ + --schedule "0 0 * * * ?" \ + PLUGIN_DIR/FILENAME.py +``` +- Pass plugin parameters using `--input-arguments` as comma-separated key=value pairs. 
+- Use `--schedule` to set the plugin’s execution time with a Quartz cron expression. For example, "0 0 * * * ?" runs the plugin at the start of every hour. + +{{% /code-placeholders %}} diff --git a/content/shared/influxdb3-cli/update/_index.md b/content/shared/influxdb3-cli/update/_index.md new file mode 100644 index 000000000..afe2a22db --- /dev/null +++ b/content/shared/influxdb3-cli/update/_index.md @@ -0,0 +1,33 @@ +The `influxdb3 update` command updates resources such as databases and tables. + +## Usage + + + +```bash +influxdb3 update +``` + +## Subcommands + +{{% show-in "enterprise" %}} +| Subcommand | Description | +| :----------------------------------------------------------------- | :--------------------- | +| [database](/influxdb3/version/reference/cli/influxdb3/update/database/) | Update a database | +| [table](/influxdb3/version/reference/cli/influxdb3/update/table/) | Update a table | +| help | Print command help or the help of a subcommand | +{{% /show-in %}} + +{{% show-in "core" %}} +| Subcommand | Description | +| :----------------------------------------------------------------- | :--------------------- | +| [database](/influxdb3/version/reference/cli/influxdb3/update/database/) | Update a database | +| help | Print command help or the help of a subcommand | +{{% /show-in %}} + +## Options + +| Option | | Description | +| :----- | :----------- | :------------------------------ | +| `-h` | `--help` | Print help information | +| | `--help-all` | Print detailed help information | \ No newline at end of file diff --git a/content/shared/influxdb3-cli/update/database/_index.md b/content/shared/influxdb3-cli/update/database/_index.md new file mode 100644 index 000000000..a292283a4 --- /dev/null +++ b/content/shared/influxdb3-cli/update/database/_index.md @@ -0,0 +1,84 @@ +The `influxdb3 update database` command updates an existing database in your {{< product-name >}} instance. + +Use this command to update a database's retention period. 
+ +## Usage + + + +```bash +influxdb3 update database [OPTIONS] --database +``` + +## Arguments + +- **`DATABASE_NAME`**: (Required) The name of the database to update. + +You can also set the database name using the `INFLUXDB3_DATABASE_NAME` environment variable. + +## Options + +| Option | | Description | +| :----- | :------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------- | +| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) | +| `-d` | `--database` | The name of the database to update | +| | `--token` | Authentication token | +| `-r` | `--retention-period` | The retention period as a [duration](/influxdb3/version/reference/glossary/#duration) value (for example: `30d`, `24h`) or `none` to clear | +| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) | +| `-h` | `--help` | Print help information | +| | `--help-all` | Print detailed help information | + +### Option environment variables + +You can use the following environment variables instead of providing CLI options directly: + +| Environment Variable | Option | +| :------------------------ | :----------- | +| `INFLUXDB3_HOST_URL` | `--host` | +| `INFLUXDB3_DATABASE_NAME` | `--database` | +| `INFLUXDB3_AUTH_TOKEN` | `--token` | +| `INFLUXDB3_TLS_CA` | `--tls-ca` | + +## Examples + +The following examples show how to update a database. + +In your commands replace the following: +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: + Database name +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: + Authentication token + +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} + +### Update a database retention period + +Updates a database retention period to 30 days. 
+ + + +```bash +influxdb3 update database --retention-period 30d DATABASE_NAME +``` + +### Clear a database retention period + +Removes the retention period from a database by setting it to `none`. + + + +```bash +influxdb3 update database --retention-period none DATABASE_NAME +``` + +### Update a database with authentication + +Updates a database using an authentication token. + + + +```bash +influxdb3 update database --token AUTH_TOKEN --retention-period 7d DATABASE_NAME +``` + +{{% /code-placeholders %}} \ No newline at end of file diff --git a/content/shared/influxdb3-cli/update/table/_index.md b/content/shared/influxdb3-cli/update/table/_index.md new file mode 100644 index 000000000..928ca8573 --- /dev/null +++ b/content/shared/influxdb3-cli/update/table/_index.md @@ -0,0 +1,74 @@ +The `influxdb3 update table` command updates an existing table in a database in your {{< product-name >}} instance. + +Use this command to update a table's retention period. + +## Usage + + + +```bash +influxdb3 update table [OPTIONS] --database +``` + +## Arguments + +- **`TABLE_NAME`**: (Required) The name of the table to update + +## Options + +| Option | | Description | +| :----- | :------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------- | +| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) | +| `-d` | `--database` | The name of the database containing the table | +| | `--token` | Authentication token | +| `-r` | `--retention-period` | The retention period as a [duration](/influxdb3/version/reference/glossary/#duration) value (for example: `30d`, `24h`) or `none` to clear | +| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) | +| `-h` | `--help` | Print help information | +| | `--help-all` | Print detailed help information | + +### Option environment variables + 
+You can use the following environment variables instead of providing CLI options directly: + +| Environment Variable | Option | +| :------------------------ | :----------- | +| `INFLUXDB3_HOST_URL` | `--host` | +| `INFLUXDB3_DATABASE_NAME` | `--database` | +| `INFLUXDB3_AUTH_TOKEN` | `--token` | +| `INFLUXDB3_TLS_CA` | `--tls-ca` | + +## Examples + +The following examples show how to update a table. + +In your commands replace the following: +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: + Database name +- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: + Table name +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: + Authentication token + +{{% code-placeholders "DATABASE_NAME|TABLE_NAME|AUTH_TOKEN" %}} + +### Update a table retention period + +Updates a table retention period to 30 days. + + + +```bash +influxdb3 update table --database DATABASE_NAME --token AUTH_TOKEN --retention-period 30d TABLE_NAME +``` + +### Clear a table retention period + +Removes the retention period from a table by setting it to `none`. 
+ + + +```bash +influxdb3 update table --database DATABASE_NAME --retention-period none TABLE_NAME +``` + +{{% /code-placeholders %}} \ No newline at end of file diff --git a/content/shared/influxdb3-get-started/_index.md b/content/shared/influxdb3-get-started/_index.md index b2d12227a..c5a7f60dc 100644 --- a/content/shared/influxdb3-get-started/_index.md +++ b/content/shared/influxdb3-get-started/_index.md @@ -1,26 +1,8 @@ +This guide walks through the basic steps of getting started with {{% product-name %}}, +including the following: -### What's in this guide - -{{% show-in "enterprise" %}} -This guide covers Enterprise as well as InfluxDB 3 Core, including the following topics: -{{% /show-in %}} -{{% show-in "core" %}} -This guide covers InfluxDB 3 Core (the open source release), including the following topics: -{{% /show-in %}} - -- [Install and startup](#install-and-startup) -- [Authentication and authorization](#authentication-and-authorization) -- [Data Model](#data-model) -- [Tools to use](#tools-to-use) -- [Write data](#write-data) -- [Query data](#query-data) -- [Last values cache](#last-values-cache) -- [Distinct values cache](#distinct-values-cache) -- [Python plugins and the processing engine](#python-plugins-and-the-processing-engine) -{{% show-in "enterprise" %}} -- [Multi-server setups](#multi-server-setup) -{{% /show-in %}} +{{< children type="ordered-list" >}} > [!Tip] > #### Find support for {{% product-name %}} @@ -28,1686 +10,64 @@ This guide covers InfluxDB 3 Core (the open source release), including the follo > The [InfluxDB Discord server](https://discord.gg/9zaNCW2PRT) is the best place to find support for {{% product-name %}}. > For other InfluxDB versions, see the [Support and feedback](#bug-reports-and-feedback) options. -### Install and startup - -{{% product-name %}} runs on **Linux**, **macOS**, and **Windows**. 
- -{{% show-in "enterprise" %}} -{{% tabs-wrapper %}} -{{% tabs %}} -[Linux or macOS](#linux-or-macos) -[Windows](#windows) -[Docker](#docker) -{{% /tabs %}} -{{% tab-content %}} - -To get started quickly, download and run the install script--for example, using [curl](https://curl.se/download.html): - - -```bash -curl -O https://www.influxdata.com/d/install_influxdb3.sh \ -&& sh install_influxdb3.sh enterprise -``` - -Or, download and install [build artifacts](/influxdb3/enterprise/install/#download-influxdb-3-enterprise-binaries): - -- [Linux | AMD64 (x86_64) | GNU](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_amd64.tar.gz) - • - [sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_amd64.tar.gz.sha256) -- [Linux | ARM64 (AArch64) | GNU](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_arm64.tar.gz) - • - [sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_arm64.tar.gz.sha256) -- [macOS | Silicon (ARM64)](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_darwin_arm64.tar.gz) - • - [sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_darwin_arm64.tar.gz.sha256) - -> [!Note] -> macOS Intel builds are coming soon. 
- - -{{% /tab-content %}} -{{% tab-content %}} - -Download and install the {{% product-name %}} [Windows (AMD64, x86_64) binary](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}-windows_amd64.zip) - • -[sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}-windows_amd64.zip.sha256) - -{{% /tab-content %}} -{{% tab-content %}} - - -The [`influxdb:3-enterprise` image](https://hub.docker.com/_/influxdb/tags?tag=3-core&name=3-enterprise) -is available for x86_64 (AMD64) and ARM64 architectures. - -Pull the image: - - -```bash -docker pull influxdb:3-enterprise -``` - - -{{% /tab-content %}} -{{% /tabs-wrapper %}} -{{% /show-in %}} - -{{% show-in "core" %}} -{{% tabs-wrapper %}} -{{% tabs %}} -[Linux or macOS](#linux-or-macos) -[Windows](#windows) -[Docker](#docker) -{{% /tabs %}} -{{% tab-content %}} - -To get started quickly, download and run the install script--for example, using [curl](https://curl.se/download.html): - - -```bash -curl -O https://www.influxdata.com/d/install_influxdb3.sh \ -&& sh install_influxdb3.sh -``` -Or, download and install [build artifacts](/influxdb3/core/install/#download-influxdb-3-core-binaries): - -- [Linux | AMD64 (x86_64) | GNU](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_amd64.tar.gz) - • - [sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_amd64.tar.gz.sha256) -- [Linux | ARM64 (AArch64) | GNU](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_arm64.tar.gz) - • - [sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_arm64.tar.gz.sha256) -- [macOS | Silicon (ARM64)](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_darwin_arm64.tar.gz) - • - 
[sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_darwin_arm64.tar.gz.sha256) - -> [!Note] -> macOS Intel builds are coming soon. - - -{{% /tab-content %}} -{{% tab-content %}} - -Download and install the {{% product-name %}} [Windows (AMD64, x86_64) binary](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}-windows_amd64.zip) - • -[sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}-windows_amd64.zip.sha256) - -{{% /tab-content %}} -{{% tab-content %}} - -The [`influxdb:3-core` image](https://hub.docker.com/_/influxdb/tags?tag=3-core&name=3-core) -is available for x86_64 (AMD64) and ARM64 architectures. - -Pull the image: - - -```bash -docker pull influxdb:3-core -``` - - -{{% /tab-content %}} -{{% /tabs-wrapper %}} -{{% /show-in %}} - -_Build artifacts and images update with every merge into the {{% product-name %}} `main` branch._ - -#### Verify the install - -After you have installed {{% product-name %}}, enter the following command to verify that it completed successfully: - -```bash -influxdb3 --version -``` - -If your system doesn't locate `influxdb3`, then `source` the configuration file (for example, .bashrc, .zshrc) for your shell--for example: - - -```zsh -source ~/.zshrc -``` - - -#### Start InfluxDB - -To start your InfluxDB instance, use the `influxdb3 serve` command and provide the following: - -- `--object-store`: Specifies the type of object store to use. - InfluxDB supports the following: local file system (`file`), `memory`, - S3 (and compatible services like Ceph or Minio) (`s3`), - Google Cloud Storage (`google`), and Azure Blob Storage (`azure`). - The default is `file`. - Depending on the object store type, you may need to provide additional options - for your object store configuration. 
-{{% show-in "enterprise" %}} -- `--node-id`: A string identifier that distinguishes individual server instances within the cluster. This forms the final part of the storage path: `//`. In a multi-node setup, this ID is used to reference specific nodes. -- `--cluster-id`: A string identifier that determines part of the storage path hierarchy. All nodes within the same cluster share this identifier. The storage path follows the pattern `//`. In a multi-node setup, this ID is used to reference the entire cluster. -{{% /show-in %}} -{{% show-in "core" %}} -- `--node-id`: A string identifier that distinguishes individual server instances. - This forms the final part of the storage path: `/`. -{{% /show-in %}} - -The following examples show how to start {{% product-name %}} with different object store configurations. - -> [!Note] -> #### Diskless architecture -> -> InfluxDB 3 supports a diskless architecture that can operate with object -> storage alone, eliminating the need for locally attached disks. -> {{% product-name %}} can also work with only local disk storage when needed. - -{{% show-in "enterprise" %}} -> [!Note] -> The combined path structure `//` ensures proper organization of data in your object store, allowing for clean separation between clusters and individual nodes. -{{% /show-in %}} - -##### Filesystem object store - -Store data in a specified directory on the local filesystem. -This is the default object store type. 
- -Replace the following with your values: - -{{% show-in "enterprise" %}} -```bash -# Filesystem object store -# Provide the filesystem directory -influxdb3 serve \ - --node-id host01 \ - --cluster-id cluster01 \ - --object-store file \ - --data-dir ~/.influxdb3 -``` -{{% /show-in %}} -{{% show-in "core" %}} -```bash -# File system object store -# Provide the file system directory -influxdb3 serve \ - --node-id host01 \ - --object-store file \ - --data-dir ~/.influxdb3 -``` -{{% /show-in %}} - -To run the [Docker image](/influxdb3/version/install/#docker-image) and persist data to the file system, mount a volume for the object store-for example, pass the following options: - -- `-v /path/on/host:/path/in/container`: Mounts a directory from your file system to the container -- `--object-store file --data-dir /path/in/container`: Uses the mount for server storage - - -{{% show-in "enterprise" %}} - -```bash -# File system object store with Docker -# Create a mount -# Provide the mount path -docker run -it \ - -v /path/on/host:/path/in/container \ - influxdb:3-enterprise influxdb3 serve \ - --node-id my_host \ - --cluster-id my_cluster \ - --object-store file \ - --data-dir /path/in/container -``` -{{% /show-in %}} -{{% show-in "core" %}} - -```bash -# File system object store with Docker -# Create a mount -# Provide the mount path -docker run -it \ - -v /path/on/host:/path/in/container \ - influxdb:3-core influxdb3 serve \ - --node-id my_host \ - --object-store file \ - --data-dir /path/in/container -``` -{{% /show-in %}} - -> [!Note] -> -> The {{% product-name %}} Docker image exposes port `8181`, the `influxdb3` server default for HTTP connections. -> To map the exposed port to a different port when running a container, see the Docker guide for [Publishing and exposing ports](https://docs.docker.com/get-started/docker-concepts/running-containers/publishing-ports/). - -##### S3 object store - -Store data in an S3-compatible object store. 
-This is useful for production deployments that require high availability and durability. -Provide your bucket name and credentials to access the S3 object store. - -{{% show-in "enterprise" %}} -```bash -# S3 object store (default is the us-east-1 region) -# Specify the object store type and associated options -influxdb3 serve \ - --node-id host01 \ - --cluster-id cluster01 \ - --object-store s3 \ - --bucket OBJECT_STORE_BUCKET \ - --aws-access-key AWS_ACCESS_KEY_ID \ - --aws-secret-access-key AWS_SECRET_ACCESS_KEY -``` - - -```bash -# Minio or other open source object store -# (using the AWS S3 API with additional parameters) -# Specify the object store type and associated options -influxdb3 serve \ - --node-id host01 \ - --cluster-id cluster01 \ - --object-store s3 \ - --bucket OBJECT_STORE_BUCKET \ - --aws-access-key-id AWS_ACCESS_KEY_ID \ - --aws-secret-access-key AWS_SECRET_ACCESS_KEY \ - --aws-endpoint ENDPOINT \ - --aws-allow-http -``` -{{% /show-in %}} -{{% show-in "core" %}} -```bash -# S3 object store (default is the us-east-1 region) -# Specify the object store type and associated options -influxdb3 serve \ - --node-id host01 \ - --object-store s3 \ - --bucket OBJECT_STORE_BUCKET \ - --aws-access-key AWS_ACCESS_KEY_ID \ - --aws-secret-access-key AWS_SECRET_ACCESS_KEY -``` - -```bash -# Minio or other open source object store -# (using the AWS S3 API with additional parameters) -# Specify the object store type and associated options -influxdb3 serve \ - --node-id host01 \ - --object-store s3 \ - --bucket OBJECT_STORE_BUCKET \ - --aws-access-key-id AWS_ACCESS_KEY_ID \ - --aws-secret-access-key AWS_SECRET_ACCESS_KEY \ - --aws-endpoint ENDPOINT \ - --aws-allow-http -``` -{{% /show-in %}} - -#### Memory object store - -Store data in RAM without persisting it on shutdown. -It's useful for rapid testing and development. 
- -{{% show-in "enterprise" %}} -```bash -# Memory object store -# Stores data in RAM; doesn't persist data -influxdb3 serve \ ---node-id host01 \ ---cluster-id cluster01 \ ---object-store memory -``` -{{% /show-in %}} -{{% show-in "core" %}} -```bash -# Memory object store -# Stores data in RAM; doesn't persist data -influxdb3 serve \ ---node-id host01 \ ---object-store memory -``` -{{% /show-in %}} - -For more information about server options, use the CLI help or view the [InfluxDB 3 CLI reference](/influxdb3/version/reference/cli/influxdb3/serve/): - -```bash -influxdb3 serve --help -``` - -> [!Tip] -> #### Run the InfluxDB 3 Explorer query interface (beta) -> -> InfluxDB 3 Explorer (currently in beta) is the web-based query and -> administrative interface for InfluxDB 3. -> It provides visual management of databases and tokens and an easy way to query your time series data. -> -> For more information, see the [InfluxDB 3 Explorer documentation](/influxdb3/explorer/). - -{{% show-in "enterprise" %}} -#### Licensing - -When first starting a new instance, {{% product-name %}} prompts you to select a license type. - -InfluxDB 3 Enterprise licenses authorize the use of the InfluxDB 3 Enterprise software and apply to a single cluster. Licenses are primarily based on the number of CPUs InfluxDB can use, but there are other limitations depending on the license type. The following InfluxDB 3 Enterprise license types are available: - -- **Trial**: 30-day trial license with full access to InfluxDB 3 Enterprise capabilities. -- **At-Home**: For at-home hobbyist use with limited access to InfluxDB 3 Enterprise capabilities. -- **Commercial**: Commercial license with full access to InfluxDB 3 Enterprise capabilities. - -You can learn more on managing your InfluxDB 3 Enterprise license on the [Manage your license](https://docs.influxdata.com/influxdb3/enterprise/admin/license/)page. 
-{{% /show-in %}} - -### Authentication and authorization - -{{% product-name %}} uses token-based authentication and authorization, which is enabled by default when you start the server. - -With authentication enabled, you must provide a token with `influxdb3` CLI commands and HTTP API requests. - -{{% show-in "enterprise" %}} -{{% product-name %}} supports the following types of tokens: - -- **admin token**: Grants access to all CLI actions and API endpoints. A server can have one admin token. -- **resource tokens**: Tokens that grant read and write access to specific resources (databases and system information endpoints) on the server. - - - A database token grants access to write and query data in a - database - - A system token grants read access to system information endpoints and - metrics for the server -{{% /show-in %}} -{{% show-in "core" %}} -{{% product-name %}} supports _admin_ tokens, which grant access to all CLI actions and API endpoints. -{{% /show-in %}} - -For more information about tokens and authorization, see [Manage tokens](/influxdb3/version/admin/tokens/). - -#### Create an operator token - -After you start the server, create your first admin token. -The first admin token you create is the _operator_ token for the server. - -Use the `influxdb3` CLI or the HTTP API to create your operator token. - -> [!Important] -> **Store your token securely** -> -> InfluxDB displays the token string only when you create it. -> Store your token securely—you cannot retrieve it from the database later. 
- -{{< code-tabs-wrapper >}} -{{% code-tabs %}} -[CLI](#) -[Docker](#) -{{% /code-tabs %}} -{{% code-tab-content %}} - -```bash -influxdb3 create token --admin -``` - -{{% /code-tab-content %}} -{{% code-tab-content %}} - -{{% code-placeholders "CONTAINER_NAME" %}} -```bash -# With Docker — in a new terminal: -docker exec -it CONTAINER_NAME influxdb3 create token --admin -``` -{{% /code-placeholders %}} - -Replace {{% code-placeholder-key %}}`CONTAINER_NAME`{{% /code-placeholder-key %}} with the name of your running Docker container. - -{{% /code-tab-content %}} -{{< /code-tabs-wrapper >}} - -The command returns a token string for authenticating CLI commands and API requests. -Store your token securely—you cannot retrieve it from the database later. - -#### Set your token for authentication - -Use your operator token to authenticate server actions in {{% product-name %}}, -such as creating additional tokens, performing administrative tasks, and writing and querying data. - -Use one of the following methods to provide your token and authenticate `influxdb3` CLI commands. - -In your command, replace {{% code-placeholder-key %}}`YOUR_AUTH_TOKEN`{{% /code-placeholder-key %}} with your token string (for example, the [operator token](#create-an-operator-token) from the previous step). 
- -{{< tabs-wrapper >}} -{{% tabs %}} -[Environment variable (recommended)](#) -[Command option](#) -{{% /tabs %}} -{{% tab-content %}} - -Set the `INFLUXDB3_AUTH_TOKEN` environment variable to have the CLI use your token automatically: - -{{% code-placeholders "YOUR_AUTH_TOKEN" %}} -```bash -export INFLUXDB3_AUTH_TOKEN=YOUR_AUTH_TOKEN -``` -{{% /code-placeholders %}} - -{{% /tab-content %}} -{{% tab-content %}} - -Include the `--token` option with CLI commands: - -{{% code-placeholders "YOUR_AUTH_TOKEN" %}} -```bash -influxdb3 show databases --token AUTH_TOKEN -``` -{{% /code-placeholders %}} - -{{% /tab-content %}} -{{< /tabs-wrapper >}} - -For HTTP API requests, include your token in the `Authorization` header--for example: - -{{% code-placeholders "AUTH_TOKEN" %}} -```bash -curl "http://{{< influxdb/host >}}/api/v3/configure/database" \ - --header "Authorization: Bearer AUTH_TOKEN" -``` -{{% /code-placeholders %}} - -#### Learn more about tokens and permissions - -- [Manage admin tokens](/influxdb3/version/admin/tokens/admin/) - Understand and manage operator and named admin tokens -{{% show-in "enterprise" %}} -- [Manage resource tokens](/influxdb3/version/admin/tokens/resource/) - Create, list, and delete resource tokens -{{% /show-in %}} -- [Authentication](/influxdb3/version/reference/internals/authentication/) - Understand authentication, authorizations, and permissions in {{% product-name %}} - -### Data model - -The database server contains logical databases, which have tables, which have columns. Compared to previous versions of InfluxDB you can think of a database as a `bucket` in v2 or as a `db/retention_policy` in v1. A `table` is equivalent to a `measurement`, which has columns that can be of type `tag` (a string dictionary), `int64`, `float64`, `uint64`, `bool`, or `string` and finally every table has a `time` column that is a nanosecond precision timestamp. 
-
-In InfluxDB 3, every table has a primary key--the ordered set of tags and the time--for its data.
-This is the sort order used for all Parquet files that get created. When you create a table, either through an explicit call or by writing data into a table for the first time, it sets the primary key to the tags in the order they arrived. This is immutable. Although InfluxDB is still a _schema-on-write_ database, the tag column definitions for a table are immutable.
-
-Tags should hold unique identifying information like `sensor_id`, or `building_id` or `trace_id`. All other data should be kept in fields. You will be able to add fast last N value and distinct value lookups later for any column, whether it is a field or a tag.
-
-### Tools to use
+## Data model
+
+The {{% product-name %}} server contains logical databases; databases contain
+tables; and tables are composed of columns.
+
+Compared to previous versions of InfluxDB, you can think of a database as an
+InfluxDB v2 `bucket` or an InfluxDB v1 `db/retention_policy`.
+A `table` is equivalent to an InfluxDB v1 and v2 `measurement`.
+
+Columns in a table represent time, tags, and fields. Columns can be one of the
+following types:
+
+- String dictionary (tag)
+- `int64` (field)
+- `float64` (field)
+- `uint64` (field)
+- `bool` (field)
+- `string` (field)
+- `time` (time with nanosecond precision)
+
+In {{% product-name %}}, every table has a primary key--the ordered set of tags and the time--for its data.
+The primary key uniquely identifies each row and determines the sort order for all
+Parquet files related to the table. When you create a table, either through an
+explicit call or by writing data into a table for the first time, it sets the
+primary key to the tags in the order they arrived.
+Although InfluxDB is still a _schema-on-write_ database, the tag column
+definitions for a table are immutable.
+
+Tags should hold unique identifying information like `sensor_id`, `building_id`,
+or `trace_id`.
All other data should be stored as fields. + +## Tools to use The following table compares tools that you can use to interact with {{% product-name %}}. This tutorial covers many of the recommended tools. -| Tool | Administration | Write | Query | -| :------------------------------------------------------------------------------------------------ | :----------------------: | :----------------------: | :----------------------: | -| **`influxdb3` CLI** {{< req text="\* " color="magenta" >}} | **{{< icon "check" >}}** | **{{< icon "check" >}}** | **{{< icon "check" >}}** | -| **InfluxDB HTTP API** {{< req text="\* " color="magenta" >}} | **{{< icon "check" >}}** | **{{< icon "check" >}}** | **{{< icon "check" >}}** | -| **InfluxDB 3 Explorer** {{< req text="\* " color="magenta" >}} | **{{< icon "check" >}}** | - | **{{< icon "check" >}}** | -| [InfluxDB 3 client libraries](/influxdb3/version/reference/client-libraries/v3/) | - | **{{< icon "check" >}}** | **{{< icon "check" >}}** | -| [InfluxDB v2 client libraries](/influxdb3/version/reference/client-libraries/v2/) | - | **{{< icon "check" >}}** | - | -| [InfluxDB v1 client libraries](/influxdb3/version/reference/client-libraries/v1/) | - | **{{< icon "check" >}}** | **{{< icon "check" >}}** | -| [InfluxDB 3 processing engine](#python-plugins-and-the-processing-engine){{< req text="\* " color="magenta" >}} | | **{{< icon "check" >}}** | **{{< icon "check" >}}** | -| [Telegraf](/telegraf/v1/) | - | **{{< icon "check" >}}** | - | -| [Chronograf](/chronograf/v1/) | - | - | - | -| `influx` CLI | - | - | - | -| `influxctl` CLI | - | - | - | -| InfluxDB v2.x user interface | - | - | - | -| **Third-party tools** | | | | -| Flight SQL clients | - | - | **{{< icon "check" >}}** | -| [Grafana](/influxdb3/version/visualize-data/grafana/) | - | - | **{{< icon "check" >}}** | - -{{< caption >}} -{{< req type="key" text="Covered in this guide" color="magenta" >}} -{{< /caption >}} - -### Write data - -InfluxDB is a schema-on-write 
database. You can start writing data and InfluxDB creates the logical database, tables, and their schemas on the fly. -After a schema is created, InfluxDB validates future write requests against it before accepting the data. -Subsequent requests can add new fields on-the-fly, but can't add new tags. - -{{% show-in "core" %}} -> [!Note] -> #### Core is optimized for recent data -> -> {{% product-name %}} is optimized for recent data but accepts writes from any time period. -> The system persists data to Parquet files for historical analysis with [InfluxDB 3 Enterprise](/influxdb3/enterprise/get-started/) or third-party tools. -> For extended historical queries and optimized data organization, consider using [InfluxDB 3 Enterprise](/influxdb3/enterprise/get-started/). -{{% /show-in %}} - -#### Write data in line protocol syntax - -{{% product-name %}} accepts data in [line protocol](/influxdb3/version/reference/syntax/line-protocol/) syntax. -The following code block is an example of time series data in [line protocol](/influxdb3/version/reference/syntax/line-protocol/) syntax: - -- `cpu`: the table name. -- `host`, `region`, `applications`: the tags. A tag set is an ordered, comma-separated list of key/value pairs where the values are strings. -- `val`, `usage_percent`, `status`: the fields. A field set is a comma-separated list of key/value pairs. -- timestamp: If you don't specify a timestamp, InfluxData uses the time when data is written. - The default precision is a nanosecond epoch. - To specify a different precision, pass the `precision` parameter in your CLI command or API request. 
- -``` -cpu,host=Alpha,region=us-west,application=webserver val=1i,usage_percent=20.5,status="OK" -cpu,host=Bravo,region=us-east,application=database val=2i,usage_percent=55.2,status="OK" -cpu,host=Charlie,region=us-west,application=cache val=3i,usage_percent=65.4,status="OK" -cpu,host=Bravo,region=us-east,application=database val=4i,usage_percent=70.1,status="Warn" -cpu,host=Bravo,region=us-central,application=database val=5i,usage_percent=80.5,status="OK" -cpu,host=Alpha,region=us-west,application=webserver val=6i,usage_percent=25.3,status="Warn" -``` - -### Write data using the CLI - -To quickly get started writing data, you can use the `influxdb3` CLI. - -> [!Note] -> For batching and higher-volume write workloads, we recommend using the [HTTP API](#write-data-using-the-http-api). -> -> #### Write data using InfluxDB API client libraries -> -> InfluxDB provides supported client libraries that integrate with your code -> to construct data as time series points and write the data as line protocol to your {{% product-name %}} database. -> For more information, see how to [use InfluxDB client libraries to write data](/influxdb3/version/write-data/api-client-libraries/). - -##### Example: write data using the influxdb3 CLI - -Use the `influxdb3 write` command to write data to a database. - -In the code samples, replace the following placeholders with your values: - -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the [database](/influxdb3/version/admin/databases/) to write to. 
-- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to write to the specified database{{% /show-in %}} - -##### Write data via stdin - -Pass data as quoted line protocol via standard input (stdin)--for example: - -{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} -```bash -influxdb3 write \ - --database DATABASE_NAME \ - --token AUTH_TOKEN \ - --precision ns \ - --accept-partial \ -'cpu,host=Alpha,region=us-west,application=webserver val=1i,usage_percent=20.5,status="OK" -cpu,host=Bravo,region=us-east,application=database val=2i,usage_percent=55.2,status="OK" -cpu,host=Charlie,region=us-west,application=cache val=3i,usage_percent=65.4,status="OK" -cpu,host=Bravo,region=us-east,application=database val=4i,usage_percent=70.1,status="Warn" -cpu,host=Bravo,region=us-central,application=database val=5i,usage_percent=80.5,status="OK" -cpu,host=Alpha,region=us-west,application=webserver val=6i,usage_percent=25.3,status="Warn"' -``` -{{% /code-placeholders %}} - -##### Write data from a file - -Pass the `--file` option to write line protocol you have saved to a file--for example, save the -[sample line protocol](#write-data-in-line-protocol-syntax) to a file named `server_data` -and then enter the following command: - -{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} -```bash -influxdb3 write \ - --database DATABASE_NAME \ - --token AUTH_TOKEN \ - --precision ns \ - --accept-partial \ - --file path/to/server_data -``` -{{% /code-placeholders %}} - -Replace the following placeholders with your values: -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the [database](/influxdb3/version/admin/databases/) to write to. 
-- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to write to the specified database{{% /show-in %}} - -### Write data using the HTTP API - -{{% product-name %}} provides three write API endpoints that respond to HTTP `POST` requests. -The `/api/v3/write_lp` endpoint is the recommended endpoint for writing data and -provides additional options for controlling write behavior. - -If you need to write data using InfluxDB v1.x or v2.x tools, use the compatibility API endpoints. -Compatibility APIs work with [Telegraf](/telegraf/v1/), InfluxDB v2.x and v1.x [API client libraries](/influxdb3/version/reference/client-libraries), and other tools that support the v1.x or v2.x APIs. - -{{% tabs-wrapper %}} -{{% tabs %}} -[/api/v3/write_lp](#) -[v2 compatibility](#) -[v1 compatibility](#) -{{% /tabs %}} -{{% tab-content %}} - -{{% product-name %}} adds the `/api/v3/write_lp` endpoint. - -{{}} - -This endpoint accepts the same line protocol syntax as previous versions, -and supports the following parameters: - -- `?accept_partial=`: Accept or reject partial writes (default is `true`). -- `?no_sync=`: Control when writes are acknowledged: - - `no_sync=true`: Acknowledges writes before WAL persistence completes. - - `no_sync=false`: Acknowledges writes after WAL persistence completes (default). -- `?precision=`: Specify the precision of the timestamp. The default is nanosecond precision. -- request body: The line protocol data to write. - -For more information about the parameters, see [Write data](/influxdb3/version/write-data/). - -##### Example: write data using the /api/v3 HTTP API - -The following examples show how to write data using `curl` and the `/api/v3/write_lp` HTTP endpoint. -To show the difference between accepting and rejecting partial writes, line `2` in the example contains a `string` value (`"hi"`) for a `float` field (`temp`).
- -###### Partial write of line protocol occurred - -With `accept_partial=true` (default): - -```bash -curl -v "http://{{< influxdb/host >}}/api/v3/write_lp?db=sensors&precision=auto" \ - --header 'Authorization: Bearer apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0==' \ - --data-raw 'home,room=Sunroom temp=96 -home,room=Sunroom temp="hi"' -``` - -The response is the following: - -``` -< HTTP/1.1 400 Bad Request -... -{ - "error": "partial write of line protocol occurred", - "data": [ - { - "original_line": "home,room=Sunroom temp=hi", - "line_number": 2, - "error_message": "invalid column type for column 'temp', expected iox::column_type::field::float, got iox::column_type::field::string" - } - ] -} -``` - -Line `1` is written and queryable. -The response is an HTTP error (`400`) status, and the response body contains the error message `partial write of line protocol occurred` with details about the problem line. - -###### Parsing failed for write_lp endpoint - -With `accept_partial=false`: - -```bash -curl -v "http://{{< influxdb/host >}}/api/v3/write_lp?db=sensors&precision=auto&accept_partial=false" \ - --header 'Authorization: Bearer apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0==' \ - --data-raw 'home,room=Sunroom temp=96 -home,room=Sunroom temp="hi"' -``` - -The response is the following: - -``` -< HTTP/1.1 400 Bad Request -... -{ - "error": "parsing failed for write_lp endpoint", - "data": { - "original_line": "home,room=Sunroom temp=hi", - "line_number": 2, - "error_message": "invalid column type for column 'temp', expected iox::column_type::field::float, got iox::column_type::field::string" - } -} -``` - -InfluxDB rejects all points in the batch. -The response is an HTTP error (`400`) status, and the response body contains `parsing failed for write_lp endpoint` and details about the problem line. - -For more information about the ingest path and data flow, see [Data durability](/influxdb3/version/reference/internals/durability/). 
- -{{% /tab-content %}} -{{% tab-content %}} - -The `/api/v2/write` InfluxDB v2 compatibility endpoint provides backwards compatibility with clients (such as [Telegraf's InfluxDB v2 output plugin](/telegraf/v1/plugins/#output-influxdb_v2) and [InfluxDB v2 API client libraries](/influxdb3/version/reference/client-libraries/v2/)) that can write data to InfluxDB OSS v2.x and Cloud 2 (TSM). - -{{}} - -{{% /tab-content %}} - -{{% tab-content %}} - -The `/write` InfluxDB v1 compatibility endpoint provides backwards compatibility for clients that can write data to InfluxDB v1.x. - -{{}} - - -{{% /tab-content %}} -{{% /tabs-wrapper %}} - -> [!Note] -> #### Compatibility APIs differ from native APIs -> -> Keep in mind that the compatibility APIs differ from the v1 and v2 APIs in previous versions in the following ways: -> -> - Tags in a table (measurement) are _immutable_ -> - A tag and a field can't have the same name within a table. - -#### Write responses - -By default, InfluxDB acknowledges writes after flushing the WAL file to the object store (occurring every second). -For high write throughput, you can send multiple concurrent write requests. - -#### Use no_sync for immediate write responses - -To reduce the latency of writes, use the `no_sync` write option, which acknowledges writes _before_ WAL persistence completes. -When `no_sync=true`, InfluxDB validates the data, writes the data to the WAL, and then immediately responds to the client, without waiting for persistence to the object store. - -Using `no_sync=true` is best when prioritizing high-throughput writes over absolute durability. - -- Default behavior (`no_sync=false`): Waits for data to be written to the object store before acknowledging the write. Reduces the risk of data loss, but increases the latency of the response. -- With `no_sync=true`: Reduces write latency, but increases the risk of data loss in case of a crash before WAL persistence. 
- -##### Immediate write using the HTTP API - -The `no_sync` parameter controls when writes are acknowledged--for example: - -```bash -curl "http://{{< influxdb/host >}}/api/v3/write_lp?db=sensors&precision=auto&no_sync=true" \ - --header 'Authorization: Bearer apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0==' \ - --data-raw "home,room=Sunroom temp=96" -``` - -### Create a database or table - -To create a database without writing data, use the `create` subcommand--for example: - -{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} -```bash -influxdb3 create database DATABASE_NAME \ - --token AUTH_TOKEN -``` -{{% /code-placeholders %}} - -Replace the following placeholders with your values: - -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to create -- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: the {{% token-link "admin" %}} for your {{% product-name %}} server - -To learn more about a subcommand, use the `-h, --help` flag or view the [InfluxDB 3 CLI reference](/influxdb3/version/reference/cli/influxdb3/create): - -```bash -influxdb3 create -h -``` - -### Query data - -InfluxDB 3 supports native SQL for querying, in addition to InfluxQL, an -SQL-like language customized for time series queries. - -{{% show-in "core" %}} -{{< product-name >}} limits -query time ranges to 72 hours (both recent and historical) to ensure query performance. -For more information about the 72-hour limitation, see the -[update on InfluxDB 3 Core’s 72-hour limitation](https://www.influxdata.com/blog/influxdb3-open-source-public-alpha-jan-27/). -{{% /show-in %}} - -> [!Note] -> Flux, the language introduced in InfluxDB 2.0, is **not** supported in InfluxDB 3. - -The quickest way to get started querying is to use the `influxdb3` CLI (which uses the Flight SQL API over HTTP2). - -The `query` subcommand includes options to help ensure that the right database is queried with the correct permissions. 
Only the `--database` option is required, but depending on your specific setup, you may need to pass other options, such as host, port, and token. - -| Option | Description | Required | -|---------|-------------|--------------| -| `--host` | The host URL of the server [default: `http://127.0.0.1:8181`] to query | No | -| `--database` | The name of the database to operate on | Yes | -| `--token` | The authentication token for the {{% product-name %}} server | No | -| `--language` | The query language of the provided query string [default: `sql`] [possible values: `sql`, `influxql`] | No | -| `--format` | The format in which to output the query [default: `pretty`] [possible values: `pretty`, `json`, `jsonl`, `csv`, `parquet`] | No | -| `--output` | The path to output data to | No | - -#### Example: query `“SHOW TABLES”` on the `servers` database: - -```console -$ influxdb3 query --database servers "SHOW TABLES" -+---------------+--------------------+--------------+------------+ -| table_catalog | table_schema | table_name | table_type | -+---------------+--------------------+--------------+------------+ -| public | iox | cpu | BASE TABLE | -| public | information_schema | tables | VIEW | -| public | information_schema | views | VIEW | -| public | information_schema | columns | VIEW | -| public | information_schema | df_settings | VIEW | -| public | information_schema | schemata | VIEW | -+---------------+--------------------+--------------+------------+ -``` - -#### Example: query the `cpu` table, limiting to 10 rows: - -```console -$ influxdb3 query --database servers "SELECT DISTINCT usage_percent, time FROM cpu LIMIT 10" -+---------------+---------------------+ -| usage_percent | time | -+---------------+---------------------+ -| 63.4 | 2024-02-21T19:25:00 | -| 25.3 | 2024-02-21T19:06:40 | -| 26.5 | 2024-02-21T19:31:40 | -| 70.1 | 2024-02-21T19:03:20 | -| 83.7 | 2024-02-21T19:30:00 | -| 55.2 | 2024-02-21T19:00:00 | -| 80.5 | 2024-02-21T19:05:00 | -| 60.2 | 
2024-02-21T19:33:20 | -| 20.5 | 2024-02-21T18:58:20 | -| 85.2 | 2024-02-21T19:28:20 | -+---------------+---------------------+ -``` - -### Query using the CLI for InfluxQL - -[InfluxQL](/influxdb3/version/reference/influxql/) is an SQL-like language developed by InfluxData with specific features tailored for leveraging and working with InfluxDB. It’s compatible with all versions of InfluxDB, making it a good choice for interoperability across different InfluxDB installations. - -To query using InfluxQL, enter the `influxdb3 query` subcommand and specify `influxql` in the language option--for example: - -{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} -```bash -influxdb3 query \ - --database DATABASE_NAME \ - --token AUTH_TOKEN \ - --language influxql \ - "SELECT DISTINCT usage_percent FROM cpu WHERE time >= now() - 1d" -``` -{{% /code-placeholders %}} - -Replace the following placeholders with your values: - -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query -- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}} - -### Query using the API - -InfluxDB 3 supports Flight (gRPC) APIs and an HTTP API. -To query your database using the HTTP API, send a request to the `/api/v3/query_sql` or `/api/v3/query_influxql` endpoints. -In the request, specify the database name in the `db` parameter -and a query in the `q` parameter. -You can pass parameters in the query string or inside a JSON object. - -Use the `format` parameter to specify the response format: `pretty`, `jsonl`, `parquet`, `csv`, and `json`. Default is `json`.
- -##### Example: Query passing URL-encoded parameters - -The following example sends an HTTP `GET` request with a URL-encoded SQL query: - -{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} -```bash -curl -G "http://{{< influxdb/host >}}/api/v3/query_sql" \ - --header 'Authorization: Bearer AUTH_TOKEN' \ - --data-urlencode "db=DATABASE_NAME" \ - --data-urlencode "q=select * from cpu limit 5" -``` -{{% /code-placeholders %}} - -Replace the following placeholders with your values: - -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query -- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}} - -##### Example: Query passing JSON parameters - -The following example sends an HTTP `POST` request with parameters in a JSON payload: - -{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} -```bash -curl http://{{< influxdb/host >}}/api/v3/query_sql \ - --data '{"db": "DATABASE_NAME", "q": "select * from cpu limit 5"}' -``` -{{% /code-placeholders %}} - -Replace the following placeholders with your values: - -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query -- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}} - -### Query using the Python client - -Use the InfluxDB 3 Python library to interact with the database and integrate with your application. -We recommend installing the required packages in a Python virtual environment for your specific project. - -To get started, install the `influxdb3-python` package. 
- -```bash -pip install influxdb3-python -``` - -From here, you can connect to your database with the client library using just the **host** and **database name**: - -{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} -```python -from influxdb_client_3 import InfluxDBClient3 - -client = InfluxDBClient3( - token='AUTH_TOKEN', - host='http://{{< influxdb/host >}}', - database='DATABASE_NAME' -) -``` -{{% /code-placeholders %}} - -Replace the following placeholders with your values: - -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query -- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}} - -The following example shows how to query using SQL, and then -use PyArrow to explore the schema and process results. -To authorize the query, the example retrieves the {{% token-link "database" %}} -from the `INFLUXDB3_AUTH_TOKEN` environment variable.
- -```python -from influxdb_client_3 import InfluxDBClient3 -import os - -client = InfluxDBClient3( - token=os.environ.get('INFLUXDB3_AUTH_TOKEN'), - host='http://{{< influxdb/host >}}', - database='servers' -) - -# Execute the query and return an Arrow table -table = client.query( - query="SELECT * FROM cpu LIMIT 10", - language="sql" -) - -print("\n#### View Schema information\n") -print(table.schema) - -print("\n#### Use PyArrow to read the specified columns\n") -print(table.column('usage_active')) -print(table.select(['host', 'usage_active'])) -print(table.select(['time', 'host', 'usage_active'])) - -print("\n#### Use PyArrow compute functions to aggregate data\n") -print(table.group_by('host').aggregate([])) -print(table.group_by('cpu').aggregate([('time_system', 'mean')])) -``` - -For more information about the Python client library, see the [`influxdb3-python` repository](https://github.com/InfluxCommunity/influxdb3-python) in GitHub. - -### Query using InfluxDB 3 Explorer (Beta) - -You can use the InfluxDB 3 Explorer web-based interface to query and visualize data, -and administer your {{% product-name %}} instance. -For more information, see how to [install InfluxDB 3 Explorer (Beta)](/influxdb3/explorer/install/) using Docker -and get started querying your data. - -### Last values cache - -{{% product-name %}} supports a **last-n values cache** which stores the last N values in a series or column hierarchy in memory. This gives the database the ability to answer these kinds of queries in under 10 milliseconds. - -You can use the `influxdb3` CLI to [create a last value cache](/influxdb3/version/reference/cli/influxdb3/create/last_cache/). 
- -{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TABLE_NAME|CACHE_NAME" %}} -```bash -influxdb3 create last_cache \ - --token AUTH_TOKEN \ - --database DATABASE_NAME \ - --table TABLE_NAME \ - CACHE_NAME -``` -{{% /code-placeholders %}} - -Replace the following placeholders with your values: - -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to create the last values cache in -- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}} -- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name of the table to create the last values cache in -- {{% code-placeholder-key %}}`CACHE_NAME`{{% /code-placeholder-key %}}: Optionally, a name for the new cache - -Consider the following `cpu` sample table: - -| host | application | time | usage\_percent | status | -| ----- | ----- | ----- | ----- | ----- | -| Bravo | database | 2024-12-11T10:00:00 | 55.2 | OK | -| Charlie | cache | 2024-12-11T10:00:00 | 65.4 | OK | -| Bravo | database | 2024-12-11T10:01:00 | 70.1 | Warn | -| Bravo | database | 2024-12-11T10:01:00 | 80.5 | OK | -| Alpha | webserver | 2024-12-11T10:02:00 | 25.3 | Warn | - -The following command creates a last value cache named `cpuCache`: - -```bash -influxdb3 create last_cache \ - --token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \ - --database servers \ - --table cpu \ - --key-columns host,application \ - --value-columns usage_percent,status \ - --count 5 cpuCache -``` - -_You can create a last values cache per time series, but be mindful of high cardinality tables that could take excessive memory._ - -#### Query a last values cache - -To query data from the LVC, use the [`last_cache()`](/influxdb3/version/reference/sql/functions/cache/#last_cache) function in your query--for example: - -```bash -influxdb3 query \ - --token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \ - --database servers \ - "SELECT * FROM last_cache('cpu', 'cpuCache') WHERE host = 
'Bravo';" -``` - -> [!Note] -> #### Only works with SQL -> -> The last values cache only works with SQL, not InfluxQL; SQL is the default language. - -#### Delete a last values cache - -Use the `influxdb3` CLI to [delete a last values cache](/influxdb3/version/reference/cli/influxdb3/delete/last_cache/) - -{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TABLE_NAME|CACHE_NAME" %}} -```bash -influxdb3 delete last_cache \ - --token AUTH_TOKEN \ - --database DATABASE_NAME \ - --table TABLE_NAME \ - --cache-name CACHE_NAME -``` -{{% /code-placeholders %}} - -Replace the following placeholders with your values: - -- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}} -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to delete the last values cache from -- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name of the table to delete the last values cache from -- {{% code-placeholder-key %}}`CACHE_NAME`{{% /code-placeholder-key %}}: the name of the last values cache to delete - -### Distinct values cache - -Similar to the [last values cache](#last-values-cache), the database can cache in RAM the distinct values for a single column in a table or a hierarchy of columns. -This is useful for fast metadata lookups, which can return in under 30 milliseconds. -Many of the options are similar to the last value cache. - -You can use the `influxdb3` CLI to [create a distinct values cache](/influxdb3/version/reference/cli/influxdb3/create/distinct_cache/).
- -{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TABLE_NAME|COLUMNS|CACHE_NAME" %}} -```bash -influxdb3 create distinct_cache \ - --token AUTH_TOKEN \ - --database DATABASE_NAME \ - --table TABLE_NAME \ - --columns COLUMNS \ - CACHE_NAME -``` -{{% /code-placeholders %}} -Replace the following placeholders with your values: - -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to create the distinct values cache in -- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}} -- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name of the table to create the distinct values cache in -- {{% code-placeholder-key %}}`COLUMNS`{{% /code-placeholder-key %}}: a comma-delimited list of columns to cache distinct values for--for example, `host,application` -- {{% code-placeholder-key %}}`CACHE_NAME`{{% /code-placeholder-key %}}: Optionally, a name for the new cache - -Consider the following `cpu` sample table: - -| host | application | time | usage\_percent | status | -| ----- | ----- | ----- | ----- | ----- | -| Bravo | database | 2024-12-11T10:00:00 | 55.2 | OK | -| Charlie | cache | 2024-12-11T10:00:00 | 65.4 | OK | -| Bravo | database | 2024-12-11T10:01:00 | 70.1 | Warn | -| Bravo | database | 2024-12-11T10:01:00 | 80.5 | OK | -| Alpha | webserver | 2024-12-11T10:02:00 | 25.3 | Warn | - -The following command creates a distinct values cache named `cpuDistinctCache`: - -```bash -influxdb3 create distinct_cache \ - --token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \ - --database servers \ - --table cpu \ - --columns host,application \ - cpuDistinctCache -``` - -#### Query a distinct values cache - -To query data from the distinct values cache, use the [`distinct_cache()`](/influxdb3/version/reference/sql/functions/cache/#distinct_cache) function in your query--for example: - -```bash -influxdb3 query \ - --token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \ - --database servers \ - "SELECT * FROM distinct_cache('cpu', 'cpuDistinctCache')" -``` - -> [!Note] -> #### Only works with SQL -> -> The distinct cache only works with SQL, not InfluxQL; SQL 
is the default language. - -#### Delete a distinct values cache - -Use the `influxdb3` CLI to [delete a distinct values cache](/influxdb3/version/reference/cli/influxdb3/delete/distinct_cache/) - -{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TABLE_NAME|CACHE_NAME" %}} -```bash -influxdb3 delete distinct_cache \ - --token AUTH_TOKEN \ - --database DATABASE_NAME \ - --table TABLE_NAME \ - --cache-name CACHE_NAME -``` -{{% /code-placeholders %}} - -Replace the following placeholders with your values: -- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}} -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to delete the distinct values cache from -- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name of the table to delete the distinct values cache from -- {{% code-placeholder-key %}}`CACHE_NAME`{{% /code-placeholder-key %}}: the name of the distinct values cache to delete - -### Python plugins and the processing engine - -The InfluxDB 3 processing engine is an embedded Python VM for running code inside the database to process and transform data. - -To activate the processing engine, pass the `--plugin-dir PLUGIN_DIR` option when starting the {{% product-name %}} server. -`PLUGIN_DIR` is your filesystem location for storing [plugin](#plugin) files for the processing engine to run. - -#### Plugin - -A plugin is a Python function that has a signature compatible with a Processing engine [trigger](#trigger). - -#### Trigger - -When you create a trigger, you specify a [plugin](#plugin), a database, optional arguments, -and a _trigger-spec_, which defines when the plugin is executed and what data it receives. - -##### Trigger types - -InfluxDB 3 provides the following types of triggers, each with specific trigger-specs: - -- **On WAL flush**: Sends a batch of written data (for a specific table or all tables) to a plugin (by default, every second).
- -- **On Schedule**: Executes a plugin on a user-configured schedule (using a crontab or a duration); useful for data collection and deadman monitoring. -- **On Request**: Binds a plugin to a custom HTTP API endpoint at `/api/v3/engine/`. - The plugin receives the HTTP request headers and content, and can then parse, process, and send the data into the database or to third-party services. - -### Test, create, and trigger plugin code - -##### Example: Python plugin for WAL rows - -```python -# This is the basic structure for Python plugin code that runs in the -# InfluxDB 3 Processing engine. - -# When creating a trigger, you can provide runtime arguments to your plugin, -# allowing you to write generic code that uses variables such as monitoring -# thresholds, environment variables, and host names. -# -# Use the following exact signature to define a function for the WAL flush -# trigger. -# When you create a trigger for a WAL flush plugin, you specify the database -# and tables that the plugin receives written data from on every WAL flush -# (default is once per second). -def process_writes(influxdb3_local, table_batches, args=None): - # here you can see logging. for now this won't do anything, but soon - # we'll capture this so you can query it from system tables - if args and "arg1" in args: - influxdb3_local.info("arg1: " + args["arg1"]) - - # here we're using arguments provided at the time the trigger was set up - # to feed into parameters that we'll put into a query - query_params = {"host": "foo"} - # here's an example of executing a parameterized query. Only SQL is supported. - # It will query the database that the trigger is attached to by default. We'll - # soon have support for querying other DBs. - query_result = influxdb3_local.query("SELECT * FROM cpu where host = '$host'", query_params) - # the result is a list of Dict that have the column name as key and value as - # value. 
If you run the WAL test plugin with your plugin against a DB that - # you've written data into, you'll be able to see some results - influxdb3_local.info("query result: " + str(query_result)) - - # this is the data that is sent when the WAL is flushed of writes the server - # received for the DB or table of interest. One batch for each table (will - # only be one if triggered on a single table) - for table_batch in table_batches: - # here you can see that the table_name is available. - influxdb3_local.info("table: " + table_batch["table_name"]) - - # example to skip the table we're later writing data into - if table_batch["table_name"] == "some_table": - continue - - # and then the individual rows, which are Dict with keys of the column names and values - for row in table_batch["rows"]: - influxdb3_local.info("row: " + str(row)) - - # this shows building a line of LP to write back to the database. tags must go first and - # their order is important and must always be the same for each individual table. 
Then - # fields and lastly an optional time, which you can see in the next example below - line = LineBuilder("some_table")\ - .tag("tag1", "tag1_value")\ - .tag("tag2", "tag2_value")\ - .int64_field("field1", 1)\ - .float64_field("field2", 2.0)\ - .string_field("field3", "number three") - - # this writes it back (it actually just buffers it until the completion of this function - # at which point it will write everything back that you put in) - influxdb3_local.write(line) - - # here's another example, but with us setting a nanosecond timestamp at the end - other_line = LineBuilder("other_table") - other_line.int64_field("other_field", 1) - other_line.float64_field("other_field2", 3.14) - other_line.time_ns(1302) - - # and you can see that we can write to any DB in the server - influxdb3_local.write_to_db("mytestdb", other_line) - - # just some log output as an example - influxdb3_local.info("done") -``` - -##### Test a plugin on the server - -Test your InfluxDB 3 plugin safely without affecting written data. During a plugin test: - -- A query executed by the plugin queries against the server you send the request to. -- Writes aren't sent to the server but are returned to you. - -To test a plugin, do the following: - -1. Create a _plugin directory_--for example, `/path/to/.influxdb/plugins` -2. [Start the InfluxDB server](#start-influxdb) and include the `--plugin-dir ` option. -3. Save the [example plugin code](#example-python-plugin-for-wal-rows) to a plugin file inside of the plugin directory. If you haven't yet written data to the table in the example, comment out the lines where it queries. -4. 
To run the test, enter the following command with the following options: - - - `--lp` or `--file`: The line protocol to test - - Optional: `--input-arguments`: A comma-delimited list of `KEY=VALUE` arguments for your plugin code - -{{% code-placeholders "INPUT_LINE_PROTOCOL|INPUT_ARGS|DATABASE_NAME|AUTH_TOKEN|PLUGIN_FILENAME" %}} -```bash -influxdb3 test wal_plugin \ ---lp INPUT_LINE_PROTOCOL \ ---input-arguments INPUT_ARGS \ ---database DATABASE_NAME \ ---token AUTH_TOKEN \ -PLUGIN_FILENAME -``` -{{% /code-placeholders %}} - -Replace the following placeholders with your values: - -- {{% code-placeholder-key %}}`INPUT_LINE_PROTOCOL`{{% /code-placeholder-key %}}: the line protocol to test -- Optional: {{% code-placeholder-key %}}`INPUT_ARGS`{{% /code-placeholder-key %}}: a comma-delimited list of `KEY=VALUE` arguments for your plugin code--for example, `arg1=hello,arg2=world` -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to test against -- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: the {{% token-link "admin" %}} for your {{% product-name %}} server -- {{% code-placeholder-key %}}`PLUGIN_FILENAME`{{% /code-placeholder-key %}}: the name of the plugin file to test - -The command runs the plugin code with the test data, yields the data to the plugin code, and then responds with the plugin result. -You can quickly see how the plugin behaves, what data it would have written to the database, and any errors. -You can then edit your Python code in the plugins directory, and rerun the test. -The server reloads the file for every request to the `test` API. - -For more information, see [`influxdb3 test wal_plugin`](/influxdb3/version/reference/cli/influxdb3/test/wal_plugin/) or run `influxdb3 test wal_plugin -h`. - -With the plugin code inside the server plugin directory, and a successful test, -you're ready to create a plugin and a trigger to run on the server.
- -##### Example: Test, create, and run a plugin - -The following example shows how to test a plugin, and then create the plugin and -trigger: - -```bash -# Test and create a plugin -# Requires: -# - A database named `sensors` with a table named `foo` -# - A Python plugin file named `test.py` -# Test a plugin -influxdb3 test wal_plugin \ - --lp "my_measure,tag1=asdf f1=1.0 123" \ - --token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \ - --database sensors \ - --input-arguments "arg1=hello,arg2=world" \ - test.py -``` - -```bash -# Create a trigger that runs the plugin -influxdb3 create trigger \ - --token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \ - --database sensors \ - --plugin test_plugin \ - --trigger-spec "table:foo" \ - --trigger-arguments "arg1=hello,arg2=world" \ - trigger1 -``` - -After you have created a plugin and trigger, enter the following command to
-enable the trigger and have it run the plugin as you write data: - -{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TRIGGER_NAME" %}} -```bash -influxdb3 enable trigger \ - --token AUTH_TOKEN \ - --database DATABASE_NAME \ - TRIGGER_NAME -``` -{{% /code-placeholders %}} - -Replace the following placeholders with your values: - -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to enable the trigger in -- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}} -- {{% code-placeholder-key %}}`TRIGGER_NAME`{{% /code-placeholder-key %}}: the name of the trigger to enable - -For example, to enable the trigger named `trigger1` in the `sensors` database: - -```bash -influxdb3 enable trigger \ - --token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \ - --database sensors \ - trigger1 -``` - -For more information, see [Python plugins and the Processing engine](/influxdb3/version/plugins/).
- -{{% show-in "enterprise" %}} -### Multi-server setup - -{{% product-name %}} is built to support multi-node setups for high availability, read replicas, and flexible implementations depending on use case. - -### High availability - -Enterprise is architecturally flexible, giving you options on how to configure multiple servers that work together for high availability (HA) and high performance. -Built on top of the diskless engine and leveraging the Object store, an HA setup ensures that if a node fails, you can still continue reading from, and writing to, a secondary node. - -A two-node setup is the minimum for basic high availability, with both nodes having read-write permissions. - -{{< img-hd src="/img/influxdb/influxdb-3-enterprise-high-availability.png" alt="Basic high availability setup" />}} - -In a basic HA setup: - -- Two nodes both write data to the same Object store and both handle queries -- Node 1 and Node 2 are _read replicas_ that read from each other’s Object store directories -- One of the nodes is designated as the Compactor node - -> [!Note] -> Only one node can be designated as the Compactor. -> Compacted data is meant for a single writer, and many readers. - -The following examples show how to configure and start two nodes -for a basic HA setup. 
- -- _Node 1_ is for compaction (passes `compact` in `--mode`) -- _Node 2_ is for ingest and query - -```bash -## NODE 1 - -# Example variables -# node-id: 'host01' -# cluster-id: 'cluster01' -# bucket: 'influxdb-3-enterprise-storage' - -influxdb3 serve \ - --node-id host01 \ - --cluster-id cluster01 \ - --mode ingest,query,compact \ - --object-store s3 \ - --bucket influxdb-3-enterprise-storage \ - --http-bind {{< influxdb/host >}} \ - --aws-access-key-id \ - --aws-secret-access-key -``` - -```bash -## NODE 2 - -# Example variables -# node-id: 'host02' -# cluster-id: 'cluster01' -# bucket: 'influxdb-3-enterprise-storage' - -influxdb3 serve \ - --node-id host02 \ - --cluster-id cluster01 \ - --mode ingest,query \ - --object-store s3 \ - --bucket influxdb-3-enterprise-storage \ - --http-bind localhost:8282 \ - --aws-access-key-id AWS_ACCESS_KEY_ID \ - --aws-secret-access-key AWS_SECRET_ACCESS_KEY -``` - -After the nodes have started, querying either node returns data for both nodes, and _NODE 1_ runs compaction. -To add nodes to this setup, start more read replicas with the same cluster ID. - -### High availability with a dedicated Compactor - -Data compaction in InfluxDB 3 is one of the more computationally expensive operations. -To ensure that your read-write nodes don't slow down due to compaction work, set up a compactor-only node for consistent and high performance across all nodes. - -{{< img-hd src="/img/influxdb/influxdb-3-enterprise-dedicated-compactor.png" alt="Dedicated Compactor setup" />}} - -The following examples show how to set up high availability with a dedicated Compactor node: - -1. Start two read-write nodes as read replicas, similar to the previous example. 
- - ```bash - ## NODE 1 — Writer/Reader Node #1 - - # Example variables - # node-id: 'host01' - # cluster-id: 'cluster01' - # bucket: 'influxdb-3-enterprise-storage' - - influxdb3 serve \ - --node-id host01 \ - --cluster-id cluster01 \ - --mode ingest,query \ - --object-store s3 \ - --bucket influxdb-3-enterprise-storage \ - --http-bind {{< influxdb/host >}} \ - --aws-access-key-id \ - --aws-secret-access-key - ``` - - ```bash - ## NODE 2 — Writer/Reader Node #2 - - # Example variables - # node-id: 'host02' - # cluster-id: 'cluster01' - # bucket: 'influxdb-3-enterprise-storage' - - influxdb3 serve \ - --node-id host02 \ - --cluster-id cluster01 \ - --mode ingest,query \ - --object-store s3 \ - --bucket influxdb-3-enterprise-storage \ - --http-bind localhost:8282 \ - --aws-access-key-id \ - --aws-secret-access-key - ``` - -2. Start the dedicated compactor node with the `--mode=compact` option to ensure the node **only** runs compaction. - - ```bash - ## NODE 3 — Compactor Node - - # Example variables - # node-id: 'host03' - # cluster-id: 'cluster01' - # bucket: 'influxdb-3-enterprise-storage' - - influxdb3 serve \ - --node-id host03 \ - --cluster-id cluster01 \ - --mode compact \ - --object-store s3 \ - --bucket influxdb-3-enterprise-storage \ - --aws-access-key-id \ - --aws-secret-access-key - ``` - -### High availability with read replicas and a dedicated Compactor - -For a robust and effective setup for managing time-series data, you can run ingest nodes alongside read-only nodes and a dedicated Compactor node. - -{{< img-hd src="/img/influxdb/influxdb-3-enterprise-workload-isolation.png" alt="Workload Isolation Setup" />}} - -1. Start ingest nodes by assigning them the **`ingest`** mode. - To achieve the benefits of workload isolation, you'll send _only write requests_ to these ingest nodes. Later, you'll configure the _read-only_ nodes. 
- - ```bash - ## NODE 1 — Writer Node #1 - - # Example variables - # node-id: 'host01' - # cluster-id: 'cluster01' - # bucket: 'influxdb-3-enterprise-storage' - - influxdb3 serve \ - --node-id host01 \ - --cluster-id cluster01 \ - --mode ingest \ - --object-store s3 \ - --bucket influxdb-3-enterprise-storage \ - --http-bind {{< influxdb/host >}} \ - --aws-access-key-id \ - --aws-secret-access-key - ``` - - - - ```bash - ## NODE 2 — Writer Node #2 - - # Example variables - # node-id: 'host02' - # cluster-id: 'cluster01' - # bucket: 'influxdb-3-enterprise-storage' - - influxdb3 serve \ - --node-id host02 \ - --cluster-id cluster01 \ - --mode ingest \ - --object-store s3 \ - --bucket influxdb-3-enterprise-storage \ - --http-bind localhost:8282 \ - --aws-access-key-id \ - --aws-secret-access-key - ``` - -2. Start the dedicated Compactor node with ` compact`. - - ```bash - ## NODE 3 — Compactor Node - - # Example variables - # node-id: 'host03' - # cluster-id: 'cluster01' - # bucket: 'influxdb-3-enterprise-storage' - - influxdb3 serve \ - --node-id host03 \ - --cluster-id cluster01 \ - --mode compact \ - --object-store s3 \ - --bucket influxdb-3-enterprise-storage \ - --aws-access-key-id \ - - ``` - -3. Finally, start the query nodes as _read-only_ with `--mode query`. 
- - ```bash - ## NODE 4 — Read Node #1 - - # Example variables - # node-id: 'host04' - # cluster-id: 'cluster01' - # bucket: 'influxdb-3-enterprise-storage' - - influxdb3 serve \ - --node-id host04 \ - --cluster-id cluster01 \ - --mode query \ - --object-store s3 \ - --bucket influxdb-3-enterprise-storage \ - --http-bind localhost:8383 \ - --aws-access-key-id \ - --aws-secret-access-key - ``` - - ```bash - ## NODE 5 — Read Node #2 - - # Example variables - # node-id: 'host05' - # cluster-id: 'cluster01' - # bucket: 'influxdb-3-enterprise-storage' - - influxdb3 serve \ - --node-id host05 \ - --cluster-id cluster01 \ - --mode query \ - --object-store s3 \ - --bucket influxdb-3-enterprise-storage \ - --http-bind localhost:8484 \ - --aws-access-key-id \ - - ``` - -Congratulations, you have a robust setup for workload isolation using {{% product-name %}}. - -### Writing and querying for multi-node setups - -You can use the default port `8181` for any write or query, without changing any of the commands. - -> [!Note] -> #### Specify hosts for writes and queries -> -> To benefit from this multi-node, isolated architecture, specify hosts: -> -> - In write requests, specify a host that you have designated as _write-only_. -> - In query requests, specify a host that you have designated as _read-only_. -> -> When running multiple local instances for testing or separate nodes in production, specifying the host ensures writes and queries are routed to the correct instance. 
- -{{% code-placeholders "(http://localhost:8585)|AUTH_TOKEN|DATABASE_NAME|QUERY" %}} -```bash -# Example querying a specific host -# HTTP-bound Port: 8585 -influxdb3 query \ - --host http://localhost:8585 - --token AUTH_TOKEN \ - --database DATABASE_NAME "QUERY" -``` -{{% /code-placeholders %}} - -Replace the following placeholders with your values: - -- {{% code-placeholder-key %}}`http://localhost:8585`{{% /code-placeholder-key %}}: the host and port of the node to query -- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}} -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query -- {{% code-placeholder-key %}}`QUERY`{{% /code-placeholder-key %}}: the SQL or InfluxQL query to run against the database - -### File index settings - -To accelerate performance on specific queries, you can define non-primary keys to index on, which helps improve performance for single-series queries. -This feature is only available in {{% product-name %}} and is not available in Core. 
- -#### Create a file index - -{{% code-placeholders "AUTH_TOKEN|DATABASE|TABLE|COLUMNS" %}} - -```bash -# Example variables on a query -# HTTP-bound Port: 8585 - -influxdb3 create file_index \ - --host http://localhost:8585 \ - --token AUTH_TOKEN \ - --database DATABASE_NAME \ - --table TABLE_NAME \ - COLUMNS -``` - -#### Delete a file index - -```bash -influxdb3 delete file_index \ - --host http://localhost:8585 \ - --database DATABASE_NAME \ - --table TABLE_NAME \ -``` -{{% /code-placeholders %}} - -Replace the following placeholders with your values: - -- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}} -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to create the file index in -- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name of the table to create the file index in -- {{% code-placeholder-key %}}`COLUMNS`{{% /code-placeholder-key %}}: a comma-separated list of columns to index on, for example, `host,application` -{{% /show-in %}} \ No newline at end of file +| Tool | Administration | Write | Query | +| :-------------------------------------------------------------------------------- | :----------------------: | :----------------------: | :----------------------: | +| **[`influxdb3` CLI](/influxdb3/version/reference/cli/influxdb3/)** | **{{< icon "check" >}}** | **{{< icon "check" >}}** | **{{< icon "check" >}}** | +| **[InfluxDB HTTP API](/influxdb3/version/reference/api/)** | **{{< icon "check" >}}** | **{{< icon "check" >}}** | **{{< icon "check" >}}** | +| **[InfluxDB 3 Explorer](/influxdb3/explorer/)** | **{{< icon "check" >}}** | **{{< icon "check" >}}** | **{{< icon "check" >}}** | +| [InfluxDB 3 client libraries](/influxdb3/version/reference/client-libraries/v3/) | - | **{{< icon "check" >}}** | **{{< icon "check" >}}** | +| [InfluxDB v2 client libraries](/influxdb3/version/reference/client-libraries/v2/) | - | 
**{{< icon "check" >}}** | - | +| [InfluxDB v1 client libraries](/influxdb3/version/reference/client-libraries/v1/) | - | **{{< icon "check" >}}** | **{{< icon "check" >}}** | +| [InfluxDB 3 processing engine](/influxdb3/version/plugins/) | | **{{< icon "check" >}}** | **{{< icon "check" >}}** | +| [Telegraf](/telegraf/v1/) | - | **{{< icon "check" >}}** | - | +| [Chronograf](/chronograf/v1/) | - | - | - | +| `influx` CLI | - | - | - | +| `influxctl` CLI | - | - | - | +| InfluxDB v2.x user interface | - | - | - | +| **Third-party tools** | | | | +| Flight SQL clients | - | - | **{{< icon "check" >}}** | +| [Grafana](/influxdb3/version/visualize-data/grafana/) | - | - | **{{< icon "check" >}}** | + +{{< show-in "core" >}} +{{< page-nav next="/influxdb3/core/get-started/setup/" nextText="Set up InfluxDB 3 Core" >}} +{{< /show-in >}} + +{{< show-in "enterprise" >}} +{{< page-nav next="/influxdb3/enterprise/get-started/setup/" nextText="Set up InfluxDB 3 Enterprise" >}} +{{< /show-in >}} diff --git a/content/shared/influxdb3-get-started/processing-engine.md b/content/shared/influxdb3-get-started/processing-engine.md new file mode 100644 index 000000000..009b3d4ee --- /dev/null +++ b/content/shared/influxdb3-get-started/processing-engine.md @@ -0,0 +1,261 @@ +The {{% product-name %}} processing engine is an embedded Python virtual machine +(VM) that runs code inside the database to process and transform data. +Create processing engine [plugins](#plugin) that run when [triggered](#trigger) +by specific events. 
+ +- [Processing engine terminology](#processing-engine-terminology) + - [Plugin](#plugin) + - [Trigger](#trigger) + - [Trigger types](#trigger-types) +- [Activate the processing engine](#activate-the-processing-engine) +- [Create a plugin](#create-a-plugin) +- [Test a plugin on the server](#test-a-plugin-on-the-server) +- [Create a trigger](#create-a-trigger) +- [Enable the trigger](#enable-the-trigger) + +## Processing engine terminology + +### Plugin + +A plugin is a Python function that has a signature compatible with a processing +engine [trigger](#trigger). + +### Trigger + +When you create a trigger, you specify a [plugin](#plugin), a database, optional +arguments, and a trigger specification, which defines when the plugin is executed and +what data it receives. + +#### Trigger types + +InfluxDB 3 provides the following types of triggers, each with specific +specifications: + +- **Data write** (`table:` or `all_tables`): Sends a batch of written data (for a specific table or all + tables) to a plugin when the database flushes data to the Write-Ahead Log (by default, every second). +- **Scheduled** (`every:` or `cron:`): Executes a plugin on a user-configured schedule (using a + crontab or a duration). This trigger type is useful for data collection and + deadman monitoring. +- **HTTP request** (`request:`): Binds a plugin to a custom HTTP API endpoint at + `/api/v3/engine/`. + The plugin receives the HTTP request headers and content, and can parse, + process, and send the data into the database or to third-party services. + +## Activate the processing engine + +To activate the processing engine, include the `--plugin-dir ` option +when starting the {{% product-name %}} server. +`PLUGIN_DIR` is your file system location for storing [plugin](#plugin) files for +the processing engine to run. + +{{% code-placeholders "PLUGIN_DIR" %}} + +```bash +influxdb3 serve \ + # ... 
+ --plugin-dir PLUGIN_DIR +``` +{{% /code-placeholders %}} + +Replace {{% code-placeholder-key %}}`PLUGIN_DIR`{{% /code-placeholder-key %}} +with the path to your plugin directory. This path can be absolute or relative +to the current working directory of the `influxdb3` server. + +## Create a plugin + +To create a plugin, write and store a Python file in your configured `PLUGIN_DIR`. +The following example is a data write plugin that processes data before it gets +persisted to the object store. + +##### Example Python plugin for data writes + +```python +# This is the basic structure for Python plugin code that runs in the +# InfluxDB 3 Processing engine. + +# When creating a trigger, you can provide runtime arguments to your plugin, +# allowing you to write generic code that uses variables such as monitoring +# thresholds, environment variables, and host names. +# +# Use the following exact signature to define a function for the data write +# trigger. +# When you create a trigger for a data write plugin, you specify the database +# and tables that the plugin receives written data from on every WAL flush +# (default is once per second). +def process_writes(influxdb3_local, table_batches, args=None): + # here you can see logging. for now this won't do anything, but soon + # we'll capture this so you can query it from system tables + if args and "arg1" in args: + influxdb3_local.info("arg1: " + args["arg1"]) + + # here we're using arguments provided at the time the trigger was set up + # to feed into parameters that we'll put into a query + query_params = {"room": "Kitchen"} + # The following example shows how to execute a parameterized query. Only SQL is supported. + # It queries the database that the trigger is configured for. + query_result = influxdb3_local.query("SELECT * FROM home where room = '$room'", query_params) + # The result is a list of Dict that have the column name as key and value as + # value. 
+ influxdb3_local.info("query result: " + str(query_result)) + + # this is the data that is sent when data is written to the database and flushed to the WAL. + # One batch for each table (will only be one if triggered on a single table) + for table_batch in table_batches: + # here you can see that the table_name is available. + influxdb3_local.info("table: " + table_batch["table_name"]) + + # example to skip the table we're later writing data into + if table_batch["table_name"] == "some_table": + continue + + # and then the individual rows, which are Dict with keys of the column names and values + for row in table_batch["rows"]: + influxdb3_local.info("row: " + str(row)) + + # this shows building a line of LP to write back to the database. tags must go first and + # their order is important and must always be the same for each individual table. Then + # fields and lastly an optional time, which you can see in the next example below + line = LineBuilder("some_table")\ + .tag("tag1", "tag1_value")\ + .tag("tag2", "tag2_value")\ + .int64_field("field1", 1)\ + .float64_field("field2", 2.0)\ + .string_field("field3", "number three") + + # this writes it back (it actually just buffers it until the completion of this function + # at which point it will write everything back that you put in) + influxdb3_local.write(line) + + # here's another example, but with us setting a nanosecond timestamp at the end + other_line = LineBuilder("other_table") + other_line.int64_field("other_field", 1) + other_line.float64_field("other_field2", 3.14) + other_line.time_ns(1302) + + # and you can see that we can write to any DB in the server + influxdb3_local.write_to_db("mytestdb", other_line) + + # just some log output as an example + influxdb3_local.info("done") +``` + +## Test a plugin on the server + +Use the [`influxdb3 test wal_plugin`](/influxdb3/version/reference/cli/influxdb3/test/wal_plugin/) +CLI command to test your processing engine plugin safely without +affecting actual 
data. During a plugin test: + +- A query executed by the plugin queries against the server you send the request to. +- Writes aren't sent to the server but are returned to you. + +To test a plugin: + +1. Save the [example plugin code](#example-python-plugin-for-data-writes) to a + plugin file inside of the plugin directory. If you haven't yet written data + to the table in the example, comment out the lines where it queries. +2. To run the test, enter the following command with the following options: + + - `--lp` or `--file`: The line protocol to test + - Optional: `--input-arguments`: A comma-delimited list of `=` arguments for your plugin code + +{{% code-placeholders "INPUT_LINE_PROTOCOL|INPUT_ARGS|DATABASE_NAME|AUTH_TOKEN|PLUGIN_FILENAME" %}} +```bash +influxdb3 test wal_plugin \ + --database DATABASE_NAME \ + --token AUTH_TOKEN \ + --lp INPUT_LINE_PROTOCOL \ + --input-arguments INPUT_ARGS \ + PLUGIN_FILENAME +``` +{{% /code-placeholders %}} + +Replace the following: + +- {{% code-placeholder-key %}}`INPUT_LINE_PROTOCOL`{{% /code-placeholder-key %}}: the line protocol to test +- Optional: {{% code-placeholder-key %}}`INPUT_ARGS`{{% /code-placeholder-key %}}: a comma-delimited list of `=` arguments for your plugin code--for example, `arg1=hello,arg2=world` +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to test against +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: the {{% token-link "admin" %}} for your {{% product-name %}} server +- {{% code-placeholder-key %}}`PLUGIN_FILENAME`{{% /code-placeholder-key %}}: the name of the plugin file to test + +### Example: Test a plugin + +```bash +# Test a plugin +# Requires: +# - A database named `mydb` with a table named `foo` +# - A Python plugin file named `test.py` +# Test a plugin +influxdb3 test wal_plugin \ + --lp "my_measure,tag1=asdf f1=1.0 123" \ + --token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \ + --database sensors \ + 
--input-arguments "arg1=hello,arg2=world" \ + test.py +``` + +The command runs the plugin code with the test data, yields the data to the +plugin code, and then responds with the plugin result. +You can quickly see how the plugin behaves, what data it would have written to +the database, and any errors. +You can then edit your Python code in the plugins directory, and rerun the test. +The server reloads the file for every request to the `test` API. + +For more information, see [`influxdb3 test wal_plugin`](/influxdb3/version/reference/cli/influxdb3/test/wal_plugin/) +or run `influxdb3 test wal_plugin -h`. + +## Create a trigger + +With the plugin code inside the server plugin directory, and a successful test, +you're ready to create a trigger to run the plugin. Use the +[`influxdb3 create trigger` command](/influxdb3/version/reference/cli/influxdb3/create/trigger/) +to create a trigger. + +```bash +# Create a trigger that runs the plugin +influxdb3 create trigger \ + --token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \ + --database sensors \ + --plugin test_plugin \ + --trigger-spec "table:foo" \ + --trigger-arguments "arg1=hello,arg2=world" \ + trigger1 +``` + +## Enable the trigger + +After you have created a plugin and trigger, enter the following command to +enable the trigger and have it run the plugin as you write data: + +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TRIGGER_NAME" %}} +```bash +influxdb3 enable trigger \ + --token AUTH_TOKEN \ + --database DATABASE_NAME \ + TRIGGER_NAME +``` +{{% /code-placeholders %}} + +Replace the following placeholders with your values: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to enable the trigger in +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}} +- {{% code-placeholder-key %}}`TRIGGER_NAME`{{% /code-placeholder-key %}}: the name of the trigger to enable + +For example, to enable the trigger named 
`trigger1` in the `sensors` database:
+
+```bash
+influxdb3 enable trigger \
+  --token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \
+  --database sensors \
+  trigger1
+```
+
+## Next steps
+
+If you've completed this Get Started guide for {{% product-name %}},
+learn more about tools and options for:
+
+- [Writing data](/influxdb3/version/write-data/)
+- [Querying data](/influxdb3/version/query-data/)
+- [Processing data with plugins](/influxdb3/version/plugins/)
+- [Visualizing data](/influxdb3/version/visualize-data/)
diff --git a/content/shared/influxdb3-get-started/query.md b/content/shared/influxdb3-get-started/query.md
new file mode 100644
index 000000000..86c92fd9e
--- /dev/null
+++ b/content/shared/influxdb3-get-started/query.md
@@ -0,0 +1,503 @@
+
+{{% product-name %}} supports both native SQL and InfluxQL for querying data. InfluxQL is
+an SQL-like query language designed for InfluxDB v1 and customized for time
+series queries.
+
+{{% show-in "core" %}}
+{{< product-name >}} limits
+query time ranges to approximately 72 hours (both recent and historical) to
+ensure query performance. For more information about the 72-hour limitation, see
+the [update on InfluxDB 3 Core’s 72-hour limitation](https://www.influxdata.com/blog/influxdb3-open-source-public-alpha-jan-27/).
+{{% /show-in %}}
+
+> [!Note]
+> Flux, the language introduced in InfluxDB v2, is **not** supported in InfluxDB 3.
+ + + +- [Query data with the influxdb3 CLI](#query-data-with-the-influxdb3-cli) + - [Example queries](#example-queries) +- [Other tools for executing queries](#other-tools-for-executing-queries) +- [SQL vs InfluxQL](#sql-vs-influxql) + - [SQL](#sql) + - [InfluxQL](#influxql) +- [Optimize queries](#optimize-queries) + - [Last values cache](#last-values-cache) + - [Distinct values cache](#distinct-values-cache) + {{% show-in "enterprise" %}}- [File indexes](#file-indexes){{% /show-in %}} + + + +## Query data with the influxdb3 CLI + +To get started querying data in {{% product-name %}}, use the +[`influxdb3 query` command](/influxdb3/version/reference/cli/influxdb3/query/) +and provide the following: + +- `-H`, `--host`: The host URL of the server _(default is `http://127.0.0.1:8181`)_ +- `-d`, `--database`: _({{% req %}})_ The name of the database to query +- `-l`, `--language`: The query language of the provided query string + - `sql` _(default)_ + - `influxql` +- SQL or InfluxQL query as a string + +> [!Important] +> If the `INFLUXDB3_AUTH_TOKEN` environment variable defined in +> [Set up {{% product-name %}}](/influxdb3/version/get-started/setup/#set-your-token-for-authorization) +> isn't set in your environment, set it or provide your token using +> the `-t, --token` option in your command. 
+ +To query the home sensor sample data you wrote in +[Write data to {{% product-name %}}](/influxdb3/version/get-started/write/#write-data-using-the-cli), +run the following command: + +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[SQL](#) +[InfluxQL](#) +{{% /code-tabs %}} +{{% code-tab-content %}} + + +```bash +influxdb3 query \ + --database DATABASE_NAME \ + "SELECT * FROM home ORDER BY time" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} + +```bash +influxdb3 query \ + --database DATABASE_NAME \ + --language influxql \ + "SELECT * FROM home" +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +{{% /code-placeholders %}} + +_Replace {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}} +with the name of the database to query._ + +To query from a specific time range, use the `WHERE` clause to designate the +boundaries of your time range. + +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[SQL](#) +[InfluxQL](#) +{{% /code-tabs %}} +{{% code-tab-content %}} + + +```bash +influxdb3 query \ + --database DATABASE_NAME \ + "SELECT * FROM home WHERE time >= now() - INTERVAL '7 days' ORDER BY time" +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} + +```bash +influxdb3 query \ + --database DATABASE_NAME \ + --language influxql \ + "SELECT * FROM home WHERE time >= now() - 7d" +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +{{% /code-placeholders %}} + +### Example queries + +{{< expand-wrapper >}} +{{% expand "List tables in a database" %}} + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[SQL](#) +[InfluxQL](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sql +SHOW TABLES +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```sql +SHOW MEASUREMENTS +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +{{% /expand %}} +{{% expand "Return the average temperature of all rooms" 
%}} + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[SQL](#) +[InfluxQL](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sql +SELECT avg(temp) AS avg_temp FROM home +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```sql +SELECT MEAN(temp) AS avg_temp FROM home +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +{{% /expand %}} +{{% expand "Return the average temperature of the kitchen" %}} + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[SQL](#) +[InfluxQL](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sql +SELECT avg(temp) AS avg_temp FROM home WHERE room = 'Kitchen' +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```sql +SELECT MEAN(temp) AS avg_temp FROM home WHERE room = 'Kitchen' +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +{{% /expand %}} +{{% expand "Query data from an absolute time range" %}} + +{{% influxdb/custom-timestamps %}} + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[SQL](#) +[InfluxQL](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sql +SELECT + * +FROM + home +WHERE + time >= '2022-01-01T12:00:00Z' + AND time <= '2022-01-01T18:00:00Z' +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```sql +SELECT + * +FROM + home +WHERE + time >= '2022-01-01T12:00:00Z' + AND time <= '2022-01-01T18:00:00Z' +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +{{% /influxdb/custom-timestamps %}} + +{{% /expand %}} +{{% expand "Query data from a relative time range" %}} + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[SQL](#) +[InfluxQL](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sql +SELECT + * +FROM + home +WHERE + time >= now() - INTERVAL '7 days' +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```sql +SELECT + * +FROM + home +WHERE + time >= now() - 7d +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +{{% /expand %}} +{{% expand "Calculate average humidity in 3-hour windows per room" %}} + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} 
+[SQL](#) +[InfluxQL](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```sql +SELECT + date_bin(INTERVAL '3 hours', time) AS time, + room, + avg(hum) AS avg_hum +FROM + home +GROUP BY + 1, + room +ORDER BY + room, + 1 +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```sql +SELECT + MEAN(hum) AS avg_hum +FROM + home +WHERE + time >= '2022-01-01T08:00:00Z' + AND time <= '2022-01-01T20:00:00Z' +GROUP BY + time(3h), + room +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +{{% /expand %}} +{{< /expand-wrapper >}} + +## Other tools for executing queries + +Other tools are available for querying data in {{% product-name %}}, including +the following: + +{{< expand-wrapper >}} +{{% expand "Query using the API" %}} +#### Query using the API + +InfluxDB 3 supports Flight (gRPC) APIs and an HTTP API. +To query your database using the HTTP API, send a request to the `/api/v3/query_sql` or `/api/v3/query_influxql` endpoints. +In the request, specify the database name in the `db` parameter +and a query in the `q` parameter. +You can pass parameters in the query string or inside a JSON object. + +Use the `format` parameter to specify the response format: `pretty`, `jsonl`, `parquet`, `csv`, and `json`. Default is `json`. 
+ +##### Example: Query passing URL-encoded parameters + +The following example sends an HTTP `GET` request with a URL-encoded SQL query: + +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} +```bash +curl -G "http://{{< influxdb/host >}}/api/v3/query_sql" \ + --header 'Authorization: Bearer AUTH_TOKEN' \ + --data-urlencode "db=DATABASE_NAME" \ + --data-urlencode "q=select * from cpu limit 5" +``` +{{% /code-placeholders %}} + +Replace the following placeholders with your values: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}} + +##### Example: Query passing JSON parameters + +The following example sends an HTTP `POST` request with parameters in a JSON payload: + +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} +```bash +curl http://{{< influxdb/host >}}/api/v3/query_sql \ + --data '{"db": "DATABASE_NAME", "q": "select * from cpu limit 5"}' +``` +{{% /code-placeholders %}} + +Replace the following placeholders with your values: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}} + +{{% /expand %}} + +{{% expand "Query using the Python client" %}} + +#### Query using the Python client + +Use the InfluxDB 3 Python library to interact with the database and integrate with your application. +We recommend installing the required packages in a Python virtual environment for your specific project. + +To get started, install the `influxdb3-python` package. 
+
+```bash
+pip install influxdb3-python
+```
+
+From here, you can connect to your database with the client library using the **host**, **database name**, and **token**:
+
+{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
+```python
+from influxdb_client_3 import InfluxDBClient3
+
+client = InfluxDBClient3(
+    token='AUTH_TOKEN',
+    host='http://{{< influxdb/host >}}',
+    database='DATABASE_NAME'
+)
+```
+{{% /code-placeholders %}}
+
+Replace the following placeholders with your values:
+
+- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query
+- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}}
+
+The following example shows how to query using SQL, and then
+use PyArrow to explore the schema and process results.
+To authorize the query, the example retrieves the {{% token-link "database" %}}
+from the `INFLUXDB3_AUTH_TOKEN` environment variable.
+
+```python
+from influxdb_client_3 import InfluxDBClient3
+import os
+
+client = InfluxDBClient3(
+    token=os.environ.get('INFLUXDB3_AUTH_TOKEN'),
+    host='http://{{< influxdb/host >}}',
+    database='servers'
+)
+
+# Execute the query and return an Arrow table
+table = client.query(
+    query="SELECT * FROM cpu LIMIT 10",
+    language="sql"
+)
+
+print("\n#### View Schema information\n")
+print(table.schema)
+
+print("\n#### Use PyArrow to read the specified columns\n")
+print(table.column('usage_active'))
+print(table.select(['host', 'usage_active']))
+print(table.select(['time', 'host', 'usage_active']))
+
+print("\n#### Use PyArrow compute functions to aggregate data\n")
+print(table.group_by('host').aggregate([]))
+print(table.group_by('cpu').aggregate([('time_system', 'mean')]))
+```
+
+For more information about the Python client library, see the
+[`influxdb3-python` repository](https://github.com/InfluxCommunity/influxdb3-python)
+in GitHub.
+
+{{% /expand %}}
+
+{{% expand "Query using InfluxDB 3 Explorer" %}}
+
+#### Query using InfluxDB 3 Explorer
+
+You can use the InfluxDB 3 Explorer web-based interface to query and visualize data,
+and administer your {{% product-name %}} instance.
+For more information, see how to [install InfluxDB 3 Explorer](/influxdb3/explorer/install/)
+using Docker and get started querying your data.
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+## SQL vs InfluxQL
+
+{{% product-name %}} supports two query languages--SQL and InfluxQL.
+While these two query languages are similar, there are important differences to
+consider.
+
+### SQL
+
+The InfluxDB 3 SQL implementation provides a full-featured SQL query engine
+powered by [Apache DataFusion](https://datafusion.apache.org/). InfluxDB extends
+DataFusion with additional time series-specific functionality and supports
+complex SQL queries, including queries that use joins, unions, window functions,
+and more.
+
+- [SQL query guides](/influxdb3/version/query-data/sql/)
+- [SQL reference](/influxdb3/version/reference/sql/)
+- [Apache DataFusion SQL reference](https://datafusion.apache.org/user-guide/sql/index.html)
+
+### InfluxQL
+
+InfluxQL is a SQL-like query language built for InfluxDB v1 and supported in
+{{% product-name %}}. Its syntax and functionality are similar to SQL, but specifically
+designed for querying time series data. InfluxQL does not offer the full range
+of query functionality that SQL does.
+
+If you are migrating from previous versions of InfluxDB, you can continue to use
+InfluxQL and the established InfluxQL-related APIs you have been using.
+
+- [InfluxQL query guides](/influxdb3/version/query-data/influxql/)
+- [InfluxQL reference](/influxdb3/version/reference/influxql/)
+- [InfluxQL feature support](/influxdb3/version/reference/influxql/feature-support/)
+
+## Optimize queries
+
+{{% product-name %}} provides the following optimization options to improve
+specific kinds of queries:
+
+- [Last values cache](#last-values-cache)
+- [Distinct values cache](#distinct-values-cache)
+{{% show-in "enterprise" %}}- [File indexes](#file-indexes){{% /show-in %}}
+
+### Last values cache
+
+The {{% product-name %}} last values cache (LVC) stores the last N values in a
+series or column hierarchy in memory. This gives the database the ability to
+answer these kinds of queries in under 10 milliseconds.
+For information about configuring and using the LVC, see:
+
+- [Manage a last values cache](/influxdb3/version/admin/last-value-cache/)
+- [Query the last values cache](/influxdb3/version/admin/last-value-cache/query/)
+
+### Distinct values cache
+
+The {{% product-name %}} distinct values cache (DVC) stores distinct values for
+specified columns in a series or column hierarchy in memory.
+This is useful for fast metadata lookups, which can return in under 30 milliseconds.
+For information about configuring and using the DVC, see: + +- [Manage a distinct values cache](/influxdb3/version/admin/distinct-value-cache/) +- [Query the distinct values cache](/influxdb3/version/admin/distinct-value-cache/query/) + +{{% show-in "enterprise" %}} +### File indexes + +{{% product-name %}} lets you customize how your data is indexed to help +optimize query performance for your specific workload, especially workloads that +include single-series queries. Define custom indexing strategies for databases +or specific tables. For more information, see +[Manage file indexes](/influxdb3/enterprise/admin/file-index/). + +{{% /show-in %}} + +{{% page-nav + prev="/influxdb3/version/get-started/write/" + prevText="Write data" + next="/influxdb3/version/get-started/process/" + nextText="Processing engine" +%}} diff --git a/content/shared/influxdb3-get-started/setup.md b/content/shared/influxdb3-get-started/setup.md new file mode 100644 index 000000000..e8a3aa83f --- /dev/null +++ b/content/shared/influxdb3-get-started/setup.md @@ -0,0 +1,541 @@ + +- [Prerequisites](#prerequisites) +- [Start InfluxDB](#start-influxdb) + - [Object store examples](#object-store-examples) +{{% show-in "enterprise" %}} +- [Set up licensing](#set-up-licensing) + - [Available license types](#available-license-types) +{{% /show-in %}} +- [Set up authorization](#set-up-authorization) + - [Create an operator token](#create-an-operator-token) + - [Set your token for authorization](#set-your-token-for-authorization) + + + +## Prerequisites + +To get started, you'll need: + +- **{{% product-name %}}**: [Install and verify the latest version](/influxdb3/version/install/) on your system. 
+- If you want to persist data, have access to one of the following: + - A directory on your local disk where you can persist data (used by examples in this guide) + - S3-compatible object store and credentials + +## Start InfluxDB + +Use the [`influxdb3 serve` command](/influxdb3/version/reference/cli/influxdb3/serve/) +to start {{% product-name %}}. +Provide the following: + +{{% show-in "enterprise" %}} +- `--node-id`: A string identifier that distinguishes individual server + instances within the cluster. This forms the final part of the storage path: + `//`. + In a multi-node setup, this ID is used to reference specific nodes. +- `--cluster-id`: A string identifier that determines part of the storage path + hierarchy. All nodes within the same cluster share this identifier. + The storage path follows the pattern `//`. + In a multi-node setup, this ID is used to reference the entire cluster. +{{% /show-in %}} +{{% show-in "core" %}} +- `--node-id`: A string identifier that distinguishes individual server instances. + This forms the final part of the storage path: `/`. +{{% /show-in %}} +- `--object-store`: Specifies the type of object store to use. + InfluxDB supports the following: + + - `file`: local file system + - `memory`: in memory _(no object persistence)_ + - `memory-throttled`: like `memory` but with latency and throughput that + somewhat resembles a cloud-based object store + - `s3`: AWS S3 and S3-compatible services like Ceph or Minio + - `google`: Google Cloud Storage + - `azure`: Azure Blob Storage + +- Other object store parameters depending on the selected `object-store` type. + For example, if you use `s3`, you must provide the bucket name and credentials. + +> [!Note] +> #### Diskless architecture +> +> InfluxDB 3 supports a diskless architecture that can operate with object +> storage alone, eliminating the need for locally attached disks. +> {{% product-name %}} can also work with only local disk storage when needed. 
+> +> {{% show-in "enterprise" %}} +> The combined path structure `//` ensures +> proper organization of data in your object store, allowing for clean +> separation between clusters and individual nodes. +> {{% /show-in %}} + +For this getting started guide, use the `file` object store to persist data to +your local disk. + +{{% show-in "enterprise" %}} +```bash +# File system object store +# Provide the filesystem directory +influxdb3 serve \ + --node-id host01 \ + --cluster-id cluster01 \ + --object-store file \ + --data-dir ~/.influxdb3 +``` +{{% /show-in %}} +{{% show-in "core" %}} +```bash +# File system object store +# Provide the file system directory +influxdb3 serve \ + --node-id host01 \ + --object-store file \ + --data-dir ~/.influxdb3 +``` +{{% /show-in %}} + +### Object store examples + +{{< expand-wrapper >}} +{{% expand "File system object store" %}} + +Store data in a specified directory on the local filesystem. +This is the default object store type. + +Replace the following with your values: + +{{% show-in "enterprise" %}} +```bash +# Filesystem object store +# Provide the filesystem directory +influxdb3 serve \ + --node-id host01 \ + --cluster-id cluster01 \ + --object-store file \ + --data-dir ~/.influxdb3 +``` +{{% /show-in %}} +{{% show-in "core" %}} +```bash +# File system object store +# Provide the file system directory +influxdb3 serve \ + --node-id host01 \ + --object-store file \ + --data-dir ~/.influxdb3 +``` +{{% /show-in %}} + +{{% /expand %}} +{{% expand "Docker with a mounted file system object store" %}} + +To run the [Docker image](/influxdb3/version/install/#docker-image) and persist +data to the local file system, mount a volume for the object store--for example, +provide the following options with your `docker run` command: + +- `--volume /path/on/host:/path/in/container`: Mounts a directory from your file system to the container +- `--object-store file --data-dir /path/in/container`: Use the volume mount for object storage + 
+{{% show-in "enterprise" %}} + +```bash +# File system object store with Docker +# Create a mount +# Provide the mount path +docker run -it \ + --volume /path/on/host:/path/in/container \ + influxdb:3-enterprise influxdb3 serve \ + --node-id my_host \ + --cluster-id my_cluster \ + --object-store file \ + --data-dir /path/in/container +``` +{{% /show-in %}} +{{% show-in "core" %}} + +```bash +# File system object store with Docker +# Create a mount +# Provide the mount path +docker run -it \ + --volume /path/on/host:/path/in/container \ + influxdb:3-core influxdb3 serve \ + --node-id my_host \ + --object-store file \ + --data-dir /path/in/container +``` +{{% /show-in %}} + +> [!Note] +> +> The {{% product-name %}} Docker image exposes port `8181`, the `influxdb3` +> server default for HTTP connections. +> To map the exposed port to a different port when running a container, see the +> Docker guide for [Publishing and exposing ports](https://docs.docker.com/get-started/docker-concepts/running-containers/publishing-ports/). + +{{% /expand %}} +{{% expand "Docker compose with a mounted file system object store" %}} +{{% show-in "enterprise" %}} +1. Open `compose.yaml` for editing and add a `services` entry for {{% product-name %}}. + --for example: + + ```yaml + # compose.yaml + services: + influxdb3-{{< product-key >}}: + container_name: influxdb3-{{< product-key >}} + image: influxdb:3-{{< product-key >}} + ports: + - 8181:8181 + command: + - influxdb3 + - serve + - --node-id=node0 + - --cluster-id=cluster0 + - --object-store=file + - --data-dir=/var/lib/influxdb3 + - --plugin-dir=/var/lib/influxdb3-plugins + environment: + - INFLUXDB3_LICENSE_EMAIL=EMAIL_ADDRESS + ``` + _Replace `EMAIL_ADDRESS` with your email address to bypass the email prompt + when generating a trial or at-home license. For more information, see [Manage your + {{% product-name %}} license](/influxdb3/version/admin/license/)_. +{{% /show-in %}} +{{% show-in "core" %}} +1. 
Open `compose.yaml` for editing and add a `services` entry for {{% product-name %}}--for example: + + ```yaml + # compose.yaml + services: + influxdb3-{{< product-key >}}: + container_name: influxdb3-{{< product-key >}} + image: influxdb:3-{{< product-key >}} + ports: + - 8181:8181 + command: + - influxdb3 + - serve + - --node-id=node0 + - --object-store=file + - --data-dir=/var/lib/influxdb3 + - --plugin-dir=/var/lib/influxdb3-plugins + ``` +{{% /show-in %}} + +2. Use the Docker Compose CLI to start the server. + + Optional: to make sure you have the latest version of the image before you + start the server, run `docker compose pull`. + + + ```bash + docker compose pull && docker compose run influxdb3-{{< product-key >}} + ``` + +InfluxDB 3 starts in a container with host port `8181` mapped to container port +`8181`, the `influxdb3` server default for HTTP connections. + +> [!Tip] +> #### Custom port mapping +> +> To customize your `influxdb3` server hostname and port, specify the +> [`--http-bind` option or the `INFLUXDB3_HTTP_BIND_ADDR` environment variable](/influxdb3/version/reference/config-options/#http-bind). +> +> For more information about mapping your container port to a specific host port, see the +> Docker guide for [Publishing and exposing ports](https://docs.docker.com/get-started/docker-concepts/running-containers/publishing-ports/). + +> [!Note] +> #### Stopping an InfluxDB 3 container +> +> To stop a running InfluxDB 3 container, find and terminate the process or container--for example: +> +> +> ```bash +> docker container ls --filter "name=influxdb3" +> docker kill +> ``` +> +> _Currently, a bug prevents using {{< keybind all="Ctrl+c" >}} in the terminal to stop an InfluxDB 3 container._ +{{% /expand %}} +{{% expand "S3 object storage" %}} + +Store data in an S3-compatible object store. +This is useful for production deployments that require high availability and durability. +Provide your bucket name and credentials to access the S3 object store. 
+ +{{% show-in "enterprise" %}} +```bash +# S3 object store (default is the us-east-1 region) +# Specify the object store type and associated options +influxdb3 serve \ + --node-id host01 \ + --cluster-id cluster01 \ + --object-store s3 \ + --bucket OBJECT_STORE_BUCKET \ + --aws-access-key AWS_ACCESS_KEY_ID \ + --aws-secret-access-key AWS_SECRET_ACCESS_KEY +``` + +```bash +# Minio or other open source object store +# (using the AWS S3 API with additional parameters) +# Specify the object store type and associated options +influxdb3 serve \ + --node-id host01 \ + --cluster-id cluster01 \ + --object-store s3 \ + --bucket OBJECT_STORE_BUCKET \ + --aws-access-key-id AWS_ACCESS_KEY_ID \ + --aws-secret-access-key AWS_SECRET_ACCESS_KEY \ + --aws-endpoint ENDPOINT \ + --aws-allow-http +``` +{{% /show-in %}} +{{% show-in "core" %}} +```bash +# S3 object store (default is the us-east-1 region) +# Specify the object store type and associated options +influxdb3 serve \ + --node-id host01 \ + --object-store s3 \ + --bucket OBJECT_STORE_BUCKET \ + --aws-access-key AWS_ACCESS_KEY_ID \ + --aws-secret-access-key AWS_SECRET_ACCESS_KEY +``` + +```bash +# Minio or other open source object store +# (using the AWS S3 API with additional parameters) +# Specify the object store type and associated options +influxdb3 serve \ + --node-id host01 \ + --object-store s3 \ + --bucket OBJECT_STORE_BUCKET \ + --aws-access-key-id AWS_ACCESS_KEY_ID \ + --aws-secret-access-key AWS_SECRET_ACCESS_KEY \ + --aws-endpoint ENDPOINT \ + --aws-allow-http +``` +{{% /show-in %}} + +{{% /expand %}} +{{% expand "Memory-based object store" %}} + +Store data in RAM without persisting it on shutdown. +It's useful for rapid testing and development. 
+ +{{% show-in "enterprise" %}} +```bash +# Memory object store +# Stores data in RAM; doesn't persist data +influxdb3 serve \ + --node-id host01 \ + --cluster-id cluster01 \ + --object-store memory +``` +{{% /show-in %}} +{{% show-in "core" %}} +```bash +# Memory object store +# Stores data in RAM; doesn't persist data +influxdb3 serve \ + --node-id host01 \ + --object-store memory +``` +{{% /show-in %}} + +{{% /expand %}} +{{< /expand-wrapper >}} + +For more information about server options, use the CLI help or view the +[InfluxDB 3 CLI reference](/influxdb3/version/reference/cli/influxdb3/serve/): + +```bash +influxdb3 serve --help +``` + +{{% show-in "enterprise" %}} +## Set up licensing + +When you first start a new instance, {{% product-name %}} prompts you to select a +license type. + +InfluxDB 3 Enterprise licenses: + +- **Authorize** usage of InfluxDB 3 Enterprise software for a single cluster. +- **Apply per cluster**, with limits based primarily on CPU cores. +- **Vary by license type**, each offering different capabilities and restrictions. + +### Available license types + +- **Trial**: 30-day trial license with full access to InfluxDB 3 Enterprise capabilities. +- **At-Home**: For at-home hobbyist use with limited access to InfluxDB 3 Enterprise capabilities. +- **Commercial**: Commercial license with full access to InfluxDB 3 Enterprise capabilities. + +> [!Important] +> #### Trial and at-home licenses with Docker +> +> To generate the trial or home license in Docker, bypass the email prompt. +> The first time you start a new instance, provide your email address with the +> `--license-email` option or the `INFLUXDB3_LICENSE_EMAIL` environment variable. +> +> _Currently, if you use Docker and enter your email address in the prompt, a bug may +> prevent the container from generating the license ._ +> +> For more information, see [the Docker Compose example](/influxdb3/enterprise/admin/license/?t=Docker+compose#start-the-server-with-your-license-email). 
+{{% /show-in %}} + +> [!Tip] +> #### Use the InfluxDB 3 Explorer query interface (beta) +> +> You can complete the remaining steps in this guide using InfluxDB 3 Explorer +> (currently in beta), the web-based query and administrative interface for InfluxDB 3. +> Explorer provides visual management of databases and tokens and an +> easy way to write and query your time series data. +> +> For more information, see the [InfluxDB 3 Explorer documentation](/influxdb3/explorer/). + +## Set up authorization + +{{% product-name %}} uses token-based authorization to authorize actions in the +database. Authorization is enabled by default when you start the server. +With authorization enabled, you must provide a token with `influxdb3` CLI +commands and HTTP API requests. + +{{% show-in "enterprise" %}} +{{% product-name %}} supports the following types of tokens: + +- **admin token**: Grants access to all CLI actions and API endpoints. +- **resource tokens**: Tokens that grant read and write access to specific + resources (databases and system information endpoints) on the server. + + - A database token grants access to write and query data in a + database + - A system token grants read access to system information endpoints and + metrics for the server +{{% /show-in %}} +{{% show-in "core" %}} +{{% product-name %}} supports _admin_ tokens, which grant access to all CLI actions and API endpoints. +{{% /show-in %}} + +For more information about tokens and authorization, see [Manage tokens](/influxdb3/version/admin/tokens/). + +### Create an operator token + +After you start the server, create your first admin token. +The first admin token you create is the _operator_ token for the server. 
+ +Use the [`influxdb3 create token` command](/influxdb3/version/reference/cli/influxdb3/create/token/) +with the `--admin` option to create your operator token: + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[CLI](#) +[Docker](#) +{{% /code-tabs %}} +{{% code-tab-content %}} + +```bash +influxdb3 create token --admin +``` + +{{% /code-tab-content %}} +{{% code-tab-content %}} + +{{% code-placeholders "CONTAINER_NAME" %}} +```bash +# With Docker — in a new terminal: +docker exec -it CONTAINER_NAME influxdb3 create token --admin +``` +{{% /code-placeholders %}} + +Replace {{% code-placeholder-key %}}`CONTAINER_NAME`{{% /code-placeholder-key %}} with the name of your running Docker container. + +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +The command returns a token string for authenticating CLI commands and API requests. + +> [!Important] +> #### Store your token securely +> +> InfluxDB displays the token string only when you create it. +> Store your token securely—you cannot retrieve it from the database later. + +### Set your token for authorization + +Use your operator token to authenticate server actions in {{% product-name %}}, +such as {{% show-in "enterprise" %}}creating additional tokens, {{% /show-in %}} +performing administrative tasks{{% show-in "enterprise" %}},{{% /show-in %}} +and writing and querying data. + +Use one of the following methods to provide your token and authenticate `influxdb3` CLI commands. + +In your command, replace {{% code-placeholder-key %}}`YOUR_AUTH_TOKEN`{{% /code-placeholder-key %}} with your token string (for example, the [operator token](#create-an-operator-token) from the previous step). 
+ +{{< tabs-wrapper >}} +{{% tabs %}} +[Environment variable (recommended)](#) +[Command option](#) +{{% /tabs %}} +{{% tab-content %}} + +Set the `INFLUXDB3_AUTH_TOKEN` environment variable to have the CLI use your +token automatically: + +{{% code-placeholders "YOUR_AUTH_TOKEN" %}} +```bash +export INFLUXDB3_AUTH_TOKEN=YOUR_AUTH_TOKEN +``` +{{% /code-placeholders %}} + +{{% /tab-content %}} +{{% tab-content %}} + +Include the `--token` option with CLI commands: + +{{% code-placeholders "YOUR_AUTH_TOKEN" %}} +```bash +influxdb3 show databases --token YOUR_AUTH_TOKEN +``` +{{% /code-placeholders %}} + +{{% /tab-content %}} +{{< /tabs-wrapper >}} + +For HTTP API requests, include your token in the `Authorization` header--for example: + +{{% code-placeholders "YOUR_AUTH_TOKEN" %}} +```bash +curl "http://{{< influxdb/host >}}/api/v3/configure/database" \ + --header "Authorization: Bearer YOUR_AUTH_TOKEN" +``` +{{% /code-placeholders %}} + +#### Learn more about tokens and permissions + +- [Manage admin tokens](/influxdb3/version/admin/tokens/admin/) - Understand and + manage operator and named admin tokens +{{% show-in "enterprise" %}} +- [Manage resource tokens](/influxdb3/version/admin/tokens/resource/) - Create, + list, and delete resource tokens +{{% /show-in %}} +- [Authentication](/influxdb3/version/reference/internals/authentication/) - + Understand authentication, authorizations, and permissions in {{% product-name %}} + +{{% show-in "core" %}} +{{% page-nav + prev="/influxdb3/version/get-started/" + prevText="Get started" + next="/influxdb3/version/get-started/write/" + nextText="Write data" +%}} +{{% /show-in %}} +{{% show-in "enterprise" %}} +{{% page-nav + prev="/influxdb3/version/get-started/" + prevText="Get started" + next="/influxdb3/version/get-started/multi-server/" + nextText="Create a multi-node cluster" +%}} +{{% /show-in %}} diff --git a/content/shared/influxdb3-get-started/write.md b/content/shared/influxdb3-get-started/write.md new file mode 
100644 index 000000000..0f5d775f2 --- /dev/null +++ b/content/shared/influxdb3-get-started/write.md @@ -0,0 +1,252 @@ + + +{{% product-name %}} is designed for high write-throughput and uses an efficient, +human-readable write syntax called _[line protocol](#line-protocol)_. InfluxDB +is a schema-on-write database, meaning you can start writing data and InfluxDB +creates the logical database, tables, and their schemas automatically, without +any required intervention. Once InfluxDB creates the schema, it validates future +write requests against the schema before accepting new data. +Both new tags and fields can be added later as your schema changes. + +{{% show-in "core" %}} +> [!Note] +> #### InfluxDB 3 Core is optimized for recent data +> +> {{% product-name %}} is optimized for recent data but accepts writes from any time period. +> The system persists data to Parquet files for historical analysis with [InfluxDB 3 Enterprise](/influxdb3/enterprise/get-started/) or third-party tools. +> For extended historical queries and optimized data organization, consider using [InfluxDB 3 Enterprise](/influxdb3/enterprise/get-started/). +{{% /show-in %}} + + + +- [Line protocol](#line-protocol) +- [Construct line protocol](#construct-line-protocol) +- [Write data using the CLI](#write-data-using-the-cli) +- [Other tools for writing data](#other-tools-for-writing-data) + + + +## Line protocol + +{{% product-name %}} accepts data in +[line protocol](/influxdb3/version/reference/syntax/line-protocol/) syntax. +Line protocol consists of the following elements: + + + +{{< req type="key" >}} + +- {{< req "\*" >}} **table**: A string that identifies the + [table](/influxdb3/version/reference/glossary/#table) to store the data in. +- **tag set**: Comma-delimited list of key value pairs, each representing a tag. + Tag keys and values are unquoted strings. 
_Spaces, commas, and equal characters
+  must be escaped._
+- {{< req "\*" >}} **field set**: Comma-delimited list of key value pairs, each
+  representing a field.
+  Field keys are unquoted strings. _Spaces and commas must be escaped._
+  Field values can be one of the following types:
+
+  - [strings](/influxdb3/clustered/reference/syntax/line-protocol/#string) (quoted)
+  - [floats](/influxdb3/clustered/reference/syntax/line-protocol/#float)
+  - [integers](/influxdb3/clustered/reference/syntax/line-protocol/#integer)
+  - [unsigned integers](/influxdb3/clustered/reference/syntax/line-protocol/#uinteger)
+  - [booleans](/influxdb3/clustered/reference/syntax/line-protocol/#boolean)
+
+- **timestamp**: [Unix timestamp](/influxdb3/clustered/reference/syntax/line-protocol/#unix-timestamp)
+associated with the data. InfluxDB supports up to nanosecond precision.
+
+
+{{< expand-wrapper >}}
+{{% expand "How are InfluxDB line protocol elements parsed?" %}}
+
+
+
+- **table**: Everything before the _first unescaped comma before the first
+  whitespace_.
+- **tag set**: Key-value pairs between the _first unescaped comma_ and the _first
+  unescaped whitespace_.
+- **field set**: Key-value pairs between the _first and second unescaped whitespaces_.
+- **timestamp**: Integer value after the _second unescaped whitespace_.
+- Lines are separated by the newline character (`\n`). Line protocol is
+whitespace sensitive.
+
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+_For schema design recommendations, see
+[InfluxDB schema design recommendations](/influxdb3/version/write-data/best-practices/schema-design/)._
+
+---
+
+{{< influxdb/line-protocol version="v3" >}}
+
+---
+
+## Construct line protocol
+
+
+
+With a basic understanding of line protocol, you can now construct line protocol
+and write data to {{% product-name %}}.
+Consider a use case where you collect data from sensors in your home.
+Each sensor collects temperature, humidity, and carbon monoxide readings.
+To collect this data, use the following schema: + +- **table**: `home` + - **tags** + - `room`: Living Room or Kitchen + - **fields** + - `temp`: temperature in °C (float) + - `hum`: percent humidity (float) + - `co`: carbon monoxide in parts per million (integer) + - **timestamp**: Unix timestamp in _second_ precision + + +The following line protocol sample represents data collected hourly beginning at +{{% influxdb/custom-timestamps-span %}}**2022-01-01T08:00:00Z (UTC)** until **2022-01-01T20:00:00Z (UTC)**{{% /influxdb/custom-timestamps-span %}}. +_These timestamps are dynamic and can be updated by clicking the {{% icon "clock" %}} +icon in the bottom right corner._ + +{{% influxdb/custom-timestamps %}} + +##### Home sensor data line protocol + +```text +home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000 +home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000 +home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600 +home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600 +home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200 +home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200 +home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800 +home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800 +home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400 +home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400 +home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000 +home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000 +home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1641045600 +home,room=Kitchen temp=22.8,hum=36.3,co=1i 1641045600 +home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1641049200 +home,room=Kitchen temp=22.7,hum=36.2,co=3i 1641049200 +home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1641052800 +home,room=Kitchen temp=22.4,hum=36.0,co=7i 1641052800 +home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1641056400 +home,room=Kitchen temp=22.7,hum=36.0,co=9i 1641056400 +home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1641060000 +home,room=Kitchen 
temp=23.3,hum=36.9,co=18i 1641060000 +home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1641063600 +home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641063600 +home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1641067200 +home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200 +``` + +{{% /influxdb/custom-timestamps %}} + +## Write data using the CLI + +To quickly get started writing data, use the +[`influxdb3 write` command](/influxdb3/version/reference/cli/influxdb3/write/). +Include the following: + +- `--database` option that identifies the target database +- `--token` option that specifies the token to use _(unless the `INFLUXDB3_AUTH_TOKEN` + environment variable is already set)_ +- Quoted line protocol data via standard input (stdin) + +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} +```bash +influxdb3 write \ + --database DATABASE_NAME \ + --token AUTH_TOKEN \ + --precision s \ +'home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000 +home,room=Kitchen temp=21.0,hum=35.9,co=0i 1641024000 +home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600 +home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600 +home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1641031200 +home,room=Kitchen temp=22.7,hum=36.1,co=0i 1641031200 +home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1641034800 +home,room=Kitchen temp=22.4,hum=36.0,co=0i 1641034800 +home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1641038400 +home,room=Kitchen temp=22.5,hum=36.0,co=0i 1641038400 +home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1641042000 +home,room=Kitchen temp=22.8,hum=36.5,co=1i 1641042000 +home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1641045600 +home,room=Kitchen temp=22.8,hum=36.3,co=1i 1641045600 +home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1641049200 +home,room=Kitchen temp=22.7,hum=36.2,co=3i 1641049200 +home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1641052800 +home,room=Kitchen temp=22.4,hum=36.0,co=7i 1641052800 +home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1641056400 
+home,room=Kitchen temp=22.7,hum=36.0,co=9i 1641056400 +home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1641060000 +home,room=Kitchen temp=23.3,hum=36.9,co=18i 1641060000 +home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1641063600 +home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641063600 +home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1641067200 +home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200' +``` +{{% /code-placeholders %}} + +In the code samples, replace the following placeholders with your values: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: + the name of the [database](/influxdb3/version/admin/databases/) to write to +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: + your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission + to write to the specified database{{% /show-in %}} + +### Write data from a file + +To write line protocol you have saved to a file, pass the `--file` option--for example, save the +[sample line protocol](#home-sensor-data-line-protocol) to a file named `sensor_data` +and then enter the following command: + +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} +```bash +influxdb3 write \ + --database DATABASE_NAME \ + --token AUTH_TOKEN \ + --precision s \ + --accept-partial \ + --file path/to/sensor_data +``` +{{% /code-placeholders %}} + +Replace the following placeholders with your values: +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the [database](/influxdb3/version/admin/databases/) to write to. 
+- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to write to the specified database{{% /show-in %}} + +## Other tools for writing data + +There are many ways to write data to your {{% product-name %}} database, including: + +- [InfluxDB HTTP API](/influxdb3/version/write-data/http-api/): Recommended for + batching and higher-volume write workloads. +- [InfluxDB client libraries](/influxdb3/version/write-data/client-libraries/): + Client libraries that integrate with your code to construct data as time + series points and write the data as line protocol to your + {{% product-name %}} database. +- [Telegraf](/telegraf/v1/): A data collection agent with over 300 plugins for + collecting, processing, and writing data. + +For more information, see [Write data to {{% product-name %}}](/influxdb3/version/write-data/). + +{{% show-in "enterprise" %}} +{{% page-nav + prev="/influxdb3/version/get-started/multi-server/" + prevText="Create a multi-node cluster" + next="/influxdb3/version/get-started/query/" + nextText="Query data" +%}} +{{% /show-in %}} + +{{% show-in "core" %}} +{{% page-nav + prev="/influxdb3/version/get-started/setup/" + prevText="Set up InfluxDB" + next="/influxdb3/version/get-started/query/" + nextText="Query data" +%}} +{{% /show-in %}} diff --git a/content/shared/influxdb3-query-guides/execute-queries/influxdb-v1-api.md b/content/shared/influxdb3-query-guides/execute-queries/influxdb-v1-api.md index e0a296c5b..810c075ae 100644 --- a/content/shared/influxdb3-query-guides/execute-queries/influxdb-v1-api.md +++ b/content/shared/influxdb3-query-guides/execute-queries/influxdb-v1-api.md @@ -21,12 +21,6 @@ Provide the following with your request: - **Headers:** - **Authorization:** `Bearer AUTH_TOKEN` - - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization - > token. 
You can either omit this header or include it with an arbitrary - > token string. - - **Query parameters:** - **db**: the database to query - **rp**: Optional: the retention policy to query @@ -44,9 +38,9 @@ curl --get https://{{< influxdb/host >}}/query \ Replace the following configuration values: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: - the name of the database to query + the name of the [database](/influxdb3/version/admin/databases/) to query - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: - your authorization token + your {{< product-name >}} {{% token-link %}}{{% show-in "enterprise" %}} with read access to the database{{% /show-in %}} ## Return results as JSON or CSV @@ -57,7 +51,7 @@ with the `application/csv` or `text/csv` MIME type: {{% code-placeholders "(DATABASE|AUTH)_(NAME|TOKEN)" %}} ```sh curl --get https://{{< influxdb/host >}}/query \ - --header "Authorization: BEARER AUTH_TOKEN" \ + --header "Authorization: Bearer AUTH_TOKEN" \ --header "Accept: application/csv" \ --data-urlencode "db=DATABASE_NAME" \ --data-urlencode "q=SELECT * FROM home" diff --git a/content/shared/influxdb3-query-guides/execute-queries/influxdb3-api.md b/content/shared/influxdb3-query-guides/execute-queries/influxdb3-api.md index a8ab9d018..ef8f0c6a1 100644 --- a/content/shared/influxdb3-query-guides/execute-queries/influxdb3-api.md +++ b/content/shared/influxdb3-query-guides/execute-queries/influxdb3-api.md @@ -35,7 +35,8 @@ Include the following parameters: The following example sends an HTTP `GET` request with a URL-encoded SQL query: ```bash -curl -v "http://{{< influxdb/host >}}/api/v3/query_sql?db=servers&q=select+*+from+cpu+limit+5" +curl "http://{{< influxdb/host >}}/api/v3/query_sql?db=servers&q=select+*+from+cpu+limit+5" \ + --header "Authorization: Bearer AUTH_TOKEN" ``` ### Example: Query passing JSON parameters @@ -44,7 +45,8 @@ The following example sends an HTTP `POST` request with parameters in a 
JSON pay ```bash curl http://{{< influxdb/host >}}/api/v3/query_sql \ - --data '{"db": "server", "q": "select * from cpu limit 5"}' + --header "Authorization: Bearer AUTH_TOKEN" \ + --json '{"db": "server", "q": "select * from cpu limit 5"}' ``` ### Query system information @@ -71,7 +73,8 @@ tables (`"table_schema":"iox"`), system tables, and information schema tables for a database: ```bash -curl "http://{{< influxdb/host >}}/api/v3/query_sql?db=mydb&format=jsonl&q=show%20tables" +curl "http://{{< influxdb/host >}}/api/v3/query_sql?db=mydb&format=jsonl&q=show%20tables" \ + --header "Authorization: Bearer AUTH_TOKEN" ``` The response body contains the following JSONL: @@ -117,7 +120,7 @@ that surround field names._ ```bash curl "http://localhost:8181/api/v3/query_sql" \ - --header "Content-Type: application/json" \ + --header "Authorization: Bearer AUTH_TOKEN" \ --json '{ "db": "mydb", "q": "SELECT * FROM information_schema.columns WHERE table_schema = '"'iox'"' AND table_name = '"'system_swap'"'", @@ -144,7 +147,7 @@ To view recently executed queries, query the `queries` system table: ```bash curl "http://localhost:8181/api/v3/query_sql" \ - --header "Content-Type: application/json" \ + --header "Authorization: Bearer AUTH_TOKEN" \ --json '{ "db": "mydb", "q": "SELECT * FROM system.queries LIMIT 2", @@ -180,7 +183,8 @@ Include the following parameters: The following example sends an HTTP `GET` request with a URL-encoded InfluxQL query: ```bash -curl -v "http://{{< influxdb/host >}}/api/v3/query_influxql?db=servers&q=select+*+from+cpu+limit+5" +curl "http://{{< influxdb/host >}}/api/v3/query_influxql?db=servers&q=select+*+from+cpu+limit+5" \ + --header "Authorization: Bearer AUTH_TOKEN" ``` ### Example: Query passing JSON parameters @@ -189,5 +193,6 @@ The following example sends an HTTP `POST` request with parameters in a JSON pay ```bash curl http://{{< influxdb/host >}}/api/v3/query_influxql \ - --data '{"db": "server", "q": "select * from cpu limit 5"}' + 
--header "Authorization: Bearer AUTH_TOKEN" \ + --json '{"db": "server", "q": "select * from cpu limit 5"}' ``` \ No newline at end of file diff --git a/content/shared/influxdb3-query-guides/execute-queries/influxdb3-cli.md b/content/shared/influxdb3-query-guides/execute-queries/influxdb3-cli.md index 49468f277..23edcb6c1 100644 --- a/content/shared/influxdb3-query-guides/execute-queries/influxdb3-cli.md +++ b/content/shared/influxdb3-query-guides/execute-queries/influxdb3-cli.md @@ -4,12 +4,12 @@ to query data in {{< product-name >}} with SQL or InfluxQL. Provide the following with your command: - + - `INFLUXDB3_AUTH_TOKEN` environment variable - **Database name**: The name of the database to query. Provide this using one of the following: @@ -53,6 +53,7 @@ Provide the following with your command: ```bash influxdb3 query \ + --token AUTH_TOKEN \ --database DATABASE_NAME \ "SELECT * FROM home" ``` @@ -62,6 +63,7 @@ influxdb3 query \ ```bash influxdb3 query \ + --token AUTH_TOKEN \ --database DATABASE_NAME \ --file ./query.sql ``` @@ -70,7 +72,7 @@ influxdb3 query \ ```bash -cat ./query.sql | influxdb3 query --database DATABASE_NAME +cat ./query.sql | influxdb3 query --token AUTH_TOKEN --database DATABASE_NAME ``` {{% /code-tab-content %}} {{< /code-tabs-wrapper >}} @@ -94,6 +96,7 @@ cat ./query.sql | influxdb3 query --database DATABASE_NAME ```bash influxdb3 query \ + --token AUTH_TOKEN \ --language influxql \ --database DATABASE_NAME \ "SELECT * FROM home" @@ -104,8 +107,8 @@ influxdb3 query \ ```bash influxdb3 query \ + --token AUTH_TOKEN \ --language influxql \ - --database DATABASE_NAME \ --file ./query.influxql ``` {{% /code-tab-content %}} @@ -114,6 +117,7 @@ influxdb3 query \ ```bash cat ./query.influxql | influxdb3 query \ + --token AUTH_TOKEN \ --language influxql \ --database DATABASE_NAME ``` @@ -150,6 +154,7 @@ Use the `--format` flag to specify the output format: {{% influxdb/custom-timestamps %}} ```sh influxdb3 query \ + --token AUTH_TOKEN \ 
--database DATABASE_NAME \ --format json \ "SELECT * FROM home WHERE time >= '2022-01-01T08:00:00Z' LIMIT 5" @@ -217,6 +222,7 @@ the `influxdb3 query` command: {{% influxdb/custom-timestamps %}} ```sh influxdb3 query \ + --token AUTH_TOKEN \ --database DATABASE_NAME \ --format parquet \ --output path/to/results.parquet \ diff --git a/content/shared/influxdb3-sample-data/sample-data.md b/content/shared/influxdb3-sample-data/sample-data.md index 885a1f081..213806595 100644 --- a/content/shared/influxdb3-sample-data/sample-data.md +++ b/content/shared/influxdb3-sample-data/sample-data.md @@ -216,21 +216,16 @@ home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200 Replace the following in the sample script: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: - the name of database to write to + the name of [database](/influxdb3/version/admin/databases/) to write to - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: - your {{< product-name >}} authorization token - - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization token. - > You can either omit the CLI `--token` option or the HTTP `Authorization` header or - > you can provide an arbitrary token string. + your {{< product-name >}} {{% token-link %}} {{% /expand %}} {{< /expand-wrapper >}} ## Home sensor actions data -Includes hypothetical actions triggered by data in the [Get started home sensor data](#get-started-home-sensor-data) +Includes hypothetical actions triggered by data in the [home sensor data](#home-sensor-data) and is a companion dataset to that sample dataset. To customize timestamps in the dataset, use the {{< icon "clock" >}} button in the lower right corner of the page. 
@@ -371,12 +366,7 @@ Replace the following in the sample script: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of database to write to - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: - your {{< product-name >}} authorization token - - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization token. - > You can either omit the CLI `--token` option or the HTTP `Authorization` header or - > you can provide an arbitrary token string. + your {{< product-name >}} {{% token-link %}} {{% /expand %}} {{< /expand-wrapper >}} @@ -478,12 +468,7 @@ Replace the following in the sample script: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of database to write to - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: - your {{< product-name >}} authorization token - - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization token. - > You can either omit the CLI `--token` option or the HTTP `Authorization` header or - > you can provide an arbitrary token string. + your {{< product-name >}} {{% token-link %}} {{% /expand %}} {{< /expand-wrapper >}} @@ -575,12 +560,7 @@ Replace the following in the sample script: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of database to write to - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: - your {{< product-name >}} authorization token - - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization token. - > You can either omit the CLI `--token` option or the HTTP `Authorization` header or - > you can provide an arbitrary token string. 
+ your {{< product-name >}} {{% token-link %}} {{% /expand %}} {{< /expand-wrapper >}} @@ -674,12 +654,7 @@ Replace the following in the sample script: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of database to write to - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: - your {{< product-name >}} authorization token - - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization token. - > You can either omit the CLI `--token` option or the HTTP `Authorization` header or - > you can provide an arbitrary token string. + your {{< product-name >}} {{% token-link %}} {{% /expand %}} {{< /expand-wrapper >}} diff --git a/content/shared/influxdb3-visualize/grafana.md b/content/shared/influxdb3-visualize/grafana.md index 785e4676f..06dbf82bc 100644 --- a/content/shared/influxdb3-visualize/grafana.md +++ b/content/shared/influxdb3-visualize/grafana.md @@ -69,13 +69,6 @@ When creating an InfluxDB data source that uses SQL to query data: - **Database**: Provide a default database name to query. - **Token**: Provide an arbitrary, non-empty string. - - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization token. - > However, if you included a `--token` option or defined the - > `INFLUXDB3_AUTH_TOKEN` environment variable when starting your - > {{< product-name >}} server, provide that token. - - **Insecure Connection**: If _not_ using HTTPS, enable this option. 3. Click **Save & test**. @@ -103,11 +96,6 @@ When creating an InfluxDB data source that uses InfluxQL to query data: - **User**: Provide an arbitrary string. _This credential is ignored when querying {{% product-name %}}, but it cannot be empty._ - **Password**: Provide an arbitrary string. - - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization - > token, but the **Password** field does require a value. 
- - **HTTP Method**: Choose one of the available HTTP request methods to use when querying data: - **POST** ({{< req text="Recommended" >}}) diff --git a/content/shared/influxdb3-visualize/superset.md b/content/shared/influxdb3-visualize/superset.md index 5513fbc22..dbe0baaa2 100644 --- a/content/shared/influxdb3-visualize/superset.md +++ b/content/shared/influxdb3-visualize/superset.md @@ -211,11 +211,8 @@ a database connection. **Query parameters** - - **`?database`**: URL-encoded InfluxDB database name - - **`?token`**: InfluxDB authorization token - - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization token. + - **`?database`**: URL-encoded [database](/influxdb3/version/admin/databases/) name + - **`?token`**: {{< product-name >}} {{% token-link %}} {{< code-callout "<(domain|port|database-name|token)>" >}} {{< code-callout "localhost|8181|example-database|example-token" >}} diff --git a/content/shared/influxdb3-visualize/tableau.md b/content/shared/influxdb3-visualize/tableau.md index 27973cdcc..d3a2a80af 100644 --- a/content/shared/influxdb3-visualize/tableau.md +++ b/content/shared/influxdb3-visualize/tableau.md @@ -67,10 +67,6 @@ the **Flight SQL JDBC driver**. - **Dialect**: PostgreSQL - **Username**: _Leave empty_ - **Password**: _Leave empty_ - - > [!Note] - > While in beta, {{< product-name >}} does not require authorization tokens. - - **Properties File**: _Leave empty_ 4. Click **Sign In**. diff --git a/content/shared/influxdb3-write-guides/_index.md b/content/shared/influxdb3-write-guides/_index.md index f95c94b7a..eb743e572 100644 --- a/content/shared/influxdb3-write-guides/_index.md +++ b/content/shared/influxdb3-write-guides/_index.md @@ -15,8 +15,9 @@ to line protocol. > > #### Choose the write endpoint for your workload > -> When creating new write workloads, use the HTTP API -> [`/api/v3/write_lp` endpoint with client libraries](/influxdb3/version/write-data/api-client-libraries/). 
+> When creating new write workloads, use the +> [InfluxDB HTTP API `/api/v3/write_lp` endpoint](/influxdb3/version/write-data/http-api/v3-write-lp/) +> and [client libraries](/influxdb3/version/write-data/client-libraries/). > > When bringing existing v1 write workloads, use the {{% product-name %}} > HTTP API [`/write` endpoint](/influxdb3/core/api/v3/#operation/PostV1Write). diff --git a/content/shared/influxdb3-write-guides/best-practices/optimize-writes.md b/content/shared/influxdb3-write-guides/best-practices/optimize-writes.md index 6abfdd5c7..2441b414e 100644 --- a/content/shared/influxdb3-write-guides/best-practices/optimize-writes.md +++ b/content/shared/influxdb3-write-guides/best-practices/optimize-writes.md @@ -162,14 +162,9 @@ Replace the following: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to write data to - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: - your {{< product-name >}} authorization token. + your {{< product-name >}} {{% token-link %}} _Store this in a secret store or environment variable to avoid exposing the raw token string._ - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization token. - > You can either omit the `Authorization` header or you can provide an - > arbitrary token string. - {{% /tab-content %}} {{< /tabs-wrapper >}} @@ -248,13 +243,9 @@ EOF - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to write data to - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: - your {{< product-name >}} authorization token. + your {{< product-name >}} {{% token-link %}} _Store this in a secret store or environment variable to avoid exposing the raw token string._ - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization token. - > For the `token` option, provide an empty or arbitrary token string. - 2. 
To test the input and processor, enter the following command: @@ -361,12 +352,9 @@ EOF - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to write data to - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: - your {{< product-name >}} authorization token. + your {{< product-name >}} {{% token-link %}} _Store this in a secret store or environment variable to avoid exposing the raw token string._ - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization token. - > For the `token` option, provide an empty or arbitrary token string. 3. To test the input and processor, enter the following command: @@ -463,12 +451,9 @@ table, tag set, and timestamp), and then merges points in each series: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to write data to - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: - your {{< product-name >}} authorization token. + your {{< product-name >}} {{% token-link %}} _Store this in a secret store or environment variable to avoid exposing the raw token string._ - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization token. - > For the `token` option, provide an empty or arbitrary token string. 3. To test the input and aggregator, enter the following command: @@ -566,12 +551,9 @@ field values, and then write the data to InfluxDB: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to write data to - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: - your {{< product-name >}} authorization token. + your {{< product-name >}} {{% token-link %}} _Store this in a secret store or environment variable to avoid exposing the raw token string._ - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization token. 
- > For the `token` option, provide an empty or arbitrary token string. 3. To test the input and processor, enter the following command: @@ -805,12 +787,9 @@ EOF - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to write data to - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: - your {{< product-name >}} authorization token. + your {{< product-name >}} {{% token-link %}} _Store this in a secret store or environment variable to avoid exposing the raw token string._ - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization token. - > For the `token` option, provide an empty or arbitrary token string. 5. To test the input and processor, enter the following command: diff --git a/content/shared/influxdb3-write-guides/api-client-libraries.md b/content/shared/influxdb3-write-guides/client-libraries.md similarity index 57% rename from content/shared/influxdb3-write-guides/api-client-libraries.md rename to content/shared/influxdb3-write-guides/client-libraries.md index 5be1f203a..e33d59122 100644 --- a/content/shared/influxdb3-write-guides/api-client-libraries.md +++ b/content/shared/influxdb3-write-guides/client-libraries.md @@ -1,193 +1,42 @@ - Use the `/api/v3/write_lp` HTTP API endpoint and InfluxDB v3 API clients to write points as line protocol data to {{% product-name %}}. - -- [Use the /api/v3/write\_lp endpoint](#use-the-apiv3write_lp-endpoint) - - [Example: write data using the /api/v3 HTTP API](#example-write-data-using-the-apiv3-http-api) - - [Write responses](#write-responses) - - [Use no\_sync for immediate write responses](#use-no_sync-for-immediate-write-responses) -- [Use API client libraries](#use-api-client-libraries) - - [Construct line protocol](#construct-line-protocol) - - [Set up your project](#set-up-your-project) - -## Use the /api/v3/write_lp endpoint - -{{% product-name %}} adds the `/api/v3/write_lp` endpoint. 
- -{{}} - -This endpoint accepts the same line protocol syntax as [previous versions](/influxdb3/version/write-data/compatibility-apis/), -and supports the following parameters: - -- `?accept_partial=`: Accept or reject partial writes (default is `true`). -- `?no_sync=`: Control when writes are acknowledged: - - `no_sync=true`: Acknowledge writes before WAL persistence completes. - - `no_sync=false`: Acknowledges writes after WAL persistence completes (default). -- `?precision=`: Specify the precision of the timestamp. The default is nanosecond precision. - -For more information about the parameters, see [Write data](/influxdb3/version/write-data/). - -InfluxData provides supported InfluxDB 3 client libraries that you can integrate with your code -to construct data as time series points, and then write them as line protocol to an {{% product-name %}} database. -For more information, see how to [use InfluxDB client libraries to write data](/influxdb3/version/write-data/client-libraries/). - -### Example: write data using the /api/v3 HTTP API - -The following examples show how to write data using `curl` and the `/api/3/write_lp` HTTP endpoint. -To show the difference between accepting and rejecting partial writes, line `2` in the example contains a string value (`"hi"`) for a float field (`temp`). - -#### Partial write of line protocol occurred - -With `accept_partial=true` (default): - -```bash -curl -v "http://{{< influxdb/host >}}/api/v3/write_lp?db=sensors&precision=auto" \ - --data-raw 'home,room=Sunroom temp=96 -home,room=Sunroom temp="hi"' -``` - -The response is the following: - -``` -< HTTP/1.1 400 Bad Request -... -{ - "error": "partial write of line protocol occurred", - "data": [ - { - "original_line": "home,room=Sunroom temp=hi", - "line_number": 2, - "error_message": "invalid column type for column 'temp', expected iox::column_type::field::float, got iox::column_type::field::string" - } - ] -} -``` - -Line `1` is written and queryable. 
-Line `2` is rejected. -The response is an HTTP error (`400`) status, and the response body contains the error message `partial write of line protocol occurred` with details about the problem line. - -#### Parsing failed for write_lp endpoint - -With `accept_partial=false`: - -```bash -curl -v "http://{{< influxdb/host >}}/api/v3/write_lp?db=sensors&precision=auto&accept_partial=false" \ - --data-raw 'home,room=Sunroom temp=96 -home,room=Sunroom temp="hi"' -``` - -The response is the following: - -``` -< HTTP/1.1 400 Bad Request -... -{ - "error": "parsing failed for write_lp endpoint", - "data": { - "original_line": "home,room=Sunroom temp=hi", - "line_number": 2, - "error_message": "invalid column type for column 'temp', expected iox::column_type::field::float, got iox::column_type::field::string" - } -} -``` - -InfluxDB rejects all points in the batch. -The response is an HTTP error (`400`) status, and the response body contains `parsing failed for write_lp endpoint` and details about the problem line. - -For more information about the ingest path and data flow, see [Data durability](/influxdb3/version/reference/internals/durability/). - -### Write responses - -By default, InfluxDB acknowledges writes after flushing the WAL file to the Object store (occurring every second). -For high write throughput, you can send multiple concurrent write requests. - -### Use no_sync for immediate write responses - -To reduce the latency of writes, use the `no_sync` write option, which acknowledges writes _before_ WAL persistence completes. -When `no_sync=true`, InfluxDB validates the data, writes the data to the WAL, and then immediately responds to the client, without waiting for persistence to the Object store. - -Using `no_sync=true` is best when prioritizing high-throughput writes over absolute durability. - -- Default behavior (`no_sync=false`): Waits for data to be written to the Object store before acknowledging the write. 
Reduces the risk of data loss, but increases the latency of the response. -- With `no_sync=true`: Reduces write latency, but increases the risk of data loss in case of a crash before WAL persistence. - -#### Immediate write using the HTTP API - -The `no_sync` parameter controls when writes are acknowledged--for example: - -```bash -curl "http://localhost:8181/api/v3/write_lp?db=sensors&precision=auto&no_sync=true" \ - --data-raw "home,room=Sunroom temp=96" -``` - -## Use API client libraries - Use InfluxDB 3 client libraries that integrate with your code to construct data -as time series points, and -then write them as line protocol to an {{% product-name %}} database. +as time series points, and then write them as line protocol to an +{{% product-name %}} database. +- [Set up your project](#set-up-your-project) + - [Initialize a project directory](#initialize-a-project-directory) + - [Install the client library](#install-the-client-library) - [Construct line protocol](#construct-line-protocol) - [Example home schema](#example-home-schema) -- [Set up your project](#set-up-your-project) -- [Construct points and write line protocol](#construct-points-and-write-line-protocol) -### Construct line protocol +## Set up your project -With a [basic understanding of line protocol](/influxdb3/version/write-data/#line-protocol), -you can construct line protocol data and write it to {{% product-name %}}. +Set up your {{< product-name >}} project and credentials +to write data using the InfluxDB 3 client library for your programming language +of choice. -All InfluxDB client libraries write data in line protocol format to InfluxDB. -Client library `write` methods let you provide data as raw line protocol or as -`Point` objects that the client library converts to line protocol. If your -program creates the data you write to InfluxDB, use the client library `Point` -interface to take advantage of type safety in your program. 
- -#### Example home schema - -Consider a use case where you collect data from sensors in your home. Each -sensor collects temperature, humidity, and carbon monoxide readings. - -To collect this data, use the following schema: - - - -- **table**: `home` - - **tags** - - `room`: Living Room or Kitchen - - **fields** - - `temp`: temperature in °C (float) - - `hum`: percent humidity (float) - - `co`: carbon monoxide in parts per million (integer) - - **timestamp**: Unix timestamp in _second_ precision - - - -The following example shows how to construct and write points that follow the -`home` schema. - -### Set up your project +1. [Install {{< product-name >}}](/influxdb3/version/install/) +2. [Set up {{< product-name >}}](/influxdb3/version/get-started/setup/) +3. Create a project directory and store your + {{< product-name >}} credentials as environment variables or in a project + configuration file, such as a `.env` ("dotenv") file. After setting up {{< product-name >}} and your project, you should have the following: - {{< product-name >}} credentials: - [Database](/influxdb3/version/admin/databases/) - - Authorization token - - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization token. - + - [Authorization token](/influxdb3/version/admin/tokens/) - {{% product-name %}} URL - A directory for your project. - - Credentials stored as environment variables or in a project configuration file--for example, a `.env` ("dotenv") file. -- Client libraries installed for writing data to {{< product-name >}}. +### Initialize a project directory -The following examples use InfluxDB 3 client libraries to show how to construct -`Point` objects that follow the [example `home` schema](#example-home-schema), -and then write the data as line protocol to an {{% product-name %}} database. +Create a project directory and initialize it for your programming language. 
+ + {{< tabs-wrapper >}} {{% tabs %}} @@ -196,86 +45,61 @@ and then write the data as line protocol to an {{% product-name %}} database. [Python](#) {{% /tabs %}} {{% tab-content %}} - -The following steps set up a Go project using the -[InfluxDB 3 Go client](https://github.com/InfluxCommunity/influxdb3-go/): - 1. Install [Go 1.13 or later](https://golang.org/doc/install). -1. Create a directory for your Go module and change to the directory--for +2. Create a directory for your Go module and change to the directory--for example: ```sh mkdir iot-starter-go && cd $_ ``` -1. Initialize a Go module--for example: +3. Initialize a Go module--for example: ```sh go mod init iot-starter ``` -1. Install [`influxdb3-go`](https://github.com/InfluxCommunity/influxdb3-go/), - which provides the InfluxDB `influxdb3` Go client library module. - - ```sh - go get github.com/InfluxCommunity/influxdb3-go/v2 - ``` - -{{% /tab-content %}} {{% tab-content %}} - - - -The following steps set up a JavaScript project using the -[InfluxDB 3 JavaScript client](https://github.com/InfluxCommunity/influxdb3-js/). +{{% /tab-content %}} +{{% tab-content %}} + 1. Install [Node.js](https://nodejs.org/en/download/). -1. Create a directory for your JavaScript project and change to the +2. Create a directory for your JavaScript project and change to the directory--for example: ```sh mkdir -p iot-starter-js && cd $_ ``` -1. Initialize a project--for example, using `npm`: +3. Initialize a project--for example, using `npm`: ```sh npm init ``` + -1. Install the `@influxdata/influxdb3-client` InfluxDB 3 JavaScript client - library. - - ```sh - npm install @influxdata/influxdb3-client - ``` - - - -{{% /tab-content %}} {{% tab-content %}} +{{% /tab-content %}} +{{% tab-content %}} - -The following steps set up a Python project using the -[InfluxDB 3 Python client](https://github.com/InfluxCommunity/influxdb3-python/): - 1. Install [Python](https://www.python.org/downloads/) -1. 
Inside of your project directory, create a directory for your Python module +2. Inside of your project directory, create a directory for your Python module and change to the module directory--for example: ```sh mkdir -p iot-starter-py && cd $_ ``` -1. **Optional, but recommended**: Use +3. **Optional, but recommended**: Use [`venv`](https://docs.python.org/3/library/venv.html) or [`conda`](https://docs.continuum.io/anaconda/install/) to activate a virtual environment for installing and executing code--for example, enter the @@ -285,29 +109,134 @@ The following steps set up a Python project using the ```bash python3 -m venv envs/iot-starter && source ./envs/iot-starter/bin/activate ``` - -1. Install - [`influxdb3-python`](https://github.com/InfluxCommunity/influxdb3-python), - which provides the InfluxDB `influxdb_client_3` Python client library module - and also installs the - [`pyarrow` package](https://arrow.apache.org/docs/python/index.html) for - working with Arrow data. - - ```sh - pip install influxdb3-python - ``` - {{% /tab-content %}} {{< /tabs-wrapper >}} -#### Construct points and write line protocol +### Install the client library + +Install the InfluxDB 3 client library for your programming language of choice. 
+
+{{< tabs-wrapper >}}
+{{% tabs %}}
+[C#](#)
+[Go](#)
+[Java](#)
+[Node.js](#)
+[Python](#)
+{{% /tabs %}}
+{{% tab-content %}}
+
+Add the [InfluxDB 3 C# client library](https://github.com/InfluxCommunity/influxdb3-csharp) to your project using the
+[`dotnet` CLI](https://docs.microsoft.com/dotnet/core/tools/dotnet) or
+by adding the package to your project file--for example:
+
+```bash
+dotnet add package InfluxDB3.Client
+```
+
+{{% /tab-content %}}
+{{% tab-content %}}
+
+Add the
+[InfluxDB 3 Go client library](https://github.com/InfluxCommunity/influxdb3-go)
+to your project using the
+[`go get` command](https://golang.org/cmd/go/#hdr-Add_dependencies_to_current_module_and_install_them)--for example:
+
+```bash
+go mod init path/to/project/dir && cd $_
+go get github.com/InfluxCommunity/influxdb3-go/v2/influxdb3
+```
+
+{{% /tab-content %}}
+{{% tab-content %}}
+
+Add the [InfluxDB 3 Java client library](https://github.com/InfluxCommunity/influxdb3-java) to your project dependencies using
+the [Maven](https://maven.apache.org/) or
+[Gradle](https://gradle.org/) build tools.
+
+For example, to add the library to a Maven project, add the following dependency
+to your `pom.xml` file:
+
+```xml
+<dependency>
+  <groupId>com.influxdb</groupId>
+  <artifactId>influxdb3-java</artifactId>
+  <version>1.1.0</version>
+</dependency>
+```
+
+To add the library to a Gradle project, add the following dependency to your `build.gradle` file:
+
+```groovy
+dependencies {
+  implementation 'com.influxdb:influxdb3-java:1.1.0'
+}
+```
+
+{{% /tab-content %}}
+{{% tab-content %}}
+
+For a Node.js project, use `@influxdata/influxdb3-client`, which provides main (CommonJS),
+module (ESM), and browser (UMD) exports.
+Add the [InfluxDB 3 JavaScript client library](https://github.com/InfluxCommunity/influxdb3-js) using your preferred package manager--for example, using [`npm`](https://www.npmjs.com/):
+
+```bash
+npm install --save @influxdata/influxdb3-client
+```
+
+{{% /tab-content %}}
+{{% tab-content %}}
+
+Install the [InfluxDB 3 Python client library](https://github.com/InfluxCommunity/influxdb3-python) using
+[`pip`](https://pypi.org/project/pip/).
+To use Pandas features, such as `to_pandas()`, provided by the Python
+client library, you must also install the
+[`pandas` package](https://pandas.pydata.org/).
+
+```bash
+pip install influxdb3-python pandas
+```
+
+{{% /tab-content %}}
+{{< /tabs-wrapper >}}
+
+## Construct line protocol
+
+With a [basic understanding of line protocol](/influxdb3/version/write-data/#line-protocol),
+you can construct line protocol data and write it to {{% product-name %}}.
+
+Use client library write methods to provide data as raw line protocol
+or as `Point` objects that the client library converts to line protocol.
+If your program creates the data you write to InfluxDB, use the `Point`
+interface to take advantage of type safety in your program. Client libraries provide one or more `Point` constructor methods. Some libraries support language-native data structures, such as Go's `struct`, for creating points.
+Examples in this guide show how to construct `Point` objects that follow the [example `home` schema](#example-home-schema),
+and then write the points as line protocol data to an {{% product-name %}} database.
+
+### Example home schema
+
+Consider a use case where you collect data from sensors in your home. Each
+sensor collects temperature, humidity, and carbon monoxide readings.
+ +To collect this data, use the following schema: + + + +- **table**: `home` + - **tags** + - `room`: Living Room or Kitchen + - **fields** + - `temp`: temperature in °C (float) + - `hum`: percent humidity (float) + - `co`: carbon monoxide in parts per million (integer) + - **timestamp**: Unix timestamp in _second_ precision + {{< tabs-wrapper >}} {{% tabs %}} [Go](#) diff --git a/content/shared/influxdb3-write-guides/http-api/_index.md b/content/shared/influxdb3-write-guides/http-api/_index.md new file mode 100644 index 000000000..6122cf589 --- /dev/null +++ b/content/shared/influxdb3-write-guides/http-api/_index.md @@ -0,0 +1,4 @@ +Use the InfluxDB HTTP API to write data to {{< product-name >}}. +There are different APIs you can use depending on your integration method. + +{{< children >}} diff --git a/content/shared/influxdb3-write-guides/compatibility-apis.md b/content/shared/influxdb3-write-guides/http-api/compatibility-apis.md similarity index 81% rename from content/shared/influxdb3-write-guides/compatibility-apis.md rename to content/shared/influxdb3-write-guides/http-api/compatibility-apis.md index eec64c67a..41dc335aa 100644 --- a/content/shared/influxdb3-write-guides/compatibility-apis.md +++ b/content/shared/influxdb3-write-guides/http-api/compatibility-apis.md @@ -15,14 +15,15 @@ to write points as line protocol data to {{% product-name %}}. ## InfluxDB v2 compatibility -The `/api/v2/write` InfluxDB v2 compatibility endpoint provides backwards compatibility with clients that can write data to InfluxDB OSS v2.x and Cloud 2 (TSM). +The `/api/v2/write` InfluxDB v2 compatibility endpoint provides backwards +compatibility with clients that can write data to InfluxDB OSS v2.x and Cloud 2 (TSM). -{{}} +{{}} ## InfluxDB v1 compatibility The `/write` InfluxDB v1 compatibility endpoint provides backwards compatibility with clients that can write data to InfluxDB v1.x. 
-{{}}
+{{}}
diff --git a/content/shared/influxdb3-write-guides/http-api/v3-write-lp.md b/content/shared/influxdb3-write-guides/http-api/v3-write-lp.md
new file mode 100644
index 000000000..ec00bb4c2
--- /dev/null
+++ b/content/shared/influxdb3-write-guides/http-api/v3-write-lp.md
@@ -0,0 +1,162 @@
+Use the `/api/v3/write_lp` endpoint to write data to {{% product-name %}}.
+
+This endpoint accepts the same [line protocol](/influxdb3/version/reference/line-protocol/)
+syntax as previous versions of InfluxDB, and supports the following:
+
+##### Query parameters
+
+- `?accept_partial=`: Accept or reject partial writes (default is `true`).
+- `?no_sync=`: Control when writes are acknowledged:
+  - `no_sync=true`: Acknowledge writes before WAL persistence completes.
+  - `no_sync=false`: Acknowledge writes after WAL persistence completes (default).
+- `?precision=`: Specify the precision of the timestamp.
+  The default is `ns` (nanosecond) precision.
+  You can also use `auto` to let InfluxDB automatically determine the timestamp
+  precision by identifying which precision resolves most closely to _now_.
+ +##### Request body + +- Line protocol + +{{}} + +_The following example uses [cURL](https://curl.se/) to send a write request using +the {{< influxdb3/home-sample-link >}}, but you can use any HTTP client._ + +{{% influxdb/custom-timestamps %}} +```bash +curl -v "http://{{< influxdb/host >}}/api/v3/write_lp?db=sensors&precision=auto" \ + --data-raw "home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1735545600 +home,room=Kitchen temp=21.0,hum=35.9,co=0i 1735545600 +home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1735549200 +home,room=Kitchen temp=23.0,hum=36.2,co=0i 1735549200 +home,room=Living\ Room temp=21.8,hum=36.0,co=0i 1735552800 +home,room=Kitchen temp=22.7,hum=36.1,co=0i 1735552800 +home,room=Living\ Room temp=22.2,hum=36.0,co=0i 1735556400 +home,room=Kitchen temp=22.4,hum=36.0,co=0i 1735556400 +home,room=Living\ Room temp=22.2,hum=35.9,co=0i 1735560000 +home,room=Kitchen temp=22.5,hum=36.0,co=0i 1735560000 +home,room=Living\ Room temp=22.4,hum=36.0,co=0i 1735563600 +home,room=Kitchen temp=22.8,hum=36.5,co=1i 1735563600 +home,room=Living\ Room temp=22.3,hum=36.1,co=0i 1735567200 +home,room=Kitchen temp=22.8,hum=36.3,co=1i 1735567200 +home,room=Living\ Room temp=22.3,hum=36.1,co=1i 1735570800 +home,room=Kitchen temp=22.7,hum=36.2,co=3i 1735570800 +home,room=Living\ Room temp=22.4,hum=36.0,co=4i 1735574400 +home,room=Kitchen temp=22.4,hum=36.0,co=7i 1735574400 +home,room=Living\ Room temp=22.6,hum=35.9,co=5i 1735578000 +home,room=Kitchen temp=22.7,hum=36.0,co=9i 1735578000 +home,room=Living\ Room temp=22.8,hum=36.2,co=9i 1735581600 +home,room=Kitchen temp=23.3,hum=36.9,co=18i 1735581600 +home,room=Living\ Room temp=22.5,hum=36.3,co=14i 1735585200 +home,room=Kitchen temp=23.1,hum=36.6,co=22i 1735585200 +home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1735588800 +home,room=Kitchen temp=22.7,hum=36.5,co=26i 1735588800" +``` +{{% /influxdb/custom-timestamps %}} + +- [Partial writes](#partial-writes) + - [Accept partial writes](#accept-partial-writes) + - [Do not 
accept partial writes](#do-not-accept-partial-writes) +- [Write responses](#write-responses) + - [Use no_sync for immediate write responses](#use-no_sync-for-immediate-write-responses) + +> [!Note] +> #### InfluxDB client libraries +> +> InfluxData provides supported InfluxDB 3 client libraries that you can +> integrate with your code to construct data as time series points, and then +> write them as line protocol to an {{% product-name %}} database. +> For more information, see how to [use InfluxDB client libraries to write data](/influxdb3/version/write-data/client-libraries/). + +## Partial writes + +The `/api/v3/write_lp` endpoint lets you accept or reject partial writes using +the `accept_partial` parameter. This parameter changes the behavior of the API +when the write request contains invalid line protocol or schema conflicts. + +For example, the following line protocol contains two points, each using a +different datatype for the `temp` field, which causes a schema conflict: + +``` +home,room=Sunroom temp=96 1735545600 +home,room=Sunroom temp="hi" 1735549200 +``` + +### Accept partial writes + +With `accept_partial=true` (default), InfluxDB: + +- Accepts and writes line `1` +- Rejects line `2` +- Returns a `400 Bad Request` status code and the following response body: + +``` +< HTTP/1.1 400 Bad Request +... +{ + "error": "partial write of line protocol occurred", + "data": [ + { + "original_line": "home,room=Sunroom temp=hi 1735549200", + "line_number": 2, + "error_message": "invalid column type for column 'temp', expected iox::column_type::field::float, got iox::column_type::field::string" + } + ] +} +``` + +### Do not accept partial writes + +With `accept_partial=false`, InfluxDB: + +- Rejects _all_ points in the batch +- Returns a `400 Bad Request` status code and the following response body: + +``` +< HTTP/1.1 400 Bad Request +... 
+{ + "error": "parsing failed for write_lp endpoint", + "data": { + "original_line": "home,room=Sunroom temp=hi 1735549200", + "line_number": 2, + "error_message": "invalid column type for column 'temp', expected iox::column_type::field::float, got iox::column_type::field::string" + } +} +``` + +_For more information about the ingest path and data flow, see +[Data durability](/influxdb3/version/reference/internals/durability/)._ + +## Write responses + +By default, {{% product-name %}} acknowledges writes after flushing the WAL file +to the Object store (occurring every second). +For high write throughput, you can send multiple concurrent write requests. + +### Use no_sync for immediate write responses + +To reduce the latency of writes, use the `no_sync` write option, which +acknowledges writes _before_ WAL persistence completes. +When `no_sync=true`, InfluxDB validates the data, writes the data to the WAL, +and then immediately responds to the client, without waiting for persistence to +the Object store. + +> [!Tip] +> Using `no_sync=true` is best when prioritizing high-throughput writes over +> absolute durability. + +- Default behavior (`no_sync=false`): Waits for data to be written to the Object + store before acknowledging the write. Reduces the risk of data loss, but + increases the latency of the response. +- With `no_sync=true`: Reduces write latency, but increases the risk of data + loss in case of a crash before WAL persistence. 
+ +The following example immediately returns a response without waiting for WAL +persistence: + +```bash +curl "http://localhost:8181/api/v3/write_lp?db=sensors&no_sync=true" \ + --data-raw "home,room=Sunroom temp=96" +``` diff --git a/content/shared/influxdb3-write-guides/influxdb3-cli.md b/content/shared/influxdb3-write-guides/influxdb3-cli.md index 6e95dc048..11aa37b35 100644 --- a/content/shared/influxdb3-write-guides/influxdb3-cli.md +++ b/content/shared/influxdb3-write-guides/influxdb3-cli.md @@ -9,8 +9,9 @@ to write line protocol data to {{< product-name >}}. > #### Use the API for batching and higher-volume writes > > The `influxdb3` CLI lets you quickly get started writing data to {{< product-name >}}. -> For batching and higher-volume write workloads, use -> [API client libraries](/influxdb3/version/write-data/api/#use-api-client-libraries) +> For batching and higher-volume write workloads, use the +> [InfluxDB HTTP API](/influxdb3/version/write-data/http-api), +> [API client libraries](/influxdb3/version/write-data/client-libraries/) > or [Telegraf](/influxdb3/version/write-data/use-telegraf/). ## Construct line protocol @@ -63,7 +64,7 @@ Provide the following: - The [database](/influxdb3/version/admin/databases/) name using the `--database` option -- Your {{< product-name >}} authorization token using the `-t`, `--token` option +- Your {{< product-name >}} {{% token-link %}} using the `-t`, `--token` option - [Line protocol](#construct-line-protocol). Provide the line protocol in one of the following ways: @@ -195,7 +196,4 @@ Replace the following: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to write to - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: - your {{< product-name >}} authorization token - - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization token. 
+ your {{< product-name >}} {{% token-link %}} diff --git a/content/shared/influxdb3-write-guides/troubleshoot.md b/content/shared/influxdb3-write-guides/troubleshoot.md index 3d9691d2d..2f7d04d10 100644 --- a/content/shared/influxdb3-write-guides/troubleshoot.md +++ b/content/shared/influxdb3-write-guides/troubleshoot.md @@ -41,7 +41,7 @@ Write requests return the following status codes: | :-------------------------------| :--------------------------------------------------------------- | :------------- | | `204 "Success"` | | If InfluxDB ingested the data | | `400 "Bad request"` | error details about rejected points, up to 100 points: `line` contains the first rejected line, `message` describes rejections | If some or all request data isn't allowed (for example, if it is malformed or falls outside of the bucket's retention period)--the response body indicates whether a partial write has occurred or if all data has been rejected | -| `401 "Unauthorized"` | | If the `Authorization` header is missing or malformed or if the [token](/influxdb3/version/admin/tokens/) doesn't have [permission](/influxdb3/version/reference/cli/influxctl/token/create/#examples) to write to the database. See [examples using credentials](/influxdb3/version/write-data/api-client-libraries/) in write requests. | +| `401 "Unauthorized"` | | If the `Authorization` header is missing or malformed or if the [token](/influxdb3/version/admin/tokens/) doesn't have permission to write to the database. See [write API examples](/influxdb3/enterprise/write-data/http-api/) using credentials. | | `404 "Not found"` | requested **resource type** (for example, "organization" or "database"), and **resource name** | If a requested resource (for example, organization or database) wasn't found | | `500 "Internal server error"` | | Default status for an error | | `503` "Service unavailable" | | If the server is temporarily unavailable to accept writes. 
The `Retry-After` header describes when to try the write again. diff --git a/content/shared/influxdb3-write-guides/use-telegraf/_index.md b/content/shared/influxdb3-write-guides/use-telegraf/_index.md index ba5b643f3..1bc78340e 100644 --- a/content/shared/influxdb3-write-guides/use-telegraf/_index.md +++ b/content/shared/influxdb3-write-guides/use-telegraf/_index.md @@ -46,13 +46,9 @@ Replace the following: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to write data to - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: - your {{< product-name >}} authorization token. + your {{< product-name >}} {{% token-link %}}. _Store this in a secret store or environment variable to avoid exposing the raw token string._ - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization token. - > For the `token` option, provide an arbitrary, non-empty token string. - _See how to [Configure Telegraf to write to {{% product-name %}}](/influxdb3/version/write-data/use-telegraf/configure/)._ ## Use Telegraf with InfluxDB diff --git a/content/shared/influxdb3-write-guides/use-telegraf/configure.md b/content/shared/influxdb3-write-guides/use-telegraf/configure.md index 1fa5fabd0..0e04adaac 100644 --- a/content/shared/influxdb3-write-guides/use-telegraf/configure.md +++ b/content/shared/influxdb3-write-guides/use-telegraf/configure.md @@ -65,13 +65,9 @@ Replace the following: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to write data to - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: - your {{< product-name >}} authorization token. + your {{< product-name >}} {{% token-link %}}. _Store this in a secret store or environment variable to avoid exposing the raw token string._ - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization token. 
- > For the `token` option, provide an arbitrary, non-empty token string. - The InfluxDB output plugin configuration contains the following options: #### urls @@ -87,10 +83,6 @@ To write to {{% product-name %}}, include your {{% product-name %}} URL: Your {{% product-name %}} authorization token. -> [!Note] -> While in beta, {{< product-name >}} does not require an authorization token. -> For the `token` option, provide an arbitrary, non-empty token string. - > [!Tip] > > ##### Store your authorization token as an environment variable diff --git a/content/shared/influxdb3-write-guides/use-telegraf/csv.md b/content/shared/influxdb3-write-guides/use-telegraf/csv.md index b3e9941fc..41967996a 100644 --- a/content/shared/influxdb3-write-guides/use-telegraf/csv.md +++ b/content/shared/influxdb3-write-guides/use-telegraf/csv.md @@ -95,13 +95,9 @@ Replace the following: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to write data to - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: - your {{< product-name >}} authorization token. + your {{< product-name >}} {{% token-link %}}. _Store this in a secret store or environment variable to avoid exposing the raw token string._ - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization token. - > For the `token` option, provide an arbitrary, non-empty token string. - > [!Tip] > > ##### Store your authorization token as an environment variable diff --git a/content/shared/influxdb3-write-guides/use-telegraf/dual-write.md b/content/shared/influxdb3-write-guides/use-telegraf/dual-write.md index f557781a0..71f3fa52d 100644 --- a/content/shared/influxdb3-write-guides/use-telegraf/dual-write.md +++ b/content/shared/influxdb3-write-guides/use-telegraf/dual-write.md @@ -5,7 +5,7 @@ to a separate instance or for migrating from other versions of InfluxDB to {{< product-name >}}. 
The following example configures Telegraf for dual writing to {{% product-name %}} and an InfluxDB v2 OSS instance. - +Specifically, it uses the following: - The [InfluxDB v2 output plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/influxdb_v2) twice--the first pointing to {{< product-name >}} and the other to an @@ -14,11 +14,6 @@ The following example configures Telegraf for dual writing to {{% product-name % Configure both tokens as environment variables and use string interpolation in your Telegraf configuration file to reference each environment variable. - > [!Note] - > While in beta, {{< product-name >}} does not require an authorization token. - > For the `token` option, provide an arbitrary, non-empty token string. - - ## Sample configuration ```toml diff --git a/content/shared/influxdb3/_index.md b/content/shared/influxdb3/_index.md index 505e32a12..f5e59e89c 100644 --- a/content/shared/influxdb3/_index.md +++ b/content/shared/influxdb3/_index.md @@ -28,7 +28,7 @@ Core's feature highlights include: - Compatibility with InfluxDB 1.x and 2.x write APIs {{% show-in "core" %}} -[Get started with Core](/influxdb3/version/get-started/) +Get started with {{% product-name %}} {{% /show-in %}} The Enterprise version adds the following features to Core: @@ -41,5 +41,8 @@ The Enterprise version adds the following features to Core: - Integrated admin UI (coming soon) {{% show-in "core" %}} -For more information, see how to [get started with Enterprise](/influxdb3/enterprise/get-started/). +For more information, see how to [get started with InfluxDB 3 Enterprise](/influxdb3/enterprise/get-started/). 
+{{% /show-in %}} +{{% show-in "enterprise" %}} +Get started with {{% product-name %}} {{% /show-in %}} \ No newline at end of file diff --git a/content/shared/influxdb3/install.md b/content/shared/influxdb3/install.md new file mode 100644 index 000000000..6523e8a47 --- /dev/null +++ b/content/shared/influxdb3/install.md @@ -0,0 +1,151 @@ + +- [System Requirements](#system-requirements) +- [Install](#install) + - [Quick install for Linux and macOS](#quick-install-for-linux-and-macos) + - [Download and install the latest build artifacts](#download-and-install-the-latest-build-artifacts) + - [Pull the Docker image](#pull-the-docker-image) + - [Verify the installation](#verify-the-installation) + +{{% show-in "enterprise" %}} +> [!Note] +> For information about setting up a multi-node {{% product-name %}} cluster, +> see [Create a multi-node cluster](/influxdb3/enterprise/get-started/multi-server/) in the Get started guide. +{{% /show-in %}} + +## System Requirements + +#### Operating system + +{{< product-name >}} runs on **Linux**, **macOS**, and **Windows**. + +#### Object storage + +A key feature of InfluxDB 3 is its use of object storage to store time series +data in Apache Parquet format. You can choose to store these files on your local +file system. Performance on your local filesystem will likely be better, but +object storage has the advantage of not running out of space and being accessible +by other systems over the network. {{< product-name >}} natively supports Amazon S3, +Azure Blob Storage, and Google Cloud Storage. +You can also use many local object storage implementations that provide an +S3-compatible API, such as [Minio](https://min.io/). + +## Install + +{{% product-name %}} runs on **Linux**, **macOS**, and **Windows**. 
+ +Choose one of the following methods to install {{% product-name %}}: + +- [Quick install for Linux and macOS](#quick-install-for-linux-and-macos) +- [Download and install the latest build artifacts](#download-and-install-the-latest-build-artifacts) +- [Pull the Docker image](#pull-the-docker-image) + +### Quick install for Linux and macOS + +To install {{% product-name %}} on **Linux** or **macOS**, download and run the quick +installer script for {{% product-name %}}--for example, using [`curl`](https://curl.se/) +to download the script: + + +```bash +curl -O https://www.influxdata.com/d/install_influxdb3.sh \ +&& sh install_influxdb3.sh {{% show-in "enterprise" %}}enterprise{{% /show-in %}} +``` + +> [!Note] +> The quick installer script is updated with each {{% product-name %}} release, +> so it always installs the latest version. + +### Download and install the latest build artifacts + +You can also download and install [{{% product-name %}} build artifacts](/influxdb3/enterprise/install/#download-influxdb-3-enterprise-binaries) directly: + +{{< expand-wrapper >}} +{{% expand "Linux binaries" %}} + +- [Linux | AMD64 (x86_64) | GNU](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_amd64.tar.gz) + • + [sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_amd64.tar.gz.sha256) +- [Linux | ARM64 (AArch64) | GNU](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_arm64.tar.gz) + • + [sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_arm64.tar.gz.sha256) + +{{% /expand %}} +{{% expand "macOS binaries" %}} + +- [macOS | Silicon (ARM64)](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_darwin_arm64.tar.gz) + • + [sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< 
latest-patch >}}_darwin_arm64.tar.gz.sha256) + +> [!Note] +> macOS Intel builds are coming soon. + +{{% /expand %}} +{{% expand "Windows binaries" %}} + +- [Windows (AMD64, x86_64) binary](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}-windows_amd64.zip) + • +[sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}-windows_amd64.zip.sha256) + +{{% /expand %}} +{{< /expand-wrapper >}} + +### Pull the Docker image + +Run the following command to pull the [`influxdb:3-{{< product-key >}}` image](https://hub.docker.com/_/influxdb/tags?tag=3-{{< product-key >}}&name=3-{{< product-key >}}), available for x86_64 (AMD64) and ARM64 architectures: + + +```bash +docker pull influxdb:3-{{< product-key >}} +``` + +Docker automatically pulls the appropriate image for your system architecture. + +{{< expand-wrapper >}} +{{% expand "Pull for a specific system architecture" %}} +To specify the system architecture, use platform-specific tags--for example: + +```bash +# For x86_64/AMD64 +docker pull \ +--platform linux/amd64 \ +influxdb:3-{{< product-key >}} +``` + +```bash +# For ARM64 +docker pull \ +--platform linux/arm64 \ +influxdb:3-{{< product-key >}} +``` +{{% /expand %}} +{{< /expand-wrapper >}} + + +### Verify the installation + +After installing {{% product-name %}}, enter the following command to verify +that it installed successfully: + +```bash +influxdb3 --version +``` + +If your system doesn't locate `influxdb3`, then `source` the configuration file (for example, .bashrc, .zshrc) for your shell--for example: + + +```zsh +source ~/.zshrc +``` + +{{% show-in "enterprise" %}} +> [!Note] +> For information about setting up a multi-node {{% product-name %}} cluster, +> see [Create a multi-node cluster](/influxdb3/enterprise/get-started/multi-server/) in the Get started guide. 
+{{% /show-in %}} + +{{% show-in "enterprise" %}} +{{< page-nav next="/influxdb3/enterprise/get-started/" nextText="Get started with InfluxDB 3 Enterprise" >}} +{{% /show-in %}} +{{% show-in "core" %}} +{{< page-nav next="/influxdb3/core/get-started/" nextText="Get started with InfluxDB 3 Core" >}} +{{% /show-in %}} \ No newline at end of file diff --git a/content/shared/sql-reference/functions/time-and-date.md b/content/shared/sql-reference/functions/time-and-date.md index a4e82d375..befe2331d 100644 --- a/content/shared/sql-reference/functions/time-and-date.md +++ b/content/shared/sql-reference/functions/time-and-date.md @@ -209,6 +209,9 @@ date_bin_gapfill(interval, expression[, origin_timestamp]) - hours - days - weeks + + +The following intervals are not currently supported: - months - years - century diff --git a/content/shared/v3-core-enterprise-release-notes/_index.md b/content/shared/v3-core-enterprise-release-notes/_index.md index ff46e3d03..9de837d59 100644 --- a/content/shared/v3-core-enterprise-release-notes/_index.md +++ b/content/shared/v3-core-enterprise-release-notes/_index.md @@ -5,6 +5,87 @@ > All updates to Core are automatically included in Enterprise. > The Enterprise sections below only list updates exclusive to Enterprise. 
+## v3.2.1 {date="2025-07-03"} + +### Core + +#### Features + +- **Enhanced database lifecycle management**: + - Allow updating the hard deletion date for already-deleted databases and tables, providing flexibility in managing data retention and compliance requirements + - Include `hard_deletion_date` column in `_internal` system tables (`databases` and `tables`) for better visibility into data lifecycle and audit trails + +#### Bug Fixes + +- **CLI improvements**: + - Added help text for the new `update` subcommand for database and table update features ([#26569](https://github.com/influxdata/influxdb/pull/26569)) + - `--object-store` and storage configuration parameters are required for the `serve` command ([#26575](https://github.com/influxdata/influxdb/pull/26575)) +- **Query processing**: Fixed V1-compatible `/query` HTTP API endpoint to correctly default to nanosecond precision (`ns`) for CSV output, ensuring backward compatibility with InfluxDB 1.x clients and preventing data precision loss ([#26577](https://github.com/influxdata/influxdb/pull/26577)) +- **Database reliability**: Fixed issue preventing hard deletion of soft-deleted databases and tables, enabling complete data removal for compliance and storage management needs ([#26574](https://github.com/influxdata/influxdb/pull/26574)) + +### Enterprise + +All Core updates are included in Enterprise. 
Additional Enterprise-specific features and fixes: + +#### Features + +- **License management improvements**: New `influxdb3 show license` command displays detailed license information including type, expiration date, and resource limits, making it easier to monitor license status and compliance + +#### Bug Fixes + +- **API stability**: Fixed HTTP API trigger specification to use the correct `"request:REQUEST_PATH"` syntax, ensuring proper request-based trigger configuration for processing engine workflows + +## v3.2.0 {date="2025-06-25"} + +**Core**: revision 1ca3168bee +**Enterprise**: revision 1ca3168bee + +### Core + +#### Features + +- **Hard delete for databases and tables**: Permanently delete databases and tables, enabling complete removal of data structures for compliance and storage management ([#26553](https://github.com/influxdata/influxdb/pull/26553)) +- **AWS credentials auto-reload**: Support dynamic reloading of ephemeral AWS credentials from files, improving security and reliability when using AWS services ([#26537](https://github.com/influxdata/influxdb/pull/26537)) +- **Database retention period support**: Add retention period support for databases via CLI commands (`create database` and `update database` commands) and HTTP APIs ([#26520](https://github.com/influxdata/influxdb/pull/26520)): + - New CLI command: `update database --retention-period` +- **Configurable lookback duration**: Users can specify lookback duration for PersistedFiles buffer, providing better control over query performance ([#26528](https://github.com/influxdata/influxdb/pull/26528)) +- **WAL replay concurrency control**: Add concurrency limits for WAL (Write-Ahead Log) replay to improve startup performance and resource management ([#26483](https://github.com/influxdata/influxdb/pull/26483)) +- **Enhanced write path**: Separate write path executor with unbounded memory for improved write performance ([#26455](https://github.com/influxdata/influxdb/pull/26455)) + +#### Bug 
Fixes + +- **WAL corruption handling**: Handle corrupt WAL files during replay without panic, improving data recovery and system resilience ([#26556](https://github.com/influxdata/influxdb/pull/26556)) +- **Database naming validation**: Disallow underscores in database names when created via API to ensure consistency ([#26507](https://github.com/influxdata/influxdb/pull/26507)) +- **Object store cleanup**: Automatic intermediate directory cleanup for file object store, preventing storage bloat ([#26480](https://github.com/influxdata/influxdb/pull/26480)) + +#### Additional Updates + +- Track generation 1 duration in catalog for better performance monitoring ([#26508](https://github.com/influxdata/influxdb/pull/26508)) +- Add retention period support to the catalog ([#26479](https://github.com/influxdata/influxdb/pull/26479)) +- Update help text for improved user experience ([#26509](https://github.com/influxdata/influxdb/pull/26509)) + +### Enterprise + +All Core updates are included in Enterprise. 
Additional Enterprise-specific features and fixes: + +#### Features + +- **License management improvements**: + - New `influxdb3 show license` command to display current license information +- **Table-level retention period support**: Add retention period support for individual tables in addition to database-level retention, providing granular data lifecycle management + - New CLI commands: `create table --retention-period` and `update table --retention-period` + - Set or clear table-specific retention policies independent of database settings +- **Compaction improvements**: + - Address compactor restart issues for better reliability + - Track compacted generation durations in catalog for monitoring + - Disable parquet cache for ingest mode to optimize memory usage + +#### Bug Fixes + +- **Query optimization**: Correctly partition query chunks into generations for improved performance +- **Data integrity**: Don't delete generation 1 files as part of compaction process +- **License handling**: Trim whitespace from license file contents after reading to prevent validation issues + ## v3.1.0 {date="2025-05-29"} **Core**: revision 482dd8aac580c04f37e8713a8fffae89ae8bc264 diff --git a/content/shared/v3-core-plugins/_index.md b/content/shared/v3-core-plugins/_index.md index 200ab5a65..c324a3bb0 100644 --- a/content/shared/v3-core-plugins/_index.md +++ b/content/shared/v3-core-plugins/_index.md @@ -22,20 +22,13 @@ Ensure you have: Once you have all the prerequisites in place, follow these steps to implement the Processing Engine for your data automation needs. -1. [Set up the Processing Engine](#set-up-the-processing-engine) -2. [Add a Processing Engine plugin](#add-a-processing-engine-plugin) - - [Use example plugins](#use-example-plugins) - - [Create a custom plugin](#create-a-custom-plugin) -3. 
[Set up a trigger](#set-up-a-trigger) - - [Understand trigger types](#understand-trigger-types) - - [Use the create trigger command](#use-the-create-trigger-command) - - [Trigger specification examples](#trigger-specification-examples) -4. [Advanced trigger configuration](#advanced-trigger-configuration) - - [Access community plugins from GitHub](#access-community-plugins-from-github) - - [Pass arguments to plugins](#pass-arguments-to-plugins) - - [Control trigger execution](#control-trigger-execution) - - [Configure error handling for a trigger](#configure-error-handling-for-a-trigger) - - [Install Python dependencies](#install-python-dependencies) +- [Set up the Processing Engine](#set-up-the-processing-engine) +- [Add a Processing Engine plugin](#add-a-processing-engine-plugin) +- [Set up a trigger](#set-up-a-trigger) +- [Advanced trigger configuration](#advanced-trigger-configuration) +{{% show-in "enterprise" %}} +- [Distributed cluster considerations](#distributed-cluster-considerations) +{{% /show-in %}} ## Set up the Processing Engine @@ -75,6 +68,10 @@ When running {{% product-name %}} in a distributed setup, follow these steps to > > Configure your plugin directory on the same system as the nodes that run the triggers and plugins. +{{% show-in "enterprise" %}} +For more information about configuring distributed environments, see the [Distributed cluster considerations](#distributed-cluster-considerations) section. +{{% /show-in %}} + ## Add a Processing Engine plugin A plugin is a Python script that defines a specific function signature for a trigger (_trigger spec_). When the specified event occurs, InfluxDB runs the plugin. 
@@ -168,11 +165,11 @@ Before you begin, make sure: Choose a plugin type based on your automation goals: -| Plugin Type | Best For | Trigger Type | -| ---------------- | ------------------------------------------- | ------------------------ | -| **Data write** | Processing data as it arrives | `table:` or `all_tables` | -| **Scheduled** | Running code at specific intervals or times | `every:` or `cron:` | -| **HTTP request** | Running code on demand via API endpoints | `path:` | +| Plugin Type | Best For | +| ---------------- | ------------------------------------------- | +| **Data write** | Processing data as it arrives | +| **Scheduled** | Running code at specific intervals or times | +| **HTTP request** | Running code on demand via API endpoints | #### Create your plugin file @@ -184,7 +181,7 @@ After writing your plugin, [create a trigger](#use-the-create-trigger-command) t #### Create a data write plugin -Use a data write plugin to process data as it's written to the database. Ideal use cases include: +Use a data write plugin to process data as it's written to the database. These plugins use [`table:` or `all_tables:`](#trigger-on-data-writes) trigger specifications. Ideal use cases include: - Data transformation and enrichment - Alerting on incoming values @@ -209,7 +206,7 @@ def process_writes(influxdb3_local, table_batches, args=None): #### Create a scheduled plugin -Scheduled plugins run at defined intervals. Use them for: +Scheduled plugins run at defined intervals using [`every:` or `cron:`](#trigger-on-a-schedule) trigger specifications. Use them for: - Periodic data aggregation - Report generation @@ -231,7 +228,7 @@ def process_scheduled_call(influxdb3_local, call_time, args=None): #### Create an HTTP request plugin -HTTP request plugins respond to API calls. Use them for: +HTTP request plugins respond to API calls using [`request:`](#trigger-on-http-requests) trigger specifications. 
Use them for: - Creating custom API endpoints - Webhooks for external integrations @@ -270,7 +267,7 @@ After writing your plugin: |------------|----------------------|-----------------| | Data write | `table:` or `all_tables` | When data is written to tables | | Scheduled | `every:` or `cron:` | At specified time intervals | -| HTTP request | `path:` | When HTTP requests are received | +| HTTP request | `request:` | When HTTP requests are received | ### Use the create trigger command @@ -302,7 +299,7 @@ In the example above, replace the following: ### Trigger specification examples -#### Data write example +#### Trigger on data writes ```bash # Trigger on writes to a specific table @@ -325,13 +322,13 @@ The trigger runs when the database flushes ingested data for the specified table The plugin receives the written data and table information. -#### Scheduled events example +#### Trigger on a schedule ```bash # Run every 5 minutes influxdb3 create trigger \ --trigger-spec "every:5m" \ - --plugin-filename "hourly_check.py" \ + --plugin-filename "periodic_check.py" \ --database my_database \ regular_check @@ -346,7 +343,7 @@ influxdb3 create trigger \ The plugin receives the scheduled call time. -#### HTTP requests example +#### Trigger on HTTP requests ```bash # Create an endpoint at /api/v3/engine/webhook @@ -357,7 +354,9 @@ influxdb3 create trigger \ webhook_processor ``` -Access your endpoint available at `/api/v3/engine/`. +Access your endpoint at `/api/v3/engine/{REQUEST_PATH}` (in this example, `/api/v3/engine/webhook`). +The trigger is enabled by default and runs when an HTTP request is received at the specified path. + To run the plugin, send a `GET` or `POST` request to the endpoint--for example: ```bash @@ -366,6 +365,12 @@ curl http://{{% influxdb/host %}}/api/v3/engine/webhook The plugin receives the HTTP request object with methods, headers, and body. 
+To view triggers associated with a database, use the `influxdb3 show summary` command: + +```bash +influxdb3 show summary --database my_database --token AUTH_TOKEN +``` + ### Pass arguments to plugins Use trigger arguments to pass configuration from a trigger to the plugin it runs. You can use this for: @@ -523,27 +528,90 @@ influxdb3 create trigger \ ### Install Python dependencies -If your plugin needs additional Python packages, use the `influxdb3 install` command: +Use the `influxdb3 install package` command to add third-party libraries (like `pandas`, `requests`, or `influxdb3-python`) to your plugin environment. +This installs packages into the Processing Engine's embedded Python environment to ensure compatibility with your InfluxDB instance. + +{{% code-placeholders "CONTAINER_NAME|PACKAGE_NAME" %}} + +{{< code-tabs-wrapper >}} + +{{% code-tabs %}} +[CLI](#) +[Docker](#) +{{% /code-tabs %}} + +{{% code-tab-content %}} ```bash -# Install a package directly +# Use the CLI to install a Python package influxdb3 install package pandas + ``` +{{% /code-tab-content %}} + +{{% code-tab-content %}} + ```bash -# With Docker +# Use the CLI to install a Python package in a Docker container docker exec -it CONTAINER_NAME influxdb3 install package pandas ``` -This creates a Python virtual environment in your plugins directory with the specified packages installed. +{{% /code-tab-content %}} + +{{< /code-tabs-wrapper >}} + +These examples install the specified Python package (for example, `pandas`) into the Processing Engine's embedded virtual environment. + +- Use the CLI command when running InfluxDB directly on your system. +- Use the Docker variant if you're running InfluxDB in a containerized environment. + +> [!Important] +> #### Use bundled Python for plugins +> When you start the server with the `--plugin-dir` option, InfluxDB 3 creates a Python virtual environment (`/venv`) for your plugins. 
+> If you need to create a custom virtual environment, use the Python interpreter bundled with InfluxDB 3. Don't use the system Python. +> Creating a virtual environment with the system Python (for example, using `python -m venv`) can lead to runtime errors and plugin failures. +> +>For more information, see the [processing engine README](https://github.com/influxdata/influxdb/blob/main/README_processing_engine.md#official-builds). + +{{% /code-placeholders %}} + +InfluxDB creates a Python virtual environment in your plugins directory with the specified packages installed. {{% show-in "enterprise" %}} -### Connect Grafana to your InfluxDB instance +## Distributed cluster considerations -When configuring Grafana to connect to an InfluxDB 3 Enterprise instance: +When you deploy {{% product-name %}} in a multi-node environment, configure each node based on its role and the plugins it runs. -- **URL**: Use a querier URL or any node that serves queries +### Match plugin types to the correct node + +Each plugin must run on a node that supports its trigger type: + +| Plugin type | Trigger spec | Runs on | +|--------------------|--------------------------|-----------------------------| +| Data write | `table:` or `all_tables` | Ingester nodes | +| Scheduled | `every:` or `cron:` | Any node with scheduler | +| HTTP request | `request:` | Nodes that serve API traffic| + +For example: +- Run write-ahead log (WAL) plugins on ingester nodes. +- Run scheduled plugins on any node configured to execute them. +- Run HTTP-triggered plugins on querier nodes or any node that handles HTTP endpoints. + +Place all plugin files in the `--plugin-dir` directory configured for each node. + +> [!Note] +> Triggers fail if the plugin file isn’t available on the node where it runs. + +### Route third-party clients to querier nodes + +External tools—such as Grafana, custom dashboards, or REST clients—must connect to querier nodes in your InfluxDB Enterprise deployment. 
+ +#### Examples + +- **Grafana**: When adding InfluxDB 3 as a Grafana data source, use a querier node URL, such as: +`https://querier.example.com:8086` +- **REST clients**: Applications using `POST /api/v3/query/sql` or similar endpoints must target a querier node. -Example URL format: `https://querier.your-influxdb.com:8086` {{% /show-in %}} diff --git a/content/telegraf/v1/_index.md b/content/telegraf/v1/_index.md index e433e90e4..2ef2e0fb2 100644 --- a/content/telegraf/v1/_index.md +++ b/content/telegraf/v1/_index.md @@ -5,7 +5,7 @@ description: > time series platform, used to collect and report metrics. Telegraf supports four categories of plugins -- input, output, aggregator, and processor. menu: telegraf_v1: - name: Telegraf v1.34 + name: Telegraf v1.35 weight: 1 related: - /resources/videos/intro-to-telegraf/ diff --git a/content/telegraf/v1/release-notes.md b/content/telegraf/v1/release-notes.md index a5cd55648..05d994186 100644 --- a/content/telegraf/v1/release-notes.md +++ b/content/telegraf/v1/release-notes.md @@ -11,6 +11,369 @@ menu: weight: 60 --- +## v1.35.0 {date="2025-06-16"} + +### Deprecation Removals + +This release removes the following deprecated plugin aliases: + +- `inputs.cisco_telemetry_gnmi` in [#17101](https://github.com/influxdata/telegraf/pull/17101) +- `inputs.http_listener` in [#17102](https://github.com/influxdata/telegraf/pull/17102) +- `inputs.KNXListener` in [#17168](https://github.com/influxdata/telegraf/pull/17168) +- `inputs.logparser` in [#17170](https://github.com/influxdata/telegraf/pull/17170) + +And removes the following deprecated plugin options: + +- `ssl_ca`, `ssl_cert` and `ssl_key` of common TLS settings in [#17119](https://github.com/influxdata/telegraf/pull/17119) +- `url` of `inputs.amqp_consumer` in [#17149](https://github.com/influxdata/telegraf/pull/17149) +- `namespace` of `inputs.cloudwatch` in [#17123](https://github.com/influxdata/telegraf/pull/17123) +- `datacentre` of `inputs.consul` in 
[#17150](https://github.com/influxdata/telegraf/pull/17150) +- `container_names`, `perdevice` and `total` of `inputs.docker` in [#17148](https://github.com/influxdata/telegraf/pull/17148) +- `http_timeout` of `inputs.elasticsearch` in [#17124](https://github.com/influxdata/telegraf/pull/17124) +- `directory` of `inputs.filecount` in [#17152](https://github.com/influxdata/telegraf/pull/17152) +- `guess_path_tag` and `enable_tls` of `inputs.gnmi` in [#17151](https://github.com/influxdata/telegraf/pull/17151) +- `bearer_token` of `inputs.http` in [#17153](https://github.com/influxdata/telegraf/pull/17153) +- `path` and `port` of `inputs.http_listener_v2` in [#17158](https://github.com/influxdata/telegraf/pull/17158) +- `address` of `inputs.http_response` in [#17157](https://github.com/influxdata/telegraf/pull/17157) +- `object_type` of `inputs.icinga2` in [#17163](https://github.com/influxdata/telegraf/pull/17163) +- `max_line_size` of `inputs.influxdb_listener` in [#17162](https://github.com/influxdata/telegraf/pull/17162) +- `enable_file_download` of `inputs.internet_speed` in [#17165](https://github.com/influxdata/telegraf/pull/17165) +- `bearer_token_string` of `inputs.kube_inventory` in [#17110](https://github.com/influxdata/telegraf/pull/17110) +- `bearer_token_string` of `inputs.kubernetes` in [#17109](https://github.com/influxdata/telegraf/pull/17109) +- `server` of `inputs.nsq_consumer` in [#17166](https://github.com/influxdata/telegraf/pull/17166) +- `dns_lookup` of `inputs.ntpq` in [#17159](https://github.com/influxdata/telegraf/pull/17159) +- `ssl` of `inputs.openldap` in [#17103](https://github.com/influxdata/telegraf/pull/17103) +- `name` and `queues` of `inputs.rabbitmq` in [#17105](https://github.com/influxdata/telegraf/pull/17105) +- `path` of `inputs.smart` in [#17113](https://github.com/influxdata/telegraf/pull/17113) +- `azuredb` and `query_version` of `inputs.sqlserver` in [#17112](https://github.com/influxdata/telegraf/pull/17112) +- 
`parse_data_dog_tags` and `udp_packet_size` of `inputs.statsd` in [#17171](https://github.com/influxdata/telegraf/pull/17171) +- `force_discover_on_init` of `inputs.vsphere` in [#17169](https://github.com/influxdata/telegraf/pull/17169) +- `database`, `precision`, `retention_policy` and `url` of `outputs.amqp` in [#16950](https://github.com/influxdata/telegraf/pull/16950) +- `precision` of `outputs.influxdb` in [#17160](https://github.com/influxdata/telegraf/pull/17160) +- `partitionkey` and `use_random_partitionkey` of `outputs.kinesis` in [#17167](https://github.com/influxdata/telegraf/pull/17167) +- `source_tag` of `outputs.librato` in [#17174](https://github.com/influxdata/telegraf/pull/17174) +- `batch` and `topic_prefix` of `outputs.mqtt` in [#17176](https://github.com/influxdata/telegraf/pull/17176) +- `trace` of `outputs.remotefile` in [#17173](https://github.com/influxdata/telegraf/pull/17173) +- `host`, `port` and `string_to_number` of `outputs.wavefront` in [#17172](https://github.com/influxdata/telegraf/pull/17172) + +If you're using deprecated Telegraf plugins or options, migrate your configuration to use the available replacements. +The `telegraf config migrate` command might be able to help with the migration. 
+ +### New Plugins + +- [#16390](https://github.com/influxdata/telegraf/pull/16390) `inputs.fritzbox` Add plugin +- [#16780](https://github.com/influxdata/telegraf/pull/16780) `inputs.mavlink` Add plugin +- [#16509](https://github.com/influxdata/telegraf/pull/16509) `inputs.whois` Add plugin +- [#16211](https://github.com/influxdata/telegraf/pull/16211) `outputs.inlong` Add plugin +- [#16827](https://github.com/influxdata/telegraf/pull/16827) `outputs.microsoft_fabric` Add plugin +- [#16629](https://github.com/influxdata/telegraf/pull/16629) `processors.cumulative_sum` Add plugin + +### Features + +- [#17048](https://github.com/influxdata/telegraf/pull/17048) `agent` Add debounce for watch events +- [#16524](https://github.com/influxdata/telegraf/pull/16524) `common.kafka` Add AWS-MSK-IAM SASL authentication +- [#16867](https://github.com/influxdata/telegraf/pull/16867) `common.ratelimiter` Implement means to reserve memory for concurrent use +- [#16148](https://github.com/influxdata/telegraf/pull/16148) `common.shim` Add batch to shim +- [#17121](https://github.com/influxdata/telegraf/pull/17121) `inputs.amqp_consumer` Allow string values in queue arguments +- [#17051](https://github.com/influxdata/telegraf/pull/17051) `inputs.opcua` Allow forcing reconnection on every gather cycle +- [#16532](https://github.com/influxdata/telegraf/pull/16532) `inputs.opcua_listener` Allow to subscribe to OPCUA events +- [#16882](https://github.com/influxdata/telegraf/pull/16882) `inputs.prometheus` Add HTTP service discovery support +- [#16999](https://github.com/influxdata/telegraf/pull/16999) `inputs.s7comm` Add support for LREAL and LINT data types +- [#16452](https://github.com/influxdata/telegraf/pull/16452) `inputs.unbound` Collect histogram statistics +- [#16700](https://github.com/influxdata/telegraf/pull/16700) `inputs.whois` Support IDN domains +- [#17119](https://github.com/influxdata/telegraf/pull/17119) `migrations` Add migration for common.tls ssl options +- 
[#17101](https://github.com/influxdata/telegraf/pull/17101) `migrations` Add migration for inputs.cisco_telemetry_gnmi +- [#17123](https://github.com/influxdata/telegraf/pull/17123) `migrations` Add migration for inputs.cloudwatch +- [#17148](https://github.com/influxdata/telegraf/pull/17148) `migrations` Add migration for inputs.docker +- [#17124](https://github.com/influxdata/telegraf/pull/17124) `migrations` Add migration for inputs.elasticsearch +- [#17102](https://github.com/influxdata/telegraf/pull/17102) `migrations` Add migration for inputs.http_listener +- [#17162](https://github.com/influxdata/telegraf/pull/17162) `migrations` Add migration for inputs.influxdb_listener +- [#17110](https://github.com/influxdata/telegraf/pull/17110) `migrations` Add migration for inputs.kube_inventory +- [#17109](https://github.com/influxdata/telegraf/pull/17109) `migrations` Add migration for inputs.kubernetes +- [#17103](https://github.com/influxdata/telegraf/pull/17103) `migrations` Add migration for inputs.openldap +- [#17105](https://github.com/influxdata/telegraf/pull/17105) `migrations` Add migration for inputs.rabbitmq +- [#17113](https://github.com/influxdata/telegraf/pull/17113) `migrations` Add migration for inputs.smart +- [#17112](https://github.com/influxdata/telegraf/pull/17112) `migrations` Add migration for inputs.sqlserver +- [#16950](https://github.com/influxdata/telegraf/pull/16950) `migrations` Add migration for outputs.amqp +- [#17160](https://github.com/influxdata/telegraf/pull/17160) `migrations` Add migration for outputs.influxdb +- [#17149](https://github.com/influxdata/telegraf/pull/17149) `migrations` Add migration for inputs.amqp_consumer +- [#17150](https://github.com/influxdata/telegraf/pull/17150) `migrations` Add migration for inputs.consul +- [#17152](https://github.com/influxdata/telegraf/pull/17152) `migrations` Add migration for inputs.filecount +- [#17151](https://github.com/influxdata/telegraf/pull/17151) `migrations` Add migration for 
inputs.gnmi +- [#17153](https://github.com/influxdata/telegraf/pull/17153) `migrations` Add migration for inputs.http +- [#17158](https://github.com/influxdata/telegraf/pull/17158) `migrations` Add migration for inputs.http_listener_v2 +- [#17157](https://github.com/influxdata/telegraf/pull/17157) `migrations` Add migration for inputs.http_response +- [#17163](https://github.com/influxdata/telegraf/pull/17163) `migrations` Add migration for inputs.icinga2 +- [#17165](https://github.com/influxdata/telegraf/pull/17165) `migrations` Add migration for inputs.internet_speed +- [#17166](https://github.com/influxdata/telegraf/pull/17166) `migrations` Add migration for inputs.nsq_consumer +- [#17159](https://github.com/influxdata/telegraf/pull/17159) `migrations` Add migration for inputs.ntpq +- [#17171](https://github.com/influxdata/telegraf/pull/17171) `migrations` Add migration for inputs.statsd +- [#17169](https://github.com/influxdata/telegraf/pull/17169) `migrations` Add migration for inputs.vsphere +- [#17167](https://github.com/influxdata/telegraf/pull/17167) `migrations` Add migration for outputs.kinesis +- [#17174](https://github.com/influxdata/telegraf/pull/17174) `migrations` Add migration for outputs.librato +- [#17176](https://github.com/influxdata/telegraf/pull/17176) `migrations` Add migration for outputs.mqtt +- [#17173](https://github.com/influxdata/telegraf/pull/17173) `migrations` Add migration for outputs.remotefile +- [#17172](https://github.com/influxdata/telegraf/pull/17172) `migrations` Add migration for outputs.wavefront +- [#17168](https://github.com/influxdata/telegraf/pull/17168) `migrations` Add migration for inputs.KNXListener +- [#17170](https://github.com/influxdata/telegraf/pull/17170) `migrations` Add migration for inputs.logparser +- [#16646](https://github.com/influxdata/telegraf/pull/16646) `outputs.health` Add max time between metrics check +- [#16597](https://github.com/influxdata/telegraf/pull/16597) `outputs.http` Include body 
sample in non-retryable error logs +- [#16741](https://github.com/influxdata/telegraf/pull/16741) `outputs.influxdb_v2` Implement concurrent writes +- [#16746](https://github.com/influxdata/telegraf/pull/16746) `outputs.influxdb_v2` Support secrets in http_headers values +- [#16582](https://github.com/influxdata/telegraf/pull/16582) `outputs.nats` Allow asynchronous publishing for Jetstream +- [#16544](https://github.com/influxdata/telegraf/pull/16544) `outputs.sql` Add option to automate table schema updates +- [#16678](https://github.com/influxdata/telegraf/pull/16678) `outputs.sql` Support secret for dsn +- [#16583](https://github.com/influxdata/telegraf/pull/16583) `outputs.stackdriver` Ensure quota is charged to configured project +- [#16717](https://github.com/influxdata/telegraf/pull/16717) `processors.defaults` Add support for specifying default tags +- [#16701](https://github.com/influxdata/telegraf/pull/16701) `processors.enum` Add multiple tag mapping +- [#16030](https://github.com/influxdata/telegraf/pull/16030) `processors.enum` Allow mapping to be applied to multiple fields +- [#16494](https://github.com/influxdata/telegraf/pull/16494) `serializer.prometheusremotewrite` Allow sending native histograms + +### Bugfixes + +- [#17044](https://github.com/influxdata/telegraf/pull/17044) `inputs.opcua` Fix integration test +- [#16986](https://github.com/influxdata/telegraf/pull/16986) `inputs.procstat` Resolve remote usernames on Posix systems +- [#16699](https://github.com/influxdata/telegraf/pull/16699) `inputs.win_wmi` Free resources to avoid leaks +- [#17118](https://github.com/influxdata/telegraf/pull/17118) `migrations` Update table content for general plugin migrations + +### Dependency Updates + +- [#17089](https://github.com/influxdata/telegraf/pull/17089) `deps` Bump cloud.google.com/go/bigquery from 1.68.0 to 1.69.0 +- [#17026](https://github.com/influxdata/telegraf/pull/17026) `deps` Bump cloud.google.com/go/storage from 1.53.0 to 1.54.0 +- 
[#17095](https://github.com/influxdata/telegraf/pull/17095) `deps` Bump cloud.google.com/go/storage from 1.54.0 to 1.55.0 +- [#17034](https://github.com/influxdata/telegraf/pull/17034) `deps` Bump github.com/Azure/azure-sdk-for-go/sdk/azidentity from 1.9.0 to 1.10.0 +- [#17065](https://github.com/influxdata/telegraf/pull/17065) `deps` Bump github.com/ClickHouse/clickhouse-go/v2 from 2.34.0 to 2.35.0 +- [#17145](https://github.com/influxdata/telegraf/pull/17145) `deps` Bump github.com/ClickHouse/clickhouse-go/v2 from 2.35.0 to 2.36.0 +- [#17062](https://github.com/influxdata/telegraf/pull/17062) `deps` Bump github.com/IBM/nzgo/v12 from 12.0.9 to 12.0.10 +- [#17083](https://github.com/influxdata/telegraf/pull/17083) `deps` Bump github.com/IBM/sarama from 1.45.1 to 1.45.2 +- [#17040](https://github.com/influxdata/telegraf/pull/17040) `deps` Bump github.com/apache/inlong/inlong-sdk/dataproxy-sdk-twins/dataproxy-sdk-golang from 1.0.0 to 1.0.1 +- [#17060](https://github.com/influxdata/telegraf/pull/17060) `deps` Bump github.com/apache/inlong/inlong-sdk/dataproxy-sdk-twins/dataproxy-sdk-golang from 1.0.1 to 1.0.2 +- [#17127](https://github.com/influxdata/telegraf/pull/17127) `deps` Bump github.com/apache/inlong/inlong-sdk/dataproxy-sdk-twins/dataproxy-sdk-golang from 1.0.2 to 1.0.3 +- [#17061](https://github.com/influxdata/telegraf/pull/17061) `deps` Bump github.com/apache/thrift from 0.21.0 to 0.22.0 +- [#16954](https://github.com/influxdata/telegraf/pull/16954) `deps` Bump github.com/aws/aws-msk-iam-sasl-signer-go from 1.0.1 to 1.0.3 +- [#17041](https://github.com/influxdata/telegraf/pull/17041) `deps` Bump github.com/aws/aws-msk-iam-sasl-signer-go from 1.0.3 to 1.0.4 +- [#17128](https://github.com/influxdata/telegraf/pull/17128) `deps` Bump github.com/aws/aws-sdk-go-v2/config from 1.29.14 to 1.29.15 +- [#17129](https://github.com/influxdata/telegraf/pull/17129) `deps` Bump github.com/aws/aws-sdk-go-v2/credentials from 1.17.67 to 1.17.68 +- 
[#17057](https://github.com/influxdata/telegraf/pull/17057) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.44.3 to 1.45.0 +- [#17132](https://github.com/influxdata/telegraf/pull/17132) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.45.0 to 1.45.1 +- [#17029](https://github.com/influxdata/telegraf/pull/17029) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs from 1.49.0 to 1.50.0 +- [#17131](https://github.com/influxdata/telegraf/pull/17131) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs from 1.50.0 to 1.50.1 +- [#17143](https://github.com/influxdata/telegraf/pull/17143) `deps` Bump github.com/aws/aws-sdk-go-v2/service/dynamodb from 1.43.1 to 1.43.2 +- [#17037](https://github.com/influxdata/telegraf/pull/17037) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.218.0 to 1.219.0 +- [#17067](https://github.com/influxdata/telegraf/pull/17067) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.220.0 to 1.222.0 +- [#17093](https://github.com/influxdata/telegraf/pull/17093) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.222.0 to 1.224.0 +- [#17136](https://github.com/influxdata/telegraf/pull/17136) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.224.0 to 1.225.0 +- [#17139](https://github.com/influxdata/telegraf/pull/17139) `deps` Bump github.com/aws/aws-sdk-go-v2/service/kinesis from 1.35.0 to 1.35.1 +- [#16996](https://github.com/influxdata/telegraf/pull/16996) `deps` Bump github.com/bluenviron/gomavlib/v3 from 3.1.0 to 3.2.1 +- [#16987](https://github.com/influxdata/telegraf/pull/16987) `deps` Bump github.com/creack/goselect from 0.1.2 to 0.1.3 +- [#17097](https://github.com/influxdata/telegraf/pull/17097) `deps` Bump github.com/docker/docker from 28.1.1+incompatible to 28.2.2+incompatible +- [#17133](https://github.com/influxdata/telegraf/pull/17133) `deps` Bump github.com/gosnmp/gosnmp from 1.40.0 to 1.41.0 +- 
[#17126](https://github.com/influxdata/telegraf/pull/17126) `deps` Bump github.com/linkedin/goavro/v2 from 2.13.1 to 2.14.0 +- [#17087](https://github.com/influxdata/telegraf/pull/17087) `deps` Bump github.com/lxc/incus/v6 from 6.12.0 to 6.13.0 +- [#17085](https://github.com/influxdata/telegraf/pull/17085) `deps` Bump github.com/microsoft/go-mssqldb from 1.8.1 to 1.8.2 +- [#17064](https://github.com/influxdata/telegraf/pull/17064) `deps` Bump github.com/nats-io/nats-server/v2 from 2.11.3 to 2.11.4 +- [#17140](https://github.com/influxdata/telegraf/pull/17140) `deps` Bump github.com/nats-io/nats.go from 1.42.0 to 1.43.0 +- [#17134](https://github.com/influxdata/telegraf/pull/17134) `deps` Bump github.com/netsampler/goflow2/v2 from 2.2.2 to 2.2.3 +- [#17028](https://github.com/influxdata/telegraf/pull/17028) `deps` Bump github.com/prometheus/common from 0.63.0 to 0.64.0 +- [#17066](https://github.com/influxdata/telegraf/pull/17066) `deps` Bump github.com/rclone/rclone from 1.69.2 to 1.69.3 +- [#17096](https://github.com/influxdata/telegraf/pull/17096) `deps` Bump github.com/redis/go-redis/v9 from 9.8.0 to 9.9.0 +- [#17088](https://github.com/influxdata/telegraf/pull/17088) `deps` Bump github.com/shirou/gopsutil/v4 from 4.25.4 to 4.25.5 +- [#17135](https://github.com/influxdata/telegraf/pull/17135) `deps` Bump github.com/sijms/go-ora/v2 from 2.8.24 to 2.9.0 +- [#17094](https://github.com/influxdata/telegraf/pull/17094) `deps` Bump github.com/snowflakedb/gosnowflake from 1.14.0 to 1.14.1 +- [#17035](https://github.com/influxdata/telegraf/pull/17035) `deps` Bump github.com/tinylib/msgp from 1.2.5 to 1.3.0 +- [#17054](https://github.com/influxdata/telegraf/pull/17054) `deps` Bump github.com/vmware/govmomi from 0.50.0 to 0.51.0 +- [#17039](https://github.com/influxdata/telegraf/pull/17039) `deps` Bump github.com/yuin/goldmark from 1.7.11 to 1.7.12 +- [#17130](https://github.com/influxdata/telegraf/pull/17130) `deps` Bump go.mongodb.org/mongo-driver from 1.17.3 to 1.17.4 
+- [#17056](https://github.com/influxdata/telegraf/pull/17056) `deps` Bump go.opentelemetry.io/collector/pdata from 1.31.0 to 1.33.0 +- [#17058](https://github.com/influxdata/telegraf/pull/17058) `deps` Bump go.step.sm/crypto from 0.63.0 to 0.64.0 +- [#17141](https://github.com/influxdata/telegraf/pull/17141) `deps` Bump golang.org/x/crypto from 0.38.0 to 0.39.0 +- [#17144](https://github.com/influxdata/telegraf/pull/17144) `deps` Bump golang.org/x/mod from 0.24.0 to 0.25.0 +- [#17033](https://github.com/influxdata/telegraf/pull/17033) `deps` Bump google.golang.org/api from 0.232.0 to 0.233.0 +- [#17055](https://github.com/influxdata/telegraf/pull/17055) `deps` Bump google.golang.org/api from 0.233.0 to 0.234.0 +- [#17086](https://github.com/influxdata/telegraf/pull/17086) `deps` Bump google.golang.org/api from 0.234.0 to 0.235.0 +- [#17036](https://github.com/influxdata/telegraf/pull/17036) `deps` Bump google.golang.org/grpc from 1.72.0 to 1.72.1 +- [#17059](https://github.com/influxdata/telegraf/pull/17059) `deps` Bump google.golang.org/grpc from 1.72.1 to 1.72.2 +- [#17137](https://github.com/influxdata/telegraf/pull/17137) `deps` Bump google.golang.org/grpc from 1.72.2 to 1.73.0 +- [#17031](https://github.com/influxdata/telegraf/pull/17031) `deps` Bump k8s.io/api from 0.33.0 to 0.33.1 +- [#17038](https://github.com/influxdata/telegraf/pull/17038) `deps` Bump k8s.io/apimachinery from 0.33.0 to 0.33.1 +- [#17030](https://github.com/influxdata/telegraf/pull/17030) `deps` Bump k8s.io/client-go from 0.33.0 to 0.33.1 +- [#17025](https://github.com/influxdata/telegraf/pull/17025) `deps` Bump super-linter/super-linter from 7.3.0 to 7.4.0 + +## v1.34.4 {date="2025-05-19"} + +### Bugfixes + +- [#17009](https://github.com/influxdata/telegraf/pull/17009) `inputs.cloudwatch` Restore filtering to match all dimensions +- [#16978](https://github.com/influxdata/telegraf/pull/16978) `inputs.nfsclient` Handle errors during mountpoint filtering +- 
[#17021](https://github.com/influxdata/telegraf/pull/17021) `inputs.opcua` Fix type mismatch in unit test +- [#16854](https://github.com/influxdata/telegraf/pull/16854) `inputs.opcua` Handle session invalidation between gather cycles +- [#16879](https://github.com/influxdata/telegraf/pull/16879) `inputs.tail` Prevent leaking file descriptors +- [#16815](https://github.com/influxdata/telegraf/pull/16815) `inputs.win_eventlog` Handle large events to avoid they get dropped silently +- [#16878](https://github.com/influxdata/telegraf/pull/16878) `parsers.json_v2` Handle measurements with multiple objects correctly + +### Dependency Updates + +- [#16991](https://github.com/influxdata/telegraf/pull/16991) `deps` Bump cloud.google.com/go/bigquery from 1.67.0 to 1.68.0 +- [#16963](https://github.com/influxdata/telegraf/pull/16963) `deps` Bump cloud.google.com/go/storage from 1.52.0 to 1.53.0 +- [#16955](https://github.com/influxdata/telegraf/pull/16955) `deps` Bump github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue from 1.0.0 to 1.0.1 +- [#16989](https://github.com/influxdata/telegraf/pull/16989) `deps` Bump github.com/SAP/go-hdb from 1.13.5 to 1.13.6 +- [#16998](https://github.com/influxdata/telegraf/pull/16998) `deps` Bump github.com/apache/arrow-go/v18 from 18.2.0 to 18.3.0 +- [#16952](https://github.com/influxdata/telegraf/pull/16952) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs from 1.47.3 to 1.48.0 +- [#16995](https://github.com/influxdata/telegraf/pull/16995) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs from 1.48.0 to 1.49.0 +- [#16974](https://github.com/influxdata/telegraf/pull/16974) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.212.0 to 1.214.0 +- [#16993](https://github.com/influxdata/telegraf/pull/16993) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.215.0 to 1.218.0 +- [#16968](https://github.com/influxdata/telegraf/pull/16968) `deps` Bump github.com/aws/aws-sdk-go-v2/service/kinesis from 
1.33.3 to 1.35.0 +- [#16988](https://github.com/influxdata/telegraf/pull/16988) `deps` Bump github.com/aws/aws-sdk-go-v2/service/timestreamwrite from 1.30.2 to 1.31.0 +- [#17013](https://github.com/influxdata/telegraf/pull/17013) `deps` Bump github.com/ebitengine/purego from 0.8.2 to 0.8.3 +- [#16972](https://github.com/influxdata/telegraf/pull/16972) `deps` Bump github.com/hashicorp/consul/api from 1.32.0 to 1.32.1 +- [#16992](https://github.com/influxdata/telegraf/pull/16992) `deps` Bump github.com/microsoft/go-mssqldb from 1.8.0 to 1.8.1 +- [#16990](https://github.com/influxdata/telegraf/pull/16990) `deps` Bump github.com/miekg/dns from 1.1.65 to 1.1.66 +- [#16975](https://github.com/influxdata/telegraf/pull/16975) `deps` Bump github.com/nats-io/nats-server/v2 from 2.11.2 to 2.11.3 +- [#16967](https://github.com/influxdata/telegraf/pull/16967) `deps` Bump github.com/nats-io/nats.go from 1.41.2 to 1.42.0 +- [#16964](https://github.com/influxdata/telegraf/pull/16964) `deps` Bump github.com/rclone/rclone from 1.69.1 to 1.69.2 +- [#16973](https://github.com/influxdata/telegraf/pull/16973) `deps` Bump github.com/redis/go-redis/v9 from 9.7.3 to 9.8.0 +- [#16962](https://github.com/influxdata/telegraf/pull/16962) `deps` Bump github.com/shirou/gopsutil/v4 from 4.25.3 to 4.25.4 +- [#16969](https://github.com/influxdata/telegraf/pull/16969) `deps` Bump github.com/snowflakedb/gosnowflake from 1.13.3 to 1.14.0 +- [#16994](https://github.com/influxdata/telegraf/pull/16994) `deps` Bump github.com/vishvananda/netlink from 1.3.1-0.20250221194427-0af32151e72b to 1.3.1 +- [#16958](https://github.com/influxdata/telegraf/pull/16958) `deps` Bump go.step.sm/crypto from 0.62.0 to 0.63.0 +- [#16960](https://github.com/influxdata/telegraf/pull/16960) `deps` Bump golang.org/x/crypto from 0.37.0 to 0.38.0 +- [#16966](https://github.com/influxdata/telegraf/pull/16966) `deps` Bump golang.org/x/net from 0.39.0 to 0.40.0 +- [#16957](https://github.com/influxdata/telegraf/pull/16957) `deps` 
Bump google.golang.org/api from 0.230.0 to 0.231.0 +- [#16853](https://github.com/influxdata/telegraf/pull/16853) `deps` Switch to maintained azure testcontainer module + +## v1.34.3 {date="2025-05-05"} + +### Bugfixes + +- [#16697](https://github.com/influxdata/telegraf/pull/16697) `agent` Correctly truncate the disk buffer +- [#16868](https://github.com/influxdata/telegraf/pull/16868) `common.ratelimiter` Only grow the buffer but never shrink +- [#16812](https://github.com/influxdata/telegraf/pull/16812) `inputs.cloudwatch` Handle metric includes/excludes correctly to prevent panic +- [#16911](https://github.com/influxdata/telegraf/pull/16911) `inputs.lustre2` Skip empty files +- [#16594](https://github.com/influxdata/telegraf/pull/16594) `inputs.opcua` Handle node array values +- [#16782](https://github.com/influxdata/telegraf/pull/16782) `inputs.win_wmi` Replace hard-coded class-name with correct config setting +- [#16781](https://github.com/influxdata/telegraf/pull/16781) `inputs.win_wmi` Restrict threading model to APARTMENTTHREADED +- [#16857](https://github.com/influxdata/telegraf/pull/16857) `outputs.quix` Allow empty certificate for new cloud managed instances + +### Dependency Updates + +- [#16804](https://github.com/influxdata/telegraf/pull/16804) `deps` Bump cloud.google.com/go/bigquery from 1.66.2 to 1.67.0 +- [#16835](https://github.com/influxdata/telegraf/pull/16835) `deps` Bump cloud.google.com/go/monitoring from 1.24.0 to 1.24.2 +- [#16785](https://github.com/influxdata/telegraf/pull/16785) `deps` Bump cloud.google.com/go/pubsub from 1.48.0 to 1.49.0 +- [#16897](https://github.com/influxdata/telegraf/pull/16897) `deps` Bump cloud.google.com/go/storage from 1.51.0 to 1.52.0 +- [#16840](https://github.com/influxdata/telegraf/pull/16840) `deps` Bump github.com/BurntSushi/toml from 1.4.0 to 1.5.0 +- [#16838](https://github.com/influxdata/telegraf/pull/16838) `deps` Bump github.com/aliyun/alibaba-cloud-sdk-go from 1.63.104 to 1.63.106 +- 
[#16908](https://github.com/influxdata/telegraf/pull/16908) `deps` Bump github.com/aliyun/alibaba-cloud-sdk-go from 1.63.106 to 1.63.107 +- [#16789](https://github.com/influxdata/telegraf/pull/16789) `deps` Bump github.com/antchfx/xpath from 1.3.3 to 1.3.4 +- [#16807](https://github.com/influxdata/telegraf/pull/16807) `deps` Bump github.com/apache/arrow-go/v18 from 18.1.0 to 18.2.0 +- [#16844](https://github.com/influxdata/telegraf/pull/16844) `deps` Bump github.com/apache/iotdb-client-go from 1.3.3 to 1.3.4 +- [#16839](https://github.com/influxdata/telegraf/pull/16839) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.44.1 to 1.44.3 +- [#16836](https://github.com/influxdata/telegraf/pull/16836) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs from 1.45.3 to 1.47.3 +- [#16846](https://github.com/influxdata/telegraf/pull/16846) `deps` Bump github.com/aws/aws-sdk-go-v2/service/dynamodb from 1.42.2 to 1.42.4 +- [#16905](https://github.com/influxdata/telegraf/pull/16905) `deps` Bump github.com/aws/aws-sdk-go-v2/service/dynamodb from 1.42.4 to 1.43.1 +- [#16842](https://github.com/influxdata/telegraf/pull/16842) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.210.1 to 1.211.3 +- [#16900](https://github.com/influxdata/telegraf/pull/16900) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.211.3 to 1.212.0 +- [#16903](https://github.com/influxdata/telegraf/pull/16903) `deps` Bump github.com/aws/aws-sdk-go-v2/service/kinesis from 1.33.2 to 1.33.3 +- [#16793](https://github.com/influxdata/telegraf/pull/16793) `deps` Bump github.com/aws/aws-sdk-go-v2/service/timestreamwrite from 1.27.4 to 1.30.2 +- [#16802](https://github.com/influxdata/telegraf/pull/16802) `deps` Bump github.com/clarify/clarify-go from 0.3.1 to 0.4.0 +- [#16849](https://github.com/influxdata/telegraf/pull/16849) `deps` Bump github.com/docker/docker from 28.0.4+incompatible to 28.1.1+incompatible +- 
[#16830](https://github.com/influxdata/telegraf/pull/16830) `deps` Bump github.com/go-ldap/ldap/v3 from 3.4.10 to 3.4.11 +- [#16801](https://github.com/influxdata/telegraf/pull/16801) `deps` Bump github.com/go-sql-driver/mysql from 1.8.1 to 1.9.2 +- [#16806](https://github.com/influxdata/telegraf/pull/16806) `deps` Bump github.com/gofrs/uuid/v5 from 5.3.0 to 5.3.2 +- [#16895](https://github.com/influxdata/telegraf/pull/16895) `deps` Bump github.com/google/cel-go from 0.24.1 to 0.25.0 +- [#16797](https://github.com/influxdata/telegraf/pull/16797) `deps` Bump github.com/gopcua/opcua from 0.7.1 to 0.7.4 +- [#16894](https://github.com/influxdata/telegraf/pull/16894) `deps` Bump github.com/gopcua/opcua from 0.7.4 to 0.8.0 +- [#16660](https://github.com/influxdata/telegraf/pull/16660) `deps` Bump github.com/gosmnp/gosnmp from 1.39.0 to 1.40.0 +- [#16902](https://github.com/influxdata/telegraf/pull/16902) `deps` Bump github.com/gosnmp/gosnmp from 1.39.0 to 1.40.0 +- [#16841](https://github.com/influxdata/telegraf/pull/16841) `deps` Bump github.com/hashicorp/consul/api from 1.31.2 to 1.32.0 +- [#16891](https://github.com/influxdata/telegraf/pull/16891) `deps` Bump github.com/jedib0t/go-pretty/v6 from 6.6.5 to 6.6.7 +- [#16892](https://github.com/influxdata/telegraf/pull/16892) `deps` Bump github.com/lxc/incus/v6 from 6.11.0 to 6.12.0 +- [#16786](https://github.com/influxdata/telegraf/pull/16786) `deps` Bump github.com/microsoft/go-mssqldb from 1.7.2 to 1.8.0 +- [#16851](https://github.com/influxdata/telegraf/pull/16851) `deps` Bump github.com/miekg/dns from 1.1.64 to 1.1.65 +- [#16808](https://github.com/influxdata/telegraf/pull/16808) `deps` Bump github.com/nats-io/nats-server/v2 from 2.10.25 to 2.10.27 +- [#16888](https://github.com/influxdata/telegraf/pull/16888) `deps` Bump github.com/nats-io/nats-server/v2 from 2.10.27 to 2.11.2 +- [#16909](https://github.com/influxdata/telegraf/pull/16909) `deps` Bump github.com/nats-io/nats.go from 1.41.1 to 1.41.2 +- 
[#16790](https://github.com/influxdata/telegraf/pull/16790) `deps` Bump github.com/openconfig/gnmi from 0.11.0 to 0.14.1 +- [#16799](https://github.com/influxdata/telegraf/pull/16799) `deps` Bump github.com/openconfig/goyang from 1.6.0 to 1.6.2 +- [#16848](https://github.com/influxdata/telegraf/pull/16848) `deps` Bump github.com/prometheus-community/pro-bing from 0.4.1 to 0.7.0 +- [#16795](https://github.com/influxdata/telegraf/pull/16795) `deps` Bump github.com/prometheus/client_golang from 1.21.1 to 1.22.0 +- [#16845](https://github.com/influxdata/telegraf/pull/16845) `deps` Bump github.com/prometheus/client_model from 0.6.1 to 0.6.2 +- [#16901](https://github.com/influxdata/telegraf/pull/16901) `deps` Bump github.com/prometheus/procfs from 0.16.0 to 0.16.1 +- [#16792](https://github.com/influxdata/telegraf/pull/16792) `deps` Bump github.com/safchain/ethtool from 0.3.0 to 0.5.10 +- [#16791](https://github.com/influxdata/telegraf/pull/16791) `deps` Bump github.com/seancfoley/ipaddress-go from 1.7.0 to 1.7.1 +- [#16794](https://github.com/influxdata/telegraf/pull/16794) `deps` Bump github.com/shirou/gopsutil/v4 from 4.25.1 to 4.25.3 +- [#16828](https://github.com/influxdata/telegraf/pull/16828) `deps` Bump github.com/snowflakedb/gosnowflake from 1.11.2 to 1.13.1 +- [#16904](https://github.com/influxdata/telegraf/pull/16904) `deps` Bump github.com/snowflakedb/gosnowflake from 1.13.1 to 1.13.3 +- [#16787](https://github.com/influxdata/telegraf/pull/16787) `deps` Bump github.com/srebhan/cborquery from 1.0.3 to 1.0.4 +- [#16837](https://github.com/influxdata/telegraf/pull/16837) `deps` Bump github.com/srebhan/protobufquery from 1.0.1 to 1.0.4 +- [#16893](https://github.com/influxdata/telegraf/pull/16893) `deps` Bump github.com/testcontainers/testcontainers-go from 0.36.0 to 0.37.0 +- [#16803](https://github.com/influxdata/telegraf/pull/16803) `deps` Bump github.com/testcontainers/testcontainers-go/modules/kafka from 0.34.0 to 0.36.0 +- 
[#16890](https://github.com/influxdata/telegraf/pull/16890) `deps` Bump github.com/testcontainers/testcontainers-go/modules/kafka from 0.36.0 to 0.37.0 +- [#16850](https://github.com/influxdata/telegraf/pull/16850) `deps` Bump github.com/vmware/govmomi from 0.49.0 to 0.50.0 +- [#16784](https://github.com/influxdata/telegraf/pull/16784) `deps` Bump github.com/yuin/goldmark from 1.7.8 to 1.7.9 +- [#16896](https://github.com/influxdata/telegraf/pull/16896) `deps` Bump github.com/yuin/goldmark from 1.7.9 to 1.7.11 +- [#16832](https://github.com/influxdata/telegraf/pull/16832) `deps` Bump go.mongodb.org/mongo-driver from 1.17.0 to 1.17.3 +- [#16800](https://github.com/influxdata/telegraf/pull/16800) `deps` Bump go.opentelemetry.io/collector/pdata from 1.29.0 to 1.30.0 +- [#16907](https://github.com/influxdata/telegraf/pull/16907) `deps` Bump go.opentelemetry.io/collector/pdata from 1.30.0 to 1.31.0 +- [#16831](https://github.com/influxdata/telegraf/pull/16831) `deps` Bump go.step.sm/crypto from 0.60.0 to 0.61.0 +- [#16886](https://github.com/influxdata/telegraf/pull/16886) `deps` Bump go.step.sm/crypto from 0.61.0 to 0.62.0 +- [#16816](https://github.com/influxdata/telegraf/pull/16816) `deps` Bump golangci-lint from v2.0.2 to v2.1.2 +- [#16852](https://github.com/influxdata/telegraf/pull/16852) `deps` Bump gonum.org/v1/gonum from 0.15.1 to 0.16.0 +- [#16805](https://github.com/influxdata/telegraf/pull/16805) `deps` Bump google.golang.org/api from 0.228.0 to 0.229.0 +- [#16898](https://github.com/influxdata/telegraf/pull/16898) `deps` Bump google.golang.org/api from 0.229.0 to 0.230.0 +- [#16834](https://github.com/influxdata/telegraf/pull/16834) `deps` Bump google.golang.org/grpc from 1.71.1 to 1.72.0 +- [#16889](https://github.com/influxdata/telegraf/pull/16889) `deps` Bump k8s.io/client-go from 0.32.3 to 0.33.0 +- [#16843](https://github.com/influxdata/telegraf/pull/16843) `deps` Bump modernc.org/sqlite from 1.36.2 to 1.37.0 + +## v1.34.2 {date="2025-04-14"} + +### 
Bugfixes + +- [#16375](https://github.com/influxdata/telegraf/pull/16375) `aggregators` Handle time drift when calculating aggregation windows + +### Dependency Updates + +- [#16689](https://github.com/influxdata/telegraf/pull/16689) `deps` Bump cloud.google.com/go/pubsub from 1.45.3 to 1.48.0 +- [#16769](https://github.com/influxdata/telegraf/pull/16769) `deps` Bump cloud.google.com/go/storage from 1.50.0 to 1.51.0 +- [#16771](https://github.com/influxdata/telegraf/pull/16771) `deps` Bump github.com/Azure/azure-sdk-for-go/sdk/azcore from 1.17.0 to 1.18.0 +- [#16708](https://github.com/influxdata/telegraf/pull/16708) `deps` Bump github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs from 1.2.3 to 1.3.1 +- [#16764](https://github.com/influxdata/telegraf/pull/16764) `deps` Bump github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs from 1.3.1 to 1.3.2 +- [#16777](https://github.com/influxdata/telegraf/pull/16777) `deps` Bump github.com/ClickHouse/clickhouse-go/v2 from 2.30.3 to 2.34.0 +- [#16707](https://github.com/influxdata/telegraf/pull/16707) `deps` Bump github.com/IBM/sarama from v1.43.3 to v1.45.1 +- [#16739](https://github.com/influxdata/telegraf/pull/16739) `deps` Bump github.com/SAP/go-hdb from 1.9.10 to 1.13.5 +- [#16754](https://github.com/influxdata/telegraf/pull/16754) `deps` Bump github.com/aliyun/alibaba-cloud-sdk-go from 1.62.721 to 1.63.104 +- [#16767](https://github.com/influxdata/telegraf/pull/16767) `deps` Bump github.com/antchfx/jsonquery from 1.3.3 to 1.3.6 +- [#16758](https://github.com/influxdata/telegraf/pull/16758) `deps` Bump github.com/aws/aws-sdk-go-v2/config from 1.29.6 to 1.29.13 +- [#16710](https://github.com/influxdata/telegraf/pull/16710) `deps` Bump github.com/aws/aws-sdk-go-v2/credentials from 1.17.59 to 1.17.65 +- [#16685](https://github.com/influxdata/telegraf/pull/16685) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.43.14 to 1.44.1 +- [#16773](https://github.com/influxdata/telegraf/pull/16773) 
`deps` Bump github.com/aws/aws-sdk-go-v2/service/dynamodb from 1.40.0 to 1.42.2 +- [#16688](https://github.com/influxdata/telegraf/pull/16688) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.203.1 to 1.210.1 +- [#16772](https://github.com/influxdata/telegraf/pull/16772) `deps` Bump github.com/aws/aws-sdk-go-v2/service/kinesis from 1.32.6 to 1.33.2 +- [#16711](https://github.com/influxdata/telegraf/pull/16711) `deps` Bump github.com/cloudevents/sdk-go/v2 from 2.15.2 to 2.16.0 +- [#16687](https://github.com/influxdata/telegraf/pull/16687) `deps` Bump github.com/google/cel-go from 0.23.0 to 0.24.1 +- [#16712](https://github.com/influxdata/telegraf/pull/16712) `deps` Bump github.com/gophercloud/gophercloud/v2 from 2.0.0-rc.3 to 2.6.0 +- [#16738](https://github.com/influxdata/telegraf/pull/16738) `deps` Bump github.com/gorcon/rcon from 1.3.5 to 1.4.0 +- [#16737](https://github.com/influxdata/telegraf/pull/16737) `deps` Bump github.com/gosnmp/gosnmp from 1.38.0 to 1.39.0 +- [#16752](https://github.com/influxdata/telegraf/pull/16752) `deps` Bump github.com/lxc/incus/v6 from 6.9.0 to 6.11.0 +- [#16761](https://github.com/influxdata/telegraf/pull/16761) `deps` Bump github.com/nats-io/nats.go from 1.39.1 to 1.41.1 +- [#16753](https://github.com/influxdata/telegraf/pull/16753) `deps` Bump github.com/netsampler/goflow2/v2 from 2.2.1 to 2.2.2 +- [#16760](https://github.com/influxdata/telegraf/pull/16760) `deps` Bump github.com/p4lang/p4runtime from 1.4.0 to 1.4.1 +- [#16766](https://github.com/influxdata/telegraf/pull/16766) `deps` Bump github.com/prometheus/common from 0.62.0 to 0.63.0 +- [#16686](https://github.com/influxdata/telegraf/pull/16686) `deps` Bump github.com/rclone/rclone from 1.68.2 to 1.69.1 +- [#16770](https://github.com/influxdata/telegraf/pull/16770) `deps` Bump github.com/sijms/go-ora/v2 from 2.8.22 to 2.8.24 +- [#16709](https://github.com/influxdata/telegraf/pull/16709) `deps` Bump github.com/testcontainers/testcontainers-go from 0.35.0 to 0.36.0 
+- [#16763](https://github.com/influxdata/telegraf/pull/16763) `deps` Bump github.com/tinylib/msgp from 1.2.0 to 1.2.5 +- [#16757](https://github.com/influxdata/telegraf/pull/16757) `deps` Bump github.com/urfave/cli/v2 from 2.27.2 to 2.27.6 +- [#16724](https://github.com/influxdata/telegraf/pull/16724) `deps` Bump github.com/vmware/govmomi from v0.45.1 to v0.49.0 +- [#16768](https://github.com/influxdata/telegraf/pull/16768) `deps` Bump go.opentelemetry.io/collector/pdata from 1.25.0 to 1.29.0 +- [#16765](https://github.com/influxdata/telegraf/pull/16765) `deps` Bump go.step.sm/crypto from 0.59.1 to 0.60.0 +- [#16756](https://github.com/influxdata/telegraf/pull/16756) `deps` Bump golang.org/x/crypto from 0.36.0 to 0.37.0 +- [#16683](https://github.com/influxdata/telegraf/pull/16683) `deps` Bump golangci-lint from v1.64.5 to v2.0.2 +- [#16759](https://github.com/influxdata/telegraf/pull/16759) `deps` Bump google.golang.org/api from 0.224.0 to 0.228.0 +- [#16755](https://github.com/influxdata/telegraf/pull/16755) `deps` Bump k8s.io/client-go from 0.32.1 to 0.32.3 +- [#16684](https://github.com/influxdata/telegraf/pull/16684) `deps` Bump tj-actions/changed-files from 46.0.1 to 46.0.3 +- [#16736](https://github.com/influxdata/telegraf/pull/16736) `deps` Bump tj-actions/changed-files from 46.0.3 to 46.0.4 +- [#16751](https://github.com/influxdata/telegraf/pull/16751) `deps` Bump tj-actions/changed-files from 46.0.4 to 46.0.5 + ## v1.34.1 {date="2025-03-24"} ### Bugfixes diff --git a/cypress/downloads/downloads.html b/cypress/downloads/downloads.html deleted file mode 100644 index 523cdaa3e..000000000 Binary files a/cypress/downloads/downloads.html and /dev/null differ diff --git a/cypress/support/hugo-server.js b/cypress/support/hugo-server.js index 0e4e6a646..d4e4c7361 100644 --- a/cypress/support/hugo-server.js +++ b/cypress/support/hugo-server.js @@ -2,8 +2,10 @@ import { spawn } from 'child_process'; import fs from 'fs'; import http from 'http'; import net from 
'net'; +import process from 'process'; // Hugo server constants +export const HUGO_ENVIRONMENT = 'testing'; export const HUGO_PORT = 1315; export const HUGO_LOG_FILE = '/tmp/hugo_server.log'; @@ -28,7 +30,8 @@ export async function isPortInUse(port) { /** * Start the Hugo server with the specified options * @param {Object} options - Configuration options for Hugo - * @param {string} options.configFile - Path to Hugo config file (e.g., 'config/testing/config.yml') + * @param {string} options.configFile - Path to Hugo config file + * @param {string} options.environment - Environment to run Hugo in * @param {number} options.port - Port number for Hugo server * @param {boolean} options.buildDrafts - Whether to build draft content * @param {boolean} options.noHTTPCache - Whether to disable HTTP caching @@ -36,9 +39,10 @@ export async function isPortInUse(port) { * @returns {Promise} Child process object */ export async function startHugoServer({ - configFile = 'config/testing/config.yml', + configFile = 'config/_default/hugo.yml', port = HUGO_PORT, - buildDrafts = true, + environment = 'testing', + buildDrafts = false, noHTTPCache = true, logFile = HUGO_LOG_FILE, } = {}) { @@ -48,6 +52,8 @@ export async function startHugoServer({ const hugoArgs = [ 'hugo', 'server', + '--environment', + environment, '--config', configFile, '--port', @@ -64,16 +70,16 @@ export async function startHugoServer({ return new Promise((resolve, reject) => { try { - // Use npx to find and execute Hugo, which will work regardless of installation method - console.log(`Running Hugo with npx: npx ${hugoArgs.join(' ')}`); - const hugoProc = spawn('npx', hugoArgs, { + // Use yarn to find and execute Hugo, which will work regardless of installation method + console.log(`Running Hugo with yarn: yarn ${hugoArgs.join(' ')}`); + const hugoProc = spawn('yarn', hugoArgs, { stdio: ['ignore', 'pipe', 'pipe'], shell: true, }); // Check if the process started successfully if (!hugoProc || !hugoProc.pid) { - 
return reject(new Error('Failed to start Hugo server via npx')); + return reject(new Error('Failed to start Hugo server via yarn')); } // Set up logging diff --git a/cypress/support/run-e2e-specs.js b/cypress/support/run-e2e-specs.js index 9ff3c5f31..fd2a214d1 100644 --- a/cypress/support/run-e2e-specs.js +++ b/cypress/support/run-e2e-specs.js @@ -38,9 +38,10 @@ import fs from 'fs'; import path from 'path'; import cypress from 'cypress'; import net from 'net'; -import matter from 'gray-matter'; +import { Buffer } from 'buffer'; import { displayBrokenLinksReport, initializeReport } from './link-reporter.js'; import { + HUGO_ENVIRONMENT, HUGO_PORT, HUGO_LOG_FILE, startHugoServer, @@ -90,28 +91,6 @@ async function isPortInUse(port) { }); } -/** - * Extract source information from frontmatter - * @param {string} filePath - Path to the markdown file - * @returns {string|null} Source information if present - */ -function getSourceFromFrontmatter(filePath) { - if (!fs.existsSync(filePath)) { - return null; - } - - try { - const fileContent = fs.readFileSync(filePath, 'utf8'); - const { data } = matter(fileContent); - return data.source || null; - } catch (err) { - console.warn( - `Warning: Could not extract frontmatter from ${filePath}: ${err.message}` - ); - return null; - } -} - /** * Ensures a directory exists, creating it if necessary * Also creates an empty file to ensure the directory is not empty @@ -296,7 +275,7 @@ async function main() { }); console.log('Hugo is available on the system'); - } catch (checkErr) { + } catch { console.log( 'Hugo not found on PATH, will use project-local Hugo via yarn' ); @@ -304,9 +283,8 @@ async function main() { // Use the startHugoServer function from hugo-server.js hugoProc = await startHugoServer({ - configFile: 'config/testing/config.yml', + environment: HUGO_ENVIRONMENT, port: HUGO_PORT, - buildDrafts: true, noHTTPCache: true, logFile: HUGO_LOG_FILE, }); @@ -412,7 +390,7 @@ async function main() { `ℹ️ Note: ${testFailureCount} 
test(s) failed but no broken links were detected in the report.` ); console.warn( - ` This usually indicates test errors unrelated to link validation.` + ' This usually indicates test errors unrelated to link validation.' ); // We should not consider special case domains (those with expected errors) as failures diff --git a/data/influxdb_urls.yml b/data/influxdb_urls.yml index 9b8a34789..66c0fae92 100644 --- a/data/influxdb_urls.yml +++ b/data/influxdb_urls.yml @@ -66,6 +66,23 @@ cloud: - name: East US (Virginia) location: Virginia, USA url: https://eastus-1.azure.cloud2.influxdata.com + +serverless: + product: InfluxDB Cloud + providers: + - name: Amazon Web Services + short_name: AWS + iox: true + regions: + - name: US East (Virginia) + location: Virginia, USA + url: https://us-east-1-1.aws.cloud2.influxdata.com + iox: true + - name: EU Frankfurt + location: Frankfurt, Germany + url: https://eu-central-1-1.aws.cloud2.influxdata.com + iox: true + cloud_dedicated: providers: - name: Default diff --git a/data/list_filters.yml b/data/list_filters.yml index bd4c0ecd2..9cb44319c 100644 --- a/data/list_filters.yml +++ b/data/list_filters.yml @@ -5,25 +5,35 @@ telegraf: - Output - Aggregator - Processor - - External - category: Plugin category values: + - Annotation - Applications - - Build & Deploy - Cloud - Containers - - Data Stores + - Datastore + - Filtering + - General Purpose + - Grouping + - Hardware - IoT - Logging + - Math - Messaging - - Networking - - Servers - - Systems + - Network + - Sampling + - Server + - Statistics + - System + - Testing + - Transformation - Web - category: Operating system values: + - FreeBSD - Linux - macOS + - Solaris - Windows - category: Status values: diff --git a/data/notifications.yaml b/data/notifications.yaml index ef9fa720b..2c7012a3a 100644 --- a/data/notifications.yaml +++ b/data/notifications.yaml @@ -40,26 +40,23 @@ # - [The plan for InfluxDB 3.0 Open 
Source](https://influxdata.com/blog/the-plan-for-influxdb-3-0-open-source) # - [InfluxDB 3.0 benchmarks](https://influxdata.com/blog/influxdb-3-0-is-2.5x-45x-faster-compared-to-influxdb-open-source/) -- id: influxdb3-ga - level: ga-announcement +- id: influxdb3.2-explorer-ga + level: note scope: - / - title_tag: Now Generally Available - title: InfluxDB 3 Core and Enterprise + title: New in InfluxDB 3.2 slug: | - Start fast. Scale faster. + Key enhancements in InfluxDB 3.2 and the InfluxDB 3 Explorer UI is now generally available. - Get the Updates + See the Blog Post message: | - InfluxDB 3 Core is an open source, high-speed, recent-data engine that collects - and processes data in real-time and persists it to local disk or object storage. - InfluxDB 3 Enterprise builds on Core’s foundation, adding high availability, - read replicas, enhanced security, and data compaction for faster queries and - optimized storage. A free tier of InfluxDB 3 Enterprise is available for - non-commercial at-home or hobbyist use. + InfluxDB 3.2 is now available for both Core and Enterprise, bringing the + general availability of InfluxDB 3 Explorer, a new UI that simplifies how + you query, explore, and visualize data. On top of that, InfluxDB 3.2 includes + a wide range of performance improvements, feature updates, and bug fixes + including automated data retention and more. 
For more information, check out: - - [Announcement blog from Paul Dix](https://www.influxdata.com/blog/influxdb-3-OSS-GA/) - - [Get Started with InfluxDB 3 Core](https://docs.influxdata.com/influxdb3/core/get-started/) - - [Get Started with InfluxDB 3 Enterprise](https://docs.influxdata.com/influxdb3/enterprise/get-started/) + - [InfluxDB 3.2 blog from Paul Dix](http://influxdata.com/blog/influxdb-3-2) + - [Get Started with InfluxDB 3 Explorer](/influxdb3/explorer/get-started/) diff --git a/data/products.yml b/data/products.yml index a0d613cb8..d4484e420 100644 --- a/data/products.yml +++ b/data/products.yml @@ -6,7 +6,7 @@ influxdb3_core: versions: [core] list_order: 2 latest: core - latest_patch: 3.1.0 + latest_patch: 3.2.1 placeholder_host: localhost:8181 ai_sample_questions: - How do I install and run InfluxDB 3 Core? @@ -21,13 +21,13 @@ influxdb3_enterprise: versions: [enterprise] list_order: 2 latest: enterprise - latest_patch: 3.1.0 + latest_patch: 3.2.1 placeholder_host: localhost:8181 ai_sample_questions: - How do I install and run InfluxDB 3 Enterprise? - Help me write a plugin for the Python Processing engine? - How do I start a read replica node with InfluxDB 3 Enterprise? - + influxdb3_explorer: name: InfluxDB 3 Explorer altname: Explorer @@ -64,7 +64,7 @@ influxdb3_cloud_dedicated: list_order: 3 latest: cloud-dedicated link: "https://www.influxdata.com/contact-sales-cloud-dedicated/" - latest_cli: 2.10.1 + latest_cli: 2.10.2 placeholder_host: cluster-id.a.influxdb.io ai_sample_questions: - How do I migrate from InfluxDB v1 to InfluxDB Cloud Dedicated? @@ -128,9 +128,8 @@ explorer: menu_category: other list_order: 4 versions: [v1] - latest: v1.0 - latest_patches: - v1: 1.0.0 + latest: explorer + latest_patch: 1.0.0 ai_sample_questions: - How do I use InfluxDB 3 Explorer to visualize data? - How do I create a dashboard in InfluxDB 3 Explorer? 
@@ -142,9 +141,9 @@ telegraf: menu_category: other list_order: 6 versions: [v1] - latest: v1.34 + latest: v1.35 latest_patches: - v1: 1.34.1 + v1: 1.35.0 ai_sample_questions: - How do I install and configure Telegraf? - How do I write a custom Telegraf plugin? @@ -170,9 +169,9 @@ kapacitor: menu_category: other list_order: 7 versions: [v1] - latest: v1.7 + latest: v1.8 latest_patches: - v1: 1.7.7 + v1: 1.8.0 ai_sample_questions: - How do I configure Kapacitor for InfluxDB v1? - How do I write a custom Kapacitor task? diff --git a/data/telegraf_plugins.yml b/data/telegraf_plugins.yml index f1a96597f..0f9ee3fe4 100644 --- a/data/telegraf_plugins.yml +++ b/data/telegraf_plugins.yml @@ -1,3141 +1,4168 @@ -############## %%%%%% %% %% %%%%% %% %% %%%%%% %%%% ############## -############## %% %%% %% %% %% %% %% %% %% ############## -############## %% %% %%% %%%%% %% %% %% %%%% ############## -############## %% %% %% %% %% %% %% %% ############## -############## %%%%%% %% %% %% %%%% %% %%%% ############## - input: - - name: AMQP Consumer - id: amqp_consumer - description: | - The AMQP Consumer input plugin provides a consumer for use with AMQP 0-9-1, - a prominent implementation of this protocol - being RabbitMQ. - introduced: 1.3.0 - tags: [linux, macos, windows, messaging] - - name: ActiveMQ id: activemq description: | - The ActiveMQ input plugin gathers queues, topics, and subscriber metrics - using the ActiveMQ Console API. - introduced: 1.8.0 - tags: [linux, macos, windows, messaging] - + This plugin gathers queue, topics and subscribers metrics using the + Console API [ActiveMQ](https://activemq.apache.org/) message broker + daemon. 
+ introduced: v1.8.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [messaging] - name: Aerospike id: aerospike description: | - **Deprecated in favor of the [Prometheus plugin](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/prometheus/README.md) - with the Aerospike Prometheus Exporter.** + This plugin queries [Aerospike](https://www.aerospike.com) server(s) for + node statistics and statistics on all configured namespaces. - The Aerospike input plugin queries Aerospike servers and gets node statistics - and statistics for all configured namespaces. - introduced: 0.2.0 - deprecated: 1.30.0 - tags: [linux, macos, windows, data-stores] + > [!CAUTION] + > As of version 1.30 the Aerospike plugin has been deprecated in favor of + > the prometheus plugin and the officially supported [Aerospike Prometheus + > Exporter](https://aerospike.com/docs/monitorstack/configure/configure-exporter) - - name: Alibaba CloudMonitor Service (Aliyun) + For details on what the measurements + mean, please consult the [Aerospike + Metrics Reference + Docs](https://www.aerospike.com/docs/reference/metrics). + + > [!NOTE] + > Metric names will have dashes (`-`) replaced as underscores (`_`) to + > make querying more consistent and easy. + + All metrics are attempted to be cast to integers, then booleans, then + strings in order. + introduced: v0.2.0 + deprecated: v1.30.0 + removal: v1.40.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Alibaba Cloud Monitor Service (Aliyun) id: aliyuncms description: | - This plugin pulls metric statistics from Aliyun CMS. - introduced: 1.19.0 + This plugin gathers statistics from the [Alibaba / Aliyun cloud + monitoring service](https://www.alibabacloud.com). In the following we + will use `Aliyun` instead of `Alibaba` as it's the default naming across + the web console and docs. 
+ introduced: v1.19.0 + os_support: [freebsd, linux, macos, solaris, windows] tags: [cloud] - - - name: Amazon CloudWatch Alarms - id: awsalarms - description: | - The Amazon CloudWatch Alarms input plugin pulls alarm statistics from Amazon CloudWatch. - introduced: 1.16.0 - link: https://github.com/vipinvkmenon/awsalarms/blob/master/README.MD - tags: [linux, macos, windows, cloud, external] - external: true - - - name: Amazon CloudWatch Statistics - id: cloudwatch - description: | - The Amazon CloudWatch Statistics input plugin pulls metric statistics from Amazon CloudWatch. - introduced: 0.12.1 - tags: [linux, macos, windows, cloud] - - - name: Amazon ECS - id: ecs - description: | - Amazon ECS input plugin (AWS Fargate compatible) uses the Amazon ECS v2 metadata and stats API endpoints to gather stats on running containers in a task. - The Telegraf container and the workload that Telegraf is inspecting must be run in the same task. This is similar to (and reuses pieces of) the - Docker input plugin, with some ECS-specific modifications for AWS metadata and stats formats. - introduced: 1.11.0 - tags: [linux, macos, windows, cloud, containers] - - - name: Amazon Kinesis Consumer - id: kinesis_consumer - description: | - The Amazon Kinesis Consumer input plugin reads from a Kinesis data stream and creates - metrics using one of the supported [input data formats](/telegraf/v1/data_formats/input). - introduced: 1.10.0 - tags: [linux, macos, windows, cloud, messaging, external] - external: true - - - name: Apache Aurora - id: aurora - description: | - The Aurora input plugin gathers metrics from [Apache Aurora](https://aurora.apache.org/) schedulers. - For monitoring recommendations, see [Monitoring your Aurora cluster](https://aurora.apache.org/documentation/latest/operations/monitoring/). 
- introduced: 1.7.0 - tags: [linux, macos, windows, applications, containers] - - - name: Apache HTTP Server - id: apache - description: | - The Apache HTTP Server input plugin collects server performance information - using the `mod_status` module of the Apache HTTP Server. - - Typically, the `mod_status` module is configured to expose a page at the - `/server-status?auto` location of the Apache server. - The [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) - option must be enabled in order to collect all available fields. - For information about how to configure your server reference, see the - [module documentation](https://httpd.apache.org/docs/2.4/mod/mod_status.html#enable). - introduced: 1.8.0 - tags: [linux, macos, windows, servers, web] - - - name: Apache Kafka Consumer - id: kafka_consumer - description: | - The Apache Kafka Consumer input plugin polls a specified Kafka topic and adds messages to InfluxDB. - Messages are expected in the line protocol format. - [Consumer Group](http://godoc.org/github.com/wvanbergen/kafka/consumergroup) - is used to talk to the Kafka cluster so multiple instances of Telegraf can read - from the same topic in parallel. - introduced: 0.2.3 - tags: [linux, macos, windows, messaging] - - - name: Apache Mesos - id: mesos - description: | - The Apache Mesos input plugin gathers metrics from Mesos. For more information, please check the - [Mesos Observability Metrics](http://mesos.apache.org/documentation/latest/monitoring/) page. - introduced: 0.10.3 - tags: [linux, macos, windows, containers] - - - name: Apache Solr - id: solr - description: | - The Apache Solr input plugin collects stats using the MBean Request Handler. - introduced: 1.5.0 - tags: [linux, macos, windows, data-stores] - - - name: Apache Tomcat - id: tomcat - description: | - The Apache Tomcat input plugin collects statistics available from the Apache - Tomcat manager status page (`http:///manager/status/all?XML=true`). 
- Using `XML=true` returns XML data. - See the [Apache Tomcat documentation](https://tomcat.apache.org/tomcat-9.0-doc/manager-howto.html#Server_Status) - for details on these statistics. - introduced: 1.4.0 - tags: [linux, macos, windows, servers, web] - - - name: Apache Zipkin - id: zipkin - description: | - The Apache Zipkin input plugin implements the Zipkin HTTP server to gather trace - and timing data needed to troubleshoot latency problems in microservice architectures. - - > This plugin is experimental. Its data schema may be subject to change based on - > its main usage cases and the evolution of the OpenTracing standard. - introduced: 1.4.0 - tags: [linux, macos, windows, networking] - - - name: Apache Zookeeper - id: zookeeper - description: | - The Apache Zookeeper input plugin collects variables output from the `mntr` - command [Zookeeper Admin](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html). - introduced: 0.2.0 - tags: [linux, macos, windows, build-deploy] - - - name: Apcupsd - id: apcupsd - description: | - The Apcupsd input plugin reads data from an apcupsd daemon over its NIS network protocol. - introduced: 1.12.0 - tags: [linux, macos, windows, systems] - - - name: APT - id: telegraf-apt - description: | - The APT input plugin checks Debian for package updates. - link: https://github.com/x70b1/telegraf-apt/blob/master/README.md - introduced: 1.21.4 - tags: [debian, ubuntu, external] - external: true - - - name: Arista LANZ Consumer - id: lanz - description: | - The Arista LANZ Consumer input plugin provides a consumer for use with Arista Networks’ Latency Analyzer (LANZ). - Metrics are read from a stream of data via TCP through port 50001 on the switch's management IP. Data is in Protobuffers format. - For more information, see [Arista LANZ](https://www.arista.com/en/um-eos/eos-latency-analyzer-lanz). 
- introduced: 1.14.0 - tags: [linux, macos, windows, networking] - - - name: AWS CloudWatch Metric Streams - id: cloudwatch_metric_streams - description: | - The CloudWatch Metric Streams plugin is a service input plugin that listens for metrics sent via HTTP and performs the required processing for metric streams from AWS. - introduced: 1.24.0 - tags: [linux, macos, windows, cloud, aws, streams] - - - name: Azure Monitor - id: azure_monitor - description: | - The Azure Monitor plugin gathers metrics from Azure Monitor API. - introduced: 1.25.0 - tags: [linux, macos, windows, systems, cloud] - - - name: Azure Storage Queue - id: azure_storage_queue - description: | - The Azure Storage Queue plugin gathers sizes of Azure Storage Queues. - introduced: 1.13.0 - tags: [linux, macos, windows, systems, cloud] - - - name: Bcache - id: bcache - description: | - The Bcache input plugin gets bcache statistics from the `stats_total` directory and `dirty_data` file. - introduced: 0.2.0 - tags: [linux, macos, windows, systems] - - - name: Beat - id: beat - description: | - The Beat input plugin collects metrics from the given Elastic Beat instances. - introduced: 1.18.0 - tags: [linux, macos, windows, applications] - - - name: Beanstalkd - id: beanstalkd - description: | - The Beanstalkd input plugin collects server stats as well as tube stats - (reported by `stats` and `stats-tube` commands respectively). - introduced: 1.8.0 - tags: [linux, macos, windows, messaging] - - - name: Big Blue Button - id: bigbluebutton - description: | - The BigBlueButton Input Plugin gathers metrics from a BigBlueButton server. - introduced: 1.19.0 - link: https://github.com/SLedunois/bigbluebutton-telegraf-plugin/blob/main/README.md - tags: [external] - external: true - - - name: BIND 9 Nameserver Statistics - id: bind - description: | - plugin decodes the JSON or XML statistics provided by BIND 9 nameservers. 
- introduced: 1.11.0 - tags: [linux, macos, windows, networking] - - - name: Bond - id: bond - description: | - The Bond input plugin collects network bond interface status, bond's slaves - interfaces status and failures count of bond's slaves interfaces. - The plugin collects these metrics from `/proc/net/bonding/*` files. - introduced: 1.5.0 - tags: [linux, macos, windows, networking] - - - name: Burrow - id: burrow - description: | - The Burrow input plugin collects Apache Kafka topic, consumer, and partition - status using the [Burrow](https://github.com/linkedin/Burrow) - [HTTP Endpoint](https://github.com/linkedin/Burrow/wiki/HTTP-Endpoint). - introduced: 1.7.0 - tags: [linux, macos, windows, messaging] - - - name: Ceph Storage - id: ceph - description: | - The Ceph Storage input plugin collects performance metrics from the MON and OSD nodes in a Ceph storage cluster. - introduced: 0.13.1 - tags: [linux, macos, windows, data-stores] - - - name: CGroup - id: cgroup - description: | - The CGroup input plugin captures specific statistics per cgroup. - introduced: 1.0.0 - tags: [linux, macos, windows, systems] - - - name: Chrony - id: chrony - description: | - The Chrony input plugin gets standard chrony metrics, requires chronyc executable. - introduced: 0.13.1 - tags: [linux, macos, windows, networking, systems] - - - name: Cisco GNMI Telemetry - id: cisco_telemetry_gnmi - description: | - > The `inputs.cisco_telemetry_gnmi` plugin was renamed to `inputs.gnmi` - in **Telegraf 1.15.0** to better reflect its general support for gNMI devices. - See the [gNMI plugin](#input-cisco_telemetry_gnmi). - - Cisco GNMI Telemetry input plugin consumes telemetry data similar to the GNMI specification. - This GRPC-based protocol can utilize TLS for authentication and encryption. - This plugin has been developed to support GNMI telemetry as produced by Cisco IOS XR (64-bit) version 6.5.1 and later. 
- introduced: 1.11.0 - deprecated: 1.14.5 - link: https://github.com/influxdata/telegraf/tree/release-1.14/plugins/inputs/cisco_telemetry_gnmi - tags: [linux, macos, windows, applications] - - - name: Cisco Model-driven Telemetry (MDT) - id: cisco_telemetry_mdt - description: | - Cisco model-driven telemetry (MDT) is an input plugin that consumes telemetry data from Cisco IOS XR, IOS XE and NX-OS platforms. - It supports TCP & GRPC dialout transports. GRPC-based transport can utilize TLS for authentication and encryption. - Telemetry data is expected to be GPB-KV (self-describing-gpb) encoded. - introduced: 1.11.0 - tags: [linux, macos, windows, applications] - - - name: ClickHouse - id: clickhouse - description: | - The ClickHouse input plugin gathers statistics from a [ClickHouse](https://github.com/ClickHouse/ClickHouse) server, an open source - column-oriented database management system that lets you generate analytical data reports in real time. - introduced: 1.14.0 - tags: [linux, macos, windows, servers, systems] - - - name: Conntrack - id: conntrack - description: | - The Conntrack input plugin collects stats from Netfilter's conntrack-tools. - - The conntrack-tools provide a mechanism for tracking various aspects of - network connections as they are processed by netfilter. - At runtime, conntrack exposes many of those connection statistics within `/proc/sys/net`. - Depending on your kernel version, these files can be found in either `/proc/sys/net/ipv4/netfilter` - or `/proc/sys/net/netfilter` and will be prefixed with either `ip_` or `nf_`. - This plugin reads the files specified in its configuration and publishes each one as a field, - with the prefix normalized to `ip_`. - introduced: 1.0.0 - tags: [linux, macos, windows, networking] - - - name: Consul - id: consul - description: | - The Consul input plugin will collect statistics about all health checks registered in the Consul. - It uses Consul API to query the data. 
- It will not report the telemetry but Consul can report those stats already using StatsD protocol, if needed. - introduced: 1.0.0 - tags: [linux, macos, windows, build-deploy, containers] - - - name: Couchbase - id: couchbase - description: | - The Couchbase input plugin reads per-node and per-bucket metrics from Couchbase. - introduced: 0.12.0 - tags: [linux, macos, windows, data-stores] - - - name: CouchDB - id: couchdb - description: | - The CouchDB input plugin gathers metrics of CouchDB using `_stats` endpoint. - introduced: 0.10.3 - tags: [linux, macos, windows, data-stores] - - - name: CPU - id: cpu - description: | - The CPU input plugin gathers metrics about cpu usage. - introduced: 0.1.5 - tags: [linux, macos, windows, systems] - - - name: CS:GO - id: csgo - description: | - The CSGO input plugin gathers metrics from Counter-Strike: Global Offensive servers. - introduced: 1.18.0 - tags: [linux, macos, windows, web, servers] - - - name: ctrlX Data Layer - id: ctrlx_datalayer - description: | - Gather data from communication middleware running on ctrlX CORE devices - introduced: 1.27.0 - tags: [linux, macos, windows, messaging] - - - name: Disk - id: disk - description: | - The Disk input plugin gathers metrics about disk usage by mount point. - introduced: 0.1.1 - tags: [linux, macos, windows, systems] - - - name: DiskIO - id: diskio - description: | - The DiskIO input plugin gathers metrics about disk IO by device. - introduced: 0.10.0 - tags: [linux, macos, windows, systems] - - - name: Directory Monitoring - id: directory_monitor - description: | - The Directory Monitoring input plugin monitors a single directory and takes in each file placed in the directory. The plugin gathers all files in the directory at a configurable interval, and parses the ones that haven't been picked up yet. 
- introduced: 1.18.0 - tags: [linux, macos, windows, systems] - - - name: Disque - id: disque - description: | - The Disque input plugin gathers metrics from one or more [Disque](https://github.com/antirez/disque) servers. - introduced: 0.10.0 - tags: [linux, macos, windows, messaging] - - - name: DMCache - id: dmcache - description: | - The DMCache input plugin provides a native collection for dmsetup-based statistics for dm-cache. - introduced: 1.3.0 - tags: [linux, macos, windows, systems] - - - name: DNS Query - id: dns_query - description: | - The DNS Query input plugin gathers DNS query times in milliseconds - - like [Dig](https://en.wikipedia.org/wiki/Dig_(command)). - introduced: 1.4.0 - tags: [linux, macos, windows, networking] - - - name: dnsmasq - id: dnsmasq - description: | - This plugin gathers dnsmasq statistics on the DNS side. - introduced: 1.19.0 - link: https://github.com/machinly/dnsmasq-telegraf-plugin/blob/main/README.md - tags: [external] - external: true - - - name: Docker - id: docker - description: | - The Docker input plugin uses the Docker Engine API to gather metrics on running Docker containers. - The Docker plugin uses the [Official Docker Client](https://github.com/moby/moby/tree/master/client) - to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/) library documentation. - introduced: 0.1.9 - tags: [linux, macos, windows, build-deploy, containers] - - - name: Docker Log - id: docker_log - description: | - The Docker Log input plugin uses the Docker Engine API to collect logs from running Docker containers. - The plugin uses the [Official Docker Client](https://github.com/moby/moby/tree/master/client) - to gather logs from the [Engine API](https://docs.docker.com/engine/api/v1.24/). - - > This plugin works only for containers with the local or `json-file` or `journald` logging driver. 
- introduced: 1.12.0 - tags: [linux, macos, windows, build-deploy, containers, logging] - - - name: Dovecot - id: dovecot - description: | - The Dovecot input plugin uses the dovecot Stats protocol to gather metrics on configured domains. - For more information, see the [Dovecot documentation](http://wiki2.dovecot.org/Statistics). - introduced: 0.10.3 - tags: [linux, macos, windows, applications, web] - - - name: 389 Directory Server Input Plugin - id: ds389 - description: | - This plugin gathers metrics from 389 Directory Servers' cn=Monitor backend. - introduced: 1.19.0 - link: https://github.com/falon/CSI-telegraf-plugins/blob/master/plugins/inputs/ds389/README.md - tags: [external] - external: true - - - name: Elasticsearch - id: elasticsearch - description: | - The Elasticsearch input plugin queries endpoints to obtain [node](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) - and optionally [cluster-health](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) - or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) metrics. - introduced: 0.1.5 - tags: [linux, macos, windows, data-stores] - - - name: Elasticsearch Query - id: elasticsearch_query - description: | - This elasticsearch query plugin queries endpoints to obtain metrics from data stored in an Elasticsearch cluster. - introduced: 1.20.0 - tags: [linux, macos, windows, data-stores] - - - name: Ethtool - id: ethtool - description: | - The Ethtool plugin gathers ethernet device statistics. - The network device and driver determine what fields are gathered. - introduced: 1.13.0 - tags: [linux, macos, windows, networking, servers] - - - name: Event Hub Consumer - id: eventhub_consumer - description: | - The Event Hub Consumer input plugin provides a consumer for use with Azure Event Hubs and Azure IoT Hub. 
- introduced: 1.14.0 - tags: [linux, macos, windows, iot] - - - name: Exec - id: exec - description: | - The Exec input plugin parses supported [Telegraf input data formats](/telegraf/v1/data_formats/input/) - (line protocol, JSON, Graphite, Value, Nagios, Collectd, and Dropwizard) into metrics. - Each Telegraf metric includes the measurement name, tags, fields, and timestamp. - introduced: 0.1.5 - tags: [linux, macos, windows] - - - name: Execd - id: execd - description: | - The Execd input plugin runs an external program as a daemon. Programs must output metrics in an accepted - [Telegraf input data format](/telegraf/v1/data_formats/input/) - on its standard output. Configure `signal` to send a signal to the daemon running on each collection interval. - The program output on standard error is mirrored to the Telegraf log. - introduced: 1.14.0 - tags: [linux, macos, windows] - - - name: Fail2ban - id: fail2ban - description: | - The Fail2ban input plugin gathers the count of failed and banned IP addresses - using [fail2ban](https://www.fail2ban.org/). - introduced: 1.4.0 - tags: [linux, macos, windows, networking, security] - - - name: Fibaro - id: fibaro - description: | - The Fibaro input plugin makes HTTP calls to the Fibaro controller API to gather values of hooked devices. - Those values could be true (`1`) or false (`0`) for switches, percentage for dimmers, temperature, etc. - introduced: 1.7.0 - tags: [linux, macos, windows, iot] - - - name: File - id: file - description: | - The File input plugin updates a list of files every interval and parses - the contents using the selected input data format. - - Files will always be read in their entirety. If you wish to tail or follow a file, - then use the [Tail input plugin](#tail). - - > To parse metrics from multiple files that are formatted in one of the supported - > [input data formats](/telegraf/v1/data_formats/input), - > use the [Multifile input plugin](#multifile). 
- introduced: 1.8.0 - tags: [linux, macos, windows, systems] - - - name: Filecount - id: filecount - description: | - The Filecount input plugin reports the number and total size of files in directories that match certain criteria. - introduced: 1.8.0 - tags: [linux, macos, windows, systems] - - - name: Filestat - id: filestat - description: | - The Filestat input plugin gathers metrics about file existence, size, and other stats. - introduced: 0.13.0 - tags: [linux, macos, windows, systems] - - - name: Fireboard - id: fireboard - description: | - The Fireboard input plugin gathers real time temperature data from Fireboard thermometers. - To use this input plugin, sign up to use the [Fireboard REST API](https://docs.fireboard.io/reference/restapi.html). - introduced: 1.12.0 - tags: [linux, macos, windows, cloud, iot] - - - name: Firehose - id: firehose - description: | - The Firehose input plugin listens for metrics sent via HTTP from [AWS Data Firehose](https://aws.amazon.com/firehose/) - in one of the supported input data formats. - The plugin strictly follows the Firehose request-response schema. - introduced: 1.34.0 - tags: [linux, macos, windows, aws, cloud, streams] - - - name: Fluentd - id: fluentd - description: | - The Fluentd input plugin gathers Fluentd server metrics from plugin endpoint provided by in_monitor plugin. - This plugin understands data provided by `/api/plugin.json` resource (`/api/config.json` is not covered). - introduced: 1.4.0 - tags: [linux, macos, windows, servers] - - - name: Fritzbox - id: fritzbox - description: | - This plugin gathers statistics from a [FRITZ!Box](https://avm.de/produkte/fritzbox/) router and repeater. - introduced: 1.23.0 - link: https://github.com/hdecarne-github/fritzbox-telegraf-plugin/blob/main/README.md - external: true - tags: [external, networking] - - - name: GitHub - id: github - description: | - Gathers repository information from GitHub-hosted repositories. 
- introduced: 1.11.0 - tags: [linux, macos, windows, applications] - - - name: gNMI - id: gnmi - description: | - The gNMI plugin consumes telemetry data based on the - [gNMI](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md) `Subscribe` method. - The plugin supports TLS for authentication and encryption. - This input plugin is vendor-agnostic and is supported on any platform that supports the gNMI spec. - - **For Cisco devices:** - The gNMI plugin is optimized to support gNMI telemetry as produced by - Cisco IOS XR (64-bit) version 6.5.1, Cisco NX-OS 9.3 and Cisco IOS XE 16.12 and later. - introduced: 1.15.0 - tags: [linux, macos, windows, applications] - - - name: Google Cloud PubSub - id: cloud_pubsub - description: | - The Google Cloud PubSub input plugin ingests metrics from - [Google Cloud PubSub](https://cloud.google.com/pubsub) and creates metrics - using one of the supported [input data formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). - introduced: 1.10.0 - tags: [linux, macos, windows, cloud, messaging] - - - name: Google Cloud PubSub Push - id: cloud_pubsub_push - description: | - The Google Cloud PubSub Push (`cloud_pubsub_push`) input plugin listens for - messages sent using HTTP POST requests from Google Cloud PubSub. - The plugin expects messages in Google's Pub/Sub JSON Format ONLY. - The intent of the plugin is to allow Telegraf to serve as an endpoint of the - Google Pub/Sub 'Push' service. Google's PubSub service will only send over - HTTPS/TLS so this plugin must be behind a valid proxy or must be configured to use TLS. - introduced: 1.10.0 - tags: [linux, macos, windows, cloud, messaging] - - - name: Google Cloud Storage - id: google_cloud_storage - description: | - The Google Cloud Storage input plugin collects metrics by iterating files - located on a cloud storage bucket. 
- introduced: 1.25.0 - tags: [linux, macos, windows, storage, cloud] - - - name: Graylog - id: graylog - description: | - The Graylog input plugin can collect data from remote Graylog service URLs. This plugin currently supports two - types of endpoints: - - - multiple (e.g., `http://[graylog-server-ip]:12900/system/metrics/multiple`) - - namespace (e.g., `http://[graylog-server-ip]:12900/system/metrics/namespace/{namespace}`) - introduced: 1.0.0 - tags: [linux, macos, windows, logging] - - - name: HAproxy - id: haproxy - description: | - The HAproxy input plugin gathers metrics directly from any running HAproxy instance. - It can do so by using CSV generated by HAproxy status page or from admin sockets. - introduced: 0.1.5 - tags: [linux, macos, windows, networking, web] - - - name: Hashicorp Consul Agent - id: consul_agent - description: | - The Hashicorp Consul agent plugin grabs metrics from every Consul agent of the cluster. - introduced: 1.22.0 - tags: [applications] - - - name: Hashicorp Nomad - id: nomad - description: | - The Nomad plugin grabs metrics from every Nomad agent of the cluster. - introduced: 1.22.0 - tags: [applications] - - - name: Hashicorp Vault - id: vault - description: | - The Vault plugin grabs metrics from every Vault agent of the cluster. - introduced: 1.22.0 - tags: [security] - - - name: Hddtemp - id: hddtemp - description: | - The Hddtemp input plugin reads data from `hddtemp` daemons. - introduced: 1.0.0 - tags: [linux, macos, windows, systems] - - - name: HTTP - id: http - description: | - The HTTP input plugin collects metrics from one or more HTTP (or HTTPS) endpoints. - The endpoint should have metrics formatted in one of the [supported input data formats](/telegraf/v1/data_formats/input/). - Each data format has its own unique set of configuration options which can be added to the input configuration. 
- introduced: 1.6.0 - tags: [linux, macos, windows, servers, web] - - - name: HTTP Listener - id: http_listener - description: | - The `http_listener` input plugin was renamed to [`influxdb_listener`](#influxdb_listener). - The new name better describes the intended use of the plugin as an InfluxDB relay. - For general purpose transfer of metrics in any format via HTTP, use [`http_listener_v2`](#http_listener_v2) instead. - link: https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/http_listener/README.md - introduced: 1.1.0 - deprecated: 1.8.3 - tags: [linux, macos, windows, servers, web] - - - name: HTTP Listener v2 - id: http_listener_v2 - description: | - The HTTP Listener v2 input plugin listens for metrics sent via HTTP. - Metrics may be sent in any supported [Telegraf input data format](/telegraf/v1/data_formats/input/influx). - Note the plugin previously known as `http_listener` has been renamed `influxdb_listener`. - To use Telegraf as a proxy/relay for InfluxDB, we recommend using [`influxdb_listener`](/telegraf/v1/plugins/#influxdb_listener). - introduced: 1.9.0 - tags: [linux, macos, windows, servers, web] - - - name: HTTP Response - id: http_response - description: | - The HTTP Response input plugin gathers metrics for HTTP responses. - The measurements and fields include `response_time`, `http_response_code`, - and `result_type`. Tags for measurements include `server` and `method`. - introduced: 0.12.1 - tags: [linux, macos, windows, servers, web] - - - name: Hue Bridge - id: huebridge - description: | - The Hue Bridge input plugin gathers status from [Hue Bridge](https://www.philips-hue.com/) devices using the - [CLIP API](https://developers.meethue.com/develop/hue-api-v2/) device interface. - introduced: 1.34.0 - tags: [iot] - - - name: Huge Pages - id: hugepages - description: | - The Huge Pages input plugin gathers Huge pages measurements. 
Transparent Huge Pages (THP) is a Linux memory management system that reduces the overhead of Translation Lookaside Buffer (TLB) lookups on machines with large amounts of memory by using larger memory pages. - introduced: 1.22.0 - tags: [linux] - - - name: IBM DB2 - id: db2 - description: | - The IBM DB2 plugin collects metrics from DB2 RDBMS using performance monitor tables. - introduced: 1.20.3 - link: https://github.com/bonitoo-io/telegraf-input-db2/blob/main/README.md - external: true - tags: [linux, macos, windows, IBM, datastores, external] - - - name: Icinga 2 - id: icinga2 - description: | - The Icinga 2 input plugin gather status on running services and hosts using - the [Icinga 2 API](https://docs.icinga.com/icinga2/latest/doc/module/icinga2/chapter/icinga2-api). - introduced: 1.8.0 - tags: [linux, macos, windows, networking, servers, systems] - - - name: InfiniBand - id: infiniband - description: | - The InfiniBand input plugin gathers statistics for all InfiniBand devices and ports on the system. - Counters are stored in `/sys/class/infiniband//port//counters/`. - introduced: 1.14.0 - tags: [linux, systems] - - - name: InfluxDB v1.x - id: influxdb - description: | - The InfluxDB v1.x input plugin gathers metrics from the exposed InfluxDB v1.x `/debug/vars` endpoint. - Using Telegraf to extract these metrics to create a "monitor of monitors" is a - best practice and allows you to reduce the overhead associated with capturing - and storing these metrics locally within the `_internal` database for production deployments. - [Read more about this approach here](https://www.influxdata.com/blog/influxdb-debugvars-endpoint/). - introduced: 0.2.5 - tags: [linux, macos, windows, data-stores] - - - name: InfluxDB v2 - id: influxdb - description: | - InfluxDB 2.x exposes its metrics using the Prometheus Exposition Format — there is no InfluxDB v2 input - plugin. 
- - To collect data on an InfluxDB 2.x instance running on localhost, the configuration for the - Prometheus input plugin would be: - -
- - ```toml - [[inputs.prometheus]] - ## An array of urls to scrape metrics from. - urls = ["http://localhost:8086/metrics"] - ``` - introduced: 1.8.0 - tags: [linux, macos, windows, data-stores] - - - name: InfluxDB Listener - id: influxdb_listener - description: | - The InfluxDB Listener input plugin listens for requests sent - according to the [InfluxDB HTTP API](/influxdb/v1/guides/write_data/). - The intent of the plugin is to allow Telegraf to serve as a proxy, or router, - for the HTTP `/write` endpoint of the InfluxDB HTTP API. - - > This plugin was previously known as `http_listener`. - > To send general metrics via HTTP, use the [HTTP Listener v2 input plugin](#http_listener_v2) instead. - > - > This plugin is compatible with **InfluxDB 1.x** only. - - The `/write` endpoint supports the `precision` query parameter and can be - set to `ns`, `u`, `ms`, `s`, `m`, `h`. Other parameters are ignored and - defer to the output plugins configuration. - - When chaining Telegraf instances using this plugin, `CREATE DATABASE` requests - receive a `200 OK` response with message body `{"results":[]}` but they are not - relayed. The output configuration of the Telegraf instance which ultimately - submits data to InfluxDB determines the destination database. - introduced: 1.9.0 - tags: [linux, macos, windows, data-stores] - - - name: InfluxDB v2 Listener - id: influxdb_v2_listener - description: | - The InfluxDB v2 Listener input plugin listens for requests sent - according to the [InfluxDB HTTP API](/influxdb/v2/reference/api/). - The intent of the plugin is to allow Telegraf to serve as a proxy, or router, - for the HTTP `/api/v2/write` endpoint of the InfluxDB HTTP API. - - The `/api/v2/write` endpoint supports the `precision` query parameter and - can be set to `ns`, `u`, `ms`, or `s`. Other parameters are ignored and - defer to the output plugins configuration. 
- introduced: 1.16.0 - tags: [linux, macos, windows, data-stores] - - - name: Intel Baseband - id: intel_baseband - description: | - Collects metrics from both dedicated and integrated Intel devices that provide Wireless Baseband hardware acceleration - introduced: 1.27.0 - tags: [linux, systems] - - - name: Intel DLB - id: intel_dlb - description: | - The Intel DLB input plugin reads metrics from DPDK using the telemetry v2 interface. - introduced: 1.25.0 - tags: [linux, systems] - - - name: Intel PMT - id: intel_pmt - description: | - Intel Platform Monitoring Technology plugin exposes Intel PMT metrics available through the Intel PMT kernel space. - introduced: 1.28.0 - tags: [linux, systems] - - - name: Intel PMU - id: intel_pmu - description: | - The Intel PMU input plugin exposes Intel PMU (Performance Monitoring Unit) metrics available through Linux Perf subsystem. - introduced: 1.21.0 - tags: [linux, systems] - - - name: Intel Powerstat - id: intel_powerstat - description: | - The Intel Powerstat input plugin collects information provided by the monitoring features of Intel Powerstat. - introduced: 1.17.0 - tags: [linux] - - - name: Intel Data Plane Development Kit (DPDK) - id: dpdk - description: | - The DPDK plugin collects metrics exposed by applications built with Data Plane Development Kit, an extensive set of open source libraries designed for accelerating packet processing workloads. - introduced: 1.19.0 - tags: [networking] - - - name: Intel RDT - id: intel_rdt - description: | - The Intel RDT input plugin collects information provided by the monitoring features of Intel Resource Director Technology (RDT). - introduced: 1.16.0 - tags: [linux, macos, windows, systems] - - - name: Internet Speed Monitor - id: internet_speed - description: | - The Internet Speed Monitor plugin collects data about the internet speed on the system. 
- introduced: 1.20.0 - tags: [linux, macos, windows, systems, iot, networking] - - - name: Interrupts - id: interrupts - description: | - The Interrupts input plugin gathers metrics about IRQs, including `interrupts` - (from `/proc/interrupts`) and `soft_interrupts` (from `/proc/softirqs`). - introduced: 1.3.0 - tags: [linux, macos, windows, systems] - - - name: IPMI Sensor - id: ipmi_sensor - description: | - The IPMI Sensor input plugin queries the local machine or remote host - sensor statistics using the `ipmitool` utility. - introduced: 0.12.0 - tags: [linux, macos, windows, iot] - - - name: Ipset - id: ipset - description: | - The Ipset input plugin gathers packets and bytes counters from Linux `ipset`. - It uses the output of the command `ipset save`. Ipsets created without the `counters` option are ignored. - introduced: 1.6.0 - tags: [linux, macos, windows, networking, security, systems] - - - name: IPtables - id: iptables - description: | - The IPtables input plugin gathers packets and bytes counters for rules within - a set of table and chain from the Linux iptables firewall. - introduced: 1.1.0 - tags: [linux, macos, windows, systems] - - - name: IPVS - id: ipvs - description: | - The IPVS input plugin uses the Linux kernel netlink socket interface to - gather metrics about IPVS virtual and real servers. - introduced: 1.9.0 - tags: [linux, macos, windows, systems] - - - name: Jenkins - id: jenkins - description: | - The Jenkins input plugin gathers information about the nodes and jobs running - in a jenkins instance. - - This plugin does not require a plugin on Jenkins and it makes use of Jenkins - API to retrieve all the information needed. 
- introduced: 1.9.0 - tags: [linux, macos, windows, build-deploy] - - - name: Jolokia2 Agent - id: jolokia2_agent - description: | - The Jolokia2 Agent input plugin reads JMX metrics from one or more - [Jolokia](https://jolokia.org/) agent REST endpoints using the - [JSON-over-HTTP protocol](https://jolokia.org/reference/html/manual/jolokia_protocol.html). - link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/jolokia2_agent/README.md - introduced: 1.5.0 - tags: [linux, macos, windows, networking] - - - name: Jolokia2 Proxy - id: jolokia2_proxy - description: | - The Jolokia2 Proxy input plugin reads JMX metrics from one or more targets by - interacting with a [Jolokia](https://jolokia.org/) proxy REST endpoint using the - [Jolokia](https://jolokia.org/) [JSON-over-HTTP protocol](https://jolokia.org/reference/html/protocol.html). - link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/jolokia2/README.md - introduced: 1.5.0 - tags: [linux, macos, windows, networking] - - - name: JTI OpenConfig Telemetry - id: jti_openconfig_telemetry - description: | - The JTI OpenConfig Telemetry input plugin reads Juniper Networks implementation - of OpenConfig telemetry data from listed sensors using the Junos Telemetry Interface. - Refer to [openconfig.net](http://openconfig.net/) for more details about OpenConfig - and [Junos Telemetry Interface (JTI)](https://www.juniper.net/documentation/en_US/junos/topics/concept/junos-telemetry-interface-oveview.html). - introduced: 1.7.0 - tags: [linux, macos, windows, iot] - - - name: Kapacitor - id: kapacitor - description: | - The Kapacitor input plugin will collect metrics from the given Kapacitor instances. - introduced: 1.3.0 - tags: [linux, macos, windows, applications] - - - name: Kernel - id: kernel - description: | - The Kernel input plugin gathers kernel statistics from `/proc/stat`. 
- introduced: 0.11.0 - tags: [linux, macos, windows, systems] - - - name: Kernel VMStat - id: kernel_vmstat - description: | - The Kernel VMStat input plugin gathers kernel statistics from `/proc/vmstat`. - introduced: 1.0.0 - tags: [linux, macos, windows, systems] - - - name: Kibana - id: kibana - description: | - The Kibana input plugin queries the Kibana status API to obtain the health - status of Kibana and some useful metrics. - introduced: 1.8.0 - tags: [linux, macos, windows, applications] - - - name: Knot - id: knot - description: | - The Knot input plugin collects stats from [Knot DNS](https://knot.readthedocs.io/en/master/operation.html#statistics). - link: https://github.com/x70b1/telegraf-knot/blob/master/README.md - introduced: 1.21.4 - tags: [server, external] - external: true - - - name: KNX - id: knx_listener - description: | - The KNX input plugin listens for messages on the KNX (Konnex) home-automation bus. - introduced: 1.19.0 - tags: [iot] - - - name: Kubernetes - id: kubernetes - description: | - > The Kubernetes input plugin is experimental and may cause high cardinality - > issues with moderate to large Kubernetes deployments. - - The Kubernetes input plugin talks to the kubelet API using the `/stats/summary` - endpoint to gather metrics about the running pods and containers for a single host. - It is assumed that this plugin is running as part of a daemonset within a - Kubernetes installation. This means that Telegraf is running on every node within the cluster. - Therefore, you should configure this plugin to talk to its locally running kubelet. 
- introduced: 1.1.0 - tags: [linux, macos, windows, build-deploy, containers] - - - name: Kubernetes Inventory - id: kube_inventory - description: | - The Kubernetes Inventory input plugin generates metrics derived from the state - of the following Kubernetes resources: - - - daemonsets - - deployments - - nodes - - persistentvolumes - - persistentvolumeclaims - - pods (containers) - - statefulsets - - introduced: 1.10.0 - tags: [linux, macos, windows, build-deploy, containers] - - - name: LDAP - id: ldap - description: | - This plugin gathers metrics from LDAP servers' monitoring (cn=Monitor) backend. - introduced: 1.29.0 - tags: [linux, macos, windows, build-deploy, containers] - - - name: ldap_org - id: ldap_org - description: | - This plugin monitors the number of entries inside LDAP trees. - introduced: 1.19.0 - link: https://github.com/falon/CSI-telegraf-plugins/blob/master/plugins/inputs/ldap_org/README.md - tags: [external] - external: true - - - name: LeoFS - id: leofs - description: | - The LeoFS input plugin gathers metrics of LeoGateway, LeoManager, and LeoStorage using SNMP. - See [System monitoring](https://leo-project.net/leofs/docs/admin/system_admin/monitoring/) - in the [LeoFS documentation](https://leo-project.net/leofs/docs/) for more information. - introduced: 0.1.5 - tags: [linux, macos, windows, systems, data-stores] - - - name: Libvirt - id: libvirt - description: | - The Libvirt plugin collects statistics from virtualized guests using - virtualization libvirt API. - introduced: 1.25.0 - tags: [linux, systems] - - - name: Linux CPU - id: linux_cpu - description: | - The Linux CPU input plugin gathers CPU metrics exposed on Linux-based systems. - introduced: 1.24.0 - tags: [linux, systems] - - - name: Linux Sysctl FS - id: linux_sysctl_fs - description: | - The Linux Sysctl FS input plugin provides Linux system level file (`sysctl fs`) metrics. 
- The documentation on these fields can be found [here](https://www.kernel.org/doc/Documentation/sysctl/fs.txt). - introduced: 1.3.0 - tags: [linux, macos, windows, systems] - - - name: Logical Volume Manager - id: lvm - description: | - The Logical Volume Manager collects information about physical volumes, volume groups, and logical volumes in Linux. - introduced: 1.21.0 - tags: [linux, systems] - - - name: Logparser - id: logparser - description: | - The Logparser input plugin streams and parses the given log files. - Currently, it has the capability of parsing "grok" patterns - from log files, which also supports regular expression (regex) patterns. - introduced: 1.0.0 - tags: [linux, macos, windows, logging] - - - name: Logstash - id: logstash - description: | - The Logstash input plugin reads metrics exposed by the [Logstash Monitoring API](https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html). - The plugin supports Logstash 5 and later. - introduced: 1.12.0 - tags: [linux, macos, windows, logging] - - - name: Lustre2 - id: lustre2 - description: | - Lustre Jobstats allows for RPCs to be tagged with a value, such as a job's ID. - This allows for per job statistics. - The Lustre2 input plugin collects statistics and tags the data with the `jobid`. - introduced: 0.1.5 - tags: [linux, macos, windows, systems] - - - name: Mailchimp - id: mailchimp - description: | - The Mailchimp input plugin gathers metrics from the `/3.0/reports` MailChimp API. - introduced: 0.2.4 - tags: [linux, macos, windows, cloud, web] - - - name: MarkLogic - id: marklogic - description: | - The MarkLogic input plugin gathers health status metrics from one or more MarkLogic hosts. - introduced: 1.12.0 - tags: [linux, macos, windows, data-stores] - - - name: Mcrouter - id: mcrouter - description: | - The Mcrouter input plugin gathers statistics data from a mcrouter instance. 
- [Mcrouter](https://github.com/facebook/mcrouter) is a memcached protocol router, - developed and maintained by Facebook, for scaling memcached (http://memcached.org/) deployments. - It's a core component of cache infrastructure at Facebook and Instagram where mcrouter - handles almost 5 billion requests per second at peak. - introduced: 1.7.0 - tags: [linux, macos, windows, networking] - - - name: Mdstat - id: mdstat - description: | - The mdstat plugin gathers statistics about any Linux MD RAID arrays configured on the host by reading /proc/mdstat. - introduced: 1.20.0 - tags: [linux, macos, windows, systems] - - - name: Mem - id: mem - description: | - The Mem input plugin collects system memory metrics. - For a more complete explanation of the difference between used and actual_used RAM, - see [Linux ate my ram](https://www.linuxatemyram.com/). - introduced: 0.1.5 - tags: [linux, macos, windows, systems] - - - name: Memcached - id: memcached - description: | - The Memcached input plugin gathers statistics data from a Memcached server. - introduced: 0.1.2 - tags: [linux, macos, windows, data-stores] - - - name: Mesosphere DC/OS - id: dcos - description: | - The Mesosphere DC/OS input plugin gathers metrics from a DC/OS cluster's - [metrics component](https://docs.mesosphere.com/1.10/metrics/). - introduced: 1.5.0 - tags: [linux, macos, windows, containers] - - - name: Microsoft SQL Server - id: sqlserver - description: | - The Microsoft SQL Server input plugin provides metrics for your Microsoft SQL Server instance. - It currently works with SQL Server versions 2008+. - Recorded metrics are lightweight and use Dynamic Management Views supplied by SQL Server. - introduced: 0.10.1 - tags: [linux, macos, windows, data-stores] - - - name: Minecraft - id: minecraft - description: | - The Minecraft input plugin uses the RCON protocol to collect statistics from - a scoreboard on a Minecraft server. 
- introduced: 1.4.0 - tags: [linux, macos, windows, gaming] - - - name: Mock - id: mock - description: | - The mock input plugin generates random data based on a selection of different algorithms. - introduced: 1.22.0 - tags: [build-deploy] - - - name: Modbus - id: modbus - description: | - The Modbus input plugin collects `discrete_inputs`, `coils`, `input_registers` and `holding_registers` - via Modbus TCP or Modbus RTU/ASCII. - introduced: 1.14.0 - tags: [linux, macos, windows, networking] - - - name: MongoDB - id: mongodb - description: | - The MongoDB input plugin collects MongoDB stats exposed by `serverStatus` and - few more and create a single measurement containing values. - introduced: 0.1.5 - tags: [linux, macos, windows, data-stores] - - - name: Monit - id: monit - description: | - The Monit input plugin gathers metrics and status information about local processes, remote hosts, files, - file systems, directories, and network interfaces managed and watched by Monit. To use this plugin, - enable the [HTTPD TCP port](https://mmonit.com/monit/documentation/monit.html#TCP-PORT) in Monit. - introduced: 1.14.0 - tags: [linux, macos, windows, systems, networking] - - - name: MQTT Consumer - id: mqtt_consumer - description: | - The MQTT Consumer input plugin reads from specified MQTT topics and adds messages to InfluxDB. - Messages are in the [Telegraf input data formats](/telegraf/v1/data_formats/input/). - introduced: 0.10.3 - tags: [linux, macos, windows, messaging, iot] - - - name: Multifile - id: multifile - description: | - The Multifile input plugin allows Telegraf to combine data from multiple files - into a single metric, creating one field or tag per file. - This is often useful creating custom metrics from the `/sys` or `/proc` filesystems. - - > To parse metrics from a single file formatted in one of the supported - > [input data formats](/telegraf/v1/data_formats/input), - > use the [file input plugin](#file). 
- introduced: 1.10.0 - tags: [linux, macos, windows] - - - name: MySQL - id: mysql - description: | - The MySQL input plugin gathers the statistics data from MySQL, MariaDB, and Percona servers. - introduced: 0.1.1 - tags: [linux, macos, windows, data-stores] - - - name: NATS Consumer - id: nats_consumer - description: | - The NATS Consumer input plugin reads from specified NATS subjects and adds messages to InfluxDB. - Messages are expected in the [Telegraf input data formats](/telegraf/v1/data_formats/input/). - A Queue Group is used when subscribing to subjects so multiple instances of Telegraf - can read from a NATS cluster in parallel. - introduced: 0.10.3 - tags: [linux, macos, windows, messaging] - - - name: NATS Server Monitoring - id: nats - description: | - The NATS Server Monitoring input plugin gathers metrics when using the - [NATS Server monitoring server](https://docs.nats.io/running-a-nats-service/introduction). - introduced: 1.6.0 - tags: [linux, macos, windows, messaging] - - - name: Neptune Apex - id: neptune_apex - description: | - The Neptune Apex input plugin collects real-time data from the Apex `status.xml` page. - The Neptune Apex controller family allows an aquarium hobbyist to monitor and - control their tanks based on various probes. - The data is taken directly from the `/cgi-bin/status.xml` at the interval specified - in the `telegraf.conf` configuration file. - introduced: 1.10.0 - tags: [linux, macos, windows, iot] - - - name: Net - id: net - description: | - The Net input plugin gathers metrics about network interface usage (Linux only). - link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/net/README.md - introduced: 0.1.1 - tags: [linux, macos, networking] - - - name: Netflow - id: netflow - description: | - The Netflow input plugin gathers metrics from Netflow v5, Netflow v9 and - IPFIX collectors. 
- introduced: 1.25.0 - tags: [linux, networking] - - - name: Netstat - id: netstat - description: | - The Netstat input plugin gathers TCP metrics such as established, time-wait - and sockets counts by using `lsof`. - link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/netstat/README.md - introduced: 0.2.0 - tags: [linux, macos, windows, networking, systems] - - - name: Network Response - id: net_response - description: | - The Network Response input plugin tests UDP and TCP connection response time. - It can also check response text. - introduced: 0.10.3 - tags: [linux, macos, windows, networking] - - - name: NFS - id: nfsclient - description: | - The NFS input plugin collects data from an NFS Client per-mount statistics (`/proc/self/mountstats`). By default, the plugin collects only a limited number of general system-level metrics. - introduced: 1.18.0 - tags: [linux, macos, windows, networking, systems] - - - name: NGINX - id: nginx - description: | - The NGINX input plugin reads NGINX basic status information (`ngx_http_stub_status_module`). - introduced: 0.1.5 - tags: [linux, macos, windows, servers, web] - - - name: NGINX VTS - id: nginx_vts - description: | - The NGINX VTS input plugin gathers NGINX status using external virtual host - traffic status module - https://github.com/vozlt/nginx-module-vts. - This is an NGINX module that provides access to virtual host status information. - It contains the current status such as servers, upstreams, caches. - This is similar to the live activity monitoring of NGINX Plus. - For module configuration details, see the - [NGINX VTS module documentation](https://github.com/vozlt/nginx-module-vts#synopsis). - introduced: 1.9.0 - tags: [linux, macos, windows, servers, web] - - - name: NGINX Plus - id: nginx_plus - description: | - The NGINX Plus input plugin is for NGINX Plus, the commercial version of the open source web server NGINX. - To use this plugin you will need a license. 
- For more information, see [What’s the Difference between Open Source NGINX and NGINX Plus?](https://www.nginx.com/blog/whats-difference-nginx-foss-nginx-plus/). - - Structures for NGINX Plus have been built based on history of - [status module documentation](http://nginx.org/en/docs/http/ngx_http_status_module.html). - introduced: 1.5.0 - tags: [linux, macos, windows, servers, web] - - - name: NGINX Plus API - id: nginx_plus_api - description: | - The NGINX Plus API input plugin gathers advanced status information for NGINX Plus servers. - introduced: 1.9.0 - tags: [linux, macos, windows, servers, web] - - - name: NGINX Stream STS - id: nginx_sts - description: | - The NGINX Stream STS input plugin gathers NGINX status using external virtual host traffic status. - introduced: 1.15.0 - tags: [linux, macos, windows, servers, web] - - - name: NGINX Upstream Check - id: nginx_upstream_check - description: | - The NGINX Upstream Check input plugin reads the status output of the - [nginx_upstream_check](https://github.com/yaoweibin/nginx_upstream_check_module). - This module can periodically check the NGINX upstream servers using the configured - request and interval to determine if the server is still available. - If checks fail, then the server is marked as `down` and will not receive - any requests until the check passes and the server will be marked as `up` again. - - The status page displays the current status of all upstreams and servers as well - as the number of failed and successful checks. This information can be exported - in JSON format and parsed by this input. - introduced: 1.10.0 - tags: [linux, macos, windows, servers, web] - - - name: NSD - id: nsd - description: | - The NSD input plugin collects metrics from [NSD](https://www.nlnetlabs.nl/projects/nsd/about/), an authoritative DNS name server. 
- introduced: 1.0.0 - tags: [linux, macos, windows, web, servers] - - - name: NSDP - id: nsdp - description: | - The NSDP plugin gathers metrics from devices via - [Netgear Switch Discovery Protocol (NSDP)](https://en.wikipedia.org/wiki/Netgear_Switch_Discovery_Protocol) for all available switches and ports. - introduced: 1.34.0 - tags: [linux, macos, windows, networking, systems] - - - name: NSQ - id: nsq - description: | - The NSQ input plugin collects metrics from NSQD API endpoints. - introduced: 1.16.0 - tags: [linux, macos, windows, messaging] - - - name: NSQ Consumer - id: nsq_consumer - description: | - The NSQ Consumer input plugin polls a specified NSQD topic and adds messages to InfluxDB. - This plugin allows a message to be in any of the supported data_format types. - introduced: 0.10.1 - tags: [linux, macos, windows, messaging] - - - name: Nstat - id: nstat - description: | - The Nstat input plugin collects network metrics from `/proc/net/netstat`, - `/proc/net/snmp`, and `/proc/net/snmp6` files. - introduced: 0.13.1 - tags: [linux, macos, windows, networking, systems] - - - name: NTPq - id: ntpq - description: | - The NTPq input plugin gets standard NTP query metrics, requires ntpq executable. - introduced: 0.11.0 - tags: [linux, macos, windows, networking, systems] - - - name: NVIDIA SMI - id: nvidia_smi - description: | - The NVIDIA SMI input plugin uses a query on the [NVIDIA System Management Interface - (`nvidia-smi`)](https://developer.nvidia.com/nvidia-system-management-interface) - binary to pull GPU stats including memory and GPU usage, temp and other. - introduced: 1.7.0 - tags: [linux, macos, windows, systems] - - - name: Octoprint - id: octoprint - description: | - The Octoprint input plugin gathers metrics from the [Octoprint API](https://docs.octoprint.org/en/master/api/index.html). 
- introduced: 1.16.0 - link: https://github.com/sspaink/octoprint-telegraf-plugin/blob/master/README.md - tags: [linux, macos, windows, external] - external: true - - - name: OPC UA - id: opcua - description: | - The OPC UA plugin gathers metrics from client devices using the [OPC Foundation's Unified Architecture (UA)](https://opcfoundation.org/about/opc-technologies/opc-ua/) machine-to-machine communication protocol for industrial automation. - introduced: 1.16.0 - tags: [linux, macos, windows, iot] - - - name: OPC UA Listener - id: opcua_listener - description: | - The OPC UA plugin gathers metrics from subscriptions to OPC UA devices. - introduced: 1.25.0 - tags: [linux, macos, windows, iot] - - - name: OpenSearch Query - id: opensearch_query - description: | - Gathers metrics from OpenSearch query endpoints. - introduced: 1.26.0 - tags: [linux, macos, windows] - - - name: OpenLDAP - id: openldap - description: | - The OpenLDAP input plugin gathers metrics from OpenLDAP's `cn=Monitor` backend. - introduced: 1.4.0 - tags: [linux, macos, windows, data-stores] - - - name: OpenNTPD - id: openntpd - description: | - The OpenNTPD input plugin gathers standard Network Time Protocol (NTP) query - metrics from OpenNTPD using the `ntpctl` command. - introduced: 1.12.0 - tags: [linux, macos, windows, networking] - - - name: OpenSMTPD - id: opensmtpd - description: | - The OpenSMTPD input plugin gathers stats from [OpenSMTPD](https://www.opensmtpd.org/), - a free implementation of the server-side SMTP protocol. - introduced: 1.5.0 - tags: [linux, macos, windows, applications] - - - name: OpenStack - id: openstack - description: | - The OpenStack input plugin collects metrics on [OpenStack services](https://www.openstack.org/). - introduced: 1.21.0 - tags: [linux, macos, windows, applications] - - - name: OpenTelemetry - id: opentelemetry - description: | - Receives traces, metrics and logs from OpenTelemetry clients and agents via gRPC. 
- introduced: 1.19.0 - tags: [logging, messaging] - - - name: OpenWeatherMap - id: openweathermap - description: | - Collect current weather and forecast data from OpenWeatherMap. - introduced: 1.11.0 - tags: [linux, macos, windows, applications] - - - name: Oracle - id: oracle - description: | - The Oracle plugin collects metrics from Oracle RDBMS using Dynamic Performance Views. - introduced: 1.20.3 - link: https://github.com/bonitoo-io/telegraf-input-oracle/blob/main/README.md - external: true - tags: [linux, macos, windows, oracle, datastores, external] - - - name: P4 Runtime - id: p4runtime - description: | - Collects metrics from P4 programmable pipelines over gRPC. - introduced: 1.26.0 - tags: [linux, macos, windows] - - - name: PF - id: pf - description: | - The PF input plugin gathers information from the FreeBSD/OpenBSD pf firewall. - Currently it can retrieve information about the state table: the number of current - entries in the table, and counters for the number of searches, inserts, and removals - to the table. The pf plugin retrieves this information by invoking the `pfstat` command. - introduced: 1.5.0 - tags: [linux, macos, windows, networking, security] - - - name: PgBouncer - id: pgbouncer - description: | - The PgBouncer input plugin provides metrics for your PgBouncer load balancer. - For information about the metrics, see the [PgBouncer documentation](https://pgbouncer.github.io/usage.html). - introduced: 1.8.0 - tags: [linux, macos, windows, data-stores] - - - name: Phusion Passenger - id: passenger - description: | - The Phusion Passenger input plugin gets Phusion Passenger statistics using - their command line utility `passenger-status`. - introduced: 0.10.1 - tags: [linux, macos, windows, web] - - - name: PHP-FPM - id: phpfpm - description: | - The PHP-FPM input plugin gets phpfpm statistics using either HTTP status page or fpm socket. 
- introduced: 0.1.10 - tags: [linux, macos, windows, servers, web] - - - name: Ping - id: ping - description: | - The Ping input plugin measures the round-trip for ping commands, response time, - and other packet statistics. - introduced: 0.1.8 - tags: [linux, macos, windows, networking] - - - name: Plex Webhook - id: plex - description: | - The Plex Webhook input plugin listens for events from [Plex Media Server Webhooks](https://support.plex.tv/articles/115002267687-webhooks/). - introduced: 1.18.0 - link: https://github.com/russorat/telegraf-webhooks-plex/blob/master/README.md - tags: [linux, macos, windows, applications, external] - external: true - - - name: Postfix - id: postfix - description: | - The Postfix input plugin reports metrics on the postfix queues. - For each of the active, hold, incoming, maildrop, and deferred - [queues](http://www.postfix.org/QSHAPE_README.html#queues), - it will report the queue length (number of items), - size (bytes used by items), and age (age of oldest item in seconds). - introduced: 1.5.0 - tags: [linux, macos, windows, services, web] - - - name: PostgreSQL - id: postgresql - description: | - The PostgreSQL input plugin provides metrics for your PostgreSQL database. - It currently works with PostgreSQL versions 8.1+. - It uses data from the built-in `pg_stat_database` and `pg_stat_bgwriter` views. - The metrics recorded depend on your version of PostgreSQL. - introduced: 0.10.3 - tags: [linux, macos, windows, data-stores] - - - name: PostgreSQL Extensible - id: postgresql_extensible - description: | - This PostgreSQL Extensible input plugin provides metrics for your Postgres database. - It has been designed to parse SQL queries in the plugin section of `telegraf.conf` files. - introduced: 0.12.0 - tags: [linux, macos, windows, data-stores] - - - name: PowerDNS - id: powerdns - description: | - The PowerDNS input plugin gathers metrics about PowerDNS using UNIX sockets. 
- introduced: 0.10.2 - tags: [linux, macos, windows, networking, web] - - - name: PowerDNS Recursor - id: powerdns_recursor - description: | - The PowerDNS Recursor input plugin gathers metrics about PowerDNS Recursor using UNIX sockets. - introduced: 1.11.0 - tags: [linux, macos, windows, networking, web] - - - name: Processes - id: processes - description: | - The Processes input plugin gathers info about the total number of processes - and groups them by status (zombie, sleeping, running, etc.). On Linux, this - plugin requires access to `procfs` (`/proc`); on other operating systems, - it requires access to execute `ps`. - introduced: 0.11.0 - tags: [linux, macos, windows, systems] - - - name: Procstat - id: procstat - description: | - The Procstat input plugin monitors system resource usage of individual - processes using their `/proc` data. - - Processes can be specified either by `pid` file, by executable name, by command - line pattern matching, by username, by systemd unit name, or by cgroup name/path - (in this order of priority). This plugin uses `pgrep` when an executable name is - provided to obtain the `pid`. The Procstat plugin transmits IO, memory, cpu, - file descriptor-related measurements for every process specified. A prefix can - be set to isolate individual process specific measurements. - - The Procstat input plugin will tag processes according to how they are specified - in the configuration. If a pid file is used, a "pidfile" tag will be generated. - On the other hand, if an executable is used, an "exe" tag will be generated. - introduced: 0.2.0 - tags: [linux, macos, windows, systems] - - - name: Prometheus Format - id: prometheus - description: | - The Prometheus Format input plugin gathers metrics from HTTP - servers exposing metrics in Prometheus format. 
- introduced: 0.2.1 - tags: [linux, macos, windows, applications] - - - name: Proxmox - id: proxmox - description: | - The Proxmox plugin gathers metrics about containers and VMs using the [Proxmox API](https://pve.proxmox.com/wiki/Proxmox_VE_API). - introduced: 1.16.0 - tags: [linux, macos, windows] - - - name: PSI - id: psi - description: | - The PSI input plugins push pressure stall information (PSI) from the Linux Kernel to InfluxDB. - introduced: 1.22.1 - link: https://github.com/gridscale/linux-psi-telegraf-plugin/blob/main/README.md - tags: [linux, external] - external: true - - - name: Puppet Agent - id: puppetagent - description: | - The Puppet Agent input plugin collects variables outputted from the `last_run_summary.yaml` - file usually located in `/var/lib/puppet/state/` Puppet Agent Runs. For more information, see - [Puppet Monitoring: How to Monitor the Success or Failure of Puppet Runs](https://puppet.com/blog/puppet-monitoring-how-to-monitor-success-or-failure-of-puppet-runs) - introduced: 0.2.0 - tags: [linux, macos, windows, build-deploy] - - - name: RabbitMQ - id: rabbitmq - description: | - The RabbitMQ input plugin reads metrics from RabbitMQ servers via the - [Management Plugin](https://www.rabbitmq.com/management.html). - introduced: 0.1.5 - tags: [linux, macos, windows, messaging] - - - name: Radius - id: radius - description: | - Collects authentication response time metrics from Radius. - introduced: 1.26.0 - tags: [linux, macos, windows] - - - name: Raindrops Middleware - id: raindrops - description: | - The Raindrops Middleware input plugin reads from the specified - [Raindrops middleware](http://raindrops.bogomips.org/Raindrops/Middleware.html) - URI and adds the statistics to InfluxDB. 
- introduced: 0.10.3 - tags: [linux, macos, windows, servers, web] - - - name: RAS - id: ras - description: | - The RAS input plugin gathers and counts errors provided by [RASDaemon](https://github.com/mchehab/rasdaemon), a RAS (reliability, availability, and serviceability) logging tool. - introduced: 1.16.0 - tags: [linux, macos, windows, servers, web] - - - name: RavenDB - id: ravendb - description: | - The RavenDB input plugin reads metrics from [RavenDB](https://ravendb.net/). - introduced: 1.18.0 - tags: [linux, macos, windows, data-stores] - - - name: Redfish - id: redfish - description: | - The Redfish input plugin gathers metrics and status information of hardware servers for which [DMTF's Redfish](https://redfish.dmtf.org/) is enabled. - introduced: 1.15.0 - tags: [linux, macos, windows, servers, networking] - - - name: Redis - id: redis - description: | - The Redis input plugin gathers the results of the INFO Redis command. - There are two separate measurements: `redis` and `redis_keyspace`, - the latter is used for gathering database-related statistics. - - Additionally the plugin also calculates the hit/miss ratio (`keyspace_hitrate`) - and the elapsed time since the last RDB save (`rdb_last_save_time_elapsed`). - introduced: 0.1.1 - tags: [linux, macos, windows, data-stores] - - - name: Redis sentinel - id: redis_sentinel - description: | - A plugin for Redis Sentinel to monitor multiple Sentinel instances that - are monitoring multiple Redis servers and replicas. - introduced: 1.22.0 - tags: [linux, macos, windows] - - - name: RethinkDB - id: rethinkdb - description: | - The RethinkDB input plugin works with RethinkDB 2.3.5+ databases that requires - username, password authorization, and Handshake protocol v1.0. - introduced: 0.1.3 - tags: [linux, macos, windows, data-stores] - - - name: Riak - id: riak - description: | - The Riak input plugin gathers metrics from one or more Riak instances. 
- introduced: 0.10.4 - tags: [linux, macos, windows, data-stores] - - - name: Riemann Listener - id: riemann_listener - description: | - The Riemann Listener input plugin listens for messages from Riemann clients using Riemann-Protobuff format. - introduced: 1.17.0 - tags: [linux, macos, windows] - - name: AMD ROCm System Management Interface (SMI) id: amd_rocm_smi description: | - The AMD ROCm System Management Interface plugin pulls statistics from AMD GPUs including memory, usage, and temperature. - introduced: 1.20.0 - tags: [linux, macos, windows, systems] - - - name: Salesforce - id: salesforce - description: | - The Salesforce input plugin gathers metrics about the limits in your Salesforce - organization and the remaining usage. - It fetches its data from the limits endpoint of the Salesforce REST API. - introduced: 1.4.0 - tags: [linux, macos, windows, applications, cloud] - - - name: Sensors - id: sensors - description: | - The Sensors input plugin collects sensor metrics with the sensors - executable from the `lm-sensor` package. - introduced: 0.10.1 - tags: [linux, macos, windows, iot] - - - name: SFlow - id: sflow - description: | - The SFlow input plugin provides support for acting as an SFlow V5 collector - in accordance with the [sflow.org](https://sflow.org/) specification. - introduced: 1.14.0 - tags: [linux, macos, windows, networking] - - - name: Siemens S7 Comm - id: s7comm - description: | - This plugin gathers information from Siemens PLC (Programmable Logic Controller). - introduced: 1.28.0 - link: https://github.com/nicolasme/s7comm/blob/main/README.md - tags: [linux, macos, windows, iot] - - - name: Slab - id: slab - description: | - This plugin collects details on how much memory each entry in Slab cache is consuming. 
- introduced: 1.23.0 - link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/slab/README.md - tags: [linux, system] - - - name: SLURM - id: slurm - description: | - This plugin gathers diag, jobs, nodes, partitions and reservation metrics - by leveraging SLURM's REST API as provided by the slurmrestd daemon. - introduced: 1.32.0 - tags: [linux, macos, windows] - - - name: S.M.A.R.T. - id: smart - description: | - The SMART input plugin gets metrics using the command line utility `smartctl` - for SMART (Self-Monitoring, Analysis and Reporting Technology) storage devices. - SMART is a monitoring system included in computer hard disk drives (HDDs) - and solid-state drives (SSDs), which include most modern ATA/SATA, SCSI/SAS and NVMe disks. - The plugin detects and reports on various indicators of drive reliability, - with the intent of enabling the anticipation of hardware failures. - See [smartmontools](https://www.smartmontools.org/). - introduced: 1.5.0 - tags: [linux, macos, windows, systems] - - - name: smartctl - id: smartctl - description: | - The SMART input plugin gets metrics using the command line utility `smartctl` - for SMART (Self-Monitoring, Analysis and Reporting Technology) storage devices. - This plugin parses and uses the JSON output from `smartctl`. This is only - available in newer versions of the tool. - introduced: 1.31.0 - tags: [linux, macos, windows, systems] - - - name: SNMP - id: snmp - description: | - The SNMP input plugin gathers metrics from SNMP agents. - introduced: 0.10.1 - tags: [linux, macos, windows, networking] - - - name: SNMP Trap - id: snmp_trap - description: | - The SNMP Trap plugin receives SNMP notifications (traps and inform requests). - Notifications are received over UDP on a configurable port. - Resolve OIDs to strings using system MIB files (just like with the [SNMP input plugin](#snmp)). 
- introduced: 1.13.0 - tags: [linux, macos, windows, networking] - - - name: Socket Listener - id: socket_listener - description: | - The Socket Listener input plugin listens for messages from streaming (TCP, UNIX) - or datagram (UDP, unixgram) protocols. Messages are expected in the - [Telegraf Input Data Formats](/telegraf/v1/data_formats/input/). - introduced: 1.3.0 - tags: [linux, macos, windows, networking] - - - name: Socketstat - id: socketstat - description: | - The Socketstat input plugin gathers indicators from established socket connections. - introduced: 1.22.0 - tags: [linux] - - - name: SQL - id: sql - description: | - The SQL plugin reads and ingests SQL data from a variety of SQL databases into InfluxDB. - introduced: 1.19.0 - tags: [linux, macos, windows, data-stores] - - - name: Stackdriver - id: stackdriver - description: | - The Stackdriver input plugin gathers metrics from the - [Stackdriver Monitoring API](https://cloud.google.com/monitoring/api/v3/). - - > This plugin accesses APIs that are [chargeable](https://cloud.google.com/stackdriver/pricing#monitoring-costs). - > You may incur costs. - introduced: 1.10.0 - tags: [linux, macos, windows, cloud] - - - name: StatsD - id: statsd - description: | - The StatsD input plugin is a special type of plugin which runs a backgrounded - `statsd` listener service while Telegraf is running. - StatsD messages are formatted as described in the original - [etsy statsd](https://github.com/etsy/statsd/blob/master/docs/metric_types.md) implementation. - introduced: 0.2.0 - tags: [linux, macos, windows, applications] - - - name: Supervisor - id: supervisor - description: | - The supervisor input gathers information about processes that are running under supervisor using XML-RPC API. 
- introduced: 1.24.0 - tags: [linux, macos, windows] - - - name: Suricata - id: suricata - description: | - The Suricata input plugin reports internal performance counters of the Suricata - IDS/IPS engine, such as captured traffic volume, memory usage, uptime, flow counters, and more. - It provides a socket for the Suricata log output to write JSON output to - and processes the incoming data to fit Telegraf's format. - introduced: 1.13.0 - tags: [linux, macos, windows, networking] - - - name: Swap - id: swap - description: | - Supports: Linux only. - - The Swap input plugin gathers metrics about swap memory usage. - For more information about Linux swap spaces, see - [All about Linux swap space](https://www.linux.com/news/all-about-linux-swap-space) - - introduced: 1.7.0 - tags: [linux, macos, systems] - - - name: Synproxy - id: synproxy - description: | - The Synproxy plugin gathers synproxy metrics. - Synproxy is a Linux netfilter module used for SYN attack mitigation. - introduced: 1.13.0 - tags: [linux, macos, windows, networking] - - - name: Syslog - id: syslog - description: | - The Syslog input plugin listens for syslog messages transmitted over - [UDP](https://tools.ietf.org/html/rfc5426) or [TCP](https://tools.ietf.org/html/rfc5425). - Syslog messages should be formatted according to [RFC 5424](https://tools.ietf.org/html/rfc5424). - introduced: 1.7.0 - tags: [linux, macos, windows, logging, systems] - - - name: Sysstat - id: sysstat - description: | - The Sysstat input plugin collects [sysstat](https://github.com/sysstat/sysstat) - system metrics with the sysstat collector utility `sadc` and parses the created - binary data file with the `sadf` utility. - introduced: 0.12.1 - tags: [linux, macos, windows, systems] - - - name: System - id: system - description: | - The System input plugin gathers general stats on system load, uptime, and - number of users logged in. It is basically equivalent to the UNIX `uptime` command. 
- introduced: 0.1.6 - tags: [linux, macos, windows, systems] - - - name: SystemD Timings - id: systemd_timings - description: | - The SystemD Timings plugin collects systemd boot timing metrics. - introduced: 1.16.0 - tags: [linux, macos, windows, systems, external] - link: https://github.com/pdmorrow/telegraf-execd-systemd-timings/blob/main/README.md - external: true - - - name: Systemd Units - id: systemd_units - description: | - The Systemd Units plugin gathers systemd unit status metrics on Linux. - It relies on `systemctl list-units --all --type=service` to collect data on service status. - - Results are tagged with the unit name and provide enumerated fields for loaded, - active, and running fields, indicating the unit health. - - This plugin can gather other unit types as well. - See `systemctl list-units --all --type help` for possible options. - - > This plugin is related to the [Windows Services input plugin](#win_services), - > which fulfills the same purpose on Windows. - - introduced: 1.13.0 - tags: [linux, systems] - - - name: Tacacs - id: tacacs - description: | - Tacacs plugin collects successful tacacs authentication response times. - introduced: 1.28.0 - tags: [linux, networking] - - - name: Tail - id: tail - description: | - The Tail input plugin "tails" a log file and parses each log message. - introduced: 1.1.2 - tags: [linux, macos, windows, logging] - - - name: Teamspeak 3 - id: teamspeak - description: | - The Teamspeak 3 input plugin uses the Teamspeak 3 ServerQuery interface of - the Teamspeak server to collect statistics of one or more virtual servers. - introduced: 1.5.0 - tags: [linux, macos, windows, applications, gaming] - - - name: Telegraf v1.x - id: internal - description: | - The Telegraf v1.x input plugin collects metrics about the Telegraf v1.x agent itself. - Note that some metrics are aggregates across all instances of one type of plugin. 
- introduced: 1.2.0 - tags: [linux, macos, windows, applications] - - - name: Temp - id: temp - description: | - The Temp input plugin collects temperature data from sensors. - introduced: 1.8.0 - tags: [linux, macos, windows, iot] - - - name: Tengine Web Server - id: tengine - description: | - The Tengine Web Server input plugin gathers status metrics from the - [Tengine Web Server](http://tengine.taobao.org/) using the - [Reqstat module](http://tengine.taobao.org/document/http_reqstat.html). - introduced: 1.8.0 - tags: [linux, macos, windows, servers, web] - - - name: Trig - id: trig - description: | - The Trig input plugin inserts sine and cosine waves for demonstration purposes. - link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/trig - introduced: 0.3.0 - tags: [linux, macos, windows] - - - name: Twemproxy - id: twemproxy - description: | - The Twemproxy input plugin gathers data from Twemproxy instances, processes - Twemproxy server statistics, processes pool data, and processes backend server - (Redis/Memcached) statistics. - link: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/twemproxy - introduced: 0.3.0 - tags: [linux, macos, windows, servers, web] - - - name: Unbound - id: unbound - description: | - The Unbound input plugin gathers statistics from [Unbound](https://www.unbound.net/), - a validating, recursive, and caching DNS resolver. - introduced: 1.5.0 - tags: [linux, macos, windows, networking] - - - name: UPSD - id: upsd - description: | - The UPSD input plugin reads data of one or more Uninterruptible Power Supplies from an upsd daemon using its NUT network protocol. - introduced: 1.24.0 - tags: [linux, macos, windows, networking] - - - name: uWSGI - id: uwsgi - description: | - The uWSGI input plugin gathers metrics about uWSGI using the [uWSGI Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html). 
- introduced: 1.12.0 - tags: [linux, macos, windows, cloud] - - - name: Varnish - id: varnish - description: | - The Varnish input plugin gathers stats from [Varnish HTTP Cache](https://varnish-cache.org/). - introduced: 0.13.1 - tags: [linux, macos, windows, networking] - - - name: VMware vSphere - id: vsphere - description: | - The VMware vSphere input plugin uses the vSphere API to gather metrics from - multiple vCenter servers (clusters, hosts, VMs, and data stores). - For more information on the available performance metrics, see - [Common vSphere Performance Metrics](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/vsphere/METRICS.md) - introduced: 1.8.0 - tags: [linux, macos, windows, containers] - - - name: Webhooks - id: webhooks - description: | - The Webhooks input plugin starts an HTTPS server and registers multiple webhook listeners. - - #### Available webhooks - - [Filestack](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/filestack/) - - [Github](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/github/) - - [Mandrill](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/mandrill/) - - [Rollbar](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/rollbar/) - - [Papertrail](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/papertrail/) - - [Particle](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/webhooks/particle/) - - #### Add new webhooks - If you need a webhook that is not supported, consider - [adding a new webhook](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/webhooks#adding-new-webhooks-plugin) - - introduced: 1.0.0 - tags: [linux, macos, windows, applications, web] - - - name: Windows Performance Counters - id: win_perf_counters - description: | - The Windows Performance Counters input plugin reads Performance Counters on the - Windows operating system. 
**Windows only**. - introduced: 0.10.2 - tags: [windows, systems] - - - name: Windows Eventlog - id: win_eventlog - description: | - The Windows Eventlog input plugin reports Windows event logging. **Windows Vista and later only**. - introduced: 1.16.0 - tags: [windows, servers, systems, logging] - - - name: Windows Services - id: win_services - description: | - The Windows Services input plugin reports Windows services info. **Windows only**. - introduced: 1.4.0 - tags: [windows, servers, systems] - - - name: Windows Management Instrumentation - id: win_wmi - description: | - Queries Windows Management Instrumentation (WMI) classes. - introduced: 1.26.0 - tags: [windows, servers, systems] - - - name: Wireless - id: wireless - description: | - The Wireless input plugin gathers metrics about wireless link quality by - reading the `/proc/net/wireless` file. **This plugin currently supports Linux only**. - introduced: 1.9.0 - tags: [linux, networking] - - - name: Wireguard - id: wireguard - description: | - The Wireguard input plugin collects statistics on the local Wireguard server using the `wgctrl` library. - Reports gauge metrics for Wireguard interface device(s) and its peers. - introduced: 1.14.0 - tags: [linux, macos, windows, networking] - - - name: X.509 Certificate - id: x509_cert - description: | - The X.509 Certificate input plugin provides information about X.509 certificate - accessible using the local file or network connection. - introduced: 1.8.0 - tags: [linux, macos, windows, networking] - - - name: x509crl - id: x509_crl - description: | - This plugin provides information about X509 CRL (Certificate Revocation Lists) accessible via a file. - introduced: 1.19.0 - link: https://github.com/jcgonnard/telegraf-input-x590crl/blob/master/README.md - tags: [external] - external: true - - - name: XtremIO - id: xtremio - description: | - The Xtremio plugin gathers metrics from Dell EMC XtremIO Storage Array. 
- introduced: 1.22.0 - tags: [networking] - - - name: YouTube - id: youtube - description: | - The YouTube input plugin gathers information from YouTube channels, including views, subscribers, and videos. - introduced: 1.16.0 - link: https://github.com/inabagumi/youtube-telegraf-plugin/blob/main/README.md - tags: [linux, macos, windows, external] - external: true - - - name: ZFS - id: zfs - description: | - Supports: FreeBSD, Linux - - The ZFS input plugin provides metrics from your ZFS filesystems. - It supports ZFS on Linux and FreeBSD. - It gets ZFS statistics from `/proc/spl/kstat/zfs` on Linux and from `sysctl` and `zpool` on FreeBSD. - introduced: 0.2.1 - tags: [linux, macos, windows, systems] - -########## %%%% %% %% %%%%%% %%%%% %% %% %%%%%% %%%% ########## -########## %% %% %% %% %% %% %% %% %% %% %% ########## -########## %% %% %% %% %% %%%%% %% %% %% %%%% ########## -########## %% %% %% %% %% %% %% %% %% %% ########## -########## %%%% %%%% %% %% %%%% %% %%%% ########## - -output: - - name: Amazon CloudWatch - id: cloudwatch - description: | - The Amazon CloudWatch output plugin send metrics to Amazon CloudWatch. - introduced: 0.10.1 - tags: [linux, macos, windows, cloud] - - - name: Amazon CloudWatch logs - id: cloudwatch_logs - description: | - This output plugin sends logs to Amazon CloudWatch. - introduced: 1.19.0 - tags: [amazon, cloud, logging] - - - name: Amazon Kinesis - id: kinesis - description: | - The Amazon Kinesis output plugin is an experimental plugin that is still - in the early stages of development. It will batch up all of the points into - one `PUT` request to Kinesis. This should save the number of API requests - by a considerable level. - introduced: 0.2.5 - tags: [linux, macos, windows, cloud, messaging] - - - name: AWS Timestream - id: timestream - description: | - The Timestream output plugin writes metrics to the [Amazon Timestream](https://aws.amazon.com/timestream/) service. 
- introduced: 1.16.0 - tags: [linux, macos, windows, cloud] - - - name: Amon - id: amon - description: | - The Amon output plugin writes metrics to an [Amon server](https://github.com/amonapp/amon). - For details on the Amon Agent, see [Monitoring Agent](https://docs.amon.cx/agent/) - and requires an `apikey` and `amoninstance` URL. - - If the point value being sent cannot be converted to a float64 value, the metric is skipped. - - Metrics are grouped by converting any `_` characters to `.` in the Point Name. - introduced: 0.2.1 - tags: [linux, macos, windows, databases] - - - name: AMQP - id: amqp - description: | - The AMQP output plugin writes to an AMQP 0-9-1 exchange, a prominent implementation - of the Advanced Message Queuing Protocol (AMQP) protocol being [RabbitMQ](https://www.rabbitmq.com/). - - Metrics are written to a topic exchange using `tag`, defined in configuration - file as `RoutingTag`, as a routing key. - introduced: 0.1.9 - tags: [linux, macos, windows, messaging] - - - name: Apache Kafka - id: kafka - description: | - The Apache Kafka output plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.html) - acting as a Kafka Producer. - introduced: 0.1.7 - tags: [linux, macos, windows, messaging] - - - name: Azure Data Explorer - id: azure_data_explorer - description: | - The Azure Data Explorer output plugin writes metrics to [Azure Data Explorer](https://azure.microsoft.com/en-au/services/data-explorer/). - introduced: 1.20.0 - tags: [linux, macos, windows, cloud, datastores] - - - name: Azure Event Hubs - id: event_hubs - description: | - The Azure Event Hubs output plugin writes metrics to a single Azure Event Hub within an [Event Hubs](https://azure.microsoft.com/en-gb/services/event-hubs/) namespace. - introduced: 1.21.0 - tags: [linux, macos, windows, cloud, datastores] - - - name: BigQuery - id: bigquery - description: | - The BigQuery output plugin writes to [Google Cloud's BigQuery](https://cloud.google.com/bigquery). 
- introduced: 1.18.0 - tags: [linux, macos, windows, cloud] - - - name: CrateDB - id: cratedb - description: | - The CrateDB output plugin writes to [CrateDB](https://crate.io/), a real-time SQL database for - machine data and IoT, using its [PostgreSQL protocol](https://crate.io/docs/crate/reference/protocols/postgres.html). - introduced: 1.5.0 - tags: [linux, macos, windows, data-stores] - - - name: Clarify - id: clarify - description: | - Write data to Clarify - introduced: 1.27.0 - tags: [linux, macos, windows, cloud, data-stores] - - - name: Datadog - id: datadog - description: | - The Datadog output plugin writes to the [Datadog Metrics API](http://docs.datadoghq.com/api/v1/metrics/) - and requires an `apikey` which can be obtained [here](https://app.datadoghq.com/account/settings#api) - for the account. - introduced: 0.1.6 - tags: [linux, macos, windows, applications, cloud] - - - name: Discard - id: discard - description: | - The Discard output plugin simply drops all metrics that are sent to it. - It is only meant to be used for testing purposes. - introduced: 1.2.0 - tags: [linux, macos, windows] - - - name: Dynatrace - id: dynatrace - description: | - The Dynatrace output plugin sends metrics to [Dynatrace](http://www.dynatrace.com). - introduced: 1.16.0 - tags: [linux, macos, windows] - - - name: Elasticsearch - id: elasticsearch - description: | - The Elasticsearch output plugin writes to Elasticsearch via HTTP using - [Elastic](http://olivere.github.io/elastic/). - It supports Elasticsearch releases from 5.x up to 7.x. - introduced: 0.1.5 - tags: [linux, macos, windows, data-stores] - - - name: Exec - id: exec - description: | - The Exec output plugin sends Telegraf metrics to an external application over `stdin`. - introduced: 1.12.0 - tags: [linux, macos, windows, systems] - - - name: Execd - id: execd - description: | - The Execd output plugin runs an external program as a daemon. 
- introduced: 1.15.0 - tags: [linux, macos, windows, systems] - - - name: File - id: file - description: | - The File output plugin writes Telegraf metrics to files. - introduced: 0.10.3 - tags: [linux, macos, windows, systems] - + This plugin gathers statistics including memory and GPU usage, + temperatures etc from [AMD ROCm platform](https://rocm.docs.amd.com/) + GPUs. + + > [!IMPORTANT] + > The [`rocm-smi` binary]() is required and needs to be installed on the + > system. + introduced: v1.20.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [hardware, system] + - name: AMQP Consumer + id: amqp_consumer + description: | + This plugin consumes messages from an Advanced Message Queuing Protocol + v0.9.1 broker. A prominent implementation of this protocol is + [RabbitMQ](https://www.rabbitmq.com). + + Metrics are read from a topic exchange using the configured queue and + binding key. The message payloads must be formatted in one of the + supported [data formats](/telegraf/v1/data_formats/input). + + For an introduction check the [AMQP concepts + page](https://www.rabbitmq.com/tutorials/amqp-concepts.html) and the + [RabbitMQ getting started + guide](https://www.rabbitmq.com/getstarted.html). + introduced: v1.3.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [messaging] + - name: Apache + id: apache + description: | + This plugin collects performance information from [Apache HTTP + Servers](https://httpd.apache.org) using the [`mod_status` module](). + Typically, this module is configured to expose a page at the + `/server-status?auto` endpoint the server. + + The [ExtendedStatus + option](https://httpd.apache.org/docs/current/mod/core.html#extendedstatus) + must be enabled in order to collect all available fields. For information + about configuration of your server check the [module + documentation](https://httpd.apache.org/docs/current/mod/mod_status.html). 
+ introduced: v1.8.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server, web] + - name: APC UPSD + id: apcupsd + description: | + This plugin gathers data from one or more [apcupsd + daemon](https://sourceforge.net/projects/apcupsd/) over the NIS network + protocol. To query a server, the daemon must be running and be + accessible. + introduced: v1.12.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [hardware, server] + - name: Apache Aurora + id: aurora + description: | + This plugin gathers metrics from [Apache + Aurora](https://aurora.apache.org) schedulers. For monitoring + recommendations check the [Monitoring your Aurora + cluster](https://aurora.apache.org/documentation/latest/operations/monitoring) + article. + introduced: v1.7.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications, server] + - name: Azure Monitor + id: azure_monitor + description: | + This plugin gathers metrics of Azure resources using the [Azure + Monitor](https://docs.microsoft.com/en-us/azure/azure-monitor) API. The + plugin requires a `client_id`, `client_secret` and `tenant_id` for + authentication via access token. The `subscription_id` is required for + accessing Azure resources. + + Check the [supported metrics + page](https://docs.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-supported) + for available resource types and their metrics. + + > [!IMPORTANT] + > The Azure API has a read limit of 12,000 requests per hour. Please make + > sure you don't exceed this limit with the total number of metrics you + > are querying in the configured interval. + introduced: v1.25.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud] + - name: Azure Queue Storage + id: azure_storage_queue + description: | + This plugin gathers queue sizes from the [Azure Queue + Storage](https://learn.microsoft.com/en-us/azure/storage/queues) service, + storing large numbers of messages. 
+ introduced: v1.13.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud] + - name: Bcache + id: bcache + description: | + This plugin gathers statistics for the [block layer + cache](https://docs.kernel.org/admin-guide/bcache.html) from the + `stats_total` directory and `dirty_data` file. + introduced: v0.2.0 + os_support: [linux] + tags: [system] + - name: Beanstalkd + id: beanstalkd + description: | + This plugin collects server statistics as well as tube statistics from a + [Beanstalkd work queue](https://beanstalkd.github.io/) as reported by the + `stats` and `stats-tube` server commands. + introduced: v1.8.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [messaging] + - name: Beat + id: beat + description: | + This plugin will collect metrics from a + [Beats](https://www.elastic.co/beats) instances. It is known to work with + Filebeat and Kafkabeat. + introduced: v1.18.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications] + - name: BIND 9 Nameserver + id: bind + description: | + This plugin collects metrics from [BIND 9 + nameservers](https://www.isc.org/bind) using the XML or JSON endpoint. + + For _XML_, version 2 statistics (BIND 9.6 to 9.9) and version 3 + statistics (BIND 9.9+) are supported. Version 3 statistics are the + default and only XML format in BIND 9.10+. + + > [!NOTE] + > For BIND 9.9 to support version 3 statistics, it must be built with the + > `--enable-newstats` compile flag, and the statistics must be + > specifically requested via the correct URL. + + For _JSON_, version 1 statistics (BIND 9.10+) are supported. As of + writing, some distros still do not enable support for JSON statistics in + their BIND packages. 
+ introduced: v1.11.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Bond + id: bond + description: | + This plugin collects metrics for both the network bond interface as well + as its slave interfaces using `/proc/net/bonding/*` files. + introduced: v1.5.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [system] + - name: Burrow + id: burrow + description: | + This plugin collects Kafka topic, consumer and partition status from the + [Burrow - Kafka Consumer Lag + Checking](https://github.com/linkedin/Burrow) companion via [HTTP + API](https://github.com/linkedin/Burrow/wiki/HTTP-Endpoint). Burrow v1.x + versions are supported. + introduced: v1.7.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [messaging] + - name: Ceph Storage + id: ceph + description: | + This plugin collects performance metrics from MON and OSD nodes in a + [Ceph storage cluster](https://ceph.com). Support for Telegraf has been + introduced in the v13.x Mimic release where data is sent to a socket (see + [their documentation](https://docs.ceph.com/en/latest/mgr/telegraf)). + introduced: v0.13.1 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [system] + - name: Control Group + id: cgroup + description: | + This plugin gathers statistics per [control group + (cgroup)](https://docs.kernel.org/admin-guide/cgroup-v2.html). + + > [!NOTE] + > Consider restricting paths to the set of cgroups you are interested in + > if you have a large number of cgroups, to avoid cardinality issues. 
+ + The plugin supports the _single value format_ in the form + + the _new line separated values format_ in the form + + the _space separated values format_ in the form + + and the _space separated keys and value, separated by new line format_ in + the form + introduced: v1.0.0 + os_support: [linux] + tags: [system] + - name: chrony + id: chrony + description: | + This plugin queries metrics from a [chrony NTP + server](https://chrony-project.org). For details on the meaning of the + gathered fields please check the [chronyc + manual](https://chrony-project.org/doc/4.4/chronyc.html). + introduced: v0.13.1 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [system] + - name: Cisco Model-Driven Telemetry (MDT) + id: cisco_telemetry_mdt + description: | + This plugin consumes [Cisco model-driven telemetry + (MDT)](https://www.cisco.com/c/en/us/products/collateral/switches/catalyst-9300-series-switches/model-driven-telemetry-wp.html) + data from Cisco IOS XR, IOS XE and NX-OS platforms via TCP or GRPC. + GRPC-based transport can utilize TLS for authentication and encryption. + Telemetry data is expected to be GPB-KV (self-describing-gpb) encoded. + + The GRPC dialout transport is supported on various IOS XR (64-bit) 6.1.x + and later, IOS XE 16.10 and later, as well as NX-OS 7.x and later + platforms. The TCP dialout transport is supported on IOS XR (32-bit and + 64-bit) 6.1.x and later. + introduced: v1.11.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications] + - name: ClickHouse + id: clickhouse + description: | + This plugin gathers statistics data from a [ClickHouse + server](https://github.com/ClickHouse/ClickHouse). Users on Clickhouse + Cloud will not see the Zookeeper metrics as they may not have permissions + to query those tables. 
+ introduced: v1.14.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] - name: Google Cloud PubSub id: cloud_pubsub description: | - The Google PubSub output plugin publishes metrics to a [Google Cloud PubSub](https://cloud.google.com/pubsub) - topic as one of the supported [output data formats](/telegraf/v1/data_formats/output). - introduced: 1.10.0 - tags: [linux, macos, windows, messaging, cloud] - - - name: Graphite - id: graphite + This plugin consumes messages from the [Google Cloud + PubSub](https://cloud.google.com/pubsub) service and creates metrics + using one of the supported [data + formats](/telegraf/v1/data_formats/input). + introduced: v1.10.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, messaging] + - name: Google Cloud PubSub Push + id: cloud_pubsub_push description: | - The Graphite output plugin writes to [Graphite](http://graphite.readthedocs.org/en/latest/index.html) via raw TCP. - introduced: 0.10.1 - tags: [linux, macos, windows, data-stores] + This plugin listens for messages sent via an HTTP POST from [Google Cloud + PubSub](https://cloud.google.com/pubsub) and expects messages in Google's + Pub/Sub _JSON format_. The plugin allows Telegraf to serve as an endpoint + of push service. - - name: Grafana Loki - id: loki + Google's PubSub service will __only__ send over HTTPS/TLS so this plugin + must be behind a valid proxy or must be configured to use TLS by setting + the `tls_cert` and `tls_key` accordingly. + + Enable mutually authenticated TLS and authorize client connections by + signing certificate authority by including a list of allowed CA + certificate file names in `tls_allowed_cacerts`. + introduced: v1.10.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, messaging] + - name: Amazon CloudWatch Statistics + id: cloudwatch description: | - The Grafana Loki output sends logs to [Loki](https://grafana.com/oss/loki/). 
- introduced: 1.18.0 - tags: [linux, macos, windows, logging] + This plugin will gather metric statistics from [Amazon + CloudWatch](https://aws.amazon.com/cloudwatch). + introduced: v0.12.1 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud] + - name: Amazon CloudWatch Metric Streams + id: cloudwatch_metric_streams + description: | + This plugin listens for metrics sent via HTTP by [Cloudwatch metric + streams](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Metric-Streams.html) + implementing the required [response + specifications](https://docs.aws.amazon.com/firehose/latest/dev/httpdeliveryrequestresponse.html). - - name: Graylog + > [!IMPORTANT] + > Using this plugin can incur costs, see the _Metric Streams example_ in + > [CloudWatch pricing](https://aws.amazon.com/cloudwatch/pricing). + introduced: v1.24.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud] + - name: Netfilter Conntrack + id: conntrack + description: | + This plugin collects metrics from [Netfilter's conntrack + tools](https://conntrack-tools.netfilter.org/). There are two collection + mechanisms for this plugin: + + 1. Extracting information from `/proc/net/stat/nf_conntrack` files if the + `collect` option is set accordingly for finding CPU specific values. + 1. Using specific files and directories by specifying the `dirs` option. + At runtime, conntrack exposes many of those connection statistics within + `/proc/sys/net`. Depending on your kernel version, these files can be + found in either `/proc/sys/net/ipv4/netfilter` or + `/proc/sys/net/netfilter` and will be prefixed with either `ip` or `nf`. + + In order to simplify configuration in a heterogeneous environment, a + superset of directory and filenames can be specified. Any location that + doesn't exist is ignored. 
+ introduced: v1.0.0 + os_support: [linux] + tags: [system] + - name: Hashicorp Consul + id: consul + description: | + This plugin will collect statistics about all health checks registered in + [Consul](https://www.consul.io) using the [Consul + API](https://www.consul.io/docs/agent/http/health.html#health_state). The + plugin will not report any [telemetry + metrics](https://www.consul.io/docs/agent/telemetry.html) but Consul can + report those statistics using the StatsD protocol if needed. + introduced: v1.0.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Hashicorp Consul Agent + id: consul_agent + description: | + This plugin collects metrics from a [Consul + agent](https://developer.hashicorp.com/consul/commands/agent). Telegraf + may be present in every node and connect to the agent locally. Tested on + Consul v1.10. + introduced: v1.22.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Couchbase + id: couchbase + description: | + This plugin collects metrics from + [Couchbase](https://www.couchbase.com/), a distributed NoSQL database. + Metrics are collected for each node, as well as detailed metrics for each + bucket, for a given couchbase server. + introduced: v0.12.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Apache CouchDB + id: couchdb + description: | + This plugin gathers metrics from [Apache + CouchDB](https://couchdb.apache.org/) instances using the + [stats](http://docs.couchdb.org/en/1.6.1/api/server/common.html?highlight=stats#get--_stats) + endpoint. + introduced: v0.10.3 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: CPU + id: cpu + description: | + This plugin gathers metrics about the system's CPUs. 
+ introduced: v0.1.5 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [system] + - name: Counter-Strike Global Offensive (CSGO) + id: csgo + description: | + This plugin gathers metrics from [Counter-Strike: Global + Offensive](https://www.counter-strike.net/) servers. + introduced: v1.18.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Bosch Rexroth ctrlX Data Layer + id: ctrlx_datalayer + description: | + This plugin gathers data from the [ctrlX Data + Layer](https://ctrlx-automation.com), a communication middleware running + on Bosch Rexroth's [ctrlX CORE devices](https://ctrlx-core.com). The + platform is used for professional automation applications like industrial + automation, building automation, robotics, IoT Gateways or as classical + PLC. + introduced: v1.27.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [iot, messaging] + - name: Mesosphere Distributed Cloud OS + id: dcos + description: | + This input plugin gathers metrics from a [Distributed Cloud + OS](https://dcos.io/) cluster's [metrics + component](https://docs.mesosphere.com/1.10/metrics/). + + > [!WARNING] + > Depending on the workload of your DC/OS cluster, this plugin can quickly + > create a high number of series which, when unchecked, can cause high + > load on your database! + introduced: v1.5.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [containers] + - name: Directory Monitor + id: directory_monitor + description: | + This plugin monitors a single directory (traversing sub-directories), and + processes each file placed in the directory. The plugin will gather all + files in the directory at the configured interval, and parse the ones + that haven't been picked up yet. + + > [!NOTE] + > Files should not be used by another process or the plugin may fail. + > Furthermore, files should not be written _live_ to the monitored + > directory. 
If you absolutely must write files directly, they must be + > guaranteed to finish writing before `directory_duration_threshold`. + introduced: v1.18.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [system] + - name: Disk + id: disk + description: | + This plugin gathers metrics about disk usage. + + > [!NOTE] + > The `used_percent` field is calculated by `used / (used + free)` and + > _not_ `used / total` as the unix `df` command does it. See [wikipedia - + > df](https://en.wikipedia.org/wiki/Df_(Unix)) for more details. + introduced: v0.1.1 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [system] + - name: DiskIO + id: diskio + description: | + This plugin gathers metrics about disk traffic and timing. + introduced: v0.10.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [system] + - name: Disque + id: disque + description: | + This plugin gathers data from a + [Disque](https://github.com/antirez/disque) instance, an experimental + distributed, in-memory, message broker. + introduced: v0.10.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [messaging] + - name: Device Mapper Cache + id: dmcache + description: | + This plugin provide a native collection for dmsetup based statistics for + [dm-cache](https://docs.kernel.org/admin-guide/device-mapper/cache.html). + + > [!NOTE] + > This plugin requires super-user permissions! Please make sure, Telegraf + > is able to run `sudo /sbin/dmsetup status --target cache` without + > requiring a password. + introduced: v1.3.0 + os_support: [linux] + tags: [system] + - name: DNS Query + id: dns_query + description: | + This plugin gathers information about DNS queries such as response time + and result codes. 
+ introduced: v1.4.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network, system] + - name: Docker + id: docker + description: | + This plugin uses the [Docker Engine + API](https://docs.docker.com/engine/api) to gather metrics on running + Docker containers. + + > [!NOTE] + > Make sure Telegraf has sufficient permissions to access the + > configured endpoint. + introduced: v0.1.9 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [containers] + - name: Docker Log + id: docker_log + description: | + This plugin uses the [Docker Engine + API](https://docs.docker.com/engine/api) to gather logs from running + Docker containers. + + > [!NOTE] + > This plugin works only for containers with the `local` or `json-file` or + > `journald` logging driver. Please make sure Telegraf has sufficient + > permissions to access the configured endpoint. + introduced: v1.12.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [containers, logging] + - name: Dovecot + id: dovecot + description: | + This plugin uses the Dovecot [v2.1 stats + protocol](https://doc.dovecot.org/configuration_manual/stats/old_statistics/#old-statistics) + to gather metrics about configured domains of + [Dovecot](https://www.dovecot.org/) servers. You can use this plugin on + Dovecot up to and including version v2.3.x. + + > [!IMPORTANT] + > Dovecot v2.4+ has the old protocol removed and this plugin will not + > work. Please use Dovecot's [Openmetrics + > exporter](https://doc.dovecot.org/latest/core/config/statistics.html#openmetrics) + > in combination with the [http input + > plugin](/telegraf/v1/plugins/#input-http) and `openmetrics` data format + > for newer versions of Dovecot. 
+ introduced: v0.10.3 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Data Plane Development Kit (DPDK) + id: dpdk + description: | + This plugin collects metrics exposed by applications built with the [Data + Plane Development Kit](https://www.dpdk.org) which is an extensive set of + open source libraries designed for accelerating packet processing + workloads. + + > [!NOTE] + > Since DPDK will most likely run with root privileges, the telemetry + > socket exposed by DPDK will also require root access. Please adjust + > permissions accordingly! + + Refer to the [Telemetry User + Guide](https://doc.dpdk.org/guides/howto/telemetry.html) for details and + examples on how to use DPDK in your application. + + > [!IMPORTANT] + > This plugin uses the `v2` interface to read telemetry > data from + > applications and required DPDK version `v20.05` or higher. Some metrics + > might require later versions. The recommended version, especially in + > conjunction with the `in_memory` option is `DPDK 21.11.2` or higher. + introduced: v1.19.0 + os_support: [linux] + tags: [applications, network] + - name: Amazon Elastic Container Service + id: ecs + description: | + This plugin gathers statistics on running containers in a Task from the + [Amazon Elastic Container Service](https://aws.amazon.com/ecs/) using the + [Amazon ECS + metadata](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint.html) + and the + [v2](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v2.html) + or + [v3](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v3.html) + statistics API endpoints. + + > [!IMPORTANT] + > The telegraf container must be run in the same Task as the workload it + > is inspecting. + + The amazon-ecs-agent (though it _is_ a container running on the host) is + not present in the metadata/stats endpoints. 
+ introduced: v1.11.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud] + - name: Elasticsearch + id: elasticsearch + description: | + This plugin queries endpoints of a + [Elasticsearch](https://www.elastic.co/) instance to obtain [node + statistics](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) + and optionally + [cluster-health](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) + metrics. Additionally, the plugin is able to query + [cluster](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html), + [indices and + shard](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html) + statistics for the master node. + + > [!NOTE] + > Specific statistics information can change between Elasticsearch + > versions. In general, this plugin attempts to stay as version-generic as + > possible by tagging high-level categories only and creating unique field + > names of whatever statistics names are provided at the mid-low level. + introduced: v0.1.5 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Elasticsearch Query + id: elasticsearch_query + description: | + This plugin allows to query an [Elasticsearch](https://www.elastic.co/) + instance to obtain metrics from data stored in the cluster. The plugins + supports counting the number of hits for a search query, calculating + statistics for numeric fields, filtered by a query, aggregated per tag + and to count the number of terms for a particular field. + + > [!IMPORTANT] + > This plugins supports Elasticsearch 5.x and 6.x but is known to break on + > 7.x or higher. + introduced: v1.20.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: Ethtool + id: ethtool + description: | + This plugin collects ethernet device statistics. The available + information strongly depends on the network device and driver. 
+ introduced: v1.13.0 + os_support: [linux] + tags: [network, system] + - name: Azure Event Hub Consumer + id: eventhub_consumer + description: | + This plugin allows consuming messages from [Azure Event + Hubs](https://learn.microsoft.com/en-us/azure/event-hubs/event-hubs-about) + and [Azure IoT Hub](https://azure.microsoft.com/en-us/products/iot-hub) + instances. + introduced: v1.14.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [iot, messaging] + - name: Exec + id: exec + description: | + This plugin executes the given `commands` on every interval and parses + metrics from their output in any one of the supported [data + formats](/telegraf/v1/data_formats/input). This plugin can be used to + poll for custom metrics from any source. + introduced: v0.1.5 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [system] + - name: Execd + id: execd + description: | + This plugin runs the given external program as a long-running daemon and + collects the metrics in one of the supported [data + formats](/telegraf/v1/data_formats/input) on the process's `stdout`. The + program is expected to stay running and output data when receiving the + configured `signal`. + + The `stderr` output of the process will be relayed to Telegraf's logging + facilities and will be logged as _error_ by default. However, you can log + to other levels by prefixing your message with `E!` for error, `W!` for + warning, `I!` for info, `D!` for debugging and `T!` for trace levels + followed by a space and the actual message. For example outputting `I! A + log message` will create a `info` log line in your Telegraf logging + output. + introduced: v1.14.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [system] + - name: Fail2ban + id: fail2ban + description: | + This plugin gathers the count of failed and banned IP addresses using + [fail2ban](https://www.fail2ban.org) by running the `fail2ban-client` + command. 
+ + > [!NOTE] + > The `fail2ban-client` requires root access, so please make sure to + > either allow Telegraf to run that command using `sudo` without a + > password or by running telegraf as root (not recommended). + introduced: v1.4.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network, system] + - name: Fibaro + id: fibaro + description: | + This plugin gathers data from devices connected to a + [Fibaro](https://www.fibaro.com) controller. Those values could be true + (1) or false (0) for switches, percentage for dimmers, temperature, etc. + Both _Home Center 2_ and _Home Center 3_ devices are supported. + introduced: v1.7.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [iot] + - name: File + id: file + description: | + This plugin reads the __complete__ contents of the configured files in + __every__ interval. The file content is split line-wise and parsed + according to one of the supported [data + formats](/telegraf/v1/data_formats/input). + + > [!TIP] + > If you wish to only process newly appended lines use the + > [tail](/telegraf/v1/plugins/#input-tail) input plugin instead. + introduced: v1.8.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [system] + - name: Filecount + id: filecount + description: | + This plugin reports the number and total size of files in specified + directories. + introduced: v1.8.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [system] + - name: File statistics + id: filestat + description: | + This plugin gathers metrics about file existence, size, and other file + statistics. + introduced: v0.13.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [system] + - name: Fireboard + id: fireboard + description: | + This plugin gathers real-time temperature data from + [fireboard](https://www.fireboard.com) thermometers. 
+ + > [!NOTE] + > You will need to sign up to for the [Fireboard REST + > API](https://docs.fireboard.io/reference/restapi.html) in order to use + > this plugin. + introduced: v1.12.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [iot] + - name: AWS Data Firehose + id: firehose + description: | + This plugin listens for metrics sent via HTTP from [AWS Data + Firehose](https://aws.amazon.com/de/firehose/) in one of the supported + [data formats](/telegraf/v1/data_formats/input). The plugin strictly + follows the request-response schema as describe in the official + [documentation](https://docs.aws.amazon.com/firehose/latest/dev/httpdeliveryrequestresponse.html). + introduced: v1.34.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, messaging] + - name: Fluentd + id: fluentd + description: | + This plugin gathers internal metrics of a + [fluentd](https://www.fluentd.org/) instance provided by fluentd's + [monitor agent plugin](https://docs.fluentd.org/input/monitor_agent). + Data provided by the `/api/plugin.json` resource, `/api/config.json` is + not covered. + + > [!IMPORTANT] + > This plugin might produce high-cardinality series as the `plugin_id` + > value is random after each restart of fluentd. You might need to adjust + > your fluentd configuration, in order to reduce series cardinality in + > case your fluentd restarts frequently by adding the `@id` parameter to + > each plugin. See [fluentd's + > documentation](https://docs.fluentd.org/configuration/config-file#common-plugin-parameter) + > for details. + introduced: v1.4.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Fritzbox + id: fritzbox + description: | + This plugin gathers status information from [AVM](https://en.avm.de/) + devices (routers, repeaters, etc) using the device's + [TR-064](https://avm.de/service/schnittstellen/) interface. 
+ introduced: v1.35.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [iot, network] + - name: GitHub + id: github + description: | + This plugin gathers information from projects and repositories hosted on + [GitHub](https://www.github.com). + + > [!NOTE] + > Telegraf also contains the [webhook input + > plugin](/telegraf/v1/plugins/#input-webhooks) which can be used as an + > alternative method for collecting repository information. + introduced: v1.11.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications] + - name: gNMI (gRPC Network Management Interface) + id: gnmi + description: | + This plugin consumes telemetry data based on + [gNMI](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md) + subscriptions. TLS is supported for authentication and encryption. This + plugin is vendor-agnostic and is supported on any platform that supports + the gNMI specification. + + For Cisco devices the plugin has been optimized to support gNMI telemetry + as produced by Cisco IOS XR (64-bit) version 6.5.1, Cisco NX-OS 9.3 and + Cisco IOS XE 16.12 and later. + introduced: v1.15.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network] + - name: Google Cloud Storage + id: google_cloud_storage + description: | + This plugin will collect metrics from the given [Google Cloud + Storage](https://cloud.google.com/storage) buckets in any of the + supported [data formats](/telegraf/v1/data_formats/input). + introduced: v1.25.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, datastore] + - name: GrayLog id: graylog description: | - The Graylog output plugin writes to a Graylog instance using the `gelf` format. - introduced: 1.0.0 - tags: [linux, macos, windows, logging] + This plugin collects data from [Graylog servers](https://graylog.org/), + currently supporting two type of end points `multiple` (e.g. + `http://:9000/api/system/metrics/multiple`) and `namespace` (e.g. 
+ `http://:9000/api/system/metrics/namespace/{namespace}`). - - name: GroundWork - id: groundwork + Multiple endpoint can be queried and mixing `multiple` and serveral + `namespace` end points is possible. Check + `http://:9000/api/api-browser` for the full list of available + endpoints. + + > [!NOTE] + > When specifying a `namespace` endpoint without an actual namespace, the + > metrics array will be ignored. + introduced: v1.0.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [logging] + - name: HAProxy + id: haproxy description: | - The GroundWork output plugin writes to a [GroundWork Monitor](https://www.gwos.com/product/groundwork-monitor/) instance. - introduced: 1.21.0 - tags: [linux, macos, windows, applications, messaging] + This plugin gathers statistics of [HAProxy](http://www.haproxy.org/) + servers using sockets or the HTTP protocol. + introduced: v0.1.5 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network, server] + - name: HDDtemp + id: hddtemp + description: | + This plugin reads data from a + [hddtemp](https://savannah.nongnu.org/projects/hddtemp/) daemon. + > [!IMPORTANT] + > This plugin requires `hddtemp` to be installed and running as a daemon. + + As the upstream project is not activly maintained anymore and various + distributions (e.g. Debian Bookwork and later) don't ship packages for + `hddtemp` anymore, the binary might not be available (e.g. in Ubuntu + 22.04 or later). + + > [!TIP] + > As an alternative consider using the + > [smartctl](/telegraf/v1/plugins/#input-smartctl) relying on SMART + > information or [sensors](/telegraf/v1/plugins/#input-sensors) plugins to + > retrieve temperature data of your hard-drive. + introduced: v1.0.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [hardware, system] - name: HTTP id: http description: | - The HTTP output plugin sends metrics in a HTTP message encoded using one of the output data formats. 
- For `data_formats` that support batching, metrics are sent in batch format. - introduced: 1.7.0 - tags: [linux, macos, windows, applications] - - - name: Health - id: health + This plugin collects metrics from one or more HTTP endpoints providing + data in one of the supported [data + formats](/telegraf/v1/data_formats/input). + introduced: v1.6.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications, server] + - name: HTTP Listener v2 + id: http_listener_v2 description: | - The health plugin provides a HTTP health check resource that can be configured to return a failure status code based on the value of a metric. - When the plugin is healthy it will return a 200 response; when unhealthy it will return a 503 response. The default state is healthy, one or more checks must fail in order for the resource to enter the failed state. - introduced: 1.11.0 - tags: [linux, macos, windows, applications] + This plugin listens for metrics sent via HTTP in any of the supported + [data formats](/telegraf/v1/data_formats/input). - - name: InfluxDB v1.x + > [!NOTE] + > If you would like Telegraf to act as a proxy/relay for InfluxDB v1 or + > InfluxDB v2 it is recommended to use the [influxdb__listener]() or + > [influxdb_v2_listener]() plugin instead. + introduced: v1.9.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: HTTP Response + id: http_response + description: | + This plugin generates metrics from HTTP responses including the status + code and response statistics. + introduced: v0.12.1 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: HueBridge + id: huebridge + description: | + This plugin gathers status from [Hue + Bridge](https://www.philips-hue.com/) devices using the [CLIP + API](https://developers.meethue.com/develop/hue-api-v2/) interface of the + devices. 
+ introduced: v1.34.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [iot] + - name: Hugepages + id: hugepages + description: | + This plugin gathers metrics from the Linux' [Transparent Huge Pages (THP) + memory management + system](https://www.kernel.org/doc/html/latest/admin-guide/mm/hugetlbpage.html) + that reduces the overhead of Translation Lookaside Buffer (TLB) lookups + on machines with large amounts of memory. + introduced: v1.22.0 + os_support: [linux] + tags: [system] + - name: Icinga2 + id: icinga2 + description: | + This plugin gather services and hosts status information using the + [Icinga2 remote + API](https://docs.icinga.com/icinga2/latest/doc/module/icinga2/chapter/icinga2-api). + introduced: v1.8.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network, server, system] + - name: InfiniBand + id: infiniband + description: | + This plugin gathers statistics for all InfiniBand devices and ports on + the system. These are the counters that can be found in + `/sys/class/infiniband//port//counters/` and RDMA counters can + be found in `/sys/class/infiniband//ports//hw_counters/` + introduced: v1.14.0 + os_support: [linux] + tags: [network] + - name: InfluxDB id: influxdb description: | - The InfluxDB v1.x output plugin writes to InfluxDB using HTTP or UDP. - introduced: 0.1.1 - tags: [linux, macos, windows, data-stores] + This plugin collects metrics on the given InfluxDB v1 servers from the + `/debug/vars` endpoint. Read the + [documentation](https://docs.influxdata.com/platform/monitoring/influxdata-platform/tools/measurements-internal/) + for detailed information about `influxdb` metrics. - - name: InfluxDB v2 - id: influxdb_v2 + Additionally, this plugin can gather metrics from endpoints exposing + InfluxDB-formatted endpoints. 
+ + > [!TIP] + > To gather [InfluxDB v2 + > metrics](https://docs.influxdata.com/influxdb/latest/reference/internals/metrics/) + > use the [prometheus plugin](/telegraf/v1/plugins/#input-prometheus) + > with[[inputs.prometheus]] urls = ["http://localhost:8086/metrics"] + > metric_version = 1 + introduced: v0.2.5 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: InfluxDB Listener + id: influxdb_listener description: | - The InfluxDB v2 output plugin writes metrics to [InfluxDB 2.x](https://github.com/influxdata/influxdb) OSS or Cloud. - introduced: 1.8.0 - tags: [linux, macos, windows, data-stores] + This plugin listens for requests sent according to the [InfluxDB HTTP v1 + API](https://docs.influxdata.com/influxdb/v1.8/guides/write_data/). This + allows Telegraf to serve as a proxy/router for the `/write` endpoint of + the InfluxDB HTTP API. - - name: Instrumental - id: instrumental + > [!NOTE] + > This plugin was previously known as `http_listener`. If you wish to send + > general metrics via HTTP it is recommended to use the + > [`http_listener_v2`]() instead. + + The `/write` endpoint supports the `precision` query parameter and can be + set to one of `ns`, `u`, `ms`, `s`, `m`, `h`. All other parameters are + ignored and defer to the output plugins configuration. + + > [!IMPORTANT] + > When chaining Telegraf instances using this plugin, `CREATE DATABASE` + > requests receive a `200 OK` response with message body `{"results":[]}` + > but they are not relayed. The configuration of the output plugin + > ultimately submits data to InfluxDB determines the destination database. + introduced: v1.9.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: InfluxDB V2 Listener + id: influxdb_v2_listener description: | - The Instrumental output plugin writes to the [Instrumental Collector API](https://instrumentalapp.com/docs/tcp-collector) - and requires a Project-specific API token. 
+ This plugin listens for requests sent according to the [InfluxDB HTTP v2 + API](https://docs.influxdata.com/influxdb/v2/api/). This allows Telegraf + to serve as a proxy/router for the `/api/v2/write` endpoint of the + InfluxDB HTTP API. - Instrumental accepts stats in a format very close to Graphite, with the only - difference being that the type of stat (gauge, increment) is the first token, - separated from the metric itself by whitespace. The increment type is only used - if the metric comes in as a counter through `[[inputs.statsd]]`. - introduced: 0.13.1 - tags: [linux, macos, windows, applications] - - - name: IoTDB - id: iotdb + The `/api/v2/write` endpoint supports the `precision` query parameter and + can be set to one of `ns`, `us`, `ms`, `s`. All other parameters are + ignored and defer to the output plugins configuration. + introduced: v1.16.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: Intel Baseband Accelerator + id: intel_baseband description: | - This output plugin saves Telegraf metrics to an Apache IoTDB backend, - supporting session connection and data insertion. - introduced: 1.24.0 - tags: [linux, macos, windows, data-stores] + This plugin collects metrics from both dedicated and integrated Intel + devices providing Wireless Baseband hardware acceleration. These devices + play a key role in accelerating 5G and 4G Virtualized Radio Access + Networks (vRAN) workloads, increasing the overall compute capacity of + commercial, off-the-shelf platforms by integrating e.g. 
- - name: Librato - id: librato + - Forward Error Correction (FEC) processing, + - 4G Turbo FEC processing, + - 5G Low Density Parity Check (LDPC) + - Fast Fourier Transform (FFT) block providing DFT/iDFT processing offload + for the 5G Sounding Reference Signal (SRS) + introduced: v1.27.0 + os_support: [linux] + tags: [hardware, network, system] + - name: Intel® Dynamic Load Balancer + id: intel_dlb description: | - The Librato output plugin writes to the [Librato Metrics API](http://dev.librato.com/v1/metrics#metrics) - and requires an `api_user` and `api_token` which can be obtained - [here](https://metrics.librato.com/account/api_tokens) for the account. - introduced: 0.2.0 - tags: [linux, macos, windows, cloud] - - - name: Logz.io - id: logzio + This plugin collects metrics exposed by applications built with the [Data + Plane Development Kit](https://www.dpdk.org/), an extensive set of open + source libraries designed for accelerating packet processing workloads, + plugin is also using bifurcated driver. More specifically it's targeted + for applications using Intel DLB as eventdev devices accessed via + bifurcated driver (allowing access from kernel and user-space). + introduced: v1.25.0 + os_support: [linux] + tags: [applications] + - name: Intel® Platform Monitoring Technology + id: intel_pmt description: | - The Logz.io output plugin sends metrics to Logz.io over HTTPs. - introduced: 1.17.0 - tags: [linux, macos, windows] - - - name: Microsoft Azure Application Insights - id: application_insights + This plugin collects metrics via the Linux kernel driver for Intel® + Platform Monitoring Technology (Intel® PMT), an architecture capable of + enumerating and accessing hardware monitoring capabilities on supported + devices. 
+ introduced: v1.28.0 + os_support: [linux] + tags: [hardware, system] + - name: Intel Performance Monitoring Unit + id: intel_pmu description: | - The Microsoft Azure Application Insights output plugin writes Telegraf metrics to - [Application Insights (Microsoft Azure)](https://azure.microsoft.com/en-us/services/application-insights/). - introduced: 1.7.0 - tags: [linux, macos, windows, cloud, applications] + This plugin gathers Intel Performance Monitoring Unit metrics available + via the [Linux Perf](https://perf.wiki.kernel.org/index.php/Main_Page) + subsystem. - - name: Microsoft Azure Monitor - id: azure_monitor + PMU metrics provide insights into performance and health of IA + processors' internal components, including core and uncore units. With + the number of cores increasing and processor topology getting more + complex the insight into those metrics is vital to assure the best CPU + performance and utilization. + + Performance counters are CPU hardware registers that count hardware + events such as instructions executed, cache-misses suffered, or branches + mispredicted. They form a basis for profiling applications to trace + dynamic control flow and identify hotspots. + introduced: v1.21.0 + os_support: [linux] + tags: [hardware, system] + - name: Intel PowerStat + id: intel_powerstat description: | - > The Azure Monitor custom metrics service is currently in preview and not - > available in a subset of Azure regions. + This plugin gathers power statistics on Intel-based platforms providing + insights into power saving and workload migration. Those are beneficial + for Monitoring and Analytics systems to take preventive or corrective + actions based on platform busyness, CPU temperature, actual CPU + utilization and power statistics. 
+ introduced: v1.17.0 + os_support: [linux] + tags: [hardware, system] + - name: Intel RDT + id: intel_rdt + description: | + This plugin collects information provided by monitoring features of the + [Intel Resource Director + Technology](https://www.intel.com/content/www/us/en/architecture-and-technology/resource-director-technology.html), + a hardware framework to monitor and control the utilization of shared + resources (e.g. last level cache, memory bandwidth). - The Microsoft Azure Monitor output plugin sends custom metrics to - [Microsoft Azure Monitor](https://azure.microsoft.com/en-us/services/monitor/). - Azure Monitor has a metric resolution of one minute. To handle this in Telegraf, - the Azure Monitor output plugin automatically aggregates metrics into one minute buckets, - which are then sent to Azure Monitor on every flush interval. + Intel’s Resource Director Technology (RDT) framework consists of: - For a Microsoft blog posting on using Telegraf with Microsoft Azure Monitor, - see [Collect custom metrics for a Linux VM with the InfluxData Telegraf Agent](https://docs.microsoft.com/en-us/azure/monitoring-and-diagnostics/metrics-store-custom-linux-telegraf). + - Cache Monitoring Technology (CMT) + - Memory Bandwidth Monitoring (MBM) + - Cache Allocation Technology (CAT) + - Code and Data Prioritization (CDP) - The metrics from each input plugin will be written to a separate Azure Monitor namespace, - prefixed with `Telegraf/` by default. The field name for each metric is written - as the Azure Monitor metric name. All field values are written as a summarized set - that includes `min`, `max`, `sum`, and `count`. Tags are written as a dimension - on each Azure Monitor metric. 
- introduced: 1.8.0 - tags: [linux, macos, windows, cloud] + As multithreaded and multicore platform architectures emerge, the last + level cache and memory bandwidth are key resources to manage for running + workloads in single-threaded, multithreaded, or complex virtual machine + environments. Intel introduces CMT, MBM, CAT and CDP to manage these + workloads across shared resources. + introduced: v1.16.0 + os_support: [freebsd, linux, macos] + tags: [hardware, system] + - name: Telegraf Internal + id: internal + description: | + This plugin collects metrics about the telegraf agent and its plugins. + > [!NOTE] + > Some metrics are aggregates across all instances of a plugin type. + introduced: v1.2.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications] + - name: Internet Speed Monitor + id: internet_speed + description: | + This plugin collects metrics about the internet speed on the system like + download/upload speed, latency etc using the [speedtest.net + service](https://www.speedtest.net/). + introduced: v1.20.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network] + - name: Interrupts + id: interrupts + description: | + This plugin gathers metrics about IRQs from interrupts + (`/proc/interrupts`) and soft-interrupts (`/proc/softirqs`). + introduced: v1.3.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [system] + - name: IPMI Sensor + id: ipmi_sensor + description: | + This plugin gathers metrics from the [Intelligent Platform Management + Interface](https://www.intel.com/content/dam/www/public/us/en/documents/specification-updates/ipmi-intelligent-platform-mgt-interface-spec-2nd-gen-v2-0-spec-update.pdf) + using the [`ipmitool`]() command line utility. + + > [!IMPORTANT] + > The `ipmitool` requires access to the IPMI device. Please check the + > permission section for possible solutions. 
+ introduced: v0.12.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [hardware, system] + - name: Ipset + id: ipset + description: | + This plugin gathers packets and bytes counters from [Linux IP + sets](https://ipset.netfilter.org/) using the `ipset` command line tool. + + > [!NOTE] + > IP sets created without the "counters" option are ignored. + introduced: v1.6.0 + os_support: [linux] + tags: [network, system] + - name: Iptables + id: iptables + description: | + This plugin gathers packets and bytes counters for rules within a set of + table and chain from the Linux's iptables firewall. + + > [!IMPORTANT] + > Rules are identified through associated comment, so you must ensure that + > the rules you want to monitor do have a **unique** comment using the + > `--comment` flag when adding them. Rules without comments are ignored. + + The rule number cannot be used as identifier as it is not constant and + may vary when rules are inserted/deleted at start-up or by automatic + tools (interactive firewalls, fail2ban, ...). + + > [!IMPORTANT] + > The `iptables` command requires `CAP_NET_ADMIN` and `CAP_NET_RAW` + > capabilities. Check the permissions section for ways to grant them. + introduced: v1.1.0 + os_support: [linux] + tags: [network, system] + - name: IPVS + id: ipvs + description: | + This plugin gathers metrics about the [IPVS virtual and real + servers](http://www.linuxvirtualserver.org/software/ipvs.html) using the + netlink socket interface of the Linux kernel. + + > [!IMPORTANT] + > The plugin requires `CAP_NET_ADMIN` and `CAP_NET_RAW` capabilities. + > Check the permissions section for ways to grant them. + introduced: v1.9.0 + os_support: [linux] + tags: [network, system] + - name: Jenkins + id: jenkins + description: | + This plugin gathers information about the nodes and jobs running in a + [Jenkins](https://www.jenkins.io/) instance. The plugin uses the Jenkins + API and does not require a plugin on the server. 
+ introduced: v1.9.0
+ os_support: [freebsd, linux, macos, solaris, windows]
+ tags: [applications]
+ - name: Jolokia2 Agent
+ id: jolokia2_agent
+ description: |
+ This plugin reads JMX metrics from one or more [Jolokia
+ agent](https://jolokia.org/agent/jvm.html) REST endpoints.
+ introduced: v1.5.0
+ os_support: [freebsd, linux, macos, solaris, windows]
+ tags: [applications, network]
+ - name: Jolokia2 Proxy
+ id: jolokia2_proxy
+ description: |
+ This plugin reads JMX metrics from one or more _targets_ by interacting
+ with a [Jolokia proxy](https://jolokia.org/features/proxy.html) REST
+ endpoint.
+ introduced: v1.5.0
+ os_support: [freebsd, linux, macos, solaris, windows]
+ tags: [applications, network]
+ - name: Juniper Telemetry
+ id: jti_openconfig_telemetry
+ description: |
+ This service plugin reads [OpenConfig](http://openconfig.net/) telemetry
+ data via the [Junos Telemetry Interface
+ (JTI)](https://www.juniper.net/documentation/en_US/junos/topics/concept/junos-telemetry-interface-oveview.html)
+ from the configured sensors.
+ introduced: v1.7.0
+ os_support: [freebsd, linux, macos, solaris, windows]
+ tags: [iot, network]
+ - name: Apache Kafka Consumer
+ id: kafka_consumer
+ description: |
+ This service plugin consumes messages from [Kafka
+ brokers](https://kafka.apache.org) in one of the supported [data
+ formats](/telegraf/v1/data_formats/input). The plugin uses [consumer
+ groups](http://godoc.org/github.com/wvanbergen/kafka/consumergroup) when
+ talking to the Kafka cluster so multiple instances of Telegraf can
+ consume messages from the same topic in parallel.
+ introduced: v0.2.3
+ os_support: [freebsd, linux, macos, solaris, windows]
+ tags: [messaging]
+ - name: Kapacitor
+ id: kapacitor
+ description: |
+ This plugin collects metrics from the configured [InfluxData
+ Kapacitor](https://www.influxdata.com/time-series-platform/kapacitor/)
+ instances. 
+ introduced: v1.3.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications] + - name: Kernel + id: kernel + description: | + This plugin gathers metrics about the [Linux kernel](https://kernel.org/) + including, among others, the [available + entropy](https://www.kernel.org/doc/html/latest/admin-guide/sysctl/kernel.html#random), + [Kernel Samepage + Merging](https://www.kernel.org/doc/html/latest/mm/ksm.html) and + [Pressure Stall + Information](https://www.kernel.org/doc/html/latest/accounting/psi.html). + introduced: v0.11.0 + os_support: [linux] + tags: [system] + - name: Kernel VM Statistics + id: kernel_vmstat + description: | + This plugin gathers virtual memory statistics of the [Linux + kernel](https://kernel.org/) by reading `/proc/vmstat`. For a full list + of available fields check the `/proc/vmstat` section of the [proc man + page](http://man7.org/linux/man-pages/man5/proc.5.html) and for a + detailed description about the fields see the [vmstat man + page](https://man7.org/linux/man-pages/man8/vmstat.8.html). + introduced: v1.0.0 + os_support: [linux] + tags: [system] + - name: Kibana + id: kibana + description: | + This plugin collects metrics about service status from + [Kibana](https://www.elastic.co/kibana) instances via the server's API. + + > [!NOTE] + > This plugin requires Kibana version 6.0+. + introduced: v1.8.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications, server] + - name: Kinesis Consumer + id: kinesis_consumer + description: | + This service input plugin consumes messages from [AWS + Kinesis](https://aws.amazon.com/kinesis/) data stream in one of the + supported [data formats](/telegraf/v1/data_formats/input). 
+ introduced: v1.10.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [iot, messaging] + - name: KNX + id: knx_listener + description: | + This service plugin listens for messages on the [KNX home-automation + bus](https://www.knx.org) by connecting via a KNX-IP interface. + Information about supported KNX datapoint-types can be found at the + underlying [`knx-go` project](). + introduced: v1.19.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [iot] + - name: Kubernetes Inventory + id: kube_inventory + description: | + This plugin gathers metrics from [Kubernetes](https://kubernetes.io/) + resources. + + > [!NOTE] + > This plugin requires Kubernetes version 1.11+. + + The gathered resources include for example daemon sets, deployments, + endpoints, ingress, nodes, persistent volumes and many more. + + > [!CRITICAL] + > This plugin produces high cardinality data, which when not controlled + > for will cause high load on your database. Please make sure to + > [filter](/telegraf/v1/configuration/#metric-filtering) the produced + > metrics or configure your database to avoid cardinality issues! + introduced: v1.10.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [containers] + - name: Kubernetes + id: kubernetes + description: | + This plugin gathers metrics about running pods and containers of a + [Kubernetes](https://kubernetes.io/) instance via the Kubelet API. + + > [!NOTE] + > This plugin has to run as part of a `daemonset` within a Kubernetes + > installation, i.e. Telegraf is running on every node within the cluster. + + You should configure this plugin to talk to its locally running kubelet. + + > [!CRITICAL] + > This plugin produces high cardinality data, which when not controlled + > for will cause high load on your database. Please make sure to + > [filter](/telegraf/v1/configuration/#metric-filtering) the produced + > metrics or configure your database to avoid cardinality issues! 
+ introduced: v1.1.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [containers] + - name: Arista LANZ Consumer + id: lanz + description: | + This service plugin consumes messages from the [Arista Networks’ + Latency Analyzer + (LANZ)](https://www.arista.com/en/um-eos/eos-latency-analyzer-lanz) by + receiving the datastream on TCP (usually through port 50001) on the + switch's management IP. + + > [!NOTE] + > You will need to configure LANZ and enable streaming LANZ data, see the + > [documentation](https://www.arista.com/en/um-eos/eos-section-44-3-configuring-lanz) + > for more details. + introduced: v1.14.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network] + - name: LDAP + id: ldap + description: | + This plugin gathers metrics from LDAP servers' monitoring (`cn=Monitor`) + backend. Currently this plugin supports + [OpenLDAP](https://www.openldap.org/devel/admin/monitoringslapd.html) and + [389ds](https://www.port389.org/) servers. + introduced: v1.29.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network, server] + - name: LeoFS + id: leofs + description: | + This plugin gathers metrics of the [LEO + filesystem](https://leo-project.net/leofs/) services _LeoGateway_, + _LeoManager_, and _LeoStorage_ via SNMP. Check the [LeoFS system + monitoring + documentation](https://leo-project.net/leofs/docs/admin/system_admin/monitoring/) + for details. + introduced: v0.1.5 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network, server] + - name: Libvirt + id: libvirt + description: | + This plugin collects statistics about virtualized guests on a system by + using the [libvirt](https://libvirt.org/) virtualization API. Metrics are + gathered directly from the hypervisor on a host system, so Telegraf + doesn't have to be installed and configured on a guest system. 
+ introduced: v1.25.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Linux CPU + id: linux_cpu + description: | + This plugin gathers CPU metrics exposed on [Linux](https://kernel.org/) + systems. + introduced: v1.24.0 + os_support: [linux] + tags: [system] + - name: Linux Sysctl Filesystem + id: linux_sysctl_fs + description: | + This plugin gathers metrics by reading the [system + filesystem](https://www.kernel.org/doc/Documentation/sysctl/fs.txt) files + on [Linux](https://kernel.org/) systems. + introduced: v1.24.0 + os_support: [linux] + tags: [system] + - name: Logstash + id: logstash + description: | + This plugin gathers metrics from a + [Logstash](https://www.elastic.co/logstash) endpoint using the + [Monitoring + API](https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html). + + > [!NOTE] + > This plugin supports Logstash 5+. + introduced: v1.12.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Lustre + id: lustre2 + description: | + This plugin gathers metrics for the [Lustre® file + system](http://lustre.org/) using its entries in the `proc` filesystem. + Reference the [Lustre Monitoring and Statistics + Guide](http://wiki.lustre.org/Lustre_Monitoring_and_Statistics_Guide) for + the reported information. + + > [!NOTE] + > This plugin doesn't report _all_ information available but only a + > limited set of items. Check the metrics section. + introduced: v0.1.5 + os_support: [linux] + tags: [system] + - name: Logical Volume Manager + id: lvm + description: | + This plugin collects information about physical volumes, volume groups + and logical volumes from the Logical Volume Management (LVM) of the + [Linux kernel](https://www.kernel.org/). 
+ introduced: v1.21.0 + os_support: [linux] + tags: [system] + - name: Mailchimp + id: mailchimp + description: | + This plugin gathers metrics from the [Mailchimp](https://mailchimp.com) + service using the [Mailchimp API](https://developer.mailchimp.com/). + introduced: v0.2.4 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, web] + - name: MarkLogic + id: marklogic + description: | + This plugin gathers health status metrics from one or more + [MarkLogic](https://www.progress.com/marklogic) hosts. + introduced: v1.12.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: MavLink + id: mavlink + description: | + This plugin collects metrics from + [MavLink](https://mavlink.io/)-compatible flight controllers such as + [ArduPilot](https://ardupilot.org/) or [PX4](https://px4.io/) to live + ingest flight metrics from unmanned systems (drones, planes, boats, etc.) + Currently the ArduPilot-specific Mavlink dialect is used, check the + [Mavlink + documentation](https://mavlink.io/en/messages/ardupilotmega.html) for + more details and the various messages available. + + > [!WARNING] + > This plugin potentially generates a large amount of data. If your output + > plugin cannot handle the rate of messages, use [Metric + > filters](/telegraf/v1/configuration/#metric-filtering) to limit the + > metrics written to outputs, and/or the `filters` configuration parameter + > to limit which Mavlink messages this plugin parses. + introduced: v1.35.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [iot] + - name: Mcrouter + id: mcrouter + description: | + This plugin gathers statistics data from + [Mcrouter](https://github.com/facebook/mcrouter) instances, a protocol + router, developed and maintained by Facebook, for scaling + [memcached](http://memcached.org/) deployments. 
+ introduced: v1.7.0
+ os_support: [freebsd, linux, macos, solaris, windows]
+ tags: [applications, network]
+ - name: MD RAID Statistics
+ id: mdstat
+ description: |
+ This plugin gathers statistics about any [Linux MD RAID
+ arrays](https://docs.kernel.org/admin-guide/md.html) configured on the
+ host by reading `/proc/mdstat`. For a full list of available fields see
+ the `/proc/mdstat` section of the [proc man
+ page](http://man7.org/linux/man-pages/man5/proc.5.html). For details on
+ the fields check the [mdstat
+ wiki](https://raid.wiki.kernel.org/index.php/Mdstat).
+ introduced: v1.20.0
+ os_support: [linux]
+ tags: [system]
+ - name: Memory
+ id: mem
+ description: |
+ This plugin collects metrics about the system memory.
+
+ > [!TIP]
+ > For an explanation of the difference between *used* and *actual_used*
+ > RAM, see [Linux ate my ram](http://www.linuxatemyram.com/).
+ introduced: v0.1.5
+ os_support: [freebsd, linux, macos, solaris, windows]
+ tags: [system]
+ - name: Memcached
+ id: memcached
+ description: |
+ This plugin gathers statistics data from
+ [Memcached](https://memcached.org/) instances.
+ introduced: v0.1.2
+ os_support: [freebsd, linux, macos, solaris, windows]
+ tags: [server]
+ - name: Apache Mesos
+ id: mesos
+ description: |
+ This plugin gathers metrics from [Apache
+ Mesos](https://mesos.apache.org/) instances. For more information, please
+ check the [Mesos Observability
+ Metrics](http://mesos.apache.org/documentation/latest/monitoring/) page.
+ introduced: v0.10.3
+ os_support: [freebsd, linux, macos, solaris, windows]
+ tags: [containers]
+ - name: Minecraft
+ id: minecraft
+ description: |
+ This plugin collects score metrics from a
+ [Minecraft](https://www.minecraft.net/) server using the RCON protocol.
+
+ > [!NOTE]
+ > This plugin supports Minecraft Java Edition versions 1.11 - 1.14. When
+ > using a version earlier than 1.13, be aware that the values for some
+ > criteria have changed and need to be modified. 
+ introduced: v1.4.0
+ os_support: [freebsd, linux, macos, solaris, windows]
+ tags: [server]
+ - name: Mock Data
+ id: mock
+ description: |
+ The plugin generates mock-metrics based on different algorithms like
+ sine-wave functions, random numbers and more with the configured names
+ and tags. Those metrics are useful during testing (e.g. processors) or
+ if random data is required.
+ introduced: v1.22.0
+ os_support: [freebsd, linux, macos, solaris, windows]
+ tags: [testing]
+ - name: Modbus
+ id: modbus
+ description: |
+ This plugin collects data from [Modbus](https://www.modbus.org/)
+ registers using e.g. Modbus TCP or serial interfaces with Modbus RTU or
+ Modbus ASCII.
+ introduced: v1.14.0
+ os_support: [freebsd, linux, macos, solaris, windows]
+ tags: [iot]
 - name: MongoDB
 id: mongodb
 description: |
- The MongoDB output plugin writes to MongoDB and automatically creates the collections as time series collections when they don't already exist.
- introduced: 1.21.0
- tags: [linux, macos, windows, data-stores]
+ This plugin collects metrics about [MongoDB](https://www.mongodb.com)
+ server instances by running database commands.

- - name: MQTT Producer
- id: mqtt
+ > [!NOTE]
+ > This plugin supports all versions marked as supported in the [MongoDB
+ > Software Lifecycle
+ > Schedules](https://www.mongodb.com/support-policy/lifecycles).
+ introduced: v0.1.5
+ os_support: [freebsd, linux, macos, solaris, windows]
+ tags: [datastore]
+ - name: Monit
+ id: monit
 description: |
- The MQTT Producer output plugin writes to the MQTT server using
- [supported output data formats](/telegraf/v1/data_formats/output/).
- introduced: 0.2.0
- tags: [linux, macos, windows, messaging, iot]
+ This plugin gathers metrics and status information about local processes,
+ remote hosts, files, file systems, directories and network interfaces
+ managed and watched over by [Monit](https://mmonit.com/).

- - name: NATS Output
+ > [!NOTE]
+ > The plugin supports Monit version 5.16+. 
To use this plugin you have to + > enable the [HTTPD TCP + > port](https://mmonit.com/monit/documentation/monit.html#TCP-PORT) in + > Monit. + introduced: v1.14.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network] + - name: MQTT Consumer + id: mqtt_consumer + description: | + This service plugin consumes messages from [MQTT](https://mqtt.org) + brokers for the configured topics in one of the supported [data + formats](/telegraf/v1/data_formats/input). + introduced: v0.10.3 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [messaging] + - name: Multifile + id: multifile + description: | + This plugin reads the combined data from multiple files into a single + metric, creating one field or tag per file. This is often useful creating + custom metrics from the `/sys` or `/proc` filesystems. + + > [!NOTE] + > To parse metrics from a single file you should use the + > [file](/telegraf/v1/plugins/#input-file) input plugin instead. + introduced: v1.10.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [system] + - name: MySQL + id: mysql + description: | + This plugin gathers statistics from [MySQL](https://www.mysql.com/) + server instances. + + > [!NOTE] + > To gather metrics from the performance schema, it must first be enabled + > in MySQL. See the performance schema [quick + > start](https://dev.mysql.com/doc/refman/8.0/en/performance-schema-quick-start.html) + > for details. + introduced: v0.1.1 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: NATS Server Monitoring id: nats description: | - The NATS Output output plugin writes to a (list of) specified NATS instance(s). - introduced: 1.1.0 - tags: [linux, macos, windows, messaging] - - - name: Nebius Cloud Monitoring - id: nebius_cloud_monitoring + This plugin gathers metrics of a [NATS](http://www.nats.io) server + instance using its [monitoring + endpoints](https://docs.nats.io/running-a-nats-service/nats_admin/monitoring). 
+ introduced: v1.6.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: NATS Consumer + id: nats_consumer description: | - Send aggregated metrics to Nebius.Cloud Monitoring - introduced: 1.27.0 - tags: [linux, macos, windows, cloud, data-stores] - - - name: New Relic - id: newrelic + This service plugin consumes messages from + [NATS](https://www.nats.io/about/) instances in one of the supported + [data formats](/telegraf/v1/data_formats/input). A [Queue + Group](https://www.nats.io/documentation/concepts/nats-queueing/) is used + when subscribing to subjects so multiple instances of telegraf can + consume messages in parallel. The plugin supports authenticating via + [username/password](https://docs.nats.io/using-nats/developer/connecting/userpass), + a [credentials + file](https://docs.nats.io/using-nats/developer/connecting/creds) (NATS + 2.0), or an [nkey seed + file](https://docs.nats.io/using-nats/developer/connecting/nkey) (NATS + 2.0). + introduced: v0.10.3 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [messaging] + - name: Neoom Beaam + id: neoom_beaam description: | - The New Relic output plugin writes to New Relic insights using the [Metric API](https://docs.newrelic.com/docs/data-ingest-apis/get-data-new-relic/metric-api/introduction-metric-api). - introduced: 1.15.0 - tags: [linux, macos, windows, applications, web] + This plugin gathers metrics from a [Neoom Beaam + gateway](https://neoom.com/en/products/beaam) using the [Beaam + API](https://developer.neoom.com/reference/concepts-terms-1) with access + token that can be created in the Neoom web interface. Please follow the + [developer instructions](https://neoom.com/developers) to create the + token. 
+ introduced: v1.33.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [iot] + - name: Neptune Apex + id: neptune_apex + description: | + This plugin gathers metrics from [Neptune Apex + controller](https://www.neptunesystems.com) instances, allowing aquarium + hobbyists to monitor and control their tanks based on various probes. + introduced: v1.10.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [iot] + - name: Network + id: net + description: | + This plugin gathers metrics about network interface and protocol usage. + introduced: v0.1.1 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network] + - name: Network Response + id: net_response + description: | + This plugin tests UDP/TCP connection and produces metrics from the + result, the response time and optionally verifies text in the response. + introduced: v0.10.3 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network] + - name: Netflow + id: netflow + description: | + This service plugin acts as a collector for Netflow v5, Netflow v9 and + IPFIX flow information. The Layer 4 protocol numbers are gathered from + the [official IANA + assignments](https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml). + The internal field mappings for Netflow v5 fields are defined according + to [Cisco's Netflow v5 + documentation](https://www.cisco.com/c/en/us/td/docs/net_mgmt/netflow_collection_engine/3-6/user/guide/format.html#wp1006186), + Netflow v9 fields are defined according to [Cisco's Netflow v9 + documentation](https://www.cisco.com/en/US/technologies/tk648/tk362/technologies_white_paper09186a00800a3db9.html) + and the [ASA + extensions](https://www.cisco.com/c/en/us/td/docs/security/asa/special/netflow/asa_netflow.html). + Definitions for IPFIX are according to [IANA assignment + document](https://www.iana.org/assignments/ipfix/ipfix.xhtml#ipfix-nat-type). 
+ introduced: v1.25.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network] + - name: Network Connection Statistics + id: netstat + description: | + This plugin collects statistics about TCP connection states and UDP + socket counts. + introduced: v0.2.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network] + - name: Network Filesystem + id: nfsclient + description: | + This plugin collects metrics about operations on [Network + Filesystem](https://www.ietf.org/rfc/rfc1813.txt?number=1813) mounts. By + default, only a limited number of general system-level metrics are + collected, including basic read/write counts but more detailed metrics + can be enabled. + > [!NOTE] + > Many of the metrics, even if tagged with a mount point, are really + > _per-server_. E.g. if you mount two shares: `nfs01:/vol/foo/bar` and + > `nfs01:/vol/foo/baz`, there will be two near identical entries in + > `/proc/self/mountstats`. This is a limitation of the metrics exposed by + > the kernel, not by this plugin. + introduced: v1.18.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network, system] + - name: Nginx + id: nginx + description: | + This plugin gathers metrics from the open source [Nginx web + server](https://www.nginx.com). Nginx Plus is a commercial version. For + more information about differences between Nginx (F/OSS) and Nginx Plus, + see the Nginx + [documentation](https://www.nginx.com/blog/whats-difference-nginx-foss-nginx-plus/). + introduced: v0.1.5 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server, web] + - name: Nginx Plus + id: nginx_plus + description: | + This plugin gathers metrics from the commercial [Nginx Plus web + server](https://www.f5.com/products/nginx/nginx-plus) via the [status + module](http://nginx.org/en/docs/http/ngx_http_status_module.html). + + > [!NOTE] + > Using this plugin requires a license. 
+ + For more information about differences between Nginx (F/OSS) and Nginx + Plus, see the Nginx + [documentation](https://www.nginx.com/blog/whats-difference-nginx-foss-nginx-plus/). + introduced: v1.5.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server, web] + - name: Nginx Plus API + id: nginx_plus_api + description: | + This plugin gathers metrics from the commercial [Nginx Plus web + server](https://www.f5.com/products/nginx/nginx-plus) via the [REST + API](https://demo.nginx.com/swagger-ui/). + + > [!NOTE] + > Using this plugin requires a license. + + For more information about differences between Nginx (F/OSS) and Nginx + Plus, see the Nginx + [documentation](https://www.nginx.com/blog/whats-difference-nginx-foss-nginx-plus/). + introduced: v1.9.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server, web] + - name: Nginx Stream Server Traffic + id: nginx_sts + description: | + This plugin gathers metrics from the [Nginx web + server](https://www.nginx.com) using the [external stream server traffic + status module](https://github.com/vozlt/nginx-module-sts). This module + provides access to stream host status information containing the current + status of servers, upstreams and caches, similar to the live activity + monitoring of Nginx plus. For module configuration details please see the + [module + documentation](https://github.com/vozlt/nginx-module-sts#synopsis). + introduced: v1.15.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server, web] + - name: Nginx Upstream Check + id: nginx_upstream_check + description: | + This plugin gathers metrics from the [Nginx web + server](https://www.nginx.com) using the [upstream check + module](https://github.com/yaoweibin/nginx_upstream_check_module). This + module periodically sends the configured requests to servers in the + Nginx's upstream determining their availability. 
+ introduced: v1.10.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server, web] + - name: Nginx Virtual Host Traffic + id: nginx_vts + description: | + This plugin gathers metrics from the [Nginx web + server](https://www.nginx.com) using the [external virtual host traffic + status module](https://github.com/vozlt/nginx-module-vts). This module + provides access to virtual host status information containing the current + status of servers, upstreams and caches, similar to the live activity + monitoring of Nginx plus. For module configuration details please see the + [module + documentation](https://github.com/vozlt/nginx-module-vts#synopsis). + introduced: v1.9.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server, web] + - name: Hashicorp Nomad + id: nomad + description: | + This plugin collects metrics from every [Nomad + agent](https://www.nomadproject.io/) of the specified cluster. Telegraf + may be present in every node and connect to the agent locally. + introduced: v1.22.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: NLnet Labs Name Server Daemon + id: nsd + description: | + This plugin gathers statistics from a [NLnet Labs Name Server + Daemon](https://www.nlnetlabs.nl/projects/nsd/about), an authoritative + DNS name server. + introduced: v1.0.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Netgear Switch Discovery Protocol + id: nsdp + description: | + This plugin gathers metrics from devices via the [Netgear Switch + Discovery + Protocol](https://en.wikipedia.org/wiki/Netgear_Switch_Discovery_Protocol) + for all available switches and ports. + introduced: v1.34.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network] - name: NSQ id: nsq description: | - The NSQ output plugin writes to a specified NSQD instance, usually local to the producer. - It requires a server name and a topic name. 
- introduced: 0.2.1 - tags: [linux, macos, windows, messaging] - - - name: OpenSearch - id: opensearch + This plugin gathers metrics from [NSQ](https://nsq.io/) realtime + distributed messaging platform instances using the [NSQD + API](https://nsq.io/components/nsqd.html). + introduced: v1.16.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: NSQ Consumer + id: nsq_consumer description: | - The OpenSearch plugin writes metrics via HTTP to OpenSearch instances. - introduced: 1.29.0 - tags: [linux, macos, windows, logging, data-stores] + This service plugin consumes messages from [NSQ](https://nsq.io/) + realtime distributed messaging platform brokers in one of the supported + [data formats](/telegraf/v1/data_formats/input). + introduced: v0.10.1 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [messaging] + - name: Kernel Network Statistics + id: nstat + description: | + This plugin collects network metrics from `/proc/net/netstat`, + `/proc/net/snmp` and `/proc/net/snmp6` files + introduced: v0.13.1 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network, system] + - name: Network Time Protocol Query + id: ntpq + description: | + This plugin gathers metrics about [Network Time + Protocol](https://ntp.org/) queries. + > [!IMPORTANT] + > This plugin requires the `ntpq` executable to be installed on the + > system. + introduced: v0.11.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network] + - name: Nvidia System Management Interface (SMI) + id: nvidia_smi + description: | + This plugin collects metrics for [NVIDIA GPUs](https://www.nvidia.com/) + including memory and GPU usage, temperature and other, using the [NVIDIA + System Management + Interface](https://developer.nvidia.com/nvidia-system-management-interface). + + > [!IMPORTANT] + > This plugin requires the `nvidia-smi` binary to be installed on the + > system. 
+ introduced: v1.7.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [hardware, system] + - name: OPC UA Client Reader + id: opcua + description: | + This plugin gathers data from an [OPC + UA](https://opcfoundation.org/about/opc-technologies/opc-ua/) server by + subscribing to the configured nodes. + introduced: v1.16.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [iot] + - name: OPC UA Client Listener + id: opcua_listener + description: | + This service plugin receives data from an [OPC + UA](https://opcfoundation.org/about/opc-technologies/opc-ua/) server by + subscribing to nodes and events. + introduced: v1.25.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [iot] + - name: OpenLDAP + id: openldap + description: | + This plugin gathers metrics from [OpenLDAP](https://www.openldap.org/)'s + `cn=Monitor` backend. To use this plugin you must enable the [slapd + monitoring](https://www.openldap.org/devel/admin/monitoringslapd.html) + backend. + + > [!NOTE] + > It is recommended to use the newer [`ldap` input plugin]() instead. + introduced: v1.4.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network, server] + - name: OpenNTPD + id: openntpd + description: | + This plugin gathers metrics from [OpenNTPD](http://www.openntpd.org/) + using the `ntpctl` command. + + > [!NOTE] + > The `ntpctl` binary must be present on the system and executable by + > Telegraf. The plugin supports using `sudo` for execution. + introduced: v1.12.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network, server] + - name: OpenSearch Query + id: opensearch_query + description: | + This plugin queries [OpenSearch](https://opensearch.org/) endpoints to + derive metrics from data stored in an OpenSearch cluster like the number + of hits for a search query, statistics on numeric fields, document + counts, etc. 
+
+ > [!NOTE]
+ > This plugin is tested against OpenSearch 2.5.0 and 1.3.7 but newer
+ > versions should also work.
+ introduced: v1.26.0
+ os_support: [freebsd, linux, macos, solaris, windows]
+ tags: [datastore]
+ - name: OpenSMTPD
+ id: opensmtpd
+ description: |
+ This plugin gathers statistics from
+ [OpenSMTPD](https://www.opensmtpd.org/) using the `smtpctl` binary.
+
+ > [!NOTE]
+ > The `smtpctl` binary must be present on the system and executable by
+ > Telegraf. The plugin supports using `sudo` for execution.
+ introduced: v1.5.0
+ os_support: [freebsd, linux, macos, solaris, windows]
+ tags: [network, server]
+ - name: OpenStack
+ id: openstack
+ description: |
+ This plugin collects metrics about services from
+ [OpenStack](https://www.openstack.org/) endpoints.
+
+ > [!CAUTION]
+ > Due to the large number of unique tags generated by the plugin it is
+ > **highly recommended** to use [metric
+ > filtering](/telegraf/v1/configuration/#modifiers) like `taginclude` and
+ > `tagexclude` to reduce cardinality.
+ introduced: v1.21.0
+ os_support: [freebsd, linux, macos, solaris, windows]
+ tags: [cloud, server]
 - name: OpenTelemetry
 id: opentelemetry
 description: |
- The OpenTelemetry plugin sends metrics to [OpenTelemetry](https://opentelemetry.io/) servers and agents via gRPC.
- introduced: 1.20.0
- tags: [linux, macos, windows, logging, messaging]
-
- - name: OpenTSDB
- id: opentsdb
+ This service plugin receives traces, metrics, logs and profiles from
+ [OpenTelemetry](https://opentelemetry.io) clients and compatible agents
+ via gRPC.
+ introduced: v1.19.0
+ os_support: [freebsd, linux, macos, solaris, windows]
+ tags: [logging, messaging]
+ - name: OpenWeatherMap
+ id: openweathermap
 description: |
- The OpenTSDB output plugin writes to an OpenTSDB instance using either the telnet or HTTP mode.
+ This plugin collects weather and forecast data from the
+ [OpenWeatherMap](https://openweathermap.org) service. 
- Using the HTTP API is the recommended way of writing metrics since OpenTSDB 2.0. - To use HTTP mode, set `useHttp` to true in config. You can also control how many - metrics are sent in each HTTP request by setting `batchSize` in config. - See the [OpenTSDB documentation](http://opentsdb.net/docs/build/html/api_http/put.html) for details. - introduced: 0.1.9 - tags: [linux, macos, windows, data-stores] - - - name: Parquet - id: parquet + > [!IMPORTANT] + > To use this plugin you will need an + > [APP-ID](https://openweathermap.org/appid) to work. + introduced: v1.11.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications, web] + - name: P4 Runtime + id: p4runtime description: | - This plugin writes metrics to parquet files. By default, it groups metrics by name, and then writes each group to a separate file. If a metric schema doesn't match, the metrics are dropped. - introduced: 1.32.0 - tags: [linux, macos, windows, data-stores] + This plugin collects metrics from the data plane of network devices, such + as Programmable Switches or Programmable Network Interface Cards by + reading the `Counter` values of the [P4 program](https://p4.org) running + on the device. Metrics are collected through a gRPC connection with the + [P4 runtime](https://github.com/p4lang/p4runtime) server. - - name: Postgre SQL + > [!TIP] + > If you want to gather information about the program name, please follow + > the instruction in [6.2.1. Annotating P4 code with + > PkgInfo](https://p4.org/p4-spec/p4runtime/main/P4Runtime-Spec.html#sec-annotating-p4-code-with-pkginfo) + > to modify your P4 program. + introduced: v1.26.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications, network] + - name: Passenger + id: passenger + description: | + This plugin gathers metrics from the [Phusion + Passenger](https://www.phusionpassenger.com/) service. 
+ + > [!WARNING] + > Depending on your environment, this plugin can create a high number of + > series which can cause high load on your database. Please use + > [measurement + > filtering](https://docs.influxdata.com/telegraf/latest/administration/configuration/#measurement-filtering) + > to manage your series cardinality! + + The plugin uses the `passenger-status` command line tool. + + > [!NOTE] + > This plugin requires the `passenger-status` binary to be installed on + > the system and to be executable by Telegraf. + introduced: v0.10.1 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [web] + - name: PF + id: pf + description: | + This plugin gathers information from the FreeBSD or OpenBSD pf firewall + like the number of current entries in the table, counters for the number + of searches, inserts, and removals to tables using the `pfctl` command. + + > [!NOTE] + > This plugin requires the `pfctl` binary to be executable by Telegraf. It + > requires read access to the device file `/dev/pf`. + introduced: v1.5.0 + os_support: [freebsd] + tags: [network, system] + - name: PgBouncer + id: pgbouncer + description: | + This plugin collects metrics from a [PgBouncer load + balancer](https://pgbouncer.github.io) instance. Check the + [documentation](https://pgbouncer.github.io/usage.html) for available + metrics and their meaning. + + > [!NOTE] + > This plugin requires PgBouncer v1.5+. + introduced: v1.8.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server, web] + - name: PHP-FPM + id: phpfpm + description: | + This plugin gathers statistics of the [PHP FastCGI Process + Manager](https://www.php.net/manual/en/install.fpm.php) using either the + HTTP status page or the fpm socket. 
+ introduced: v0.1.10 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server, web] + - name: Ping + id: ping + description: | + This plugin collects metrics on ICMP ping packets including the + round-trip time, response times and other packet statistics. + + > [!NOTE] + > When using the `exec` method the `ping` command must be available on the + > systems and executable by Telegraf. + introduced: v0.1.8 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network] + - name: Postfix + id: postfix + description: | + This plugin collects metrics on a local + [Postfix](https://www.postfix.org/) instance reporting the length, size + and age of the active, hold, incoming, maildrop, and deferred + [queues](https://www.postfix.org/QSHAPE_README.html#queues). + introduced: v1.5.0 + os_support: [freebsd, linux, macos, solaris] + tags: [server] + - name: PostgreSQL id: postgresql description: | - The Postgre SQL output plugin writes metrics to PostgreSQL (or compatible database). - introduced: 1.24.0 - tags: [linux, macos, windows] - - - name: Prometheus Client - id: prometheus_client + This plugin provides metrics for a + [PostgreSQL](https://www.postgresql.org/) Server instance. Recorded + metrics are lightweight and use Dynamic Management Views supplied by + PostgreSQL. + introduced: v0.10.3 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: PostgreSQL Extensible + id: postgresql_extensible description: | - The Prometheus Client output plugin starts a [Prometheus](https://prometheus.io/) Client, - it exposes all metrics on `/metrics` (default) to be polled by a Prometheus server. - introduced: 0.2.1 - tags: [linux, macos, windows, applications] + This plugin queries a [PostgreSQL](https://www.postgresql.org/) server + and provides metrics for the returned result. This is useful when using + PostgreSQL extensions to collect additional metrics. 
- - name: Redis time series - id: redistimeseries + > [!TIP] + > Please also check the more generic [sql input + > plugin](/telegraf/v1/plugins/#input-sql). + introduced: v0.12.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: PowerDNS + id: powerdns description: | - The Redis time series output plugin writes metrics to the RedisTimeSeries server. - introduced: 1.24.0 - tags: [linux, macos, windows, networking] + This plugin gathers metrics from [PowerDNS](https://www.powerdns.com/) + servers using unix sockets. - - name: Remote file - id: remotefile + > [!NOTE] + > This plugin will need access to the powerdns control socket. + introduced: v0.10.2 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: PowerDNS Recursor + id: powerdns_recursor description: | - This plugin writes telegraf metrics to files in remote locations using - the rclone library. Multiple backends such as Amazon S3 or SFTP are - supported. - introduced: 1.32.0 - tags: [linux, macos, windows, data-stores] + This plugin gathers metrics from [PowerDNS + Recursor](https://www.powerdns.com/powerdns-recursor) instances using the + unix control-sockets. - - name: Riemann - id: riemann + > [!NOTE] + > Telegraf will need read and write access to the control socket and the + > `socket_dir`. + introduced: v1.11.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Processes + id: processes description: | - The Riemann output plugin writes to [Riemann](http://riemann.io/) using TCP or UDP. - introduced: 1.3.0 - tags: [linux, macos, windows, networking, systems] + This plugin gathers info about the total number of processes and groups + them by status (zombie, sleeping, running, etc.) - - name: Sensu - id: sensu + > [!NOTE] + > On Linux this plugin requires access to procfs (/proc), on other + > operating systems the plugin must be able to execute the `ps` command. 
+ introduced: v0.11.0 + os_support: [freebsd, linux, macos] + tags: [system] + - name: Procstat + id: procstat description: | - The Sensu output plugin writes metrics events to [Sensu Go](https://sensu.io/). - introduced: 1.18.0 - tags: [linux, macos, windows, applications] - - - name: SignalFX - id: signalfx + This plugin allows to monitor the system resource usage of one or more + processes. The plugin provides metrics about the individual processes as + well as accumulated metrics on the number of PIDs returned on a search. + Processes can be filtered e.g. by regular expressions on the command, the + user owning the process or the service that started the process. + introduced: v0.2.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [system] + - name: Prometheus + id: prometheus description: | - The SignalFX output plugin sends metrics to [SignalFX](https://docs.signalfx.com/en/latest/). - introduced: 1.18.0 - tags: [linux, macos, windows, applications] - - - name: Socket Writer - id: socket_writer + This plugin gathers metrics from [Prometheus](https://prometheus.io/) + metric endpoints such as applications implementing such an endpoint or + node-exporter instances. This plugin also supports various + service-discovery methods. + introduced: v0.1.5 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications, server] + - name: Proxmox + id: proxmox description: | - The Socket Writer output plugin writes to a UDP, TCP, or UNIX socket. - It can output data in any of the [supported output formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md). - introduced: 1.3.0 - tags: [linux, macos, windows, networking] - - - name: Stackdriver - id: stackdriver + This plugin gathers metrics about containers and VMs running on a + [Proxmox](https://www.proxmox.com) instance using the Proxmox API. 
+ introduced: v1.16.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Puppet Agent + id: puppetagent description: | - The Stackdriver output plugin writes to the [Google Cloud Stackdriver API](https://cloud.google.com/monitoring/api/v3/) - and requires [Google Cloud authentication](https://cloud.google.com/docs/authentication/getting-started) - with Google Cloud using either a service account or user credentials. - For details on pricing, see the [Stackdriver documentation](https://cloud.google.com/stackdriver/pricing). - - Requires `project` to specify where Stackdriver metrics will be delivered to. - - Metrics are grouped by the `namespace` variable and metric key, for example - `custom.googleapis.com/telegraf/system/load5`. - introduced: 1.9.0 - tags: [linux, macos, windows, cloud] - - - name: Stomp (ActiveMQ) - id: stomp + This plugin gathers metrics of a [Puppet agent](https://www.puppet.com/) + by parsing variables from the local last-run-summary file. + introduced: v0.2.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [system] + - name: RabbitMQ + id: rabbitmq description: | - The Stomp (ActiveMQ) output plugin writes to an [ActiveMQ Broker](https://activemq.apache.org/) for [STOMP](http://stomp.github.io). - introduced: 1.24.0 - tags: [linux, macos, windows] + This plugin gathers statistics from [RabbitMQ](https://www.rabbitmq.com) + servers via the [Management + Plugin](https://www.rabbitmq.com/management.html). + introduced: v0.1.5 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Radius + id: radius + description: | + This plugin collects response times for + [Radius](https://datatracker.ietf.org/doc/html/rfc2865) authentication + requests. 
+ introduced: v1.26.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Raindrops Middleware + id: raindrops + description: | + This plugin collects statistics for [Raindrops + middleware](http://raindrops.bogomips.org/Raindrops/Middleware.html) + instances. + introduced: v0.10.3 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: RAS Daemon + id: ras + description: | + This plugin gathers statistics and error counts provided by the local + [RAS (reliability, availability and + serviceability)](https://github.com/mchehab/rasdaemon) daemon. + > [!NOTE] + > This plugin requires access to SQLite3 database from `RASDaemon`. Please + > make sure the Telegraf user has the required permissions to this + > database! + introduced: v1.16.0 + os_support: [linux] + tags: [server] + - name: RavenDB + id: ravendb + description: | + This plugin gathers metrics from [RavenDB](https://ravendb.net/) servers + via the monitoring API. + + > [!NOTE] + > This plugin requires RavenDB Server v5.2+. + introduced: v1.18.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Redfish + id: redfish + description: | + This plugin gathers metrics and status information of server hardware + with enabled [DMTF's Redfish](https://redfish.dmtf.org/) support. + introduced: v1.15.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Redis + id: redis + description: | + This plugin gathers metrics from [Redis](https://redis.io/) servers. + introduced: v0.1.1 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Redis Sentinel + id: redis_sentinel + description: | + This plugin collects metrics for [Redis + Sentinel](https://redis.io/docs/latest/operate/oss_and_stack/management/sentinel/) + instances monitoring Redis servers and replicas. 
+ introduced: v1.22.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: RethinkDB + id: rethinkdb + description: | + This plugin collects metrics from [RethinkDB](https://www.rethinkdb.com/) + servers. + introduced: v0.1.3 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Riak + id: riak + description: | + This plugin gathers metrics from [Riak](https://riak.com/) instances. + introduced: v0.10.4 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Riemann Listener + id: riemann_listener + description: | + This service plugin listens for messages from + [Riemann](https://riemann.io/) clients using the protocol buffer format. + introduced: v1.17.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: Siemens S7 + id: s7comm + description: | + This plugin reads metrics from Siemens PLCs via the S7 protocol. + introduced: v1.28.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [hardware] + - name: Salesforce + id: salesforce + description: | + This plugin gathers metrics about the limits in your + [Salesforce](https://salesforce.com) organization and the remaining usage + using the [limits + endpoint](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_limits.htm) + of Salesforce's REST API. + introduced: v1.4.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, server] + - name: LM Sensors + id: sensors + description: | + This plugin collects metrics from hardware sensors using + [lm-sensors](https://en.wikipedia.org/wiki/Lm_sensors). + + > [!NOTE] + > This plugin requires the lm-sensors package to be installed on the + > system and `sensors` to be executable from Telegraf. 
+ introduced: v0.10.1 + os_support: [linux] + tags: [hardware, system] + - name: SFlow + id: sflow + description: | + This service plugin produces metrics from information received by acting + as a [SFlow V5](https://sflow.org/sflow_version_5.txt) collector. + Currently, the plugin can collect Flow Samples of Ethernet / IPv4, IPv4 + TCP and UDP headers. Counters and other header samples are ignored. + Please use the [netflow plugin](/telegraf/v1/plugins/#input-netflow) for + a more modern and sophisticated implementation. + + > [!CRITICAL] + > This plugin produces high cardinality data, which when not controlled + > for will cause high load on your database. Please make sure to + > [filter](/telegraf/v1/configuration/#metric-filtering) the produced + > metrics or configure your database to avoid cardinality issues! + introduced: v1.14.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network] + - name: Slab + id: slab + description: | + This plugin collects details on memory consumption of [Slab + cache](https://www.kernel.org/doc/gorman/html/understand/understand011.html) + entries by parsing the `/proc/slabinfo` file respecting the `HOST_PROC` + environment variable. + + > [!NOTE] + > This plugin requires `/proc/slabinfo` to be readable by the Telegraf + > user. + introduced: v1.23.0 + os_support: [linux] + tags: [system] + - name: SLURM + id: slurm + description: | + This plugin gathers diagnoses, jobs, nodes, partitions and reservation + metrics for a [SLURM](https://slurm.schedmd.com) instance using the REST + API provided by the `slurmrestd` daemon. + + > [!NOTE] + > This plugin supports the [REST API + > v0.0.38](https://slurm.schedmd.com/rest.html) which must be enabled in + > the `slurmrestd` daemon. For more information, check the + > [documentation](https://slurm.schedmd.com/rest_quickstart.html#customization). + introduced: v1.32.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: S.M.A.R.T. 
+ id: smart + description: | + This plugin collects [Self-Monitoring, Analysis and Reporting + Technology](https://en.wikipedia.org/wiki/Self-Monitoring,_Analysis_and_Reporting_Technology) + information for storage devices using the [`smartmontools`]() + package. This plugin also supports NVMe devices by using the + [`nvme-cli`]() package. + + > [!NOTE] + > This plugin requires the [`smartmontools`]() and, for NVMe devices, the + > [`nvme-cli`]() packages to be installed on your system. The `smartctl` + > and `nvme` commands must be executable by Telegraf. + introduced: v1.5.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [hardware, system] + - name: smartctl JSON + id: smartctl + description: | + This plugin collects [Self-Monitoring, Analysis and Reporting + Technology](https://en.wikipedia.org/wiki/Self-Monitoring,_Analysis_and_Reporting_Technology) + information for storage devices using the [`smartmontools`]() + package. Contrary to the [smart + plugin](/telegraf/v1/plugins/#input-smart), this plugin does not use the + [`nvme-cli`]() package to collect additional information about NVMe + devices. + + > [!NOTE] + > This plugin requires [`smartmontools`]() to be installed on your system. + > The `smartctl` command must be executable by Telegraf and must + > support JSON output. JSON output was added in v7.0 and improved in + > subsequent releases. + introduced: v1.31.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [hardware, system] + - name: SNMP + id: snmp + description: | + This plugin gathers metrics by polling + [SNMP](https://datatracker.ietf.org/doc/html/rfc1157) agents with + individual OIDs or complete SNMP tables. + + > [!NOTE] + > The path setting is shared between all instances of all SNMP plugin + > types! 
+ introduced: v0.10.1 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [hardware, network] + - name: SNMP Trap + id: snmp_trap + description: | + This service plugin listens for + [SNMP](https://datatracker.ietf.org/doc/html/rfc1157) notifications like + traps and inform requests. Notifications are received on plain UDP with a + configurable port. + + > [!NOTE] + > The path setting is shared between all instances of all SNMP plugin + > types! + introduced: v1.13.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [hardware, network] + - name: Socket Listener + id: socket_listener + description: | + This service plugin listens for messages on sockets (TCP, UDP, Unix or + Unixgram) and parses the packets received in one of the supported [data + formats](/telegraf/v1/data_formats/input). + introduced: v1.3.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network] + - name: Socket Statistics + id: socketstat + description: | + This plugin gathers metrics for established network connections using + [iproute2](https://github.com/iproute2/iproute2)'s `ss` command. The `ss` + command does not require specific privileges. + + > [!CRITICAL] + > This plugin produces high cardinality data, which when not controlled + > for will cause high load on your database. Please make sure to + > [filter](/telegraf/v1/configuration/#metric-filtering) the produced + > metrics or configure your database to avoid cardinality issues! + introduced: v1.22.0 + os_support: [freebsd, linux, macos] + tags: [network] + - name: Apache Solr + id: solr + description: | + This plugin collects statistics from + [Solr](http://lucene.apache.org/solr/) instances using the [MBean Request + Handler](https://cwiki.apache.org/confluence/display/solr/MBean+Request+Handler). + For additional details on performance statistics check the [performance + statistics + reference](https://cwiki.apache.org/confluence/display/solr/Performance+Statistics+Reference). 
+ + > [!NOTE] + > This plugin requires Apache Solr v3.5+. + introduced: v1.5.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] - name: SQL id: sql description: | - The SQL output plugin saves Telegraf metric data to an SQL database. - introduced: 1.19.0 - tags: [datastores] - - - name: Sumo Logic - id: sumologic + This plugin reads metrics from performing + [SQL](https://www.iso.org/standard/76583.html) queries against a SQL + server. Different server types are supported and their settings might + differ (especially the connection parameters). Please check the list of + [supported SQL drivers](../../../docs/SQL_DRIVERS_INPUT.md) for the + `driver` name and options for the data-source-name (`dsn`) options. + introduced: v1.19.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: Microsoft SQL Server + id: sqlserver description: | - This plugin sends metrics to [Sumo Logic HTTP Source](https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source) - in HTTP messages using one of the following supported data formats - * `graphite` - for Content-Type of `application/vnd.sumologic.graphite` - * `carbon2` - for Content-Type of `application/vnd.sumologic.carbon2` - * `prometheus` - for Content-Type of `application/vnd.sumologic.prometheus` - introduced: 1.16.0 - tags: [linux, macos, windows, logging] + This plugin provides metrics for your [SQL + Server](https://docs.microsoft.com/en-us/sql/sql-server) instance. + Recorded metrics are lightweight and use Dynamic Management Views + supplied by SQL Server. 
+ > [!NOTE] + > This plugin supports SQL server versions supported by Microsoft (see + > [lifecycle + > dates](https://docs.microsoft.com/en-us/sql/sql-server/end-of-support/sql-server-end-of-life-overview?view=sql-server-ver15#lifecycle-dates)), + > Azure SQL Databases (Single), Azure SQL Managed Instances, Azure SQL + > Elastic Pools and Azure Arc-enabled SQL Managed Instances. + introduced: v0.10.1 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: Stackdriver Google Cloud Monitoring + id: stackdriver + description: | + This plugin collects metrics from [Google Cloud + Monitoring](https://cloud.google.com/monitoring) (formerly Stackdriver) + using the [Cloud Monitoring API + v3](https://cloud.google.com/monitoring/api/v3/). + + > [!IMPORTANT] + > This plugin accesses APIs which are + > [chargeable](https://cloud.google.com/stackdriver/pricing#stackdriver_monitoring_services), + > cost might incur. + introduced: v1.10.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud] + - name: StatsD + id: statsd + description: | + This service plugin gathers metrics from a + [Statsd](https://github.com/statsd/statsd) server. + introduced: v0.2.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications] + - name: Supervisor + id: supervisor + description: | + This plugin gathers information about processes running under + [supervisord](https://supervisord.org/) using the [XML-RPC + API](https://supervisord.org/api.html). + + > [!NOTE] + > This plugin requires supervisor v3.3.2+. + introduced: v1.24.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications] + - name: Suricata + id: suricata + description: | + This service plugin reports internal performance counters of the + [Suricata IDS/IPS](https://suricata.io/) engine, such as captured traffic + volume, memory usage, uptime, flow counters, and much more. 
This plugin + provides a socket for the Suricata log output to write JSON stats output + to, and processes the incoming data to fit Telegraf's format. It can also + report for triggered Suricata IDS/IPS alerts. + introduced: v1.13.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications, network] + - name: Swap + id: swap + description: | + This plugin collects metrics on the operating-system's swap memory. + introduced: v1.7.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [system] + - name: Synproxy + id: synproxy + description: | + This plugin gathers metrics about the Linux netfilter's + [synproxy](https://wiki.nftables.org/wiki-nftables/index.php/Synproxy) + module used for mitigating SYN attacks. + introduced: v1.13.0 + os_support: [linux] + tags: [network] - name: Syslog id: syslog description: | - The syslog output plugin sends syslog messages transmitted over UDP or TCP or TLS, with or without the octet counting framing. - Syslog messages are formatted according to RFC 5424. - introduced: 1.11.0 - tags: [linux, macos, windows, logging] + This service plugin listens for + [syslog](https://en.wikipedia.org/wiki/Syslog) messages transmitted over + a Unix Domain socket, [UDP](https://tools.ietf.org/html/rfc5426), + [TCP](https://tools.ietf.org/html/rfc6587) or + [TLS](https://tools.ietf.org/html/rfc5425) with or without the octet + counting framing. + Syslog messages should be formatted according to the [syslog + protocol](https://tools.ietf.org/html/rfc5424) or the [BSD syslog + protocol](https://tools.ietf.org/html/rfc3164). + introduced: v1.7.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [logging] + - name: System Performance Statistics + id: sysstat + description: | + This plugin collects Linux [system performance + statistics](https://github.com/sysstat/sysstat) using the `sysstat` + package. 
This plugin uses the `sadc` collector utility and parses the + created binary data file using the `sadf` utility. + + > [!NOTE] + > This plugin requires the `sysstat` package to be installed on the system + > and both `sadc` and `sadf` to be executable by Telegraf. + introduced: v0.12.1 + os_support: [linux] + tags: [system] + - name: System + id: system + description: | + This plugin gathers general system statistics like system load, uptime or + the number of users logged in. It is similar to the unix `uptime` + command. + introduced: v0.1.6 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [system] + - name: Systemd-Units + id: systemd_units + description: | + This plugin gathers the status of systemd-units on Linux, using systemd's + DBus interface. + + > [!NOTE] + > This plugin requires systemd v230+! + introduced: v1.13.0 + os_support: [linux] + tags: [system] + - name: Tacacs + id: tacacs + description: | + This plugin collects metrics on [Terminal Access Controller Access + Control System](https://datatracker.ietf.org/doc/html/rfc1492) + authentication requests like response status and response time from + servers such as [Aruba + ClearPass](https://www.hpe.com/de/de/aruba-clearpass-policy-manager.html), + [FreeRADIUS](https://www.freeradius.org/) or + [TACACS+](https://datatracker.ietf.org/doc/html/rfc8907). + + The plugin is primarily meant to monitor how long it takes for the server + to fully handle an authentication request, including all potential + dependent calls (for example to AD servers, or other sources of truth). + introduced: v1.28.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network] + - name: Tail + id: tail + description: | + This service plugin continuously reads a file and parses new data as it + arrives similar to the [tail -f + command](https://man7.org/linux/man-pages/man1/tail.1.html). 
The incoming + messages are expected to be in one of the supported [data + formats](/telegraf/v1/data_formats/input). + introduced: v1.1.2 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [logging] + - name: Teamspeak + id: teamspeak + description: | + This plugin collects statistics of one or more virtual + [Teamspeak](https://www.teamspeak.com) servers using the `ServerQuery` + interface. Currently this plugin only supports Teamspeak 3 servers. + + > [!NOTE] + > For querying external Teamspeak server, make sure to add the Telegraf + > host to the `query_ip_allowlist.txt` file in the Teamspeak Server + > directory. + introduced: v1.5.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Temperature + id: temp + description: | + This plugin gathers metrics on system temperatures. + introduced: v1.8.0 + os_support: [linux, macos, windows] + tags: [hardware, system] + - name: Tengine Web Server + id: tengine + description: | + This plugin gathers metrics from the [Tengine Web + Server](http://tengine.taobao.org) via the + [reqstat](http://tengine.taobao.org/document/http_reqstat.html) module. + introduced: v1.8.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server, web] + - name: Apache Tomcat + id: tomcat + description: | + This plugin collects statistics from a [Tomcat + server](https://tomcat.apache.org) instance using the manager status + page. See the [Tomcat + documentation](https://tomcat.apache.org/tomcat-9.0-doc/manager-howto.html#Server_Status) + for details of these statistics. + introduced: v1.4.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server, web] + - name: Trig + id: trig + description: | + This plugin is for demonstration purposes and inserts sine and cosine + values as metrics. 
+ introduced: v0.3.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [testing] + - name: Twemproxy + id: twemproxy + description: | + This plugin gathers statistics from + [Twemproxy](https://github.com/twitter/twemproxy) servers. + introduced: v0.3.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: Unbound + id: unbound + description: | + This plugin gathers stats from an [Unbound](https://www.unbound.net) DNS + resolver. + introduced: v1.5.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network, server] + - name: UPSD + id: upsd + description: | + This plugin reads data of one or more Uninterruptible Power Supplies from + a [Network UPS Tools](https://networkupstools.org/) daemon using its NUT + network protocol. + introduced: v1.24.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [hardware, server] + - name: uWSGI + id: uwsgi + description: | + This plugin gathers metrics about + [uWSGI](https://uwsgi-docs.readthedocs.io/en/latest/) using its [Stats + Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html). + introduced: v1.12.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud] + - name: Varnish + id: varnish + description: | + This plugin gathers statistics from a local [Varnish HTTP + Cache](https://varnish-cache.org) instance using the `varnishstat` + command. + + > [!NOTE] + > This plugin requires the `varnishstat` executable to be installed on the + > system and executable by Telegraf. Furthermore, the plugin requires + > Varnish v6.0.2+. + introduced: v0.13.1 + os_support: [freebsd, linux, macos] + tags: [server, web] + - name: Hashicorp Vault + id: vault + description: | + This plugin collects metrics from every + [Vault](https://www.hashicorp.com/de/products/vault) agent of a cluster. 
+ + > [!NOTE] + > This plugin requires Vault v1.8.5+ + introduced: v1.22.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [server] + - name: VMware vSphere + id: vsphere + description: | + This plugin gathers metrics from + [vSphere](https://www.vmware.com/products/cloud-infrastructure/vsphere) + servers of a vCenter including clusters, hosts, resource pools, VMs, + datastores and vSAN information. + + > [!NOTE] + > This plugin requires vSphere v7.0+. + introduced: v1.8.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [containers] + - name: Webhooks + id: webhooks + description: | + This service plugin provides an HTTP server and register for multiple + webhook listeners. + introduced: v1.0.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications, web] + - name: WHOIS + id: whois + description: | + This plugin queries [WHOIS + information](https://datatracker.ietf.org/doc/html/rfc3912) for + configured domains and provides metrics such as expiration timestamps, + registrar details and domain status from e.g. + [IANA](https://www.iana.org/whois) or [ICANN](https://lookup.icann.org/) + servers. + introduced: v1.35.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network, web] + - name: Windows Eventlog + id: win_eventlog + description: | + This plugin gathers metrics from the [Windows event + log](https://learn.microsoft.com/en-us/shows/inside/event-viewer) on + Windows Vista and higher. + + > [!NOTE] + > Some event channels, like the System Log, require Administrator + > permissions to subscribe. + introduced: v1.16.0 + os_support: [windows] + tags: [logging] + - name: Windows Performance Counters + id: win_perf_counters + description: | + This plugin produces metrics from the collected [Windows Performance + Counters](https://learn.microsoft.com/en-us/windows/win32/perfctrs/about-performance-counters). 
+ introduced: v0.10.2 + os_support: [windows] + tags: [system] + - name: Windows Services + id: win_services + description: | + This plugin collects information about the status of Windows services. + + > [!NOTE] + > Monitoring some services may require running Telegraf with administrator + > privileges. + introduced: v1.4.0 + os_support: [windows] + tags: [system] + - name: Windows Management Instrumentation + id: win_wmi + description: | + This plugin queries information or invokes methods using [Windows + Management + Instrumentation](https://learn.microsoft.com/en-us/windows/win32/wmisdk/wmi-start-page) + classes. This allows capturing and filtering virtually any configuration + or metric value exposed through WMI. + + > [!NOTE] + > The telegraf service user must have at least permission to + > [read](https://learn.microsoft.com/en-us/windows/win32/wmisdk/access-to-wmi-namespaces) + > the WMI namespace being queried. + introduced: v1.26.0 + os_support: [windows] + tags: [system] + - name: Wireguard + id: wireguard + description: | + This plugin collects statistics on a local + [Wireguard](https://www.wireguard.com/) server using the [`wgctrl` + library](). The plugin reports gauge metrics for Wireguard interface + device(s) and its peers. + introduced: v1.14.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network] + - name: Wireless + id: wireless + description: | + This plugin gathers metrics about wireless link quality by reading the + `/proc/net/wireless` file. + introduced: v1.9.0 + os_support: [linux] + tags: [network] + - name: x509 Certificate + id: x509_cert + description: | + This plugin provides information about + [X.509](https://en.wikipedia.org/wiki/X.509) certificates accessible e.g. + via local file, tcp, udp, https or smtp protocols. + + > [!NOTE] + > When using a UDP address as a certificate source, the server must + > support + > [DTLS](https://en.wikipedia.org/wiki/Datagram_Transport_Layer_Security). 
+ introduced: v1.8.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network] + - name: Dell EMC XtremIO + id: xtremio + description: | + This plugin gathers metrics from a [Dell EMC XtremIO Storage + Array](https://www.delltechnologies.com/asset/en-sa/products/storage/industry-market/h16444-introduction-xtremio-x2-storage-array-wp.pdf) + instance using the [v3 Rest + API](https://dl.dell.com/content/docu96624_xtremio-storage-array-x1-and-x2-cluster-types-with-xms-6-3-0-to-6-3-3-and-xios-4-0-15-to-4-0-31-and-6-0-0-to-6-3-3-restful-api-3-x-guide.pdf). + introduced: v1.22.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [network] + - name: ZFS + id: zfs + description: | + This plugin gathers metrics from [ZFS](https://en.wikipedia.org/wiki/ZFS) + filesystems using `/proc/spl/kstat/zfs` on Linux and `sysctl`, `zfs` and + `zpool` on FreeBSD. + introduced: v0.2.1 + os_support: [freebsd, linux] + tags: [system] + - name: Zipkin + id: zipkin + description: | + This service plugin implements the [Zipkin](https://zipkin.io/) HTTP + server to gather trace and timing data needed to troubleshoot latency + problems in microservice architectures. + + > [!CRITICAL] + > This plugin produces high cardinality data, which when not controlled + > for will cause high load on your database. Please make sure to + > [filter](/telegraf/v1/configuration/#metric-filtering) the produced + > metrics or configure your database to avoid cardinality issues! + introduced: v1.4.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud] + - name: Apache Zookeeper + id: zookeeper + description: | + This plugin collects variables from + [Zookeeper](https://zookeeper.apache.org) instances using the [`mntr` + command](). + + > [!NOTE] + > If the Prometheus Metric provider is enabled in Zookeeper use the + > [prometheus plugin](/telegraf/v1/plugins/#input-prometheus) instead with + > `http://:7000/metrics`. 
+ introduced: v0.2.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications] +output: + - name: Amon + id: amon + description: | + This plugin writes metrics to [Amon monitoring + platform](https://www.amon.cx). It requires a `serverkey` and + `amoninstance` URL which can be obtained + [here](https://www.amon.cx/docs/monitoring/) for your account. + + > [!IMPORTANT] + > If point values being sent cannot be converted to a `float64`, the + > metric is skipped. + introduced: v0.2.1 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: AMQP + id: amqp + description: | + This plugin writes to an Advanced Message Queuing Protocol v0.9.1 broker. + A prominent implementation of this protocol is + [RabbitMQ](https://www.rabbitmq.com). + + > [!NOTE] + > This plugin does not bind the AMQP exchange to a queue. + + For an introduction check the [AMQP concepts + page](https://www.rabbitmq.com/tutorials/amqp-concepts.html) and the + [RabbitMQ getting started + guide](https://www.rabbitmq.com/getstarted.html). + introduced: v0.1.9 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [messaging] + - name: Azure Application Insights + id: application_insights + description: | + This plugin writes metrics to the [Azure Application + Insights](https://azure.microsoft.com/en-us/services/application-insights/) + service. + introduced: v1.7.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications, cloud] + - name: Azure Data Explorer + id: azure_data_explorer + description: | + This plugin writes metrics to the [Azure Data + Explorer](https://docs.microsoft.com/en-us/azure/data-explorer), [Azure + Synapse Data + Explorer](https://docs.microsoft.com/en-us/azure/synapse-analytics/data-explorer/data-explorer-overview), + and [Real time analytics in + Fabric](https://learn.microsoft.com/en-us/fabric/real-time-analytics/overview) + services. 
+ + Azure Data Explorer is a distributed, columnar store, purpose built for + any type of logs, metrics and time series data. + introduced: v1.20.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, datastore] + - name: Azure Monitor + id: azure_monitor + description: | + This plugin writes metrics to [Azure + Monitor](https://learn.microsoft.com/en-us/azure/azure-monitor) which has + a metric resolution of one minute. To accommodate for this in Telegraf, + the plugin will automatically aggregate metrics into one minute buckets + and send them to the service on every flush interval. + + > [!IMPORTANT] + > The Azure Monitor custom metrics service is currently in preview and + > might not be available in all Azure regions. Please also take the metric + > time limitations into account! + + The metrics from each input plugin will be written to a separate Azure + Monitor namespace, prefixed with `Telegraf/` by default. The field name + for each metric is written as the Azure Monitor metric name. All field + values are written as a summarized set that includes: min, max, sum, + count. Tags are written as a dimension on each Azure Monitor metric. + introduced: v1.8.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, datastore] + - name: Google BigQuery + id: bigquery + description: | + This plugin writes metrics to the [Google Cloud + BigQuery](https://cloud.google.com/bigquery) service and requires + [authentication](https://cloud.google.com/bigquery/docs/authentication) + with Google Cloud using either a service account or user credentials. + + > [!IMPORTANT] + > Be aware that this plugin accesses APIs that are + > [chargeable](https://cloud.google.com/bigquery/pricing) and might incur + > costs. + introduced: v1.18.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, datastore] + - name: Clarify + id: clarify + description: | + This plugin writes metrics to [Clarify](https://clarify.io). 
To use this + plugin you will need to obtain a set of + [credentials](https://docs.clarify.io/users/admin/integrations/credentials). + introduced: v1.27.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, datastore] + - name: Google Cloud PubSub + id: cloud_pubsub + description: | + This plugin publishes metrics to a [Google Cloud + PubSub](https://cloud.google.com/pubsub) topic in one of the supported + [data formats](/telegraf/v1/data_formats/output). + introduced: v1.10.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, messaging] + - name: Amazon CloudWatch + id: cloudwatch + description: | + This plugin writes metrics to the [Amazon + CloudWatch](https://aws.amazon.com/cloudwatch) service. + introduced: v0.10.1 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud] + - name: Amazon CloudWatch Logs + id: cloudwatch_logs + description: | + This plugin writes log-metrics to the [Amazon + CloudWatch](https://aws.amazon.com/cloudwatch) service. + introduced: v1.19.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, logging] + - name: CrateDB + id: cratedb + description: | + This plugin writes metrics to [CrateDB](https://crate.io/) via its + [PostgreSQL + protocol](https://crate.io/docs/crate/reference/protocols/postgres.html). + introduced: v1.5.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, datastore] + - name: Datadog + id: datadog + description: | + This plugin writes metrics to the [Datadog Metrics + API](https://docs.datadoghq.com/api/v1/metrics/#submit-metrics) and + requires an `apikey` which can be obtained + [here](https://app.datadoghq.com/account/settings#api) for the account. + + > [!NOTE] + > This plugin supports the v1 API. 
+ introduced: v0.1.6 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications, cloud, datastore] + - name: Discard + id: discard + description: | + This plugin discards all metrics written to it and is meant for testing + purposes. + introduced: v1.2.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [testing] + - name: Dynatrace + id: dynatrace + description: | + This plugin writes metrics to [Dynatrace](https://www.dynatrace.com) via + the [Dynatrace Metrics API + V2](https://docs.dynatrace.com/docs/shortlink/api-metrics-v2). It may be + run alongside the Dynatrace OneAgent for automatic authentication or it + may be run standalone on a host without OneAgent by specifying a URL and + API Token. + + More information on the plugin can be found in the [Dynatrace + documentation](https://docs.dynatrace.com/docs/shortlink/api-metrics-v2-post-datapoints). + + > [!NOTE] + > All metrics are reported as gauges, unless they are specified to be + > delta counters using the `additional_counters` or + > `additional_counters_patterns` config option (see below). See the + > [Dynatrace Metrics ingestion protocol + > documentation](https://docs.dynatrace.com/docs/shortlink/metric-ingestion-protocol) + > for details on the types defined there. + introduced: v1.16.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, datastore] + - name: Elasticsearch + id: elasticsearch + description: | + This plugin writes metrics to [Elasticsearch](https://www.elastic.co) via + HTTP using the [Elastic client + library](http://olivere.github.io/elastic/). The plugin supports + Elasticsearch releases from v5.x up to v7.x. 
+ introduced: v0.1.5 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore, logging] + - name: Azure Event Hubs + id: event_hubs + description: | + This plugin writes metrics to the [Azure Event + Hubs](https://azure.microsoft.com/en-gb/services/event-hubs/) service in + any of the supported [data formats](/telegraf/v1/data_formats/output). + Metrics are sent as batches with each message payload containing one + metric object, preferably as JSON as this eases integration with + downstream components. + + Each batch is sent to a single Event Hub within a namespace. In case no + partition key is specified the batches will be automatically + load-balanced (round-robin) across all the Event Hub partitions. + introduced: v1.21.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, datastore] + - name: Executable + id: exec + description: | + This plugin writes metrics to an external application via `stdin`. The + command will be executed on each write creating a new process. Metrics + are passed in one of the supported [data + formats](/telegraf/v1/data_formats/output). + + The executable and the individual parameters must be defined as a list. + All outputs of the executable to `stderr` will be logged in the Telegraf + log. + + > [!TIP] + > For better performance consider execd which runs continuously. + introduced: v1.12.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [system] + - name: Executable Daemon + id: execd + description: | + This plugin writes metrics to an external daemon program via `stdin`. The + command will be executed once and metrics will be passed to it on every + write in one of the supported [data + formats](/telegraf/v1/data_formats/output). The executable and the + individual parameters must be defined as a list. + + All outputs of the executable to `stderr` will be logged in the Telegraf + log. 
Telegraf minimum version: Telegraf 1.15.0 + introduced: v1.15.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [system] + - name: File + id: file + description: | + This plugin writes metrics to one or more local files in one of the + supported [data formats](/telegraf/v1/data_formats/output). + introduced: v0.10.3 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [system] + - name: Graphite + id: graphite + description: | + This plugin writes metrics to + [Graphite](http://graphite.readthedocs.org/en/latest/index.html) via TCP. + For details on the translation between Telegraf Metrics and Graphite + output see the [Graphite data + format](/telegraf/v1/plugins/#serializer-graphite). + introduced: v0.10.1 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: Graylog + id: graylog + description: | + This plugin writes metrics to a [Graylog](https://graylog.org/) instance + using the [GELF data + format](https://docs.graylog.org/en/3.1/pages/gelf.html#gelf-payload-specification). + introduced: v1.0.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore, logging] + - name: GroundWork + id: groundwork + description: | + This plugin writes metrics to a [GroundWork + Monitor](https://www.gwos.com/product/groundwork-monitor/) instance. + + > [!IMPORTANT] + > Plugin only supports GroundWork v8 or later. + introduced: v1.21.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications, messaging] + - name: Health + id: health + description: | + This plugin provides a HTTP health check endpoint that can be configured + to return failure status codes based on the value of a metric. + + When the plugin is healthy it will return a 200 response; when unhealthy + it will return a 503 response. The default state is healthy, one or more + checks must fail in order for the resource to enter the failed state. 
+ introduced: v1.11.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications] + - name: HTTP + id: http + description: | + This plugin writes metrics to a HTTP endpoint using one of the supported + [data formats](/telegraf/v1/data_formats/output). For data formats + supporting batching, metrics are sent in batches by default. + introduced: v1.7.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications] + - name: InfluxDB v1.x + id: influxdb + description: | + This plugin writes metrics to a [InfluxDB + v1.x](https://docs.influxdata.com/influxdb/v1) instance via HTTP or UDP + protocol. + introduced: v0.1.1 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: InfluxDB v2.x + id: influxdb_v2 + description: | + This plugin writes metrics to a [InfluxDB + v2.x](https://docs.influxdata.com/influxdb/v2) instance via HTTP. + introduced: v1.8.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: Inlong + id: inlong + description: | + This plugin publishes metrics to an [Apache + InLong](https://inlong.apache.org) instance. + introduced: v1.35.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [messaging] + - name: Instrumental + id: instrumental + description: | + This plugin writes metrics to the [Instrumental Collector + API](https://instrumentalapp.com/docs/tcp-collector) and requires a + project-specific API token. + + Instrumental accepts stats in a format very close to Graphite, with the + only difference being that the type of stat (gauge, increment) is the + first token, separated from the metric itself by whitespace. The + `increment` type is only used if the metric comes in as a counter via the + [statsd input plugin](/telegraf/v1/plugins/#input-statsd). 
+ introduced: v0.13.1 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications] + - name: Apache IoTDB + id: iotdb + description: | + This plugin writes metrics to an [Apache IoTDB](https://iotdb.apache.org) + instance, a database for the Internet of Things, supporting session + connection and data insertion. + introduced: v1.24.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: Kafka + id: kafka + description: | + This plugin writes metrics to a [Kafka Broker](http://kafka.apache.org) + acting as a Kafka Producer. + introduced: v0.1.7 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [messaging] + - name: Amazon Kinesis + id: kinesis + description: | + This plugin writes metrics to a [Amazon + Kinesis](https://aws.amazon.com/kinesis) endpoint. It will batch all + Points in one request to reduce the number of API requests. + + Please consult [Amazon's official + documentation](http://docs.aws.amazon.com/kinesis/latest/dev/key-concepts.html) + for more details on the Kinesis architecture and concepts. + introduced: v0.2.5 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, messaging] + - name: Librato + id: librato + description: | + This plugin writes metrics to the [Librato](https://www.librato.com/) + service. It requires an `api_user` and `api_token` which can be obtained + [here](https://metrics.librato.com/account/api_tokens) for your account. + + The `source_tag` option in the Configuration file is used to send + contextual information from Point Tags to the API. Besides from this, the + plugin currently does not send any additional associated Point Tags. + + > [!IMPORTANT] + > If the point value being sent cannot be converted to a `float64`, the + > metric is skipped. 
+ introduced: v0.2.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, datastore] + - name: Logz.io + id: logzio + description: | + This plugin writes metrics to the [Logz.io](https://logz.io) service via + HTTP. + introduced: v1.17.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, datastore] + - name: Grafana Loki + id: loki + description: | + This plugin writes logs to a [Grafana Loki](https://grafana.com/loki) + instance, using the metric name and tags as labels. The log line will + contain all fields in `key="value"` format easily parsable with the + `logfmt` parser in Loki. + + Logs within each stream are sorted by timestamp before being sent to + Loki. + introduced: v1.18.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [logging] + - name: Microsoft Fabric + id: microsoft_fabric + description: | + This plugin writes metrics to [Real time analytics in + Fabric](https://learn.microsoft.com/en-us/fabric/real-time-analytics/overview) + services. + introduced: v1.35.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: MongoDB + id: mongodb + description: | + This plugin writes metrics to [MongoDB](https://www.mongodb.com) + automatically creating collections as time series collections if they + don't exist. + + > [!NOTE] + > This plugin requires MongoDB v5 or later for time series collections. + introduced: v1.21.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: MQTT Producer + id: mqtt + description: | + This plugin writes metrics to a [MQTT broker](https://mqtt.org/) + acting as a MQTT producer. The plugin supports the MQTT protocols `3.1.1` + and `5`. + + > [!NOTE] + > In v2.0.12+ of the mosquitto MQTT server, there is a + > [bug](https://github.com/eclipse/mosquitto/issues/2117) requiring the + > `keep_alive` value to be set non-zero in Telegraf. Otherwise, the server + > will return with `identifier rejected`. 
As a reference + > `eclipse/paho.golang` sets the `keep_alive` to 30. + introduced: v0.2.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [messaging] + - name: NATS + id: nats + description: | + This plugin writes metrics to subjects of a set of + [NATS](https://nats.io) instances in one of the supported [data + formats](/telegraf/v1/data_formats/output). + introduced: v1.1.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [messaging] + - name: Nebius Cloud Monitoring + id: nebius_cloud_monitoring + description: | + This plugin writes metrics to the [Nebius Cloud + Monitoring](https://nebius.com/il/services/monitoring) service. + introduced: v1.27.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, datastore] + - name: New Relic + id: newrelic + description: | + This plugin writes metrics to [New Relic Insights](https://newrelic.com) + using the [Metrics + API](https://docs.newrelic.com/docs/data-ingest-apis/get-data-new-relic/metric-api/introduction-metric-api). + To use this plugin you have to obtain an [Insights API + Key](https://docs.newrelic.com/docs/apis/get-started/intro-apis/types-new-relic-api-keys#user-api-key). + introduced: v1.15.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications] + - name: NSQ + id: nsq + description: | + This plugin writes metrics to the given topic of a [NSQ](https://nsq.io) + instance as a producer in one of the supported [data + formats](/telegraf/v1/data_formats/output). + introduced: v0.2.1 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [messaging] + - name: OpenSearch + id: opensearch + description: | + This plugin writes metrics to a [OpenSearch](https://opensearch.org/) + instance via HTTP. It supports OpenSearch releases v1 and v2 but future + compatibility with 1.x is not guaranteed and instead will focus on 2.x + support. + + > [!TIP] + > Consider using the existing Elasticsearch plugin for 1.x. 
+ introduced: v1.29.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore, logging] + - name: OpenTelemetry + id: opentelemetry + description: | + This plugin writes metrics to [OpenTelemetry](https://opentelemetry.io) + servers and agents via gRPC. + introduced: v1.20.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [logging, messaging] + - name: OpenTSDB + id: opentsdb + description: | + This plugin writes metrics to an [OpenTSDB](http://opentsdb.net/) + instance using either the telnet or HTTP mode. Using the HTTP API is + recommended since OpenTSDB 2.0. + introduced: v0.1.9 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: Parquet + id: parquet + description: | + This plugin writes metrics to [parquet](https://parquet.apache.org) + files. By default, metrics are grouped by metric name and written all to + the same file. + + > [!IMPORTANT] + > If a metric schema does not match the schema in the file it will be + > dropped. + + To learn more about the parquet format, check out the [parquet + docs](https://parquet.apache.org/docs/) as well as a blog post on + [querying + parquet](https://www.influxdata.com/blog/querying-parquet-millisecond-latency/). + introduced: v1.32.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: PostgreSQL + id: postgresql + description: | + This plugin writes metrics to a [PostgreSQL](https://www.postgresql.org/) + (or compatible) server managing the schema and automatically updating + missing columns. + introduced: v1.24.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: Prometheus + id: prometheus_client + description: | + This plugin starts a [Prometheus](https://prometheus.io) client and + exposes the written metrics on a `/metrics` endpoint by default. This + endpoint can then be polled by a Prometheus server. 
+ introduced: v0.2.1 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications] + - name: Quix + id: quix + description: | + This plugin writes metrics to a [Quix](https://quix.io) endpoint. + + Please consult Quix's [official documentation](https://quix.io/docs/) for + more details on the Quix platform architecture and concepts. + introduced: v1.33.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, messaging] + - name: Redis Time Series + id: redistimeseries + description: | + This plugin writes metrics to a [Redis + time-series](https://redis.io/timeseries) server. + introduced: v1.0.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: Remote File + id: remotefile + description: | + This plugin writes metrics to files in a remote location using the + [rclone library](https://rclone.org). Currently the following backends + are supported: + + - `local`: [Local filesystem](https://rclone.org/local/) + - `s3`: [Amazon S3 storage providers](https://rclone.org/s3/) + - `sftp`: [Secure File Transfer Protocol](https://rclone.org/sftp/) + introduced: v1.32.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: Riemann + id: riemann + description: | + This plugin writes metrics to the [Riemann](http://riemann.io) service via + TCP or UDP. + introduced: v1.3.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: Sensu Go + id: sensu + description: | + This plugin writes metrics to [Sensu Go](https://sensu.io) via its HTTP + events API. + introduced: v1.18.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications] + - name: SignalFx + id: signalfx + description: | + This plugin writes metrics to + [SignalFx](https://docs.signalfx.com/en/latest/). 
+ introduced: v1.18.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications] + - name: Socket Writer + id: socket_writer + description: | + This plugin writes metrics to a network service e.g. via UDP or TCP in + one of the supported [data formats](/telegraf/v1/data_formats/output). + introduced: v1.3.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications, network] + - name: SQL + id: sql + description: | + This plugin writes metrics to a supported SQL database using a simple, + hard-coded database schema. There is a table for each metric type with + the table name corresponding to the metric name. There is a column per + field and a column per tag with an optional column for the metric + timestamp. + + A row is written for every metric. This means multiple metrics are never + merged into a single row, even if they have the same metric name, tags, + and timestamp. + + The plugin uses Golang's generic "database/sql" interface and third party + drivers. See the driver-specific section for a list of supported drivers + and details. + introduced: v1.19.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] + - name: Google Cloud Monitoring + id: stackdriver + description: | + This plugin writes metrics to a `project` in [Google Cloud + Monitoring](https://cloud.google.com/monitoring/api/v3/) (formerly called + Stackdriver). + [Authentication](https://cloud.google.com/docs/authentication/getting-started) + with Google Cloud is required using either a service account or user + credentials. + + > [!IMPORTANT] + > This plugin accesses APIs which are + > [chargeable](https://cloud.google.com/stackdriver/pricing#google-clouds-operations-suite-pricing) + > and might incur costs. + + By default, Metrics are grouped by the `namespace` variable and metric + key, eg: `custom.googleapis.com/telegraf/system/load5`. However, this is + not the best practice. 
Setting `metric_name_format = "official"` will + produce a more easily queried format of: + `metric_type_prefix/[namespace_]name_key/kind`. If the global namespace + is not set, it is omitted as well. + introduced: v1.9.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, datastore] + - name: ActiveMQ STOMP + id: stomp + description: | + This plugin writes metrics to an [Active MQ + Broker](http://activemq.apache.org/) for [STOMP](https://stomp.github.io) + but also supports [Amazon MQ](https://aws.amazon.com/amazon-mq) brokers. + Metrics can be written in one of the supported [data + formats](/telegraf/v1/data_formats/output). + introduced: v1.24.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [messaging] + - name: Sumo Logic + id: sumologic + description: | + This plugin writes metrics to a [Sumo Logic HTTP + Source](https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source) + using one of the following data formats: + + - `graphite` for Content-Type of `application/vnd.sumologic.graphite` + - `carbon2` for Content-Type of `application/vnd.sumologic.carbon2` + - `prometheus` for Content-Type of `application/vnd.sumologic.prometheus` + introduced: v1.16.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [logging] + - name: Syslog + id: syslog + description: | + This plugin writes metrics as syslog messages via UDP in [RFC5426 + format](https://tools.ietf.org/html/rfc5426) or via TCP in [RFC6587 + format](https://tools.ietf.org/html/rfc6587) or via TLS in [RFC5425 + format](https://tools.ietf.org/html/rfc5425), with or without the octet + counting framing. + + > [!IMPORTANT] + > Syslog messages are formatted according to + > [RFC5424](https://tools.ietf.org/html/rfc5424) limiting the field sizes + > when sending messages according to the [syslog message + > format](https://datatracker.ietf.org/doc/html/rfc5424#section-6) section + > of the RFC. 
Sending messages beyond these sizes may get dropped by a + > strict receiver silently. + introduced: v1.11.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [logging] + - name: Amazon Timestream + id: timestream + description: | + This plugin writes metrics to the [Amazon + Timestream](https://aws.amazon.com/timestream) service. + introduced: v1.16.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, datastore] - name: Warp10 id: warp10 description: | - The Warp10 output plugin writes metrics to [SenX Warp 10](https://www.warp10.io/). - introduced: 1.14.0 - tags: [linux, macos, windows] - + This plugin writes metrics to the [Warp 10](https://www.warp10.io) + service. + introduced: v1.14.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud, datastore] - name: Wavefront id: wavefront description: | - The Wavefront output plugin writes to a Wavefront proxy, in Wavefront data format over TCP. - introduced: 1.5.0 - tags: [linux, macos, windows, applications, cloud] - + This plugin writes metrics to a [Wavefront](https://www.wavefront.com) + instance or a Wavefront Proxy instance over HTTP or HTTPS. + introduced: v1.5.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications, cloud] - name: Websocket id: websocket description: | - The Websocket output plugin can write to a WebSocket endpoint. - introduced: 1.19.0 - tags: [web, servers] - + This plugin writes metrics to a WebSocket endpoint in one of the + supported [data formats](/telegraf/v1/data_formats/output). + introduced: v1.19.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [applications, web] - name: Yandex Cloud Monitoring id: yandex_cloud_monitoring description: | - The Yandex Cloud Monitoring output plugin sends custom metrics to Yandex Cloud Monitoring. 
- introduced: 1.17.0 - tags: [linux, macos, windows] - + This plugin writes metrics to the [Yandex Cloud + Monitoring](https://cloud.yandex.com/services/monitoring) service. + introduced: v1.17.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [cloud] - name: Zabbix id: zabbix description: | - This plugin send metrics to Zabbix via traps. It has been tested with - versions 3.0, 4.0 and 6.0. It should work with newer versions as long as - Zabbix does not change the protocol. - introduced: 1.30.0 - tags: [linux, macos, windows, data-stores] - -# %%%% %%%% %%%% %%%%% %%%%%% %%%% %%%% %%%%%% %%%% %%%%% %%%% # -# %% %% %% %% %% %% %% %% %% %% %% %% %% %% %% %% # -# %%%%%% %% %%% %% %%% %%%%% %%%% %% %%% %%%%%% %% %% %% %%%%% %%%% # -# %% %% %% %% %% %% %% %% %% %% %% %% %% %% %% %% %% %% %% # -# %% %% %%%% %%%% %% %% %%%%%% %%%% %% %% %% %%%% %% %% %%%% # - + This plugin writes metrics to [Zabbix](https://www.zabbix.com/) via + [traps](https://www.zabbix.com/documentation/current/en/manual/appendix/items/trapper). + It has been tested with versions v3.0, v4.0 and v6.0 but should work with + newer versions of Zabbix as long as the protocol doesn't change. + introduced: v1.30.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [datastore] aggregator: - - name: BasicStats + - name: Basic Statistics id: basicstats description: | - The BasicStats aggregator plugin gives `count`, `max`, `min`, `mean`, `s2`(variance), - and `stdev` for a set of values, emitting the aggregate every period seconds. - introduced: 1.5.0 - tags: [linux, macos, windows] - + This plugin computes basic statistics such as counts, differences, + minima, maxima, mean values, non-negative differences etc. for a set of + metrics and emits these statistical values every `period`. 
+ introduced: v1.5.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [statistics] - name: Derivative id: derivative description: | - The derivative aggregator plugin estimates the derivative for all fields of the aggregated metrics. - introduced: 1.18.0 - tags: [] - + This plugin computes the derivative for all fields of the aggregated + metrics. + introduced: v1.18.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [math] - name: Final id: final description: | - The final aggregator emits the last metric of a contiguous series. - A contiguous series is defined as a series which receives updates within the time period in series_timeout. - The contiguous series may be longer than the time interval defined by period. - This is useful for getting the final value for data sources that produce discrete time series, such as procstat, cgroup, kubernetes, etc. - introduced: 1.11.0 - tags: [linux, macos, windows] + This plugin emits the last metric of a contiguous series, defined as a + series which receives updates within the time period in `series_timeout`. + The contiguous series may be longer than the time interval defined by + `period`. When a series has not been updated within the `series_timeout`, + the last metric is emitted. + Alternatively, the plugin emits the last metric in the `period` for the + `periodic` output strategy. + + This is useful for getting the final value for data sources that produce + discrete time series such as procstat, cgroup, kubernetes etc. or to + downsample metrics collected at a higher frequency. + + > [!NOTE] + > All emitted metrics have fields with `_final` appended to the + > field-name by default. + introduced: v1.11.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [sampling] - name: Histogram id: histogram description: | - The Histogram aggregator plugin creates histograms containing the counts of - field values within a range. 
+ This plugin creates histograms containing the counts of field values + within the configured range. The histogram metric is emitted every + `period`. - Values added to a bucket are also added to the larger buckets in the distribution. - This creates a [cumulative histogram](https://upload.wikimedia.org/wikipedia/commons/5/53/Cumulative_vs_normal_histogram.svg). - - Like other Telegraf aggregator plugins, the metric is emitted every period seconds. - Bucket counts, however, are not reset between periods and will be non-strictly - increasing while Telegraf is running. - introduced: 1.4.0 - tags: [linux, macos, windows] + In `cumulative` mode, values added to a bucket are also added to the + consecutive buckets in the distribution creating a [cumulative + histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg). + > [!NOTE] + > By default bucket counts are not reset between periods and will be + > non-strictly increasing while Telegraf is running. This behavior can be + > changed by setting the `reset` parameter. + introduced: v1.4.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [statistics] - name: Merge id: merge description: | - The Merge aggregator plugin merges metrics together and generates line protocol with - multiple fields per line. This optimizes memory and network transfer efficiency. - Use this plugin when fields are split over multiple lines of line protocol - with the same measurement, tag set, and timestamp on each. - introduced: 1.13.0 - tags: [linux, macos, windows] + This plugin merges metrics of the same series and timestamp into new + metrics with the super-set of fields. A series here is defined by the + metric name and the tag key-value set. - - name: MinMax + Use this plugin when fields are split over multiple metrics, with the + same measurement, tag set and timestamp. 
+ introduced: v1.13.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] + - name: Minimum-Maximum id: minmax description: | - The MinMax aggregator plugin aggregates `min` and `max` values of each field it sees, - emitting the aggregate every period seconds. - introduced: 1.1.0 - tags: [linux, macos, windows] - + This plugin aggregates the minimum and maximum values of each field it + sees, emitting the aggregate every `period` seconds with field names + suffixed by `_min` and `_max` respectively. + introduced: v1.1.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [statistics] - name: Quantile id: quantile description: | - The quantile aggregator plugin aggregates specified quantiles for each numeric field per metric it sees and emits the quantiles every designated `period`. - introduced: 1.18.0 - tags: [] - + This plugin aggregates each numeric field per metric into the specified + quantiles and emits the quantiles every `period`. Different aggregation + algorithms are supported with varying accuracy and limitations. + introduced: v1.18.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [statistics] - name: Starlark id: starlark description: | - The Starlark aggregator plugin allows a user to implement a custom aggregator plugin with a [Starlark](https://github.com/google/starlark-go/blob/master/doc/spec.md) script. - introduced: 1.21.0 - tags: [] + This plugin allows to implement a custom aggregator plugin via a + [Starlark](https://github.com/google/starlark-go) script. - - name: ValueCounter + The Starlark language is a dialect of Python and will be familiar to + those who have experience with the Python language. However, there are + major differences. Existing Python code is unlikely to work unmodified. + + > [!NOTE] + > The execution environment is sandboxed, and it is not possible to access + > the local filesystem or performing network operations. 
This is by design + > of the Starlark language as a configuration language. + + The Starlark script used by this plugin needs to be composed of the three + methods defining an aggregator named `add`, `push` and `reset`. + + The `add` method is called as soon as a new metric is added to the plugin, + adding the metrics to the aggregator. After `period`, the `push` method is + called to output the resulting metrics and finally the aggregation is + reset by using the `reset` method of the Starlark script. + + The Starlark functions might use the global function `state` to keep + aggregation information such as added metrics etc. + + More details on the syntax and available functions can be found in the + [Starlark + specification](https://github.com/google/starlark-go/blob/d1966c6b9fcd/doc/spec.md). + introduced: v1.21.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] + - name: Value Counter id: valuecounter description: | - The ValueCounter aggregator plugin counts the occurrence of values in fields - and emits the count at regular intervals of 'period' seconds. - This plugin exclusively operates on fields and doesn't affect tags. + This plugin counts the occurrence of unique values in fields and emits + the counter once every `period` with the field-names being suffixed by + the unique value converted to `string`. - To count specific fields, configure them using the fields configuration directive. - If no fields are specified, the plugin won't count any fields. - The results are emitted in fields, formatted as `originalfieldname_fieldvalue = count`. + > [!NOTE] + > The fields to be counted must be configured using the `fields` setting, + > otherwise no field will be counted and no metric is emitted. - ValueCounter only works on fields of the type `int`, `bool`, or `string`. - Float fields are being dropped to prevent the creating of too many fields. 
- introduced: 1.8.0 - tags: [linux, macos, windows] - -# %%%%% %%%%% %%%% %%%% %%%%%% %%%% %%%% %%%% %%%%% %%%% # -# %% %% %% %% %% %% %% %% %% %% %% %% %% %% %% %% # -# %%%%% %%%%% %% %% %% %%%% %%%% %%%% %% %% %%%%% %%%% # -# %% %% %% %% %% %% %% %% %% %% %% %% %% %% %% # -# %% %% %% %%%% %%%% %%%%%% %%%% %%%% %%%% %% %% %%%% # + This plugin is useful to e.g. count the occurrances of HTTP status codes + or other categorical values in the defined `period`. + > [!IMPORTANT] + > Counting fields with a high number of potential values may produce a + > significant amounts of new fields and results in an increased memory + > usage. Take care to only count fields with a limited set of values. + introduced: v1.8.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [statistics] processor: - name: AWS EC2 Metadata id: aws_ec2 description: | - The AWS EC2 Metadata processor plugin appends metadata gathered from [AWS IMDS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) to metrics associated with EC2 instances. - introduced: 1.18.0 - tags: [linux, macos, windows, cloud] - - - name: Converter - id: converter + This plugin appends metadata gathered from [AWS + IMDS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) + to metrics associated with EC2 instances. + introduced: v1.18.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [annotation, cloud] + - name: Batch + id: batch description: | - The Converter processor plugin is used to change the type of tag or field values. - In addition to changing field types, it can convert between fields and tags. - Values that cannot be converted are dropped. - introduced: 1.7.0 - tags: [linux, macos, windows] + This plugin groups metrics into batches by adding a batch tag. This is + useful for parallel processing of metrics where downstream processors, + aggregators or outputs can then select a batch using `tagpass` or + `metricpass`. 
+ Metrics are distributed across batches using the round-robin scheme. + introduced: v1.33.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [grouping] - name: Clone id: clone description: | - The Clone processor plugin creates a copy of each metric to preserve the - original metric and allow modifications in the copied metric. - introduced: 1.13.0 - tags: [linux, macos, windows] + This plugin creates a copy of each metric passing through it, preserving + the original metric and allowing modifications such as [metric + modifiers](/telegraf/v1/configuration/#modifiers) in the copied metric. + > [!NOTE] + > [Metric filtering](/telegraf/v1/configuration/#metric-filtering) options + > apply to both the clone and the original metric. + introduced: v1.13.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] + - name: Converter + id: converter + description: | + This plugin allows transforming tags into fields or timestamps, and + converting fields into tags or timestamps. The plugin furthermore allows + to change the field type. + + > [!IMPORTANT] + > When converting tags to fields, take care to ensure the series is still + > uniquely identifiable. Fields with the same series key (measurement + + > tags) will overwrite one another. + introduced: v1.7.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] + - name: Cumulative Sum + id: cumulative_sum + description: | + This plugin accumulates field values per-metric over time and emit + metrics with cumulative sums whenever a metric is updated. This is useful + when using outputs relying on monotonically increasing values + + > [!NOTE] + > Metrics within a series are accumulated in the **order of arrival** and + > not in order of their timestamps! 
+ introduced: v1.35.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] - name: Date id: date description: | - The Date processor plugin adds the metric timestamp as a human readable tag. - introduced: 1.12.0 - tags: [linux, macos, windows] - + This plugin adds the metric timestamp as a human readable tag. A common + use is to add a tag that can be used to group by month or year. + introduced: v1.12.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] - name: Dedup id: dedup description: | - The Dedup processor plugin filters metrics whose field values are exact repetitions of the previous values. - introduced: 1.14.0 - tags: [linux, macos, windows] - + This plugin filters metrics whose field values are exact repetitions of + the previous values. This plugin will store its state between runs if the + `statefile` option in the agent config section is set. + introduced: v1.14.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [filtering] - name: Defaults id: defaults description: | - The Defaults processor plugin allows you to ensure certain fields will always exist with a specified default value on your metrics. - introduced: 1.15.0 - tags: [linux, macos, windows] - + This plugin allows to specify default values for fields and tags for + cases where the tag or field does not exist or has an empty value. + introduced: v1.15.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] - name: Enum id: enum description: | - The Enum processor plugin allows the configuration of value mappings for metric fields. - The main use case for this is to rewrite status codes such as `red`, `amber`, and `green` - by numeric values such as `0`, `1`, `2`. The plugin supports string and bool types for the field values. - Multiple Fields can be configured with separate value mappings for each field. 
- Default mapping values can be configured to be used for all values, which are - not contained in the value_mappings. - The processor supports explicit configuration of a destination field. - By default the source field is overwritten. - introduced: 1.8.0 - tags: [linux, macos, windows] - + This plugin allows the mapping of field or tag values according to the + configured enumeration. The main use-case is to rewrite numerical values + into human-readable values or vice versa. Default mappings can be + configured to be used for all remaining values. + introduced: v1.8.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] - name: Execd id: execd description: | - The `execd` processor plugin executes an external program as a separate process. It pipes metrics into the process's STDIN and reads processed metrics from its STDOUT. - introduced: 1.15.0 - tags: [linux, macos, windows] - + This plugin runs an external program as a separate process and pipes + metrics in to the process's `stdin` and reads processed metrics from its + `stdout`. Program output on `stderr` is logged. + introduced: v1.15.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [general purpose] - name: Filepath id: filepath description: | - The `filepath` processor plugin maps certain Go functions from [path/filepath](https://golang.org/pkg/path/filepath/) onto tag and field values. - introduced: 1.15.0 - tags: [linux, macos, windows] - + This plugin allows transforming a path, using e.g. basename to extract + the last path element, for tag and field values. Values can be modified + in place or stored in another key. + introduced: v1.15.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] - name: Filter id: filter description: | - The `filter` processor allows to specify a set of rules for metrics with the ability to keep or drop those metrics. 
- introduced: 1.29.0 - tags: [linux, macos, windows] - - - name: GeoIP - id: geoip - description: | - The GeoIP processor plugin looks up IP addresses in the [MaxMind GeoLite2 database](https://dev.maxmind.com/geoip/geoip2/geolite2/) and adds the respective ISO country code, city name, latitude and longitude. - introduced: 1.18.0 - link: https://github.com/a-bali/telegraf-geoip/blob/master/README.md - tags: [linux, macos, windows, external] - external: true - - - name: Lookup - id: lookup - description: | - Reads specific files and apply a table of annotations to metrics. - introduced: 1.26.0 - tags: [linux, macos, windows] + This plugin allows specifying a set of rules for metrics with the ability + to _keep_ or _drop_ those metrics. It does _not_ modify the metric. As + such a user might want to apply this processor to remove metrics from the + processing/output stream. + > [!NOTE] + > The filtering is _not_ output specific, but will apply to the metrics + > processed by this processor. + introduced: v1.29.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [filtering] - name: Network Interface Name id: ifname description: | - The Network Interface Name processor plugin looks up network interface names using SNMP. - introduced: 1.15.0 - tags: [linux, macos, windows] + This plugin looks up network interface names using SNMP. + introduced: v1.15.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [annotation] + - name: Lookup + id: lookup + description: | + This plugin allows to use one or more files containing lookup-tables for + annotating incoming metrics. The lookup is _static_ as the files are only + used on startup. The main use-case for this is to annotate metrics with + additional tags e.g. dependent on their source. Multiple tags can be + added depending on the lookup-table _files_. 
+ The lookup key can be generated using a Golang template with the ability + to access the metric name via `{{.Name}}`, the tag values via `{{.Tag + "mytag"}}`, with `mytag` being the tag-name and field-values via + `{{.Field "myfield"}}`, with `myfield` being the field-name. Non-existing + tags and field will result in an empty string or `nil` respectively. In + case the key cannot be found, the metric is passed-through unchanged. By + default all matching tags are added and existing tag-values are + overwritten. + + > [!NOTE] + > The plugin only supports the addition of tags and thus all mapped + > tag-values need to be strings! + introduced: v1.15.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [annotation] - name: Noise id: noise description: | - The noise processor plugin is used to add noise to numerical field values. This helps users to add some noise to sensitive data by anonymizing it and further prevent linkage attacks. - introduced: 1.22.0 - tags: [security] - + This plugin is used to add noise to numerical field values. For each + field a noise is generated using a defined probability density function + and added to the value. The function type can be configured as _Laplace_, + _Gaussian_ or _Uniform_. + introduced: v1.22.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] - name: Override id: override description: | - The Override processor plugin allows overriding all modifications that are supported - by input plugins and aggregator plugins: - - - `name_override` - - `name_prefix` - - `name_suffix` - - tags - - All metrics passing through this processor will be modified accordingly. - Select the metrics to modify using the standard measurement filtering options. - - Values of `name_override`, `name_prefix`, `name_suffix`, and already present - tags with conflicting keys will be overwritten. Absent tags will be created. 
- - Use case of this plugin encompass ensuring certain tags or naming conventions - are adhered to irrespective of input plugin configurations, e.g., by `taginclude`. - introduced: 1.6.0 - tags: [linux, macos, windows] + This plugin allows to modify metrics using [metric + modifiers](/telegraf/v1/configuration/#modifiers). Use-cases of this + plugin encompass ensuring certain tags or naming conventions are adhered + to irrespective of input plugin configurations, e.g. by `taginclude`. + > [!NOTE] + > [Metric filtering](/telegraf/v1/configuration/#metric-filtering) options + > apply to both the clone and the original metric. + introduced: v1.6.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] - name: Parser id: parser description: | - The Parser processor plugin parses defined fields containing the specified data - format and creates new metrics based on the contents of the field. - introduced: 1.8.0 - tags: [linux, macos, windows] - + This plugin parses defined fields or tags containing the specified [data + format](/telegraf/v1/data_formats/input) and creates new metrics based on + the resulting fields and tags. + introduced: v1.8.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] - name: Pivot id: pivot description: | - The Pivot processor plugin rotates single-valued metrics into a multi-field metric. - This transformation often results in data that is easier to use with mathematical operators and comparisons. - It also flattens data into a more compact representation for write operations with some output data formats. - - *To perform the reverse operation, use the [Unpivot](#unpivot) processor.* - introduced: 1.12.0 - tags: [linux, macos, windows] + This plugin rotates single-valued metrics into a multi-field metric. The + result is a more compact representation for applying mathematical + operators to or do comparisons between metrics or flatten fields. 
+ > [!TIP] + > To perform the reverse operation use the + > [unpivot](/telegraf/v1/plugins/#processor-unpivot) processor. + introduced: v1.12.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] - name: Port Name Lookup id: port_name description: | - The Port Name Lookup processor plugin converts a tag containing a well-known port number to the registered service name. - introduced: 1.15.0 - tags: [linux, macos, windows] - + This plugin allows converting a tag or field containing a well-known + port, either a number (e.g. `80`) for TCP ports or a port and protocol + (e.g. `443/tcp`), to the registered service name. + introduced: v1.15.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [annotation] - name: Printer id: printer description: | - The Printer processor plugin simply prints every metric passing through it. - introduced: 1.1.0 - tags: [linux, macos, windows] - + This plugin prints every metric passing through it to the standard + output. + introduced: v1.1.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [testing] - name: Regex id: regex description: | - The Regex processor plugin transforms tag and field values using a regular expression (regex) pattern. - If `result_key `parameter is present, it can produce new tags and fields from existing ones. - introduced: 1.7.0 - tags: [linux, macos, windows] + This plugin transforms tag and field _values_ as well as renaming tags, + fields and metrics using regular expression patterns. Tag and field + _values_ can be transformed using named-groups in a batch fashion. + The regex processor **only operates on string fields**. It will not work + on any other data types, like an integer or float. + introduced: v1.7.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] - name: Rename id: rename description: | - The Rename processor plugin renames InfluxDB measurements, fields, and tags. 
- introduced: 1.8.0 - tags: [linux, macos, windows] - + This plugin allows to rename measurements, fields and tags. + introduced: v1.8.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] - name: Reverse DNS id: reverse_dns description: | - The Reverse DNS processor plugin processor does a reverse-dns lookup on tags (or fields) with IPs in them. - introduced: 1.15.0 - tags: [linux, macos, windows] - + This plugin does a reverse-dns lookup on tags or fields containing IPs + and creates a tag or field containing the corresponding DNS name. + introduced: v1.15.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [annotation] - name: S2 Geo id: s2geo description: | - The S2 Geo processor plugin adds tags with an S2 cell ID token of a specified [cell level](https://s2geometry.io/resources/s2cell_statistics.html). - Tags are used in Flux `experimental/geo` functions. - Specify `lat` and `lon` field values with WGS-84 coordinates in decimal degrees. - introduced: 1.14.0 - tags: [linux, macos, windows] - + This plugin uses the WGS-84 coordinates in decimal degrees specified in + the latitude and longitude fields and adds a tag with the corresponding + S2 cell ID token of specified [cell + level](https://s2geometry.io/resources/s2cell_statistics.html). + introduced: v1.14.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [annotation] - name: Scale id: scale description: | - Processor to scale values from a specified range into another range - introduced: 1.27.0 - tags: [linux, macos, windows] + This plugin allows to scale field-values from an input range into the + given output range according to this formula: - - name: SNMP lookup + Alternatively, you can apply a factor and offset to the input according + to this formula + + Input fields are converted to floating point values if possible. + Otherwise, fields that cannot be converted are ignored and keep their + original value. 
+ + > [!NOTE] + > Neither the input nor output values are clipped to their respective + > ranges! + introduced: v1.27.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] + - name: SNMP Lookup id: snmp_lookup description: | - Processor to look-up extra tags using SNMP - introduced: 1.30.0 - tags: [linux, macos, windows] - + This plugin looks up extra information via SNMP and adds it to the metric + as tags. + introduced: v1.30.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [annotation] - name: Split id: split description: | - Split a metric into one or more metrics with the specified field(s)/tag(s) - introduced: 1.28.0 - tags: [linux, macos, windows] + This plugin splits a metric up into one or more metrics based on a + configured template. The resulting metrics will be timestamped according + to the source metric. Templates can overlap, where a field or tag, is + used across templates and as a result end up in multiple metrics. + > [!NOTE] + > If drop original is changed to true, then the plugin can result in + > dropping all metrics when no match is found! Please ensure to test + > templates before putting into production *and* use metric filtering to + > avoid data loss. + introduced: v1.28.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] - name: Starlark id: starlark description: | - The Starlark processor plugin calls a [Starlark function](https://github.com/google/starlark-go/blob/master/doc/spec.md) for each matched metric, allowing for custom programmatic metric processing. - introduced: 1.15.0 - tags: [linux, macos, windows] + This plugin calls the provided Starlark function for each matched metric, + allowing for custom programmatic metric processing. + The Starlark language is a dialect of Python, and will be familiar to + those who have experience with the Python language. However, there are + major differences. Existing Python code is unlikely to work unmodified. 
+ The execution environment is sandboxed, and it is not possible to do I/O + operations such as reading from files or sockets. + + The **[Starlark + specification](https://github.com/google/starlark-go/blob/d1966c6b9fcd/doc/spec.md)** + has details about the syntax and available functions. + introduced: v1.15.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [general purpose] - name: Strings id: strings description: | - The Strings processor plugin maps certain Go string functions onto InfluxDB - measurement, tag, and field values. Values can be modified in place or stored - in another key. - - Implemented functions are: - - - `lowercase` - - `uppercase` - - `trim` - - `trim_left` - - `trim_right` - - `trim_prefix` - - `trim_suffix` - - Note that in this implementation these are processed in the order that they appear above. - You can specify the `measurement`, `tag` or `field` that you want processed in each - section and optionally a `dest` if you want the result stored in a new tag or field. - You can specify lots of transformations on data with a single strings processor. - introduced: 1.8.0 - tags: [linux, macos, windows] - + This plugin allows to manipulate strings in the measurement name, tag and + field values using different functions. + introduced: v1.8.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] - name: Tag Limit id: tag_limit description: | - The Tag Limit processor plugin preserves only a certain number of tags for any given metric - and chooses the tags to preserve when the number of tags appended by the data source is over the limit. - - This can be useful when dealing with output systems (e.g. Stackdriver) that impose - hard limits on the number of tags or labels per metric or where high levels of - cardinality are computationally or financially expensive. 
- introduced: 1.12.0 - tags: [linux, macos, windows] + This plugin ensures that only a certain number of tags are preserved for + any given metric, and to choose the tags to preserve when the number of + tags appended by the data source is over the limit. + This can be useful when dealing with output systems (e.g. Stackdriver) + that impose hard limits on the number of tags/labels per metric or where + high levels of cardinality are computationally and/or financially + expensive. + introduced: v1.12.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [filtering] - name: Template id: template description: | - The Template processor plugin applies a Go template to metrics to generate a new tag. - Primarily used to create a tag for dynamic routing to multiple output plugins - or to an output specific routing option. The template has access to each metric's measurement name, - tags, fields, and timestamp using the interface in [template_metric.go](https://github.com/influxdata/telegraf/blob/release-1.14/plugins/processors/template/template_metric.go). - introduced: 1.14.0 - tags: [linux, macos, windows] + This plugin applies templates to metrics for generating a new tag. The + primary use case of this plugin is to create a tag that can be used for + dynamic routing to multiple output plugins or using an output specific + routing option. + The template has access to each metric's measurement name, tags, fields, + and timestamp. Templates follow the [Go Template + syntax](https://golang.org/pkg/text/template/) and may contain [Sprig + functions](http://masterminds.github.io/sprig/). + introduced: v1.14.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] - name: Timestamp id: timestamp description: | - Generate a metric timestamp based on values in a metric field. - introduced: 1.31.0 - tags: [linux, macos, windows] - + This plugin allows to parse fields containing timestamps into timestamps + of other format. 
+ introduced: v1.31.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] - name: TopK id: topk description: | - The TopK processor plugin is a filter designed to get the top series over a period of time. - It can be tweaked to do its top `K` computation over a period of time, so spikes - can be smoothed out. - - This processor goes through the following steps when processing a batch of metrics: - - 1. Groups metrics in buckets using their tags and name as key. - 2. Aggregates each of the selected fields for each bucket by the selected aggregation function (sum, mean, etc.). - 3. Orders the buckets by one of the generated aggregations, returns all metrics in the top `K` buckets, then reorders the buckets by the next of the generated aggregations, returns all metrics in the top `K` buckets, etc, etc, etc, until it runs out of fields. - - The plugin makes sure not to duplicate metrics. - - Note that depending on the amount of metrics on each computed bucket, more - than `K` metrics may be returned. - introduced: 1.7.0 - tags: [linux, macos, windows] - + This plugin filters the top series over a period of time and calculates + the top metrics via different aggregation functions. The processing steps + comprise grouping the metrics based on the metric name and tags, + computing the aggregate functions for each group every period and + outputting the top `K` groups. + introduced: v1.7.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] - name: Unpivot id: unpivot description: | - The Unpivot processor plugin rotates a multi-field series into single-valued metrics. - This transformation often results in data that is easier to aggregate across fields. + This plugin allows to rotate a multi-field series into single-valued + metrics. The resulting metrics allow to more easily aggregate data across + fields. 
- *To perform the reverse operation, use the [Pivot](#pivot) processor.* - introduced: 1.12.0 - tags: [linux, macos, windows] + > [!TIP] + > To perform the reverse operation use the + > [pivot](/telegraf/v1/plugins/#processor-pivot) processor. + introduced: v1.12.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] diff --git a/deploy/edge.js b/deploy/edge.js index eeed914c1..668cffa72 100644 --- a/deploy/edge.js +++ b/deploy/edge.js @@ -3,31 +3,32 @@ const path = require('path'); const latestVersions = { - 'influxdb': 'v2', - 'influxdbv2': 'v2', - 'telegraf': 'v1', - 'chronograf': 'v1', - 'kapacitor': 'v1', - 'enterprise': 'v1', - 'flux': 'v0', + influxdb: 'v2', + influxdbv2: 'v2', + telegraf: 'v1', + chronograf: 'v1', + kapacitor: 'v1', + enterprise: 'v1', + flux: 'v0', }; const archiveDomain = 'https://archive.docs.influxdata.com'; const docsDomain = 'https://docs.influxdata.com'; exports.handler = (event, context, callback) => { - function temporaryRedirect(condition, newUri) { if (condition) { return callback(null, { status: '302', statusDescription: 'Found', headers: { - location: [{ - key: 'Location', - value: newUri, - }], - } + location: [ + { + key: 'Location', + value: newUri, + }, + ], + }, }); } } @@ -38,14 +39,18 @@ exports.handler = (event, context, callback) => { status: '301', statusDescription: 'Moved Permanently', headers: { - 'location': [{ - key: 'Location', - value: newUri, - }], - 'cache-control': [{ - key: 'Cache-Control', - value: "max-age=3600" - }], + location: [ + { + key: 'Location', + value: newUri, + }, + ], + 'cache-control': [ + { + key: 'Cache-Control', + value: 'max-age=3600', + }, + ], }, }); } @@ -55,34 +60,40 @@ exports.handler = (event, context, callback) => { const parsedPath = path.parse(request.uri); const indexPath = 'index.html'; const validExtensions = { - '.html': true, '.css': true, - '.js': true, - '.xml': true, - '.png': true, - '.gif': true, - '.jpg': true, - '.ico': true, - '.svg': 
true, '.csv': true, - '.txt': true, - '.lp': true, - '.json': true, - '.rb': true, '.eot': true, + '.gif': true, + '.gz': true, + '.html': true, + '.ico': true, + '.jpg': true, + '.js': true, + '.json': true, + '.lp': true, + '.md': true, + '.md5': true, + '.markdown': true, + '.otf': true, + '.png': true, + '.rb': true, + '.sha256': true, + '.svg': true, + '.tar': true, '.ttf': true, + '.txt': true, '.woff': true, '.woff2': true, - '.otf': true, - '.gz': true, - '.tar': true, + '.yaml': true, + '.yml': true, '.zip': true, - '.md5': true, - '.sha256': true, }; // Remove index.html from path - permanentRedirect(request.uri.endsWith('index.html'), request.uri.substr(0, request.uri.length - indexPath.length)); + permanentRedirect( + request.uri.endsWith('index.html'), + request.uri.substr(0, request.uri.length - indexPath.length) + ); // If file has a valid extension, return the request unchanged if (validExtensions[parsedPath.ext]) { @@ -92,117 +103,536 @@ exports.handler = (event, context, callback) => { ////////////////////// START PRODUCT-SPECIFIC REDIRECTS ////////////////////// //////////////////////// Distributed product redirects /////////////////////// - permanentRedirect(/\/influxdb\/cloud-serverless/.test(request.uri), request.uri.replace(/\/influxdb\/cloud-serverless/, '/influxdb3/cloud-serverless')); - permanentRedirect(/\/influxdb\/cloud-dedicated/.test(request.uri), request.uri.replace(/\/influxdb\/cloud-dedicated/, '/influxdb3/cloud-dedicated')); - permanentRedirect(/\/influxdb\/clustered/.test(request.uri), request.uri.replace(/\/influxdb\/clustered/, '/influxdb3/clustered')); + permanentRedirect( + /\/influxdb\/cloud-serverless/.test(request.uri), + request.uri.replace( + /\/influxdb\/cloud-serverless/, + '/influxdb3/cloud-serverless' + ) + ); + permanentRedirect( + /\/influxdb\/cloud-dedicated/.test(request.uri), + request.uri.replace( + /\/influxdb\/cloud-dedicated/, + '/influxdb3/cloud-dedicated' + ) + ); + permanentRedirect( + 
/\/influxdb\/clustered/.test(request.uri), + request.uri.replace(/\/influxdb\/clustered/, '/influxdb3/clustered') + ); //////////////////////////// v2 subdomain redirect /////////////////////////// - permanentRedirect(request.headers.host[0].value === 'v2.docs.influxdata.com', `https://docs.influxdata.com${request.uri}`); + permanentRedirect( + request.headers.host[0].value === 'v2.docs.influxdata.com', + `https://docs.influxdata.com${request.uri}` + ); ///////////////////////// Force v in version numbers ///////////////////////// - permanentRedirect(/(^\/[\w]*\/)(\d\.)/.test(request.uri), request.uri.replace(/(^\/[\w]*\/)(\d\.)/, `$1v$2`)); + permanentRedirect( + /(^\/[\w]*\/)(\d\.)/.test(request.uri), + request.uri.replace(/(^\/[\w]*\/)(\d\.)/, `$1v$2`) + ); /////////////////// cloud-iox to cloud-serverless redirect ////////////////// - permanentRedirect(/\/influxdb\/cloud-iox/.test(request.uri), request.uri.replace(/\/influxdb\/cloud-iox/, '/influxdb/cloud-serverless')); - + permanentRedirect( + /\/influxdb\/cloud-iox/.test(request.uri), + request.uri.replace(/\/influxdb\/cloud-iox/, '/influxdb/cloud-serverless') + ); + ////////////// CLI InfluxQL link (catch before latest redirect) ////////////// - permanentRedirect(/\/influxdb\/latest\/query_language\/spec/.test(request.uri), request.uri.replace(/latest/, 'v1')); + permanentRedirect( + /\/influxdb\/latest\/query_language\/spec/.test(request.uri), + request.uri.replace(/latest/, 'v1') + ); ////////////////////////// Latest version redirects ////////////////////////// - temporaryRedirect(/\/influxdb\/latest/.test(request.uri), request.uri.replace(/\/latest/, `/${latestVersions['influxdb']}`)); - temporaryRedirect(/\/telegraf\/latest/.test(request.uri), request.uri.replace(/\/latest/, `/${latestVersions['telegraf']}`)); - temporaryRedirect(/\/chronograf\/latest/.test(request.uri), request.uri.replace(/\/latest/, `/${latestVersions['chronograf']}`)); - temporaryRedirect(/\/kapacitor\/latest/.test(request.uri), 
request.uri.replace(/\/latest/, `/${latestVersions['kapacitor']}`)); - temporaryRedirect(/\/enterprise_influxdb\/latest/.test(request.uri), request.uri.replace(/\/latest/, `/${latestVersions['enterprise']}`)); - temporaryRedirect(/\/flux\/latest/.test(request.uri), request.uri.replace(/\/latest/, `/${latestVersions['flux']}`)); + temporaryRedirect( + /\/influxdb\/latest/.test(request.uri), + request.uri.replace(/\/latest/, `/${latestVersions['influxdb']}`) + ); + temporaryRedirect( + /\/telegraf\/latest/.test(request.uri), + request.uri.replace(/\/latest/, `/${latestVersions['telegraf']}`) + ); + temporaryRedirect( + /\/chronograf\/latest/.test(request.uri), + request.uri.replace(/\/latest/, `/${latestVersions['chronograf']}`) + ); + temporaryRedirect( + /\/kapacitor\/latest/.test(request.uri), + request.uri.replace(/\/latest/, `/${latestVersions['kapacitor']}`) + ); + temporaryRedirect( + /\/enterprise_influxdb\/latest/.test(request.uri), + request.uri.replace(/\/latest/, `/${latestVersions['enterprise']}`) + ); + temporaryRedirect( + /\/flux\/latest/.test(request.uri), + request.uri.replace(/\/latest/, `/${latestVersions['flux']}`) + ); ////////////////////////// Versionless URL redirects ///////////////////////// - temporaryRedirect(request.uri === '/influxdb/', `/influxdb/${latestVersions['influxdb']}/`); - temporaryRedirect(request.uri === '/telegraf/', `/telegraf/${latestVersions['telegraf']}/`); - temporaryRedirect(request.uri === '/chronograf/', `/chronograf/${latestVersions['chronograf']}/`); - temporaryRedirect(request.uri === '/kapacitor/', `/kapacitor/${latestVersions['kapacitor']}/`); - temporaryRedirect(request.uri === '/enterprise_influxdb/', `/enterprise_influxdb/${latestVersions['enterprise']}/`); - temporaryRedirect(request.uri === '/flux/', `/flux/${latestVersions['flux']}/`); + temporaryRedirect( + request.uri === '/influxdb/', + `/influxdb/${latestVersions['influxdb']}/` + ); + temporaryRedirect( + request.uri === '/telegraf/', + 
`/telegraf/${latestVersions['telegraf']}/` + ); + temporaryRedirect( + request.uri === '/chronograf/', + `/chronograf/${latestVersions['chronograf']}/` + ); + temporaryRedirect( + request.uri === '/kapacitor/', + `/kapacitor/${latestVersions['kapacitor']}/` + ); + temporaryRedirect( + request.uri === '/enterprise_influxdb/', + `/enterprise_influxdb/${latestVersions['enterprise']}/` + ); + temporaryRedirect( + request.uri === '/flux/', + `/flux/${latestVersions['flux']}/` + ); /////////////////////// VERSION RESTRUCTURE REDIRECTS //////////////////////// - permanentRedirect(/^\/\w+\/(v\d{1})\.[\dx]+/.test(request.uri), request.uri.replace(/^\/(\w+)\/(v\d{1})\.[\dx]+(.*$)/, `/$1/$2$3`)); + permanentRedirect( + /^\/\w+\/(v\d{1})\.[\dx]+/.test(request.uri), + request.uri.replace(/^\/(\w+)\/(v\d{1})\.[\dx]+(.*$)/, `/$1/$2$3`) + ); /////////////////////////////// Flux redirects /////////////////////////////// - // Redirect old Flux guides and introduction - permanentRedirect(/\/flux\/(?:v0\.[0-9]{1,2})\/guides\//.test(request.uri), request.uri.replace(/\/flux\/(?:v0\.[0-9]{1,2}|latest)\/guides\//, `/influxdb/${latestVersions['influxdb']}/query-data/flux/`)); - permanentRedirect(/\/flux\/(?:v0\.[0-9]{1,2})\/introduction\//.test(request.uri), `/flux/${latestVersions['flux']}/get-started/`); + // Redirect old Flux guides and introduction + permanentRedirect( + /\/flux\/(?:v0\.[0-9]{1,2})\/guides\//.test(request.uri), + request.uri.replace( + /\/flux\/(?:v0\.[0-9]{1,2}|latest)\/guides\//, + `/influxdb/${latestVersions['influxdb']}/query-data/flux/` + ) + ); + permanentRedirect( + /\/flux\/(?:v0\.[0-9]{1,2})\/introduction\//.test(request.uri), + `/flux/${latestVersions['flux']}/get-started/` + ); // Redirect Flux language (spec) sections to Flux docs - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/language\//.test(request.uri), request.uri.replace(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/language\//, 
`/flux/${latestVersions['flux']}/spec/`)); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/language\//.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/language\//, + `/flux/${latestVersions['flux']}/spec/` + ) + ); // Redirect Flux stdlib/built-in sections to Flux stdlib/universe docs - temporaryRedirect(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/built-in\/(?:inputs\/|outputs\/|misc\/|tests\/)(\w+\/$)/.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/built-in\/(?:inputs\/|outputs\/|misc\/|tests\/)(\w+\/$)/, `/flux/${latestVersions['flux']}/stdlib/universe/$1`)); - temporaryRedirect(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/built-in\/(?:inputs\/|outputs\/|misc\/|tests\/)$/.test(request.uri), `/flux/${latestVersions['flux']}/function-types/`); - temporaryRedirect(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/built-in\/transformations\/(?:aggregates\/|selectors\/|stream-table\/|type-conversions\/)(\w+\/$)/.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/built-in\/transformations\/(?:aggregates\/|selectors\/|stream-table\/|type-conversions\/)(\w+\/$)/, `/flux/${latestVersions['flux']}/stdlib/universe/$1`)); - temporaryRedirect(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/built-in\/transformations\/(?:aggregates\/|selectors\/|stream-table\/|type-conversions\/)/.test(request.uri), `/flux/${latestVersions['flux']}/function-types/`); - temporaryRedirect(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/built-in\/transformations\/(\w+\/$)/.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/built-in\/transformations\/(\w+\/$)/, `/flux/${latestVersions['flux']}/stdlib/universe/$1`)); - 
temporaryRedirect(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/built-in\/transformations\/$/.test(request.uri), `/flux/${latestVersions['flux']}/function-types/`); - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/built-in\/$/.test(request.uri), `/flux/${latestVersions['flux']}/stdlib/universe/`); + temporaryRedirect( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/built-in\/(?:inputs\/|outputs\/|misc\/|tests\/)(\w+\/$)/.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/built-in\/(?:inputs\/|outputs\/|misc\/|tests\/)(\w+\/$)/, + `/flux/${latestVersions['flux']}/stdlib/universe/$1` + ) + ); + temporaryRedirect( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/built-in\/(?:inputs\/|outputs\/|misc\/|tests\/)$/.test( + request.uri + ), + `/flux/${latestVersions['flux']}/function-types/` + ); + temporaryRedirect( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/built-in\/transformations\/(?:aggregates\/|selectors\/|stream-table\/|type-conversions\/)(\w+\/$)/.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/built-in\/transformations\/(?:aggregates\/|selectors\/|stream-table\/|type-conversions\/)(\w+\/$)/, + `/flux/${latestVersions['flux']}/stdlib/universe/$1` + ) + ); + temporaryRedirect( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/built-in\/transformations\/(?:aggregates\/|selectors\/|stream-table\/|type-conversions\/)/.test( + request.uri + ), + `/flux/${latestVersions['flux']}/function-types/` + ); + temporaryRedirect( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/built-in\/transformations\/(\w+\/$)/.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/built-in\/transformations\/(\w+\/$)/, + 
`/flux/${latestVersions['flux']}/stdlib/universe/$1` + ) + ); + temporaryRedirect( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/built-in\/transformations\/$/.test( + request.uri + ), + `/flux/${latestVersions['flux']}/function-types/` + ); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/built-in\/$/.test( + request.uri + ), + `/flux/${latestVersions['flux']}/stdlib/universe/` + ); - temporaryRedirect(/\/flux\/v0\.x\/stdlib\/built-in\/(?:inputs\/|outputs\/|misc\/|tests\/)(\w+\/$)/.test(request.uri), request.uri.replace(/\/flux\/v0\.x\/stdlib\/built-in\/(?:inputs\/|outputs\/|misc\/|tests\/)(\w+\/$)/, `/flux/${latestVersions['flux']}/stdlib/universe/$1`)); - temporaryRedirect(/\/flux\/v0\.x\/stdlib\/built-in\/(?:inputs\/|outputs\/|misc\/|tests\/)$/.test(request.uri), `/flux/${latestVersions['flux']}/function-types/`); - temporaryRedirect(/\/flux\/v0\.x\/stdlib\/built-in\/transformations\/(?:aggregates\/|selectors\/|stream-table\/|type-conversions\/)(\w+\/$)/.test(request.uri), request.uri.replace(/\/flux\/v0\.x\/stdlib\/built-in\/transformations\/(?:aggregates\/|selectors\/|stream-table\/|type-conversions\/)(\w+\/$)/, `/flux/${latestVersions['flux']}/stdlib/universe/$1`)); - temporaryRedirect(/\/flux\/v0\.x\/stdlib\/built-in\/transformations\/(?:aggregates\/|selectors\/|stream-table\/|type-conversions\/)/.test(request.uri), `/flux/${latestVersions['flux']}/function-types/`); - temporaryRedirect(/\/flux\/v0\.x\/stdlib\/built-in\/transformations\/(\w+\/$)/.test(request.uri), request.uri.replace(/\/flux\/v0\.x\/stdlib\/built-in\/transformations\/(\w+\/$)/, `/flux/${latestVersions['flux']}/stdlib/universe/$1`)); - temporaryRedirect(/\/flux\/v0\.x\/stdlib\/built-in\/transformations\/$/.test(request.uri), `/flux/${latestVersions['flux']}/function-types/`); - temporaryRedirect(/\/flux\/v0\.x\/stdlib\/built-in\/$/.test(request.uri), `/flux/${latestVersions['flux']}/stdlib/universe/`); - 
temporaryRedirect(/\/flux\/v0\.x\/stdlib\/universe\/(?:inputs\/|outputs\/|misc\/|tests\/|transformations\/|selectors\/|aggregates\/)$/.test(request.uri), `/flux/${latestVersions['flux']}/function-types/`); + temporaryRedirect( + /\/flux\/v0\.x\/stdlib\/built-in\/(?:inputs\/|outputs\/|misc\/|tests\/)(\w+\/$)/.test( + request.uri + ), + request.uri.replace( + /\/flux\/v0\.x\/stdlib\/built-in\/(?:inputs\/|outputs\/|misc\/|tests\/)(\w+\/$)/, + `/flux/${latestVersions['flux']}/stdlib/universe/$1` + ) + ); + temporaryRedirect( + /\/flux\/v0\.x\/stdlib\/built-in\/(?:inputs\/|outputs\/|misc\/|tests\/)$/.test( + request.uri + ), + `/flux/${latestVersions['flux']}/function-types/` + ); + temporaryRedirect( + /\/flux\/v0\.x\/stdlib\/built-in\/transformations\/(?:aggregates\/|selectors\/|stream-table\/|type-conversions\/)(\w+\/$)/.test( + request.uri + ), + request.uri.replace( + /\/flux\/v0\.x\/stdlib\/built-in\/transformations\/(?:aggregates\/|selectors\/|stream-table\/|type-conversions\/)(\w+\/$)/, + `/flux/${latestVersions['flux']}/stdlib/universe/$1` + ) + ); + temporaryRedirect( + /\/flux\/v0\.x\/stdlib\/built-in\/transformations\/(?:aggregates\/|selectors\/|stream-table\/|type-conversions\/)/.test( + request.uri + ), + `/flux/${latestVersions['flux']}/function-types/` + ); + temporaryRedirect( + /\/flux\/v0\.x\/stdlib\/built-in\/transformations\/(\w+\/$)/.test( + request.uri + ), + request.uri.replace( + /\/flux\/v0\.x\/stdlib\/built-in\/transformations\/(\w+\/$)/, + `/flux/${latestVersions['flux']}/stdlib/universe/$1` + ) + ); + temporaryRedirect( + /\/flux\/v0\.x\/stdlib\/built-in\/transformations\/$/.test(request.uri), + `/flux/${latestVersions['flux']}/function-types/` + ); + temporaryRedirect( + /\/flux\/v0\.x\/stdlib\/built-in\/$/.test(request.uri), + `/flux/${latestVersions['flux']}/stdlib/universe/` + ); + temporaryRedirect( + /\/flux\/v0\.x\/stdlib\/universe\/(?:inputs\/|outputs\/|misc\/|tests\/|transformations\/|selectors\/|aggregates\/)$/.test( + request.uri 
+ ), + `/flux/${latestVersions['flux']}/function-types/` + ); // Redirect Flux stdlib/influxdb sections to Flux stdlib/influxdata docs - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/influxdb\//.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/influxdb\//, `/flux/${latestVersions['flux']}/stdlib/influxdata/influxdb/`)); - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/monitor\//.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/monitor\//, `/flux/${latestVersions['flux']}/stdlib/influxdata/influxdb/monitor/`)); - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/influxdb-sample\//.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/influxdb-sample\//, `/flux/${latestVersions['flux']}/stdlib/influxdata/influxdb/sample/`)); - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/influxdb-schema\//.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/influxdb-schema\//, `/flux/${latestVersions['flux']}/stdlib/influxdata/influxdb/schema/`)); - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/secrets\//.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/secrets\//, `/flux/${latestVersions['flux']}/stdlib/influxdata/influxdb/secrets/`)); - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/influxdb-tasks\//.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/influxdb-tasks\//, `/flux/${latestVersions['flux']}/stdlib/influxdata/influxdb/tasks/`)); - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/influxdb-v1\//.test(request.uri), 
request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/influxdb-v1\//, `/flux/${latestVersions['flux']}/stdlib/influxdata/influxdb/v1/`)); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/influxdb\//.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/influxdb\//, + `/flux/${latestVersions['flux']}/stdlib/influxdata/influxdb/` + ) + ); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/monitor\//.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/monitor\//, + `/flux/${latestVersions['flux']}/stdlib/influxdata/influxdb/monitor/` + ) + ); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/influxdb-sample\//.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/influxdb-sample\//, + `/flux/${latestVersions['flux']}/stdlib/influxdata/influxdb/sample/` + ) + ); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/influxdb-schema\//.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/influxdb-schema\//, + `/flux/${latestVersions['flux']}/stdlib/influxdata/influxdb/schema/` + ) + ); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/secrets\//.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/secrets\//, + `/flux/${latestVersions['flux']}/stdlib/influxdata/influxdb/secrets/` + ) + ); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/influxdb-tasks\//.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/influxdb-tasks\//, + 
`/flux/${latestVersions['flux']}/stdlib/influxdata/influxdb/tasks/` + ) + ); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/influxdb-v1\//.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/influxdb-v1\//, + `/flux/${latestVersions['flux']}/stdlib/influxdata/influxdb/v1/` + ) + ); // Redirect Flux stdlib/contrib sections to Flux stdlib/contrib/user docs - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/alerta\//.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/alerta\//, `/flux/${latestVersions['flux']}/stdlib/contrib/bonitoo-io/alerta/`)); - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/bigpanda\//.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/bigpanda\//, `/flux/${latestVersions['flux']}/stdlib/contrib/rhajek/bigpanda/`)); - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/discord\//.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/discord\//, `/flux/${latestVersions['flux']}/stdlib/contrib/chobbs/discord/`)); - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/events\//.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/events\//, `/flux/${latestVersions['flux']}/stdlib/contrib/tomhollingworth/events/`)); - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/influxdb\//.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/influxdb\//, `/flux/${latestVersions['flux']}/stdlib/contrib/jsternberg/influxdb/`)); - 
temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/teams\//.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/teams\//, `/flux/${latestVersions['flux']}/stdlib/contrib/sranka/teams/`)); - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/opsgenie\//.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/opsgenie\//, `/flux/${latestVersions['flux']}/stdlib/contrib/sranka/opsgenie/`)); - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/rows\//.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/rows\//, `/flux/${latestVersions['flux']}/stdlib/contrib/jsternberg/rows/`)); - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/sensu\//.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/sensu\//, `/flux/${latestVersions['flux']}/stdlib/contrib/sranka/sensu/`)); - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/telegram\//.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/telegram\//, `/flux/${latestVersions['flux']}/stdlib/contrib/sranka/telegram/`)); - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/tickscript\//.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/tickscript\//, `/flux/${latestVersions['flux']}/stdlib/contrib/bonitoo-io/tickscript/`)); - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/victorops\//.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/victorops\//, 
`/flux/${latestVersions['flux']}/stdlib/contrib/bonitoo-io/victorops/`)); - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/webexteams\//.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/webexteams\//, `/flux/${latestVersions['flux']}/stdlib/contrib/sranka/webexteams/`)); - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/zenoss\//.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/zenoss\//, `/flux/${latestVersions['flux']}/stdlib/contrib/bonitoo-io/zenoss/`)); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/alerta\//.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/alerta\//, + `/flux/${latestVersions['flux']}/stdlib/contrib/bonitoo-io/alerta/` + ) + ); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/bigpanda\//.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/bigpanda\//, + `/flux/${latestVersions['flux']}/stdlib/contrib/rhajek/bigpanda/` + ) + ); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/discord\//.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/discord\//, + `/flux/${latestVersions['flux']}/stdlib/contrib/chobbs/discord/` + ) + ); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/events\//.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/events\//, + `/flux/${latestVersions['flux']}/stdlib/contrib/tomhollingworth/events/` + ) + ); + temporaryRedirect( + 
/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/influxdb\//.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/influxdb\//, + `/flux/${latestVersions['flux']}/stdlib/contrib/jsternberg/influxdb/` + ) + ); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/teams\//.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/teams\//, + `/flux/${latestVersions['flux']}/stdlib/contrib/sranka/teams/` + ) + ); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/opsgenie\//.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/opsgenie\//, + `/flux/${latestVersions['flux']}/stdlib/contrib/sranka/opsgenie/` + ) + ); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/rows\//.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/rows\//, + `/flux/${latestVersions['flux']}/stdlib/contrib/jsternberg/rows/` + ) + ); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/sensu\//.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/sensu\//, + `/flux/${latestVersions['flux']}/stdlib/contrib/sranka/sensu/` + ) + ); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/telegram\//.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/telegram\//, + `/flux/${latestVersions['flux']}/stdlib/contrib/sranka/telegram/` + ) + ); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/tickscript\//.test( + request.uri + 
), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/tickscript\//, + `/flux/${latestVersions['flux']}/stdlib/contrib/bonitoo-io/tickscript/` + ) + ); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/victorops\//.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/victorops\//, + `/flux/${latestVersions['flux']}/stdlib/contrib/bonitoo-io/victorops/` + ) + ); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/webexteams\//.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/webexteams\//, + `/flux/${latestVersions['flux']}/stdlib/contrib/sranka/webexteams/` + ) + ); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/zenoss\//.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\/contrib\/zenoss\//, + `/flux/${latestVersions['flux']}/stdlib/contrib/bonitoo-io/zenoss/` + ) + ); // Generic Flux stdlib redirect - temporaryRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\//.test(request.uri), request.uri.replace(/\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\//, `/flux/${latestVersions['flux']}/stdlib/`)); - temporaryRedirect(/\/flux\/v0\.x\/functions\//.test(request.uri), request.uri.replace(/(\/flux\/v0\.x\/)functions\/(.*)/, `$1stdlib/$2`)); - temporaryRedirect(/\/flux\/v0\.x\/stdlib\/experimental\/to\/.+/.test(request.uri), request.uri.replace(/(\/flux\/v0\.x\/stdlib\/experimental\/)to\/(.+)/, `$1$2`)); + temporaryRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\//.test( + request.uri + ), + request.uri.replace( + /\/influxdb\/(?:v2\.[0-9]{1,2}|cloud)\/reference\/flux\/stdlib\//, + `/flux/${latestVersions['flux']}/stdlib/` + ) + ); + 
temporaryRedirect( + /\/flux\/v0\.x\/functions\//.test(request.uri), + request.uri.replace(/(\/flux\/v0\.x\/)functions\/(.*)/, `$1stdlib/$2`) + ); + temporaryRedirect( + /\/flux\/v0\.x\/stdlib\/experimental\/to\/.+/.test(request.uri), + request.uri.replace( + /(\/flux\/v0\.x\/stdlib\/experimental\/)to\/(.+)/, + `$1$2` + ) + ); // Redirect outdated Chronograf links - temporaryRedirect(/\/flux\/v[0,1]\.x\/stdlib\/built-in\/(?:inputs\/|outputs\/|misc\/|tests\/)(\w+\/$)/.test(request.uri), request.uri.replace(/\/flux\/v[0,1]\.x\/stdlib\/built-in\/(?:inputs\/|outputs\/|misc\/|tests\/)(\w+\/$)/, `/flux/${latestVersions['flux']}/stdlib/universe/$1`)); - temporaryRedirect(/\/flux\/v[0,1]\.x\/stdlib\/built-in\/transformations\/(?:aggregates\/|selectors\/|stream-table\/|type-conversions\/)(\w+\/$)/.test(request.uri), request.uri.replace(/\/flux\/v[0,1]\.x\/stdlib\/built-in\/transformations\/(?:aggregates\/|selectors\/|stream-table\/|type-conversions\/)(\w+\/$)/, `/flux/${latestVersions['flux']}/stdlib/universe/$1`)); - temporaryRedirect(/\/flux\/v[0,1]\.x\/stdlib\/built-in\/transformations\/(\w+\/$)/.test(request.uri), request.uri.replace(/\/flux\/v[0,1]\.x\/stdlib\/built-in\/transformations\/(\w+\/$)/, `/flux/${latestVersions['flux']}/stdlib/universe/$1`)); - temporaryRedirect(/\/flux\/v[0,1]\.x\/stdlib\/secrets\//.test(request.uri), request.uri.replace(/\/flux\/v[0,1]\.x\/stdlib\/secrets\//, `/flux/${latestVersions['flux']}/stdlib/influxdata/influxdb/secrets/`)); - temporaryRedirect(/\/flux\/v[0,1]\.x\/stdlib\/influxdb-v1\//.test(request.uri), request.uri.replace(/\/flux\/v[0,1]\.x\/stdlib\/influxdb-v1\//, `/flux/${latestVersions['flux']}/stdlib/influxdata/influxdb/v1/`)); + temporaryRedirect( + /\/flux\/v[0,1]\.x\/stdlib\/built-in\/(?:inputs\/|outputs\/|misc\/|tests\/)(\w+\/$)/.test( + request.uri + ), + request.uri.replace( + /\/flux\/v[0,1]\.x\/stdlib\/built-in\/(?:inputs\/|outputs\/|misc\/|tests\/)(\w+\/$)/, + `/flux/${latestVersions['flux']}/stdlib/universe/$1` + ) + 
); + temporaryRedirect( + /\/flux\/v[0,1]\.x\/stdlib\/built-in\/transformations\/(?:aggregates\/|selectors\/|stream-table\/|type-conversions\/)(\w+\/$)/.test( + request.uri + ), + request.uri.replace( + /\/flux\/v[0,1]\.x\/stdlib\/built-in\/transformations\/(?:aggregates\/|selectors\/|stream-table\/|type-conversions\/)(\w+\/$)/, + `/flux/${latestVersions['flux']}/stdlib/universe/$1` + ) + ); + temporaryRedirect( + /\/flux\/v[0,1]\.x\/stdlib\/built-in\/transformations\/(\w+\/$)/.test( + request.uri + ), + request.uri.replace( + /\/flux\/v[0,1]\.x\/stdlib\/built-in\/transformations\/(\w+\/$)/, + `/flux/${latestVersions['flux']}/stdlib/universe/$1` + ) + ); + temporaryRedirect( + /\/flux\/v[0,1]\.x\/stdlib\/secrets\//.test(request.uri), + request.uri.replace( + /\/flux\/v[0,1]\.x\/stdlib\/secrets\//, + `/flux/${latestVersions['flux']}/stdlib/influxdata/influxdb/secrets/` + ) + ); + temporaryRedirect( + /\/flux\/v[0,1]\.x\/stdlib\/influxdb-v1\//.test(request.uri), + request.uri.replace( + /\/flux\/v[0,1]\.x\/stdlib\/influxdb-v1\//, + `/flux/${latestVersions['flux']}/stdlib/influxdata/influxdb/v1/` + ) + ); // Redirect Flux release notes - permanentRedirect(/\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/release-notes\/flux\//.test(request.uri), `/flux/${latestVersions['flux']}/release-notes/`); + permanentRedirect( + /\/influxdb\/(v2\.[0-9]{1,2}|cloud)\/reference\/release-notes\/flux\//.test( + request.uri + ), + `/flux/${latestVersions['flux']}/release-notes/` + ); ////////////////////////////// v2 path redirect ////////////////////////////// - permanentRedirect(/^\/v2\.0\//.test(request.uri), request.uri.replace(/^\/v2\.0\//, `/influxdb/v2.0/`)); + permanentRedirect( + /^\/v2\.0\//.test(request.uri), + request.uri.replace(/^\/v2\.0\//, `/influxdb/v2.0/`) + ); ////////////////////////// Archive version redirects ///////////////////////// - permanentRedirect(/\/influxdb\/(?:v0\.[0-9]{1,2}|v1\.[0-2])\//.test(request.uri), `${archiveDomain}${request.uri}`); - 
permanentRedirect(/\/telegraf\/(?:v0\.[0-9]{1,2}|v1\.[0-8])\//.test(request.uri), `${archiveDomain}${request.uri}`); - permanentRedirect(/\/chronograf\/(?:v0\.[0-9]{1,2}|v1\.[0-5])\//.test(request.uri), `${archiveDomain}${request.uri}`); - permanentRedirect(/\/kapacitor\/(?:v0\.[0-9]{1,2}|v1\.[0-3])\//.test(request.uri), `${archiveDomain}${request.uri}`); - permanentRedirect(/\/enterprise_influxdb\/v1\.[0-3]\//.test(request.uri), `${archiveDomain}${request.uri}`); - permanentRedirect(/\/enterprise_kapacitor\//.test(request.uri), `${archiveDomain}${request.uri}`); + permanentRedirect( + /\/influxdb\/(?:v0\.[0-9]{1,2}|v1\.[0-2])\//.test(request.uri), + `${archiveDomain}${request.uri}` + ); + permanentRedirect( + /\/telegraf\/(?:v0\.[0-9]{1,2}|v1\.[0-8])\//.test(request.uri), + `${archiveDomain}${request.uri}` + ); + permanentRedirect( + /\/chronograf\/(?:v0\.[0-9]{1,2}|v1\.[0-5])\//.test(request.uri), + `${archiveDomain}${request.uri}` + ); + permanentRedirect( + /\/kapacitor\/(?:v0\.[0-9]{1,2}|v1\.[0-3])\//.test(request.uri), + `${archiveDomain}${request.uri}` + ); + permanentRedirect( + /\/enterprise_influxdb\/v1\.[0-3]\//.test(request.uri), + `${archiveDomain}${request.uri}` + ); + permanentRedirect( + /\/enterprise_kapacitor\//.test(request.uri), + `${archiveDomain}${request.uri}` + ); /////////////////////// END PRODUCT-SPECIFIC REDIRECTS /////////////////////// diff --git a/eslint.config.js b/eslint.config.js index 0f7cd7e65..23104f7a1 100644 --- a/eslint.config.js +++ b/eslint.config.js @@ -106,6 +106,33 @@ export default [ files: ['assets/js/**/*.js'], rules: { // Rules specific to JavaScript in Hugo assets + // Prevent imports from debug-helpers.js + 'no-restricted-imports': [ + 'error', + { + paths: [ + { + name: './utils/debug-helpers.js', + message: + 'Remove debugging functions before committing. Debug helpers should not be used in production code.', + }, + { + name: '/utils/debug-helpers.js', + message: + 'Remove debugging functions before committing. 
Debug helpers should not be used in production code.', + }, + ], + }, + ], + // Prevent use of debug functions in production code + 'no-restricted-syntax': [ + 'error', + { + selector: 'CallExpression[callee.name=/^debug(Log|Break|Inspect)$/]', + message: + 'Remove debugging functions before committing. Debug helpers should not be used in production code.', + }, + ], }, }, { diff --git a/helper-scripts/README.md b/helper-scripts/README.md new file mode 100644 index 000000000..73f3c2067 --- /dev/null +++ b/helper-scripts/README.md @@ -0,0 +1,36 @@ +# InfluxData documentation helper scripts + +This directory contains scripts designed to help make specific maintenance +processes easier. + +## InfluxDB Clustered release artifacts + +**Script:** `./clustered-release-artifacts.sh` + +Each InfluxDB Clustered release has the following associated artifacts that need +to be provided with the release notes: + +- `example-customer.yaml` +- `app-instance-schema.json` + +This script uses an InfluxDB Clustered pull secret to pull down the required +assets and store them in `static/downloads/clustered-release-artifacts/`. + +1. **Set up the pull secret:** + + The **Clustered Pull Secret** (config.json) is available in Docs Team + 1Password vault. Download the pull secret and store it in the + `/tmp/influxdbsecret` directory on your local machine. + +2. Install dependencies: + + - [Install `crane`](https://github.com/google/go-containerregistry/tree/main/cmd/crane#installation). + - [Install `jq`](https://jqlang.org/download/) + +3. From the root of the docs project directory, run the following command to + execute the script. 
Provide the release version as an argument to the + script--for example: + + ```sh + sh ./helper-scripts/clustered-release-artifacts.sh 20250508-1719206 + ``` diff --git a/helper-scripts/clustered-release-artifacts.sh b/helper-scripts/clustered-release-artifacts.sh new file mode 100644 index 000000000..a3eede6e5 --- /dev/null +++ b/helper-scripts/clustered-release-artifacts.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +set -euo pipefail + +if [ $# -ne 1 ]; then + echo "Usage: $0 " + exit 1 +fi + +RELEASE="$1" +IMAGE="us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:$RELEASE" +WORKDIR=$(mktemp -d) + +# Target directory relative to where the script is run +BASE_DIR="./static/downloads/clustered-release-artifacts" +TARGET_DIR="$BASE_DIR/$RELEASE" + +echo "Creating release directory: $TARGET_DIR" +mkdir -p "$TARGET_DIR" + +echo "Fetching manifest digest..." +DIGEST=$(DOCKER_CONFIG=/tmp/influxdbsecret crane manifest "$IMAGE" | jq -r '.layers[1].digest') + +echo "Downloading and extracting assets..." +DOCKER_CONFIG=/tmp/influxdbsecret \ +crane blob "$IMAGE@$DIGEST" | tar -xvzf - -C "$WORKDIR" + +# Find the top-level extracted directory +SUBDIR=$(find "$WORKDIR" -mindepth 1 -maxdepth 1 -type d) + +echo "Copying selected files to release directory..." +cp "$SUBDIR/app-instance-schema.json" "$TARGET_DIR/" +cp "$SUBDIR/example-customer.yml" "$TARGET_DIR/" + +echo "Cleaning up temporary directory..." +rm -rf "$WORKDIR" + +echo "Done. Selected assets for $RELEASE are in $TARGET_DIR" diff --git a/helper-scripts/generate-release-notes.sh b/helper-scripts/generate-release-notes.sh new file mode 100755 index 000000000..d47ae5592 --- /dev/null +++ b/helper-scripts/generate-release-notes.sh @@ -0,0 +1,354 @@ +#!/bin/bash + +# Script to generate release notes for InfluxDB v3.x releases +# Usage: ./generate-release-notes.sh [--no-fetch] [--pull] [additional_repo_paths...] 
+# +# Options: +# --no-fetch Skip fetching latest commits from remote +# --pull Pull latest changes (implies fetch) - use with caution as it may change your working directory +# +# Example: ./generate-release-notes.sh v3.1.0 v3.2.0 /path/to/influxdb /path/to/influxdb_pro /path/to/influxdb_iox +# Example: ./generate-release-notes.sh --no-fetch v3.1.0 v3.2.0 /path/to/influxdb +# Example: ./generate-release-notes.sh --pull v3.1.0 v3.2.0 /path/to/influxdb /path/to/influxdb_pro + +set -e + +# Parse command line options +FETCH_COMMITS=true +PULL_COMMITS=false + +while [[ $# -gt 0 ]]; do + case $1 in + --no-fetch) + FETCH_COMMITS=false + shift + ;; + --pull) + PULL_COMMITS=true + FETCH_COMMITS=true + shift + ;; + -*) + echo "Unknown option $1" + exit 1 + ;; + *) + break + ;; + esac +done + +# Parse remaining arguments +FROM_VERSION="${1:-v3.1.0}" +TO_VERSION="${2:-v3.2.0}" +PRIMARY_REPO="${3:-/Users/ja/Documents/github/influxdb}" + +# Collect additional repositories (all arguments after the third) +ADDITIONAL_REPOS=() +shift 3 2>/dev/null || true +while [ $# -gt 0 ]; do + ADDITIONAL_REPOS+=("$1") + shift +done + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[0;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +echo -e "${BLUE}Generating release notes for ${TO_VERSION}${NC}" +echo -e "Primary Repository: ${PRIMARY_REPO}" +if [ ${#ADDITIONAL_REPOS[@]} -gt 0 ]; then + echo -e "Additional Repositories:" + for repo in "${ADDITIONAL_REPOS[@]}"; do + echo -e " - ${repo}" + done +fi +echo -e "From: ${FROM_VERSION} To: ${TO_VERSION}\n" + +# Function to extract PR number from commit message +extract_pr_number() { + echo "$1" | grep -oE '#[0-9]+' | head -1 | sed 's/#//' +} + +# Function to get commits from a repository +get_commits_from_repo() { + local repo_path="$1" + local pattern="$2" + local format="${3:-%h %s}" + + if [ -d "$repo_path" ]; then + git -C "$repo_path" log --format="$format" "${FROM_VERSION}..${TO_VERSION}" 2>/dev/null | grep -E 
"$pattern" || true + fi +} + +# Function to analyze API-related commits +analyze_api_changes() { + local repo_path="$1" + local repo_name="$2" + + if [ ! -d "$repo_path" ]; then + return + fi + + # Look for API-related file changes + local api_files=$(git -C "$repo_path" diff --name-only "${FROM_VERSION}..${TO_VERSION}" 2>/dev/null | grep -E "(api|handler|endpoint|route)" | head -10 || true) + + # Look for specific API endpoint patterns in commit messages and diffs + local api_commits=$(git -C "$repo_path" log --format="%h %s" "${FROM_VERSION}..${TO_VERSION}" 2>/dev/null | \ + grep -iE "(api|endpoint|/write|/query|/ping|/health|/metrics|v1|v2|v3)" || true) + + if [ -n "$api_files" ] || [ -n "$api_commits" ]; then + echo " Repository: $repo_name" + if [ -n "$api_files" ]; then + echo " Modified API files:" + echo "$api_files" | while read -r file; do + echo " - $file" + done + fi + if [ -n "$api_commits" ]; then + echo " API-related commits:" + echo "$api_commits" | while read -r commit; do + echo " - $commit" + done + fi + echo + fi +} + +# Get the release date +RELEASE_DATE=$(git -C "$PRIMARY_REPO" log -1 --format=%ai "$TO_VERSION" | cut -d' ' -f1) +echo -e "${GREEN}Release Date: ${RELEASE_DATE}${NC}\n" + +# Create array of all repositories +ALL_REPOS=("$PRIMARY_REPO") +for repo in "${ADDITIONAL_REPOS[@]}"; do + ALL_REPOS+=("$repo") +done + +# Fetch latest commits from all repositories (if enabled) +if [ "$FETCH_COMMITS" = true ]; then + if [ "$PULL_COMMITS" = true ]; then + echo -e "${YELLOW}Pulling latest changes from all repositories...${NC}" + echo -e "${RED}Warning: This will modify your working directories!${NC}" + else + echo -e "${YELLOW}Fetching latest commits from all repositories...${NC}" + fi + + for repo in "${ALL_REPOS[@]}"; do + if [ -d "$repo" ]; then + repo_name=$(basename "$repo") + + if [ "$PULL_COMMITS" = true ]; then + echo -e " Pulling changes in $repo_name..." 
+ if git -C "$repo" pull origin 2>/dev/null; then + echo -e " ${GREEN}✓${NC} Successfully pulled changes in $repo_name" + else + echo -e " ${RED}✗${NC} Failed to pull changes in $repo_name (trying fetch only)" + if git -C "$repo" fetch origin 2>/dev/null; then + echo -e " ${GREEN}✓${NC} Successfully fetched from $repo_name" + else + echo -e " ${RED}✗${NC} Failed to fetch from $repo_name (continuing with local commits)" + fi + fi + else + echo -e " Fetching from $repo_name..." + if git -C "$repo" fetch origin 2>/dev/null; then + echo -e " ${GREEN}✓${NC} Successfully fetched from $repo_name" + else + echo -e " ${RED}✗${NC} Failed to fetch from $repo_name (continuing with local commits)" + fi + fi + else + echo -e " ${RED}✗${NC} Repository not found: $repo" + fi + done +else + echo -e "${YELLOW}Skipping fetch (using local commits only)${NC}" +fi + +# Collect commits by category from all repositories +echo -e "\n${YELLOW}Analyzing commits across all repositories...${NC}" + +# Initialize variables +FEATURES="" +FIXES="" +BREAKING="" +PERF="" +API_CHANGES="" + +# Collect commits from all repositories +for repo in "${ALL_REPOS[@]}"; do + if [ -d "$repo" ]; then + repo_name=$(basename "$repo") + echo -e " Analyzing $repo_name..." 
+ + # Features + repo_features=$(get_commits_from_repo "$repo" "^[a-f0-9]+ feat:" | sed "s/^[a-f0-9]* feat: /- [$repo_name] /") + if [ -n "$repo_features" ]; then + FEATURES="$FEATURES$repo_features"$'\n' + fi + + # Fixes + repo_fixes=$(get_commits_from_repo "$repo" "^[a-f0-9]+ fix:" | sed "s/^[a-f0-9]* fix: /- [$repo_name] /") + if [ -n "$repo_fixes" ]; then + FIXES="$FIXES$repo_fixes"$'\n' + fi + + # Breaking changes + repo_breaking=$(get_commits_from_repo "$repo" "^[a-f0-9]+ .*(BREAKING|breaking change)" | sed "s/^[a-f0-9]* /- [$repo_name] /") + if [ -n "$repo_breaking" ]; then + BREAKING="$BREAKING$repo_breaking"$'\n' + fi + + # Performance improvements + repo_perf=$(get_commits_from_repo "$repo" "^[a-f0-9]+ perf:" | sed "s/^[a-f0-9]* perf: /- [$repo_name] /") + if [ -n "$repo_perf" ]; then + PERF="$PERF$repo_perf"$'\n' + fi + + # API changes + repo_api=$(get_commits_from_repo "$repo" "(api|endpoint|/write|/query|/ping|/health|/metrics|v1|v2|v3)" | sed "s/^[a-f0-9]* /- [$repo_name] /") + if [ -n "$repo_api" ]; then + API_CHANGES="$API_CHANGES$repo_api"$'\n' + fi + fi +done + +# Analyze API changes in detail +echo -e "\n${YELLOW}Analyzing HTTP API changes...${NC}" +for repo in "${ALL_REPOS[@]}"; do + repo_name=$(basename "$repo") + analyze_api_changes "$repo" "$repo_name" +done + +# Generate markdown output +OUTPUT_FILE="release-notes-${TO_VERSION}.md" +cat > "$OUTPUT_FILE" << EOF +## ${TO_VERSION} {date="${RELEASE_DATE}"} + +### Features + +EOF + +# Add features +if [ -n "$FEATURES" ]; then + echo "$FEATURES" | while IFS= read -r line; do + if [ -n "$line" ]; then + PR=$(extract_pr_number "$line") + # Clean up the commit message + CLEAN_LINE=$(echo "$line" | sed -E 's/ \(#[0-9]+\)$//') + if [ -n "$PR" ]; then + echo "$CLEAN_LINE ([#$PR](https://github.com/influxdata/influxdb/pull/$PR))" >> "$OUTPUT_FILE" + else + echo "$CLEAN_LINE" >> "$OUTPUT_FILE" + fi + fi + done +else + echo "- No new features in this release" >> "$OUTPUT_FILE" +fi + +# Add bug fixes +cat 
>> "$OUTPUT_FILE" << EOF + +### Bug Fixes + +EOF + +if [ -n "$FIXES" ]; then + echo "$FIXES" | while IFS= read -r line; do + if [ -n "$line" ]; then + PR=$(extract_pr_number "$line") + CLEAN_LINE=$(echo "$line" | sed -E 's/ \(#[0-9]+\)$//') + if [ -n "$PR" ]; then + echo "$CLEAN_LINE ([#$PR](https://github.com/influxdata/influxdb/pull/$PR))" >> "$OUTPUT_FILE" + else + echo "$CLEAN_LINE" >> "$OUTPUT_FILE" + fi + fi + done +else + echo "- No bug fixes in this release" >> "$OUTPUT_FILE" +fi + +# Add breaking changes if any +if [ -n "$BREAKING" ]; then + cat >> "$OUTPUT_FILE" << EOF + +### Breaking Changes + +EOF + echo "$BREAKING" | while IFS= read -r line; do + if [ -n "$line" ]; then + PR=$(extract_pr_number "$line") + CLEAN_LINE=$(echo "$line" | sed -E 's/ \(#[0-9]+\)$//') + if [ -n "$PR" ]; then + echo "$CLEAN_LINE ([#$PR](https://github.com/influxdata/influxdb/pull/$PR))" >> "$OUTPUT_FILE" + else + echo "$CLEAN_LINE" >> "$OUTPUT_FILE" + fi + fi + done +fi + +# Add performance improvements if any +if [ -n "$PERF" ]; then + cat >> "$OUTPUT_FILE" << EOF + +### Performance Improvements + +EOF + echo "$PERF" | while IFS= read -r line; do + if [ -n "$line" ]; then + PR=$(extract_pr_number "$line") + CLEAN_LINE=$(echo "$line" | sed -E 's/ \(#[0-9]+\)$//') + if [ -n "$PR" ]; then + echo "$CLEAN_LINE ([#$PR](https://github.com/influxdata/influxdb/pull/$PR))" >> "$OUTPUT_FILE" + else + echo "$CLEAN_LINE" >> "$OUTPUT_FILE" + fi + fi + done +fi + +# Add HTTP API changes if any +if [ -n "$API_CHANGES" ]; then + cat >> "$OUTPUT_FILE" << EOF + +### HTTP API Changes + +EOF + echo "$API_CHANGES" | while IFS= read -r line; do + if [ -n "$line" ]; then + PR=$(extract_pr_number "$line") + CLEAN_LINE=$(echo "$line" | sed -E 's/ \(#[0-9]+\)$//') + if [ -n "$PR" ]; then + echo "$CLEAN_LINE ([#$PR](https://github.com/influxdata/influxdb/pull/$PR))" >> "$OUTPUT_FILE" + else + echo "$CLEAN_LINE" >> "$OUTPUT_FILE" + fi + fi + done +fi + +# Add API analysis summary +cat >> "$OUTPUT_FILE" << 
EOF + +### API Analysis Summary + +The following endpoints may have been affected in this release: +- v1 API endpoints: \`/write\`, \`/query\`, \`/ping\` +- v2 API endpoints: \`/api/v2/write\`, \`/api/v2/query\` +- v3 API endpoints: \`/api/v3/*\` +- System endpoints: \`/health\`, \`/metrics\` + +Please review the commit details above and consult the API documentation for specific changes. + +EOF + +echo -e "\n${GREEN}Release notes generated in: ${OUTPUT_FILE}${NC}" +echo -e "${YELLOW}Please review and edit the generated notes before adding to documentation.${NC}" +echo -e "${BLUE}API changes have been automatically detected and included.${NC}" \ No newline at end of file diff --git a/hugo.staging.yml b/hugo.staging.yml deleted file mode 100644 index dd936bf3a..000000000 --- a/hugo.staging.yml +++ /dev/null @@ -1,60 +0,0 @@ -baseURL: https://test2.docs.influxdata.com/ -languageCode: en-us -title: InfluxDB Documentation - -# Git history information for lastMod-type functionality -enableGitInfo: true - -# Syntax Highlighting -pygmentsCodefences: true -pygmentsUseClasses: true - -# Preserve case in article tags -preserveTaxonomyNames: true - -# Generate a robots.txt -enableRobotsTXT: true - -# Custom staging params -params: - environment: staging - -# Markdown rendering options -blackfriday: - hrefTargetBlank: true - smartDashes: false - -taxonomies: - influxdb/v2/tag: influxdb/v2/tags - influxdb/cloud/tag: influxdb/cloud/tags - influxdb3/cloud-serverless/tag: influxdb/cloud-serverless/tags - influxdb/cloud-dedicated/tag: influxdb/cloud-dedicated/tags - influxdb/clustered/tag: influxdb/clustered/tags - influxdb3/core/tag: influxdb3/core/tags - influxdb3/enterprise/tag: influxdb3/enterprise/tags - flux/v0/tag: flux/v0/tags - -markup: - goldmark: - renderer: - unsafe: true - extensions: - linkify: false - parser: - attribute: - block: true - -privacy: - googleAnalytics: - anonymizeIP: false - disable: false - respectDoNotTrack: true - useSessionStorage: false - youtube: 
- disable: false - privacyEnhanced: true -outputFormats: - json: - mediaType: application/json - baseName: pages - isPlainText: true diff --git a/hugo_stats.json b/hugo_stats.json new file mode 100644 index 000000000..7550a0b1f --- /dev/null +++ b/hugo_stats.json @@ -0,0 +1,18019 @@ +{ + "htmlElements": { + "tags": [ + "--", + "?xml", + "a", + "api-access-token", + "article", + "aside", + "b", + "blockquote", + "body", + "bot-username", + "br", + "button", + "circle", + "code", + "database", + "div", + "ellipse", + "em", + "form", + "g", + "h1", + "h2", + "h3", + "h4", + "h5", + "h6", + "head", + "host", + "hr", + "html", + "iframe", + "img", + "input", + "label", + "li", + "limit", + "limitevery", + "line", + "link", + "meta", + "n", + "nil", + "node-type", + "nolink", + "noscript", + "ol", + "p", + "path", + "polygon", + "polyline", + "pre", + "qid", + "rect", + "script", + "section", + "span", + "strong", + "style", + "sup", + "svg", + "table", + "tbody", + "td", + "text", + "textarea", + "th", + "thead", + "title", + "tr", + "ul", + "var" + ], + "classes": [ + "-depth0", + "-depth1", + "-depth2", + "AddCell_New", + "AlertTriangle", + "Annotate_New", + "BarChart_New", + "Bell", + "Bill", + "BookCode", + "BucketSolid", + "CMpTe", + "Calendar", + "CaretOutlineRight", + "Chat", + "Checkmark_New", + "Clock_New", + "Cloud", + "CogOutline_New", + "CogSolid_New", + "CrownSolid_New", + "CuboSolid", + "Darkmode_New", + "DashH", + "Download_New", + "Duplicate_New", + "EoFth", + "ExpandB", + "Export_New", + "EyeClosed", + "EyeOpen", + "Flask", + "GraphLine_New", + "Group", + "KWWXd", + "Lightmode_New", + "Link", + "More", + "Pause", + "Pencil", + "Play", + "Plus_New", + "Refresh_New", + "Remove_New", + "Search_New", + "Share", + "Shield", + "SidebarClose", + "SidebarOpen", + "Text_New", + "Trash_New", + "Upload_New", + "User", + "YJDJL", + "abbr", + "account", + "acct-icon", + "acct-icon-v3", + "acct-icon-v4", + "acct-inner", + "acct-inner-v3", + "acct-inner-v4", + 
"acct-label", + "acct-label-v3", + "acct-label-v4", + "actions", + "active", + "add-btn-round", + "add-cell-new", + "alert-triangle-new", + "all-caps", + "annotate-new", + "api-content", + "api-endpoint", + "api-info", + "array", + "arrow", + "article", + "article--content", + "ask-ai-open", + "ask-ai-trigger", + "asterisk", + "auth-item", + "avoid-wrap", + "bBFKjV", + "bCvCHz", + "bHzJuy", + "bIbZvd", + "bJnWIW", + "bPmeoW", + "bXnXQF", + "back", + "banner-content", + "banner-cta", + "bar-chart-new", + "bbfNvi", + "bcnRwz", + "bell", + "beta", + "beta-content", + "bg-overlay", + "bjdvNh", + "block", + "blue", + "bmwRob", + "bnCoiE", + "bold", + "book-pencil", + "boolean", + "border-left", + "bottom", + "bowlofsweets", + "bsGeIE", + "btn", + "buttons", + "bwNcZa", + "bwfRit", + "bymHyU", + "c1", + "c2", + "cAOCuf", + "cEsevO", + "cJyzuM", + "cTueGk", + "cTzVOd", + "cVHUjN", + "cWoVOF", + "calendar", + "caption", + "card", + "cards", + "categories", + "category", + "category-head", + "caution", + "center", + "cf-icon", + "cfctgs", + "checkbox", + "checkbox-text", + "checkmark-new", + "checkpoint", + "children", + "children-links", + "children-list", + "children-toggle", + "chroma", + "chronograf", + "circle", + "cite", + "clock-new", + "close-notification", + "closed", + "cloud", + "cloud-urls", + "cluster-name", + "clustered", + "clusters", + "code-placeholder", + "code-placeholder-edit-icon", + "code-placeholder-key", + "code-placeholder-wrapper", + "code-tab-content", + "code-tabs", + "code-tabs-wrapper", + "cog-solid-new", + "collapsed", + "collapser", + "collapsible", + "color-key", + "column-list", + "columns-4", + "columns-wrapper", + "comma", + "community", + "content-wrapper", + "copyright", + "crown-solid-new", + "csngrC", + "cuYWRV", + "cubo", + "current-date", + "current-row", + "current-time", + "current-timestamp", + "current-version", + "custom", + "custom-time-trigger", + "custom-time-trigger__button", + "custom-timestamps", + "dFWqin", + "dXjyFC", + 
"dYlGyN", + "dashed", + "date", + "delete", + "deleted", + "deleted-label", + "deprecated", + "df-icon", + "diagram-line", + "discord", + "divider", + "dkqrXs", + "dmghQN", + "dnuELe", + "docs-home", + "download", + "download-new", + "dropdown", + "dropdown-arrow", + "dropdown-option", + "dropdown-selector", + "dropdown-selector-content", + "dropdown-selector-search", + "dropdown-selector-value", + "duplicate", + "dzKtIW", + "eONCmm", + "ePwgUU", + "eQQUSD", + "eQzShU", + "eSYQnm", + "eUdCtG", + "eWToXe", + "edit", + "efuQZt", + "egQuEZ", + "el", + "ellipsis", + "enterprise-eol-date", + "enterprise_influxdb", + "ephemeral", + "error-code", + "error-code-border", + "error-content", + "error-page", + "euJMtE", + "euRMgx", + "evYMTo", + "even-wrap", + "example", + "exclude", + "expand", + "expand-b", + "expand-content", + "expand-label", + "expand-link", + "expand-toggle", + "expand-wrapper", + "experimental", + "export-new", + "external", + "eye-closed", + "eye-open-new", + "fABPTr", + "fKFAhr", + "fQwboL", + "fXuQNl", + "fake-radio", + "fcIjHV", + "feather", + "feather-chevron-down", + "feedback", + "feedback-channels", + "ffLgqz", + "fguZND", + "fieldset", + "filter-category", + "filter-item", + "filter-list", + "flex-container", + "flex-wrapper", + "flux", + "flux-influxdb", + "flux-influxdb-versions", + "flux-water-diagram", + "foNyhx", + "focused", + "footer-widgets", + "form-buttons", + "fqzhkP", + "frame", + "fs-diagram", + "full", + "fullscreen-close", + "fullscreen-code", + "function-list", + "fxZJZV", + "gAerEa", + "gBTuHc", + "gBwOdz", + "gCgqqY", + "gEtKwR", + "gGvkZD", + "gJKPGC", + "gKXwn", + "gNOLNk", + "gPCWYe", + "gWxDzL", + "ga-announcement", + "gaEEuU", + "gcushC", + "gemyvL", + "geo-cell", + "geo-point", + "geo-region", + "get", + "git-head", + "github", + "github-link", + "gjiGnZ", + "gkwAYh", + "gmNZmS", + "graph-line-new", + "graphline-2", + "green", + "group", + "group-title", + "group-wrapper", + "groups", + "groups-with-frame", + "hDhyRZ", + 
"hFbBvr", + "hHRjJL", + "hINeXe", + "hQgNml", + "hUSnpT", + "half", + "handle-new", + "head", + "helpful", + "hide", + "hide-commas", + "hide-elements", + "hide-whitespace", + "highlight", + "hikpbD", + "home", + "home-content", + "horizontal", + "hoverable", + "hsSsLr", + "http-verb", + "hyMKIl", + "hzjqvH", + "iARuYK", + "iAqBwY", + "iEFeuB", + "iEcHcE", + "iULjjV", + "iUxAWq", + "iXmHCl", + "iZkjfb", + "icZuVc", + "icon", + "icon-add-cell", + "icon-alert", + "icon-bar-chart", + "icon-bell", + "icon-book-pencil", + "icon-calendar", + "icon-chat", + "icon-checkmark", + "icon-chevron-right", + "icon-cloud", + "icon-cog-thick", + "icon-crown", + "icon-dashboards", + "icon-data-explorer", + "icon-disks-nav", + "icon-download", + "icon-duplicate", + "icon-export", + "icon-eye-closed", + "icon-eye-open", + "icon-fullscreen", + "icon-github", + "icon-influx-icon", + "icon-influx-logo", + "icon-influx-logotype", + "icon-nav-chat", + "icon-note", + "icon-organizations", + "icon-pause", + "icon-pencil", + "icon-play", + "icon-refresh", + "icon-remove", + "icon-search", + "icon-trash", + "icon-triangle", + "icon-ui-plus", + "icon-user", + "icon-users-trio", + "icon-wrench-2", + "icon-wrench-nav", + "icon-x", + "iglPxx", + "ihkizP", + "ikafbi", + "ikavEm", + "important", + "inNGOu", + "inactive", + "influx-home", + "influxdb", + "influxdb-connector", + "influxdb-logo", + "influxdb-versions", + "influxdb3", + "influxdbu-banner", + "influxdbu-logo", + "influxql-table-meta", + "info", + "ingest-new", + "initial", + "inline", + "inner", + "interval", + "ipvKNC", + "issue", + "italic", + "item", + "item-list", + "iyzZle", + "jLtOTj", + "jRjoAh", + "jSPrUM", + "jaCkRh", + "jdCbTS", + "jlyJRK", + "jnEbBv", + "joKODG", + "joaceI", + "jolsGY", + "juYXUf", + "jvHOSr", + "jxJlxZ", + "jzRrfm", + "kBgcMI", + "kHJfMF", + "kHKMOg", + "kZsYIY", + "kafka-toggle", + "kapacitor", + "keSXcO", + "keep", + "keep-tab", + "keep-url", + "key", + "key-geo-cell", + "key-geo-point", + "key-geo-region", 
+ "keybinding", + "keyword", + "kmMwfl", + "large", + "last", + "learn-items", + "left", + "lfknEY", + "lhdonw", + "lifecycle-wrapper", + "list-links", + "list-note", + "lkJIic", + "llGFDD", + "lndJCi", + "loader", + "loader-wrapper", + "logo-row", + "long", + "magenta", + "main", + "measurement", + "medium", + "menu-content", + "mermaid", + "meta", + "metadata", + "middle", + "min", + "modal", + "modal-body", + "modal-content", + "modal-overlay", + "modal-wrapper", + "monospace", + "more-info", + "nav-category", + "nav-icon", + "nav-icon-", + "nav-icon-label", + "nav-icon-v3", + "nav-icon-v4", + "nav-item", + "nav-item-", + "nav-item-v3", + "nav-item-v4", + "nav-items", + "nav-items-", + "nav-items-v3", + "nav-items-v4", + "nav-label-", + "nav-label-v3", + "nav-label-v4", + "new", + "next", + "no-shorthand", + "node", + "normal", + "note", + "notebook-add-cell", + "notification", + "notification-content", + "notification-slug", + "notification-title", + "nowrap", + "number", + "obj", + "old-version", + "one", + "one-quarter", + "one-third", + "op25", + "op50", + "op65", + "op70", + "open", + "operation", + "operation-type", + "operator-example", + "orange", + "orgname", + "outer", + "padding-wrapper", + "page-nav-btns", + "page-wrapper", + "patch", + "pause", + "pcWDP", + "pencil", + "pending", + "periods", + "pink", + "plan-arrow", + "plan-block", + "plan-column", + "plan-double-column", + "plan-single-column", + "platform", + "play", + "plugin-card", + "plus-new", + "point", + "points", + "post", + "powered-by-example", + "pref-tab", + "prev", + "product", + "product-group", + "product-info", + "product-links", + "product-list", + "products", + "property", + "provider", + "providers", + "punctuation", + "put", + "q-link", + "quarter", + "quix-connector", + "quix-logo", + "quix-stream-component", + "quix-stream-container", + "quix-streams-logo", + "rVwLz", + "radio", + "radio-btns", + "range-interval", + "range-numeric", + "react-tabs__tab", + 
"react-tabs__tab--selected", + "react-tabs__tab-list", + "react-tabs__tab-panel", + "react-tabs__tab-panel--selected", + "read-more", + "reddit", + "redoc-json", + "redoc-markdown", + "redoc-wrap", + "reference", + "refresh-new", + "region", + "region-group", + "related", + "remove-new", + "req", + "required", + "resources", + "retention-label", + "right", + "row", + "rows", + "sc-Arkif", + "sc-EZqKI", + "sc-FRrlG", + "sc-TtZnY", + "sc-WZYut", + "sc-amkrK", + "sc-bBjRSN", + "sc-bCwfaz", + "sc-bQCEYZ", + "sc-bTDOke", + "sc-bXexck", + "sc-bYwzuL", + "sc-bkbkJK", + "sc-bqGGPW", + "sc-cBoqAE", + "sc-cKRKFl", + "sc-cOifOu", + "sc-cTJkRt", + "sc-carFqZ", + "sc-cbeScs", + "sc-ciSkZP", + "sc-ckTSus", + "sc-crzoAE", + "sc-cxNHIi", + "sc-dIsUp", + "sc-dPaNzc", + "sc-dTSzeu", + "sc-dWBRfb", + "sc-dkQUaI", + "sc-dlMDgC", + "sc-dtLLSn", + "sc-dvUynV", + "sc-dvXYtj", + "sc-eCApnc", + "sc-eGJWMs", + "sc-eJocfa", + "sc-eWnToP", + "sc-efHYUO", + "sc-eirqVv", + "sc-ellfGf", + "sc-euEtCV", + "sc-fFSPTT", + "sc-fHCHyC", + "sc-fIxmyt", + "sc-fKgJPI", + "sc-fWWYYk", + "sc-fXazdy", + "sc-fXgAZx", + "sc-fbIWvP", + "sc-fcmMJX", + "sc-fuISkM", + "sc-fujyAs", + "sc-gGLxEB", + "sc-gIvpjk", + "sc-gKAaRy", + "sc-gVFcvn", + "sc-gXfVKN", + "sc-giAqHp", + "sc-gsWcmt", + "sc-gstuGz", + "sc-gzcbmu", + "sc-hBMUJo", + "sc-hHEiqL", + "sc-hKFxyN", + "sc-hOPeYd", + "sc-hhIiOg", + "sc-hkeOVe", + "sc-hmbstg", + "sc-iBzEeX", + "sc-iCoGMd", + "sc-iIgjPs", + "sc-iJCRrE", + "sc-iNiQyp", + "sc-iemWCZ", + "sc-iqAclL", + "sc-irKDMX", + "sc-iwajpm", + "sc-jHNicF", + "sc-jHcXXw", + "sc-jNnpgg", + "sc-jOFryr", + "sc-jQAxuV", + "sc-jSFjdj", + "sc-jUfyBS", + "sc-jXcxbT", + "sc-jcwpoC", + "sc-jffHpj", + "sc-jlZJtj", + "sc-jtiXyc", + "sc-kEqXSa", + "sc-kLojOw", + "sc-kTCsyW", + "sc-kYPZxB", + "sc-kfYoZR", + "sc-khIgEk", + "sc-kizEQm", + "sc-lbVvki", + "sc-lmgQwP", + "sc-oeezt", + "sc-pNWdM", + "sc-xGAEC", + "scaling-strategy", + "scrollbar-container", + "search", + "search-and-nav-toggle", + "search-btn", + 
"search-icon", + "search-input", + "section", + "security-details", + "selected", + "selector-dropdowns", + "series-diagram", + "shape", + "shard", + "shard-group", + "shard-groups", + "shift-left", + "short", + "shorthand-flags", + "show", + "show-more", + "sidebar", + "sidebar--search", + "sidebar--search-field", + "sidebar-toggle", + "slack", + "sm", + "small", + "small-plus", + "solid", + "spbic", + "spinner", + "split", + "sql", + "st0", + "st1", + "st10", + "st11", + "st12", + "st13", + "st14", + "st15", + "st16", + "st17", + "st18", + "st19", + "st2", + "st20", + "st21", + "st3", + "st4", + "st5", + "st6", + "st7", + "st8", + "st9", + "state", + "storage-description", + "stream-of-tables", + "string", + "submit-wrapper", + "support", + "supported", + "tab-content", + "tab-error", + "tab-redirect", + "tab-success", + "tab-view-output", + "table-group", + "table-group-key", + "tabs", + "tabs-wrapper", + "tag", + "tags", + "tagset", + "telegraf", + "text", + "text-new", + "theme-switch", + "theme-switch-dark", + "theme-switch-light", + "third", + "three-quarters", + "timeline", + "timestamp", + "tip", + "title", + "title-tag", + "toggle-hamburger", + "toggle-icon", + "token", + "tooltip", + "top", + "topnav", + "topnav-left", + "topnav-right", + "trash-new", + "triangle", + "truncate", + "truncate-bottom", + "truncate-content", + "truncate-toggle", + "two", + "two-columns", + "two-thirds", + "ui-toggle", + "undefined", + "updated-in", + "url-trigger", + "username", + "users-duo", + "v1", + "v2", + "v3", + "v3-wayfinding-btn", + "v4", + "version", + "version-col", + "version-list", + "version-row", + "vertical", + "video", + "video-card", + "video-content", + "video-date", + "video-title", + "videos-wrapper", + "view-in-chronograf", + "visible", + "warn", + "warning", + "wayfinding", + "wayfinding-actions", + "wayfinding-content", + "wayfinding-content-info", + "wayfinding-wrapper", + "whitespace", + "widget", + "window-frame-units", + "window-hour", + 
"windows", + "wrench-nav", + "xsmall" + ], + "ids": [ + "--all-", + "--auth-duration", + "--auth0-client-id", + "--auth0-client-secret", + "--auth0-domain", + "--auth0-organizations", + "--basepath---p", + "--batch-size-", + "--blocks-", + "--bolt-path---b", + "--c-int-", + "--c-number-", + "--canned-path---c", + "--cert", + "--compact-series-file-", + "--compress-", + "--concurrency-", + "--conflicts-file-string-", + "--conflicts-file-string--1", + "--custom-auto-refresh", + "--custom-link-display_namelink_address", + "--database-db_name-", + "--database-db_name--1", + "--db-db_name-", + "--detailed-", + "--detailed--1", + "--detailed--2", + "--develop---d", + "--dir-path-", + "--dir-path--1", + "--end-timestamp-", + "--etcd-cert", + "--etcd-dial-timeout", + "--etcd-endpoints---e", + "--etcd-key", + "--etcd-password", + "--etcd-request-timeout", + "--etcd-root-ca", + "--etcd-username", + "--exact-", + "--exact--1", + "--filter-key-key_name-", + "--generic-api-url", + "--generic-auth-url", + "--generic-client-id", + "--generic-client-secret", + "--generic-domains", + "--generic-name", + "--generic-scopes", + "--generic-token-url", + "--github-client-id---i", + "--github-client-secret---s", + "--github-organization---o", + "--github-url", + "--google-client-id", + "--google-client-secret", + "--google-domains", + "--help---h", + "--heroku-client-id", + "--heroku-organization", + "--heroku-secret", + "--host", + "--host-page-disabled---h", + "--inactivity-duration", + "--index-", + "--influxdb-org", + "--influxdb-password", + "--influxdb-token", + "--influxdb-url", + "--influxdb-username", + "--kapacitor-password", + "--kapacitor-url", + "--kapacitor-username", + "--key", + "--log-level---l", + "--lponly-", + "--max-cache-size-", + "--max-log-file-size-", + "--measurement-filter-regular_expression-", + "--measurements-", + "--oauth-no-pkce", + "--out-export_dir-", + "--out-export_dir-or--out--", + "--path-string-", + "--pattern-regular-expressionwildcard-", + 
"--port", + "--public-url", + "--reporting-disabled---r", + "--resources-path", + "--retention-rp_name-", + "--retention-rp_name--", + "--rollup-string-", + "--schema-file-string-", + "--schema-file-string--1", + "--series-", + "--series-file-path-", + "--shard-shard_id-", + "--show-duplicates-", + "--start-timestamp-", + "--status-feed-url", + "--tag-key-filter-regular_expression-", + "--tag-keys-", + "--tag-value-filter-regular_expression-", + "--tag-value-series-", + "--tag-values-", + "--token-secret---t", + "--top-n-", + "--v-", + "--v--1", + "--v--2", + "--version---v", + "--vv-", + "--vvv-", + "--waldir-wal_dir-", + "-datadir-data_dir", + "-datadir-data_dir-1", + "-db-path-path-to-db", + "-db-path-string", + "-dir-storage_root", + "-measurement", + "-not--exists", + "-not--in", + "-not-exists-syntax", + "-not-in-examples", + "-not-in-syntax", + "-sanitize", + "-series-file-series_path", + "-skipverify", + "-url", + "-v", + "-waldir-wal_dir", + "1-chronografs-data-explorer", + "1-create-the-database", + "1-define-your-data-source", + "1-disable-existing-integrations", + "1-identify-the-leader-node", + "1-locate-your-desired-backup-file", + "1-provision-a-new-data-node", + "1-stop-influxdb", + "137", + "140", + "141", + "142", + "143", + "150", + "151", + "152", + "153", + "154", + "155", + "160", + "161", + "162", + "163", + "164", + "165", + "166", + "170", + "171", + "172", + "173", + "174", + "175", + "176", + "177", + "1x-api-compatibility-and-stability", + "1x-compatible-authorizations", + "1x-configuration-groups-not-in-27", + "2-create-a-two-hour-default-retention-policy", + "2-influx-cli", + "2-remove-all-_series-directories", + "2-replace-all-non-leader-nodes", + "2-replace-the-old-data-node-with-the-new-data-node", + "2-specify-a-time-range", + "2-stop-existing-influxdb-beta-instance", + "2-stop-your-chronograf-server", + "2003", + "2019-04-05", + "2019-05-02", + "2019-05-06-public-beta", + "2019-07-23-general-availability", + 
"2019-09-10-monitoring--alerts", + "2020-09-02", + "2020-9-25", + "20230907-597343", + "20230908-600131", + "20230911-604209", + "20230912-619813", + "20230914-628600", + "20230915-630658", + "20230922-650371", + "20231004-666907", + "20231024-711448", + "20231115-746129", + "20231117-750011", + "20231213-791734", + "20240111-824437", + "20240214-863513", + "20240227-883344", + "20240326-922145", + "20240418-955990", + "20240430-976585", + "20240605-1035562", + "20240717-1117630", + "20240819-1176644", + "20241024-1354148", + "20241217-1494922", + "20250212-1570743", + "20250508-1719206", + "21-provision-a-new-meta-node", + "22-remove-the-non-leader-meta-node", + "23-add-the-new-meta-node", + "24-confirm-the-meta-node-was-added", + "25-remove-and-replace-all-other-non-leader-meta-nodes", + "25826", + "2x-nightly-images", + "3-confirm-the-data-node-was-added", + "3-create-a-52-week-retention-policy", + "3-filter-your-data", + "3-optional-rename-existing-influxdb-binaries", + "3-remove-all-index-directories", + "3-replace-the-leader-node", + "3-replace-your-current-database-with-the-backup", + "31---kill-the-meta-process-on-the-leader-node", + "32---remove-and-replace-the-old-leader-node", + "3d-printing", + "4-check-the-copy-shard-status", + "4-create-the-continuous-query", + "4-install-the-desired-chronograf-version", + "4-move-existing-data-and-start-the-latest-influxdb", + "4-rebuild-the-tsi-index", + "4-yield-your-queried-data", + "4242", + "5-restart-influxdb", + "5-start-old-influxdb-beta-instance", + "5-start-the-chronograf-server", + "500_errors_24hyaml", + "500_errorstick", + "6-create-configuration-profiles-for-the-influxdb-cli", + "7-copy-all-resources-from-old-instance-to-the-new-one", + "8-set-up-integrations-to-point-to-new-instance", + "8086", + "8088", + "8089", + "8089-1", + "8091", + "9-load-historical-data-into-new-instance", + "_field", + "_measurement", + "_monitoring-bucket-schema", + "_monitoring-system-bucket", + "_tasks-bucket-schema", + 
"_tasks-system-bucket", + "_time", + "_value", + "a", + "a-bug-was-fixed-around-missing-fields-in-the-derivative-node", + "a-configure-old-profile", + "a-dbrp-combination-can-only-be-mapped-to-a-single-bucket", + "a-destination-bucket", + "a-hypothetical-installation", + "a-note-on-rest", + "a-note-on-udpip-buffer-sizes", + "a-note-on-udpip-os-buffer-sizes", + "a-query-that-contains-an-error", + "a-real-world-example", + "a-request-with-invalid-authentication-credentials", + "a-required-identifier-is-missing", + "a-source-bucket", + "a-string-literal-is-used-instead-of-an-identifier", + "a-successful-request-that-returns-an-error", + "a-successful-request-that-returns-data", + "a-successful-write", + "a-valid-license-is-required", + "a-where-clause-query-unexpectedly-returns-no-data", + "about-chronograf-organizations", + "about-the-project", + "above-header-type-conversion", + "abs", + "absolute-time", + "abstract-syntax-tree-ast", + "access-authentication-and-authorization", + "access-billing-details", + "access-community-plugins-from-github", + "access-log-path", + "access-log-path--", + "access-log-status-filters", + "access-log-status-filters--", + "access-the-admin-ui", + "access-the-cloud-dedicated-admin-ui", + "access-the-cluster", + "access-your-operational-dashboard", + "accessible-graph-color-options", + "accessing-members-of-dynamic-types", + "accessing-values", + "accessing-values-in-string-templates", + "account-for-missing-non-group-key-values", + "account-management", + "account-management-1", + "acos", + "acosh", + "acquire-duration", + "action", + "actions", + "activate-a-commercial-license", + "activate-a-license", + "activate-a-trial-or-at-home-license", + "activate-a-v1-authorization", + "activate-an-api-token", + "active", + "actively-develop-and-extend-templates", + "activemq", + "ad-hoc-polymorphism", + "adaptive-zoom", + "add-a-cell", + "add-a-column-to-the-group-key", + "add-a-custom-telegraf-configuration-to-influxdb", + 
"add-a-data-node-to-a-cluster", + "add-a-data-node-to-a-cluster-using-a-remote-meta-node", + "add-a-data-source-cell", + "add-a-duration-to-a-time-value", + "add-a-duration-to-a-timestamp", + "add-a-kapacitor-instance", + "add-a-label-to-a-check", + "add-a-label-to-notification-endpoint", + "add-a-label-to-notification-rules", + "add-a-member", + "add-a-member-to-an-organization", + "add-a-member-to-an-organization-and-make-them-an-owner", + "add-a-member-to-an-organization-using-the-influx-cli", + "add-a-meta-node-to-a-cluster", + "add-a-meta-node-to-a-cluster-using-a-remote-meta-node", + "add-a-note", + "add-a-note-to-your-dashboard", + "add-a-pagerduty-deduplication-key-to-output-data", + "add-a-passive-data-node-to-a-cluster", + "add-a-processing-engine-plugin", + "add-a-secret", + "add-a-secret-using-the-influx-cli", + "add-a-secret-using-the-influxdb-api", + "add-a-secret-using-the-influxdb-cloud-ui", + "add-a-shared-secret-in-your-influxdb-configuration-file", + "add-a-task-cell", + "add-a-time-zone-offset-to-a-timestamp-without-a-specified-timezone", + "add-a-token-to-a-cli-request", + "add-a-user", + "add-a-user-to-a-role", + "add-a-validation-cell", + "add-a-visualization-cell", + "add-ability-to-share-notebooks", + "add-an-action-cell", + "add-an-alert-cell", + "add-an-arbitrary-tag", + "add-an-output-to-bucket-cell", + "add-and-subtract-time-values", + "add-and-use-event-handlers", + "add-authorizations-for-external-clients", + "add-auto-refresh-functionality-to-notebooks", + "add-data-to-your-dashboard", + "add-dns-entries", + "add-dns-entries-for-each-of-your-servers", + "add-example-plugins", + "add-existing-resources-to-the-github-repository", + "add-generic-error-counters-to-every-node-type", + "add-labels-to-a-node", + "add-labels-to-dashboard-items", + "add-n-hours-to-a-time", + "add-new-columns-and-preserve-existing-columns", + "add-new-webhooks", + "add-node-to-existing-cluster", + "add-one-month-to-yesterday", + 
"add-or-update-a-check-description", + "add-or-update-your-contact-information", + "add-or-update-your-payment-method", + "add-outputsfile-to-read-to-a-file-or-stdout", + "add-parser-information-to-your-telegraf-configuration", + "add-permissions-to-a-role-for-a-specific-database", + "add-permissions-to-a-role-for-all-databases", + "add-queries-to-dashboards", + "add-s2-cell-ids-to-existing-geotemporal-data", + "add-sample-data", + "add-sample-data-buckets-to-notebooks-ui", + "add-sample-data-with-community-template", + "add-scaled-mixed-duration-to-a-time", + "add-secrets", + "add-single-quotes-in-query", + "add-single-quotes-in-variable-definition", + "add-six-hours-to-a-relative-duration", + "add-six-hours-to-a-timestamp", + "add-tags", + "add-telegraf-plugins", + "add-the-handler", + "add-times-to-your-aggregates", + "add-token-permissions", + "add-users-that-need-administrative-access-to-your-influxdb-cluster", + "add-variables", + "add-your-task", + "addable-constraint", + "added-support-for-nodeaffinity-at-the-per-component-level", + "adding-a-custom-field", + "adding-a-kapacitor-connection-in-chronograf", + "adding-tags", + "addition", + "additional-appinstance-parameters", + "additional-features", + "additional-naming-guidelines", + "additional-updates", + "additional-updates-1", + "additionalinfo", + "additive-changes", + "addr", + "addremovenode", + "adjust-performance", + "adjustable-service-quotas", + "admin", + "admin-section", + "admin-section-is-no-longer-required", + "admin-user-group", + "admin-user-management", + "admin-users", + "administration", + "administration-1", + "administrative-ui", + "admins-roleadmin", + "advance-period", + "advance-period--30m", + "advanced-controls", + "advanced-examples", + "advanced-group-by-time-syntax", + "advanced-kapacitor-usage", + "advanced-syntax", + "advanced-syntax-1", + "advanced-syntax-10", + "advanced-syntax-11", + "advanced-syntax-12", + "advanced-syntax-13", + "advanced-syntax-14", + 
"advanced-syntax-15", + "advanced-syntax-16", + "advanced-syntax-17", + "advanced-syntax-18", + "advanced-syntax-19", + "advanced-syntax-2", + "advanced-syntax-20", + "advanced-syntax-21", + "advanced-syntax-22", + "advanced-syntax-23", + "advanced-syntax-3", + "advanced-syntax-4", + "advanced-syntax-5", + "advanced-syntax-6", + "advanced-syntax-7", + "advanced-syntax-8", + "advanced-syntax-9", + "advanced-template-variable-usage", + "advanced-trigger-configuration", + "advantages", + "ae-enterprise-only", + "aerospike", + "agent", + "agent-1", + "agent-2", + "agent-3", + "agent-4", + "agent-5", + "agent-6", + "agent-configuration", + "aggr_cpu_alertsyaml", + "aggregate", + "aggregate-and-selector-functions", + "aggregate-by-calendar-months-and-years", + "aggregate-by-time-based-intervals", + "aggregate-by-week", + "aggregate-data", + "aggregate-data-with-influxql", + "aggregate-data-with-sql", + "aggregate-examples", + "aggregate-fields-by-groups", + "aggregate-function-characteristics", + "aggregate-functions", + "aggregate-gross-and-net-profit", + "aggregate-multiple-columns", + "aggregate-or-apply-selector-functions-to-data", + "aggregate-or-select-data-based-on-type", + "aggregate-or-select-specific-data", + "aggregate-over-time", + "aggregate-package", + "aggregate-selectors", + "aggregate-windowed-data", + "aggregates", + "aggregatewindow-function", + "aggregatewindow-helper-function", + "aggregation", + "aggregation-functions", + "aggregations", + "aggregator", + "aggregator-basicstats", + "aggregator-configuration", + "aggregator-configuration-examples", + "aggregator-derivative", + "aggregator-final", + "aggregator-histogram", + "aggregator-merge", + "aggregator-minmax", + "aggregator-plugin", + "aggregator-plugin-updates", + "aggregator-plugin-updates-1", + "aggregator-plugin-updates-2", + "aggregator-plugins", + "aggregator-plugins-1", + "aggregator-quantile", + "aggregator-starlark", + "aggregator-valuecounter", + "aggregators", + 
"air-gapped-deployment", + "air-sensor-sample-data", + "alert", + "alert-blocks", + "alert-deduplication", + "alert-endpoint-configurations", + "alert-event-data", + "alert-management", + "alert-on-data", + "alert-template", + "alert-template-file", + "alert-template-file-1", + "alert-templates", + "alert-when-metrics-stop-reporting", + "alerta", + "alerta-package", + "alerting", + "alerting-process", + "alerts-and-ids", + "alerts_inhibited", + "alerts_triggered", + "algolia-search-input", + "alias", + "alias-queried-fields-and-tags", + "aliases", + "aliases-1", + "aliases-2", + "aliases-3", + "align", + "align-fields-into-rows-based-on-time", + "align-sample-data-to-now", + "aligngroup", + "alignto", + "aliyuncms", + "all", + "all-access-api-token", + "all-access-token", + "all-active", + "all-duration-seconds", + "all-functions", + "all-new-users-are-superadmins-configuration-option", + "alloc", + "alloc_bytes", + "allow-grpchttp2", + "allow-out-of-order-writes", + "allow-tag-column-values-to-overwrite-parsed-metadata", + "allow_no_match-optional", + "already-killed", + "alter-retention-policy", + "altering-continuous-queries", + "always-provide-a-time-format-when-using-custom-partitioning", + "amazon-athena-data-source-name", + "amazon-rds-connection-credentials", + "amazon-web-services-aws", + "ambiguous-reference-to-unqualified-field", + "amd_rocm_smi", + "amon", + "amqp", + "amqp_consumer", + "an-example", + "an-incorrectly-formatted-query", + "an-influxql-keyword-is-used-as-an-unquoted-identifier", + "anaisdg-package", + "analytical-functions", + "analyze-a-query-plan", + "analyze-a-query-plan-for-leading-edge-data", + "analyze-actual-query-cost", + "analyze-branch-structures", + "analyze-go-runtime-profiles", + "analyze-logs-with-chronograf", + "analyze-queries", + "analyze-the-go-runtime-trace", + "analyze-your-queries", + "and", + "and-operator-in-the-where-clause", + "annotated-csv", + "annotation-rows", + "annotation-shorthand", + "annotations", + 
"annotations-example", + "announcement-expiration", + "announcer", + "annual-plan", + "anomalydetection-package", + "another-example", + "anti-entropy", + "anti-entropy-ae-settings", + "anti-entropy-log-messages", + "anti-entropy-service-disabled-by-default", + "apache", + "apache-arrow-flight-rpc-clients", + "apcupsd", + "api", + "api-and-client-library-differences", + "api-client-libraries", + "api-error-response-messages", + "api-error-responses", + "api-examples", + "api-fix", + "api-invokable-scripts", + "api-key", + "api-reference", + "api-request-duration-seconds", + "api-requests-total", + "api-token-types", + "api-updates", + "api-updates-1", + "apikey", + "apiv2-retentionrules-syntax", + "apiv2buckets-http-endpoint", + "apiv2delete-http-endpoint", + "apiv2query-endpoint-cant-query-influxdb-3", + "apiv2query-http-endpoint", + "apiv2query-not-supported", + "apiv2write-http-endpoint", + "apiv2write-parameters", + "app-key", + "appinstance-component-schema", + "appinstance-image-override-bug-fix", + "appinstance-resource", + "appkey", + "applicable-use-cases", + "application-and-service-security", + "application_insights", + "applications", + "apply-a-stack-that-has-associated-templates", + "apply-a-template-but-skip-resources", + "apply-a-template-from-a-file", + "apply-a-template-from-a-url", + "apply-a-template-from-stdin", + "apply-a-template-to-a-stack", + "apply-abs-to-a-field", + "apply-abs-to-each-field", + "apply-abs-to-time-windows-grouped-by-time", + "apply-acos-to-a-field", + "apply-acos-to-each-field", + "apply-acos-to-time-windows-grouped-by-time", + "apply-all-templates-in-a-directory", + "apply-an-aggregate-function-to-an-aggregated-result-set", + "apply-an-ldap-configuration-from-a-file", + "apply-an-ldap-configuration-from-stdin", + "apply-and-manage-templates-using-the-influx-cli", + "apply-asin-to-a-field", + "apply-asin-to-each-field", + "apply-asin-to-time-windows-grouped-by-time", + "apply-atan-to-a-field", + "apply-atan-to-each-field", 
+ "apply-atan-to-time-windows-grouped-by-time", + "apply-atan2-to-a-field-divided-by-another-field", + "apply-atan2-to-each-field-divided-by-a-numeric-value", + "apply-atan2-to-time-windows-grouped-by-time", + "apply-ceil-to-a-field", + "apply-ceil-to-each-field", + "apply-ceil-to-time-windows-grouped-by-time", + "apply-cos-to-a-field", + "apply-cos-to-each-field", + "apply-cos-to-time-windows-grouped-by-time", + "apply-cumulative_sum-to-a-field", + "apply-cumulative_sum-to-each-field", + "apply-cumulative_sum-to-field-keys-that-match-a-regular-expression", + "apply-cumulative_sum-to-time-windows-grouped-by-time", + "apply-derivative-to-a-field-to-calculate-the-per-5-minute-change", + "apply-derivative-to-a-field-to-calculate-the-per-second-change", + "apply-derivative-to-each-field", + "apply-derivative-to-field-keys-that-match-a-regular-expression", + "apply-derivative-to-time-windows-grouped-by-time", + "apply-difference-to-a-field", + "apply-difference-to-each-field", + "apply-difference-to-field-keys-that-match-a-regular-expression", + "apply-difference-to-time-windows-grouped-by-time", + "apply-elapsed-to-a-field-and-return-elapsed-time-in-nanoseconds", + "apply-elapsed-to-a-field-and-return-elapsed-time-in-seconds", + "apply-elapsed-to-each-field", + "apply-elapsed-to-field-keys-that-match-a-regular-expression", + "apply-exp-to-a-field", + "apply-exp-to-each-field", + "apply-exp-to-time-windows-grouped-by-time", + "apply-floor-to-a-field", + "apply-floor-to-each-field", + "apply-floor-to-time-windows-grouped-by-time", + "apply-ln-to-a-field", + "apply-ln-to-each-field", + "apply-ln-to-time-windows-grouped-by-time", + "apply-log-to-a-field-with-a-base-of-3", + "apply-log-to-each-field-with-a-base-of-5", + "apply-log-to-time-windows-grouped-by-time", + "apply-log10-to-a-field", + "apply-log10-to-each-field", + "apply-log10-to-time-windows-grouped-by-time", + "apply-log2-to-a-field", + "apply-log2-to-each-field", + "apply-log2-to-time-windows-grouped-by-time", 
+ "apply-moving_average-to-a-field", + "apply-moving_average-to-each-field", + "apply-moving_average-to-field-keys-that-match-a-regular-expression", + "apply-moving_average-to-time-windows-grouped-by-time", + "apply-multiple-template-files-together", + "apply-non_negative_derivative-to-a-field-to-calculate-the-per-5-minute-change", + "apply-non_negative_derivative-to-a-field-to-calculate-the-per-second-change", + "apply-non_negative_derivative-to-each-field", + "apply-non_negative_derivative-to-field-keys-that-match-a-regular-expression", + "apply-non_negative_derivative-to-time-windows-grouped-by-time", + "apply-non_negative_difference-to-a-field", + "apply-non_negative_difference-to-each-field", + "apply-non_negative_difference-to-field-keys-that-match-a-regular-expression", + "apply-non_negative_difference-to-time-windows-grouped-by-time", + "apply-pow-to-a-field-with-a-power-of-3", + "apply-pow-to-each-field-with-a-power-of-5", + "apply-pow-to-time-windows-grouped-by-time", + "apply-restrictions-to-a-series-defined-by-multiple-tags", + "apply-round-to-a-field", + "apply-round-to-each-field", + "apply-round-to-time-windows-grouped-by-time", + "apply-sin-to-a-field", + "apply-sin-to-each-field", + "apply-sin-to-time-windows-grouped-by-time", + "apply-sqrt-to-a-field", + "apply-sqrt-to-each-field", + "apply-sqrt-to-time-windows-grouped-by-time", + "apply-tan-to-a-field", + "apply-tan-to-each-field", + "apply-tan-to-time-windows-grouped-by-time", + "apply-template-updates-across-multiple-influxdb-instances", + "apply-templates", + "apply-templates-from-files-and-urls", + "apply-templates-from-multiple-sources", + "apply-templates-to-an-existing-stack", + "apply-the-chande-momentum-oscillator-to-input-data", + "apply-the-changes-to-your-cluster", + "apply-the-template", + "apply-the-updated-image", + "apply-time-bounds-to-the-outer-query-to-improve-performance", + "apply-updates-from-source-controlled-templates", + "apply-your-changes", + 
"apply-your-configuration-changes", + "approx_distinct", + "approx_median", + "approx_percentile_cont", + "approx_percentile_cont_with_weight", + "approximate-aggregate-functions", + "approximate-functions", + "approximately-filter-geotemporal-data-by-region", + "apps-pre-created-dashboards", + "april-2021", + "april-2022", + "april-2025", + "architectural-overview", + "architecture", + "architecture-overview", + "arduino", + "arduino-onboarding", + "are-you-currently-limited-by-series-cardinality", + "are-you-reliant-on-flux-queries-and-flux-tasks", + "are-you-using-continuous-queries-cqs", + "are-you-using-docker", + "argument", + "arguments", + "arguments-1", + "arguments-10", + "arguments-11", + "arguments-12", + "arguments-13", + "arguments-14", + "arguments-15", + "arguments-16", + "arguments-17", + "arguments-18", + "arguments-19", + "arguments-2", + "arguments-20", + "arguments-21", + "arguments-22", + "arguments-23", + "arguments-24", + "arguments-25", + "arguments-26", + "arguments-27", + "arguments-28", + "arguments-29", + "arguments-3", + "arguments-30", + "arguments-31", + "arguments-32", + "arguments-33", + "arguments-34", + "arguments-35", + "arguments-36", + "arguments-37", + "arguments-38", + "arguments-39", + "arguments-4", + "arguments-5", + "arguments-6", + "arguments-7", + "arguments-8", + "arguments-9", + "arithmetic-operators", + "arm64", + "arr", + "array", + "array-literals", + "array-of-objects", + "array-of-scalar-values", + "array-package", + "array-syntax", + "array-types", + "array_agg", + "arrays", + "arrays-and-objects", + "arrow-flight-sql", + "arrow_cast", + "arrow_typeof", + "as", + "as-clause", + "ascii", + "asin", + "asinh", + "ask-or-answer-support-questions", + "assets-path", + "assign-a-new-aggregate-timestamp", + "assign-a-user-to-a-different-group", + "assign-a-user-to-the-new-role", + "assign-custom-states-to-data", + "assign-users-to-a-role", + "assignment-and-scope", + "assignment-operators", + 
"associate-values-to-tags-by-time", + "at-least-three-meta-nodes", + "at-time-zone", + "atan", + "atan2", + "atanh", + "athena-to-flux-data-type-conversion", + "attempt-to-write-a-string-to-a-field-that-previously-accepted-floats", + "attributes", + "auditor-user-group", + "august-2021", + "august-2022", + "aurora", + "auth-enabled", + "auth-enabled--false", + "auth-file", + "auth-file--etccollectdauth_file", + "auth0", + "auth0-organizations-optional", + "auth0-specific-oauth-20-authentication-flags", + "auth0-specific-oauth-20-authentication-options", + "authenticate-api-requests", + "authenticate-requests", + "authenticate-requests-to-influxdb-enterprise", + "authenticate-telegraf-requests-to-influxdb", + "authenticate-using-jwt-tokens", + "authenticate-with-a-token", + "authenticate-with-a-token-scheme", + "authenticate-with-a-username-and-password", + "authenticate-with-a-username-and-password-scheme", + "authenticate-with-basic-authentication", + "authenticate-with-cli-flags", + "authenticate-with-credentials-in-the-influx-shell", + "authenticate-with-environment-variables", + "authenticate-with-jwt", + "authenticate-with-kapacitor", + "authenticate-with-query-parameters-in-the-url-or-request-body", + "authenticate-with-the-cli", + "authenticate-with-the-influxdb-api", + "authenticate-with-the-kapacitor-api", + "authenticate-with-the-kapacitor-cli", + "authenticate-with-the-token-scheme", + "authenticate-with-username-and-password", + "authenticate-with-your-cluster", + "authenticate-your-connection-to-influxdb", + "authentication", + "authentication-and-authorization", + "authentication-and-authorization-auth", + "authentication-and-authorization-http-errors", + "authentication-and-authorization-in-influxdb", + "authentication-and-shared-secret", + "authentication-credentials", + "authentication-option-flags", + "authentication-options", + "authfail", + "authorization", + "auto-generate-buckets-on-write", + "auto-repair-missing", + 
"autogenerate-a-bucket-and-dbrp-mapping", + "autointerval", + "automate-deployments-with-gitops-and-stacks", + "automate-object-synchronization-to-an-external-s3-compatible-bucket", + "automate-processing-with-influxdb-tasks", + "automate-the-creation-of-a-stack-for-each-folder", + "automate-writing-data-from-influxdb-oss-to-influxdb-cloud", + "automated-setup-with-docker", + "automatic-dbrp-mapping", + "automatically-configure-telegraf", + "automatically-create-dbrp-mappings-on-write", + "automatically-downsampling-a-database-with-backreferencing", + "automatically-downsampling-data", + "automatically-downsampling-data-and-configuring-cq-time-boundaries", + "automatically-downsampling-data-into-another-retention-policy", + "automatically-refresh-a-dashboard", + "automating-queries-with-the-into-clause", + "auxiliary-fields", + "available-discoverers", + "available-event-handlers", + "available-nodes", + "available-operating-system-container-and-platform-support", + "available-profilers", + "available-roles", + "available-subcommands", + "available-user-groups", + "available-webhooks", + "available-with-influxdb-oss-2x-only", + "avatar-url", + "avatar_url", + "average-rate-of-change-per-window-of-time", + "avg", + "avg_exec_time_ns", + "avoid-bins-in-time-zone-discontinuities", + "avoid-bins-in-time-zone-discontinuities-1", + "avoid-data-loss", + "avoid-duplicate-names-for-tags-and-fields", + "avoid-encoding-data-in-measurements-and-keys", + "avoid-features-that-are-not-included-with-influxdb-cloud-dedicated", + "avoid-keywords-and-special-characters", + "avoid-keywords-and-special-characters-in-keys", + "avoid-name-collisions-with-multiple-subscriptions", + "avoid-over-partitioning", + "avoid-processing-filters-inline", + "avoid-putting-more-than-one-piece-of-information-in-one-tag", + "avoid-reserved-keywords-in-tag-and-field-keys", + "avoid-sending-duplicate-data", + "avoid-short-window-durations", + "avoid-sparse-schemas", + 
"avoid-storing-tokens-in-telegrafconf", + "avoid-the-same-name-for-a-tag-and-a-field", + "avoid-too-many-series", + "avoid-too-many-tags", + "avoid-using-apiv2query", + "avoid-using-apiv2query-1", + "avoid-wide-schemas", + "avro", + "aws", + "aws-access-key-id", + "aws-allow-http", + "aws-and-microsoft-regions", + "aws-athena-connection-strings", + "aws-default-region", + "aws-endpoint", + "aws-lambda-via-cloudformation-template", + "aws-recommendations", + "aws-secret-access-key", + "aws-session-token", + "aws-skip-signature", + "aws_ec2", + "awsalarms", + "azure-storage-access-key", + "azure-storage-account", + "azure_data_explorer", + "azure_monitor", + "azure_storage_queue", + "b", + "b-configure-new-profile", + "back-up-a-database", + "back-up-a-database-and-all-retention-policies", + "back-up-a-database-from-a-remote-influxdb-instance", + "back-up-a-database-with-a-specific-retention-policy", + "back-up-a-specific-bucket-to-a-directory", + "back-up-a-specific-database", + "back-up-a-specific-retention-policy", + "back-up-a-specific-retention-policy-legacy", + "back-up-a-specific-shard", + "back-up-a-specific-shard-legacy", + "back-up-a-specific-time-range", + "back-up-a-specific-time-range-legacy", + "back-up-all-data-from-a-remote-influxdb-instance", + "back-up-all-data-to-a-directory", + "back-up-all-data-to-the-current-working-directory", + "back-up-all-databases", + "back-up-and-restore", + "back-up-and-restore-between-influxdb-enterprise-and-oss", + "back-up-and-restore-data", + "back-up-and-restore-your-cluster", + "back-up-data", + "back-up-data-from-a-specific-time-range", + "back-up-data-from-a-specific-time-to-now", + "back-up-data-from-a-specific-time-to-now-legacy", + "back-up-data-with-the-influx-cli", + "back-up-meta-data-only", + "back-up-storage", + "back-up-your-cluster", + "backfill", + "backfilling-results-for-older-data", + "background-information", + "backup", + "backup-a-specific-shard", + "backup-after-compaction", + 
"backup-and-restore", + "backup-and-restore-performance", + "backup-and-restore-requirements", + "backup-and-restore-utilities", + "backup-configuration-and-data", + "backup-data-from-a-specific-time-range", + "backup-enhancements", + "backup-examples", + "backup-flags", + "backup-formats", + "backup-on-write", + "backup-processes", + "backup-strategies", + "backup-utility", + "backups", + "backward-compatible-flux-template-variables", + "backwards-compatibility", + "bad-keys-schema", + "bad-measurements-schema", + "bad-tags-schema", + "balance-time-range-and-data-precision", + "balancer", + "band", + "band-behavior", + "band-controls", + "bar-graph", + "bar-graph-controls", + "bar-graph-example", + "barrier", + "barrier-idle-time-and-window-period", + "basic-aggregate-query", + "basic-arithmetic", + "basic-auth", + "basic-authentication", + "basic-authentication-scheme", + "basic-calculations-within-a-query", + "basic-examples", + "basic-flux-query", + "basic-flux-syntax", + "basic-function-examples", + "basic-group-by-time-syntax", + "basic-matching", + "basic-mathematic-operations", + "basic-parsing", + "basic-query-examples", + "basic-query-structure", + "basic-regex-comparison-syntax", + "basic-selector-query", + "basic-syntax", + "basic-syntax-1", + "basic-syntax-10", + "basic-syntax-11", + "basic-syntax-12", + "basic-syntax-13", + "basic-syntax-14", + "basic-syntax-15", + "basic-syntax-16", + "basic-syntax-17", + "basic-syntax-18", + "basic-syntax-19", + "basic-syntax-2", + "basic-syntax-20", + "basic-syntax-21", + "basic-syntax-22", + "basic-syntax-23", + "basic-syntax-24", + "basic-syntax-25", + "basic-syntax-26", + "basic-syntax-27", + "basic-syntax-28", + "basic-syntax-29", + "basic-syntax-3", + "basic-syntax-30", + "basic-syntax-31", + "basic-syntax-4", + "basic-syntax-5", + "basic-syntax-6", + "basic-syntax-7", + "basic-syntax-8", + "basic-syntax-9", + "basic-telegraf-usage", + "basic-testcase-for-addition", + "basic-transformation", + "basic-types", + 
"basicstats", + "batch", + "batch-1", + "batch-field-processing-with-multi-node-selection", + "batch-format", + "batch-incoming-data", + "batch-pending", + "batch-pending--10", + "batch-pending--10-1", + "batch-pending--10-2", + "batch-pending--5", + "batch-pending-1", + "batch-pending-2", + "batch-pending-3", + "batch-size", + "batch-size--1000", + "batch-size--5000", + "batch-size--5000-1", + "batch-size--5000-2", + "batch-size-1", + "batch-size-2", + "batch-size-3", + "batch-size-4", + "batch-size-is-too-large", + "batch-timeout", + "batch-timeout--10s", + "batch-timeout--1s", + "batch-timeout--1s-1", + "batch-timeout--1s-2", + "batch-timeout-1", + "batch-timeout-2", + "batch-timeout-3", + "batch-writes", + "batch-writes-for-optimal-performance", + "batch-writing", + "batches-work-but-streams-do-not-why", + "batchsize", + "bcache", + "bcrypt", + "beanstalkd", + "beat", + "before-upgrading-to-influxdb-111", + "before-you-begin", + "before-you-migrate", + "before-you-start", + "behavior-of-processors-and-aggregators-when-used-together", + "best-practices", + "best-practices-for-writing-data", + "beta-feedback-channels", + "between", + "between-operator-in-the-where-clause", + "beware-of-infinite-loops", + "bigbluebutton", + "bigpanda", + "bigpanda-package", + "bigquery", + "bigquery-authentication-parameters", + "bigquery-credential-url-parameter", + "bigquery-credential-url-parameter-1", + "bigquery-data-source-name", + "bigtable-package", + "billing-and-usage", + "billing-and-usage-1", + "billing-cycle", + "bin", + "bin-helper-functions", + "binary", + "binary-string-functions", + "bind", + "bind-a-parameter-in-the-where-clause-to-a-numerical-field-value", + "bind-a-parameter-in-the-where-clause-to-specific-tag-value", + "bind-address", + "bind-address--1270018088", + "bind-address--2003", + "bind-address--25826", + "bind-address--4242", + "bind-address--8086", + "bind-address--8089", + "bind-address-1", + "bind-parameters", + "bind-socket", + 
"bind-socket--varruninfluxdbsock", + "bind-to-a-remote-meta-node", + "bind-two-parameters-in-the-where-clause-to-a-specific-tag-value-and-numerical-field-value", + "bins", + "bird-migration-sample-data", + "bit_and", + "bit_length", + "bit_or", + "bit_xor", + "bitcoin-price-data", + "bitcoin-sample-data", + "bitwise-and", + "bitwise-exclusive-or", + "bitwise-not", + "bitwise-operators", + "bitwise-or", + "bitwise-package", + "bitwise-shift-left", + "bitwise-shift-right", + "bitwise-xor", + "blacklist-http-get-or-post-operations-from-specific-cidrs", + "block", + "block-types", + "blocks", + "body", + "boilerplate-for-the-influxdb-go-client-library", + "bolt-path", + "boltdb-and-mmap-btrees", + "boltdb-statistics", + "bond", + "bonitoo-io-package", + "book", + "bool", + "bool-type-handling", + "bool_and", + "bool_or", + "boolean", + "boolean-field-value-examples", + "boolean-literals", + "boolean-package", + "boolean-syntax", + "boolean-syntax-for-writes-and-queries", + "boolean-types", + "booleans", + "bootstrap-your-application", + "boringcrypto-cryptography-library", + "bottom", + "bottom-and-a-tag-key-with-fewer-than-n-tag-values", + "bottom-tags-and-the-into-clause", + "bottom-with-a-group-by-time-clause", + "boundaries-package", + "box", + "bracket-notation", + "breaking-change", + "breaking-changes", + "breaking-changes-1", + "breaking-changes-10", + "breaking-changes-11", + "breaking-changes-12", + "breaking-changes-13", + "breaking-changes-14", + "breaking-changes-15", + "breaking-changes-16", + "breaking-changes-17", + "breaking-changes-18", + "breaking-changes-19", + "breaking-changes-2", + "breaking-changes-20", + "breaking-changes-21", + "breaking-changes-22", + "breaking-changes-23", + "breaking-changes-24", + "breaking-changes-25", + "breaking-changes-26", + "breaking-changes-27", + "breaking-changes-28", + "breaking-changes-29", + "breaking-changes-3", + "breaking-changes-4", + "breaking-changes-5", + "breaking-changes-6", + "breaking-changes-7", + 
"breaking-changes-8", + "breaking-changes-9", + "broker", + "brokers", + "browse-plugin-examples", + "browser-support", + "bsddarwin", + "btrim", + "buck_hash_sys_bytes", + "bucket", + "bucket-limits", + "bucket-management-with-influxql-not-supported", + "bucket-measurement-number", + "bucket-name-syntax", + "bucket-naming-examples", + "bucket-naming-restrictions", + "bucket-not-found", + "bucket-retention-period", + "bucket-schema", + "bucket-series-number", + "bucket-service-new-call-total", + "bucket-service-new-duration", + "bucket-service-new-error-total", + "bucketid", + "buckets", + "buckets-api", + "buckets-total", + "buffer", + "buffer-mem-limit-mb", + "bug-fixes", + "bug-fixes-1", + "bug-fixes-10", + "bug-fixes-100", + "bug-fixes-101", + "bug-fixes-102", + "bug-fixes-103", + "bug-fixes-104", + "bug-fixes-105", + "bug-fixes-106", + "bug-fixes-107", + "bug-fixes-108", + "bug-fixes-109", + "bug-fixes-11", + "bug-fixes-110", + "bug-fixes-111", + "bug-fixes-112", + "bug-fixes-113", + "bug-fixes-114", + "bug-fixes-115", + "bug-fixes-116", + "bug-fixes-117", + "bug-fixes-118", + "bug-fixes-119", + "bug-fixes-12", + "bug-fixes-120", + "bug-fixes-121", + "bug-fixes-122", + "bug-fixes-123", + "bug-fixes-124", + "bug-fixes-125", + "bug-fixes-126", + "bug-fixes-127", + "bug-fixes-128", + "bug-fixes-129", + "bug-fixes-13", + "bug-fixes-130", + "bug-fixes-131", + "bug-fixes-132", + "bug-fixes-133", + "bug-fixes-134", + "bug-fixes-135", + "bug-fixes-136", + "bug-fixes-137", + "bug-fixes-138", + "bug-fixes-139", + "bug-fixes-14", + "bug-fixes-140", + "bug-fixes-141", + "bug-fixes-142", + "bug-fixes-143", + "bug-fixes-144", + "bug-fixes-145", + "bug-fixes-146", + "bug-fixes-147", + "bug-fixes-148", + "bug-fixes-149", + "bug-fixes-15", + "bug-fixes-150", + "bug-fixes-151", + "bug-fixes-152", + "bug-fixes-153", + "bug-fixes-154", + "bug-fixes-155", + "bug-fixes-156", + "bug-fixes-157", + "bug-fixes-158", + "bug-fixes-159", + "bug-fixes-16", + "bug-fixes-160", + 
"bug-fixes-161", + "bug-fixes-162", + "bug-fixes-163", + "bug-fixes-164", + "bug-fixes-165", + "bug-fixes-166", + "bug-fixes-167", + "bug-fixes-168", + "bug-fixes-169", + "bug-fixes-17", + "bug-fixes-170", + "bug-fixes-171", + "bug-fixes-172", + "bug-fixes-173", + "bug-fixes-174", + "bug-fixes-175", + "bug-fixes-176", + "bug-fixes-177", + "bug-fixes-178", + "bug-fixes-179", + "bug-fixes-18", + "bug-fixes-180", + "bug-fixes-181", + "bug-fixes-182", + "bug-fixes-183", + "bug-fixes-184", + "bug-fixes-185", + "bug-fixes-186", + "bug-fixes-187", + "bug-fixes-188", + "bug-fixes-189", + "bug-fixes-19", + "bug-fixes-190", + "bug-fixes-191", + "bug-fixes-192", + "bug-fixes-193", + "bug-fixes-194", + "bug-fixes-195", + "bug-fixes-196", + "bug-fixes-197", + "bug-fixes-198", + "bug-fixes-199", + "bug-fixes-2", + "bug-fixes-20", + "bug-fixes-200", + "bug-fixes-201", + "bug-fixes-202", + "bug-fixes-203", + "bug-fixes-204", + "bug-fixes-205", + "bug-fixes-206", + "bug-fixes-207", + "bug-fixes-208", + "bug-fixes-209", + "bug-fixes-21", + "bug-fixes-210", + "bug-fixes-211", + "bug-fixes-212", + "bug-fixes-213", + "bug-fixes-214", + "bug-fixes-215", + "bug-fixes-216", + "bug-fixes-217", + "bug-fixes-218", + "bug-fixes-219", + "bug-fixes-22", + "bug-fixes-220", + "bug-fixes-221", + "bug-fixes-222", + "bug-fixes-223", + "bug-fixes-224", + "bug-fixes-225", + "bug-fixes-226", + "bug-fixes-227", + "bug-fixes-228", + "bug-fixes-229", + "bug-fixes-23", + "bug-fixes-230", + "bug-fixes-231", + "bug-fixes-232", + "bug-fixes-233", + "bug-fixes-24", + "bug-fixes-25", + "bug-fixes-26", + "bug-fixes-27", + "bug-fixes-28", + "bug-fixes-29", + "bug-fixes-3", + "bug-fixes-30", + "bug-fixes-31", + "bug-fixes-32", + "bug-fixes-33", + "bug-fixes-34", + "bug-fixes-35", + "bug-fixes-36", + "bug-fixes-37", + "bug-fixes-38", + "bug-fixes-39", + "bug-fixes-4", + "bug-fixes-40", + "bug-fixes-41", + "bug-fixes-42", + "bug-fixes-43", + "bug-fixes-44", + "bug-fixes-45", + "bug-fixes-46", + "bug-fixes-47", + 
"bug-fixes-48", + "bug-fixes-49", + "bug-fixes-5", + "bug-fixes-50", + "bug-fixes-51", + "bug-fixes-52", + "bug-fixes-53", + "bug-fixes-54", + "bug-fixes-55", + "bug-fixes-56", + "bug-fixes-57", + "bug-fixes-58", + "bug-fixes-59", + "bug-fixes-6", + "bug-fixes-60", + "bug-fixes-61", + "bug-fixes-62", + "bug-fixes-63", + "bug-fixes-64", + "bug-fixes-65", + "bug-fixes-66", + "bug-fixes-67", + "bug-fixes-68", + "bug-fixes-69", + "bug-fixes-7", + "bug-fixes-70", + "bug-fixes-71", + "bug-fixes-72", + "bug-fixes-73", + "bug-fixes-74", + "bug-fixes-75", + "bug-fixes-76", + "bug-fixes-77", + "bug-fixes-78", + "bug-fixes-79", + "bug-fixes-8", + "bug-fixes-80", + "bug-fixes-81", + "bug-fixes-82", + "bug-fixes-83", + "bug-fixes-84", + "bug-fixes-85", + "bug-fixes-86", + "bug-fixes-87", + "bug-fixes-88", + "bug-fixes-89", + "bug-fixes-9", + "bug-fixes-90", + "bug-fixes-91", + "bug-fixes-92", + "bug-fixes-93", + "bug-fixes-94", + "bug-fixes-95", + "bug-fixes-96", + "bug-fixes-97", + "bug-fixes-98", + "bug-fixes-99", + "bug-reports-and-feedback", + "bugfixes", + "bugfixes-1", + "bugfixes-10", + "bugfixes-11", + "bugfixes-12", + "bugfixes-13", + "bugfixes-14", + "bugfixes-15", + "bugfixes-16", + "bugfixes-17", + "bugfixes-2", + "bugfixes-3", + "bugfixes-4", + "bugfixes-5", + "bugfixes-6", + "bugfixes-7", + "bugfixes-8", + "bugfixes-9", + "build", + "build-&-deploy", + "build-a-dashboard", + "build-an-arbitrary-table", + "build-maintenance", + "build-the-api", + "build-the-custom-builder-tool", + "build-the-repl", + "build-visualizations-with-grafana", + "build-visualizations-with-superset", + "build-visualizations-with-tableau", + "building-a-counter", + "buildtsi", + "built-in-functions", + "built-in-iterators", + "bulk-delete-api-tokens", + "bundle-multiple-certificates", + "burrow", + "business-continuity-and-disaster-recovery", + "bymeasurement", + "bypass-your-identity-provider", + "bytes", + "bytes-and-compression", + "bytes-syntax", + "bytes-types", + "bytesread", + 
"bytesread-1", + "bytesrx", + "byteswritten", + "byteswritten-1", + "c", + "c-1", + "c-net", + "c-net-flight-client", + "ca-certs", + "ca-certs--", + "cache", + "cache-computation-results", + "cache-data-loading", + "cache-disk-bytes", + "cache-functions", + "cache-in-use-bytes", + "cache-latest-snapshot", + "cache-max-memory-size", + "cache-max-memory-size--1g", + "cache-snapshot-memory-size", + "cache-snapshot-memory-size--25m", + "cache-snapshot-write-cold-duration", + "cache-snapshot-write-cold-duration--10m", + "cache-subsystem-metrics", + "cache-writes-failed", + "cache-writes-total", + "cache-writes-with-dropped-points", + "cacheagems", + "cachecompactionduration", + "cachecompactionerr", + "cachecompactions", + "cachecompactionsactive", + "cachedbytes", + "caching", + "calculate-a-five-year-moving-average-every-year", + "calculate-a-new-column", + "calculate-a-percentage", + "calculate-a-percentage-from-two-fields", + "calculate-a-percentage-using-aggregate-functions", + "calculate-a-three-point-double-exponential-moving-average", + "calculate-a-three-point-exponential-moving-average", + "calculate-a-three-point-exponential-moving-average-with-null-values", + "calculate-a-three-point-moving-average", + "calculate-a-three-point-moving-average-with-null-values", + "calculate-a-three-point-relative-strength-index", + "calculate-a-three-point-triple-exponential-moving-average", + "calculate-a-two-point-triple-exponential-derivative", + "calculate-a-weekly-mean", + "calculate-and-create-a-new-table", + "calculate-arctangents-of-mean-values", + "calculate-changes-between-normalized-counter-values", + "calculate-field-values-associated-with-a-field-key-to-the-power-of-4", + "calculate-field-values-associated-with-a-field-key-to-the-power-of-4-and-include-several-clauses", + "calculate-field-values-associated-with-each-field-key-in-a-measurement-to-the-power-of-4", + "calculate-kaufmans-adaptive-moving-average-for-input-data", + 
"calculate-mean-values-rounded-down-to-the-nearest-integer", + "calculate-mean-values-rounded-to-the-nearest-integer", + "calculate-mean-values-rounded-up-to-the-nearest-integer", + "calculate-mean-values-to-the-power-of-4", + "calculate-multiple-quantiles-from-prometheus-histograms", + "calculate-n-to-the-p-power-with-default-parameters", + "calculate-percentages", + "calculate-percentages-in-a-query", + "calculate-percentages-using-multiple-data-sources", + "calculate-percentages-using-multiple-fields", + "calculate-percentages-using-multiple-measurements", + "calculate-quantile-values-from-prometheus-histograms", + "calculate-rates-across-joined-series--backfill", + "calculate-several-mean-values-and-place-a-condition-on-those-mean-values", + "calculate-speed", + "calculate-the-absolute-values-of-field-values-associated-with-a-field-key", + "calculate-the-absolute-values-of-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-absolute-values-of-field-values-associated-with-each-field-key-in-a-measurement", + "calculate-the-absolute-values-of-mean-values", + "calculate-the-arccosine-of-field-values-associated-with-a-field-key", + "calculate-the-arccosine-of-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-arccosine-of-field-values-associated-with-each-field-key-in-a-measurement", + "calculate-the-arccosine-of-mean-values", + "calculate-the-arcsine-of-field-values-associated-with-a-field-key", + "calculate-the-arcsine-of-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-arcsine-of-field-values-associated-with-each-field-key-in-a-measurement", + "calculate-the-arcsine-of-mean-values", + "calculate-the-arctangent-of-field-values-associated-with-a-field-key", + "calculate-the-arctangent-of-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-arctangent-of-field-values-associated-with-each-field-key-in-a-measurement", + 
"calculate-the-arctangent-of-field_key_b-over-field_key_a", + "calculate-the-arctangent-of-field_key_y-over-field_key_x", + "calculate-the-arctangent-of-mean-values", + "calculate-the-arctangent-of-values-associated-with-each-field-key-in-a-measurement-divided-by-field_key_a", + "calculate-the-arctangent-of-values-associated-with-each-field-key-in-a-measurement-divided-by-field_key_x", + "calculate-the-arctangents-of-field-values-and-include-several-clauses", + "calculate-the-average-difference-between-two-fields", + "calculate-the-average-of-all-values", + "calculate-the-average-percentage-of-total-weight-per-variety-each-hour", + "calculate-the-average-rate-of-change-in-data", + "calculate-the-average-rate-of-change-in-specified-time-windows", + "calculate-the-average-temperature-for-each-room", + "calculate-the-average-value-of-input-tables", + "calculate-the-ceiling-of-field-values-associated-with-a-field-key", + "calculate-the-ceiling-of-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-ceiling-of-field-values-associated-with-each-field-key-in-a-measurement", + "calculate-the-cosine-of-field-values-associated-with-a-field-key", + "calculate-the-cosine-of-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-cosine-of-field-values-associated-with-each-field-key-in-a-measurement", + "calculate-the-cosine-of-mean-values", + "calculate-the-covariance-between-two-columns", + "calculate-the-cumulative-sum-of-mean-values", + "calculate-the-cumulative-sum-of-the-field-values-associated-with-a-field-key", + "calculate-the-cumulative-sum-of-the-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-cumulative-sum-of-the-field-values-associated-with-each-field-key-in-a-measurement", + "calculate-the-cumulative-sum-of-the-field-values-associated-with-each-field-key-that-matches-a-regular-expression", + 
"calculate-the-derivative-between-the-field-values-associated-with-a-field-key", + "calculate-the-derivative-between-the-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-derivative-between-the-field-values-associated-with-a-field-key-and-specify-the-unit-option", + "calculate-the-derivative-between-the-field-values-associated-with-each-field-key-in-a-measurement-and-specify-the-unit-option", + "calculate-the-derivative-between-the-field-values-associated-with-each-field-key-that-matches-a-regular-expression-and-specify-the-unit-option", + "calculate-the-derivative-of-mean-values", + "calculate-the-derivative-of-mean-values-and-specify-the-unit-option", + "calculate-the-difference-between-maximum-values", + "calculate-the-difference-between-subsequent-values", + "calculate-the-difference-between-subsequent-values-with-null-values", + "calculate-the-difference-between-the-field-values-associated-with-a-field-key", + "calculate-the-difference-between-the-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-difference-between-the-field-values-associated-with-each-field-key-in-a-measurement", + "calculate-the-difference-between-the-field-values-associated-with-each-field-key-that-matches-a-regular-expression", + "calculate-the-duration-between-two-timestamps", + "calculate-the-duration-of-states", + "calculate-the-elapsed-time-between-field-values-associated-with-a-field-key", + "calculate-the-elapsed-time-between-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-elapsed-time-between-field-values-associated-with-a-field-key-and-specify-the-unit-option", + "calculate-the-elapsed-time-between-field-values-associated-with-each-field-key-in-a-measurement-and-specify-the-unit-option", + "calculate-the-elapsed-time-between-field-values-associated-with-each-field-key-that-matches-a-regular-expression-and-specify-the-unit-option", + 
"calculate-the-exponential-of-field-values-associated-with-a-field-key", + "calculate-the-exponential-of-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-exponential-of-field-values-associated-with-each-field-key-in-a-measurement", + "calculate-the-exponential-of-mean-values", + "calculate-the-floor-of-field-values-associated-with-a-field-key", + "calculate-the-floor-of-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-floor-of-field-values-associated-with-each-field-key-in-a-measurement", + "calculate-the-integral", + "calculate-the-integral-for-the-field-values-associated-with-a-field-key", + "calculate-the-integral-for-the-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-integral-for-the-field-values-associated-with-a-field-key-and-specify-the-unit-option", + "calculate-the-integral-for-the-field-values-associated-with-each-field-key-in-a-measurement-and-specify-the-unit-option", + "calculate-the-integral-for-the-field-values-associated-with-each-field-key-that-matches-a-regular-expression-and-specify-the-unit-option", + "calculate-the-integral-with-linear-interpolation", + "calculate-the-kama-of-input-tables", + "calculate-the-logarithm-base-10-of-field-values-associated-with-a-field-key", + "calculate-the-logarithm-base-10-of-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-logarithm-base-10-of-field-values-associated-with-each-field-key-in-a-measurement", + "calculate-the-logarithm-base-10-of-mean-values", + "calculate-the-logarithm-base-2-of-field-values-associated-with-a-field-key", + "calculate-the-logarithm-base-2-of-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-logarithm-base-2-of-field-values-associated-with-each-field-key-in-a-measurement", + "calculate-the-logarithm-base-2-of-mean-values", + 
"calculate-the-logarithm-base-4-of-field-values-associated-with-a-field-key", + "calculate-the-logarithm-base-4-of-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-logarithm-base-4-of-field-values-associated-with-each-field-key-in-a-measurement", + "calculate-the-logarithm-base-4-of-mean-values", + "calculate-the-mean-and-median-field-values-in-one-query", + "calculate-the-mean-difference-between-two-fields", + "calculate-the-mean-field-value-associated-with-a-field-key", + "calculate-the-mean-field-value-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-mean-field-value-associated-with-each-field-key-in-a-measurement", + "calculate-the-mean-field-value-associated-with-each-field-key-that-matches-a-regular-expression", + "calculate-the-mean-value-of-a-field", + "calculate-the-mean-value-of-a-field-within-time-windows-grouped-by-time", + "calculate-the-mean-value-of-each-field", + "calculate-the-mean-value-of-fields-where-the-field-key-matches-a-regular-expression", + "calculate-the-median-field-value-associated-with-a-field-key", + "calculate-the-median-field-value-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-median-field-value-associated-with-each-field-key-in-a-measurement", + "calculate-the-median-field-value-associated-with-each-field-key-that-matches-a-regular-expression", + "calculate-the-median-value-of-a-field", + "calculate-the-median-value-of-a-field-within-time-windows-grouped-by-time", + "calculate-the-median-value-of-each-field", + "calculate-the-median-value-of-fields-where-the-field-key-matches-a-regular-expression", + "calculate-the-minimum-and-maximum-field-values-in-one-query", + "calculate-the-mode-a-field-within-time-windows-grouped-by-time", + "calculate-the-mode-field-value-associated-with-a-field-key", + "calculate-the-mode-field-value-associated-with-a-field-key-and-include-several-clauses", + 
"calculate-the-mode-field-value-associated-with-each-field-key-in-a-measurement", + "calculate-the-mode-field-value-associated-with-each-field-key-that-matches-a-regular-expression", + "calculate-the-mode-of-field-keys-that-match-a-regular-expression", + "calculate-the-mode-of-two-fields-in-one-query", + "calculate-the-mode-value-of-a-field", + "calculate-the-mode-value-of-each-field", + "calculate-the-moving-average-of-maximum-values", + "calculate-the-moving-average-of-the-field-values-associated-with-a-field-key", + "calculate-the-moving-average-of-the-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-moving-average-of-the-field-values-associated-with-each-field-key-in-a-measurement", + "calculate-the-moving-average-of-the-field-values-associated-with-each-field-key-that-matches-a-regular-expression", + "calculate-the-natural-logarithm-of-field-values-associated-with-a-field-key", + "calculate-the-natural-logarithm-of-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-natural-logarithm-of-field-values-associated-with-each-field-key-in-a-measurement", + "calculate-the-natural-logarithm-of-mean-values", + "calculate-the-non-negative-difference-between-subsequent-values", + "calculate-the-non-negative-rate-of-change-per-second", + "calculate-the-overall-average-temperature-of-all-rooms", + "calculate-the-percentage-of-total-weight-per-apple-variety", + "calculate-the-rate-of-change-in-gauge-values", + "calculate-the-rate-of-change-in-normalized-counter-values", + "calculate-the-rate-of-change-per-second-with-null-values", + "calculate-the-running-total-of-values", + "calculate-the-sine-of-field-values-associated-with-a-field-key", + "calculate-the-sine-of-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-sine-of-field-values-associated-with-each-field-key-in-a-measurement", + "calculate-the-sine-of-mean-values", + 
"calculate-the-spread-for-the-field-values-associated-with-a-field-key", + "calculate-the-spread-for-the-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-spread-for-the-field-values-associated-with-each-field-key-in-a-measurement", + "calculate-the-spread-for-the-field-values-associated-with-each-field-key-that-matches-a-regular-expression", + "calculate-the-spread-of-a-field", + "calculate-the-spread-of-a-field-within-time-windows-grouped-by-time", + "calculate-the-spread-of-each-field", + "calculate-the-spread-of-field-keys-that-match-a-regular-expression", + "calculate-the-square-root-of-field-values-associated-with-a-field-key", + "calculate-the-square-root-of-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-square-root-of-field-values-associated-with-each-field-key-in-a-measurement", + "calculate-the-square-root-of-mean-values", + "calculate-the-standard-deviation-for-the-field-values-associated-with-a-field-key", + "calculate-the-standard-deviation-for-the-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-standard-deviation-for-the-field-values-associated-with-each-field-key-in-a-measurement", + "calculate-the-standard-deviation-for-the-field-values-associated-with-each-field-key-that-matches-a-regular-expression", + "calculate-the-standard-deviation-of-a-field", + "calculate-the-standard-deviation-of-a-field-within-time-windows-grouped-by-time", + "calculate-the-standard-deviation-of-each-field", + "calculate-the-standard-deviation-of-fields-where-the-field-key-matches-a-regular-expression", + "calculate-the-sum-of-several-derivative-values", + "calculate-the-sum-of-several-max-values", + "calculate-the-sum-of-the-field-values-associated-with-a-field-key", + "calculate-the-sum-of-the-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-sum-of-the-field-values-associated-with-each-field-key-in-a-measurement", + 
"calculate-the-sum-of-the-field-values-associated-with-each-field-key-that-matches-a-regular-expression", + "calculate-the-sum-of-values-for-fields-where-the-field-key-matches-a-regular-expression", + "calculate-the-sum-of-values-in-a-field", + "calculate-the-sum-of-values-in-a-field-within-time-windows-grouped-by-time", + "calculate-the-sum-of-values-in-each-field", + "calculate-the-tangent-of-field-values-associated-with-a-field-key", + "calculate-the-tangent-of-field-values-associated-with-a-field-key-and-include-several-clauses", + "calculate-the-tangent-of-field-values-associated-with-each-field-key-in-a-measurement", + "calculate-the-tangent-of-mean-values", + "calculate-the-time-between-points-in-seconds", + "calculate-the-time-weighted-average-of-values", + "calculate-time-in-state", + "calculate-time-weighted-average", + "calculate-value-between-events", + "calculating-a-percentage-in-a-query", + "calculating-a-percentage-using-aggregate-functions", + "calculating-aggregate-percentage-per-variety", + "calculating-percentage-of-total-weight-per-apple-variety", + "calendar-months-and-years", + "call-expressions", + "call-iterators", + "can-i-change-a-fields-data-type", + "can-i-delete-a-field", + "can-i-delete-a-measurement", + "can-i-delete-multiple-measurements-at-the-same-time", + "can-i-identify-write-precision-from-returned-timestamps", + "can-i-perform-mathematical-operations-against-timestamps", + "can-i-use-influxdb-with-authentication-disabled", + "can-you-change-the-permission-level-of-members-in-your-organization", + "cancel-an-in-progress-cache-warm-operation", + "cancel-service", + "cancel-your-influxdb-cloud-serverless-subscription", + "cancel-your-influxdb-cloud-subscription", + "cannot-be-undone", + "cannot-delete-data-by-field", + "cannot-group-by-fields", + "cannot-include-both-aggregate-and-non-aggregate-field-expressions", + "cannot-join-on-an-empty-table", + "cannot-query-multiple-time-ranges", + "cannot-restore-to-an-existing-database", 
+ "cannot-restore-to-existing-buckets", + "cannot-use-multiple-select-statements-in-a-subquery", + "cannot-use-parameters-for-durations", + "captureresponse", + "carbon2", + "cardinality", + "cardinality-metaqueries", + "cast-a-float-field-to-an-integer", + "cast-a-float-field-to-an-unsigned-integer", + "cast-an-integer-field-to-a-float", + "cast-boolean-field-values-to-integers", + "cast-booleans-to-integers", + "cast-float-field-values-to-integers", + "cast-float-field-values-to-strings-this-functionality-is-not-supported", + "cast-operations", + "cast-to-a-boolean-type", + "cast-to-a-float", + "cast-to-a-string-type", + "cast-to-a-timestamp-type", + "cast-to-an-integer", + "cast-to-an-unsigned-integer", + "cast-to-numeric-types", + "cast-unix-nanosecond-timestamps-to-a-timestamp-type", + "cast-values-to-different-types", + "catalog", + "catalog-op-latency-p90", + "catalog-operations-overview", + "catalog-ops---error", + "catalog-ops---success", + "catalog-scaling-strategies", + "catalog-service", + "catalog-store", + "catch-an-explicit-error", + "category", + "cause", + "cause-1", + "cause-2", + "cause-b1", + "cause-b2", + "cause-e1", + "cause-e2", + "cause-e3", + "cause-e4", + "causes", + "caveats", + "caveats-and-known-issues", + "cbrt", + "ceil", + "cells-can-only-be-cloned-to-the-current-dashboard", + "center", + "ceph", + "certificate--etcsslinfluxdbpem", + "cgroup", + "chaining-methods", + "chaining-methods-1", + "chaining-topics", + "chande_momentum_oscillator", + "change-a-users-password", + "change-endpoint-details", + "change-kapacitor-logging-settings", + "change-over-internal-api-to-use-message-passing-semantics", + "change-password-hashing-algorithm", + "change-the-name-of-the-value-column", + "change-the-name-of-the-value-column-and-apply-a-selector-function", + "change-the-name-of-the-value-column-and-apply-an-aggregate-function", + "change-the-name-of-window-and-then-aggregate-the-value-column", + 
"change-the-values-reported-for-intervals-with-no-data", + "change-your-password", + "change-your-password-using-the-influx-cli", + "changedetect", + "changes", + "changes-1", + "changes-10", + "changes-11", + "changes-12", + "changes-13", + "changes-14", + "changes-15", + "changes-16", + "changes-17", + "changes-18", + "changes-19", + "changes-2", + "changes-20", + "changes-21", + "changes-22", + "changes-23", + "changes-3", + "changes-4", + "changes-5", + "changes-6", + "changes-7", + "changes-8", + "changes-9", + "changes-to-the-ceph-plugin", + "changes-to-the-windows-ping-plugin", + "channel", + "channelurl", + "char_length", + "character_length", + "characters", + "chars", + "chat-id", + "chat_id", + "check", + "check-configuration", + "check-deployment-status", + "check-file-permissions", + "check-for-resource-dependencies", + "check-if-a-column-value-is-null", + "check-if-a-statically-defined-record-contains-a-key", + "check-if-a-value-exists-in-an-array", + "check-if-a-value-is-a-nan-float-value", + "check-if-there-is-a-difference-between-streams", + "check-influxdb-logs", + "check-interval", + "check-interval--10m", + "check-interval--30m0s", + "check-interval-1", + "check-interval-2", + "check-query", + "check-schema", + "check-status", + "check-the-http-response-in-your-logs", + "check-the-network-connection-between-nodes", + "check-the-type-of-a-value-inside-of-a-dynamic-type", + "check-types", + "check-your-influxdb-user-permissions", + "checking-current-udpip-buffer-limits", + "checking-status", + "checkname", + "checkpoint-releases", + "chobbs-package", + "choose-a-plugin-strategy", + "choose-sample-data", + "choose-the-influxdata-key-pair-for-your-os-version", + "choose-the-query-method-for-your-workload", + "choose-the-right-deployment-tool-for-your-environment", + "choose-the-write-endpoint-for-your-workload", + "choose-your-plugin-type", + "choose-your-visualization-type", + "chr", + "chronoctl", + "chronograf", + 
"chronograf---chronograf-server", + "chronograf-accessed-resources", + "chronograf-configuration-options", + "chronograf-frequently-asked-questions-faqs", + "chronograf-owned-resources", + "chronograf-release-notes", + "chronograf-server-flags", + "chronograf-service-options", + "chronograf-setup", + "chronograf-v18", + "chronografs-data-explorer", + "chrony", + "chunks", + "ciphers", + "ciphers---tls_ecdhe_ecdsa_with_chacha20_poly1305-tls_ecdhe_rsa_with_aes_128_gcm_sha256-", + "circle", + "circular-rewrites", + "cisco_telemetry_gnmi", + "cisco_telemetry_mdt", + "clarify", + "class", + "class-flightsqlclient", + "class-influxdb3client", + "class-influxdbclient3", + "class-point", + "class-writeoptions", + "classes", + "clauses", + "clear", + "cli", + "cli-connection-configurations-recommended", + "cli-example", + "cli-example-1", + "cli-example-2", + "cli-example-3", + "cli-example-4", + "cli-example-5", + "cli-example-6", + "cli-examples", + "cli-onboarding", + "cli-updates", + "clickhouse", + "clickhouse-package", + "client", + "client-id", + "client-libraries", + "client-libraries-for-influxdb-2x-and-18", + "client-libraries-for-influxdb-3", + "client-support-for-parameterized-queries", + "clienterror", + "clientid", + "clienturl", + "clockface-v2-icons", + "clockface-v3-icons", + "clockface-v4-icons", + "clone", + "clone-a-check", + "clone-a-dashboard", + "clone-a-task", + "clone-a-telegraf-configuration", + "clone-a-token", + "clone-an-existing-notification-rule", + "clone-cells", + "clone-dashboards", + "clone-dashboards-and-cells", + "clone-the-downsampling-template-repository", + "cloud", + "cloud-infrastructure", + "cloud-urls", + "cloud_pubsub", + "cloud_pubsub_push", + "cloudwatch", + "cloudwatch_logs", + "cloudwatch_metric_streams", + "cluster", + "cluster-commands-result-in-timeout-without-error", + "cluster-enterprise-only", + "cluster-id", + "cluster-management", + "cluster-management-tools", + "cluster-metrics-in-a-table", + "cluster-profiling", + 
"cluster-section", + "cluster-settings", + "cluster-setup", + "cluster-sizing-recommendation", + "cluster-specific-bug-fixes", + "cluster-specific-bug-fixes-1", + "cluster-specific-bug-fixes-2", + "cluster-specific-bug-fixes-3", + "cluster-specific-bug-fixes-4", + "cluster-specific-bug-fixes-5", + "cluster-specific-bug-fixes-6", + "cluster-specific-bugfixes", + "cluster-specific-bugfixes-1", + "cluster-specific-features", + "cluster-specific-features-1", + "cluster-specific-features-2", + "cluster-tracing", + "clustered-specific-information", + "clustered-url-field", + "clustered-urls", + "clustering", + "co-monitoring-dashboard", + "coalesce", + "cockroachdb-data-source-name", + "cockroachdb-to-flux-data-type-conversion", + "code", + "code-representation", + "codefield", + "coerce-data-types-to-avoid-rejected-point-errors", + "collaborate-with-other-users", + "collect", + "collect-compaction-information-for-the-table", + "collect-data-with-input-plugins", + "collect-kubernetes-metrics-with-telegraf", + "collect-partition-information-for-multiple-tables", + "collect-table-information", + "collectd", + "collectd-protocol-support-in-influxdb", + "collectd-section", + "collectd-section-1", + "collectd-settings", + "collected", + "collected-1", + "collection-interval", + "collection-jitter", + "collector", + "color", + "colorized-thresholds", + "column", + "column-data-types", + "column-formatted-results", + "column-labels-beginning-with-underscores", + "column-limit", + "column-limitations-when-deleting-data", + "column-settings", + "columnkey", + "columnname", + "columns", + "columns-explicitly-mapped-in-the-join-are-null", + "columns-with-the-underscore-prefix", + "combine", + "combine-filters-for-performance-improvement", + "combine-multiple-header-rows", + "command-aliases", + "command-flags", + "command-line-examples", + "command-line-flags", + "command-line-tools", + "command-line-tools-for-managing-entropy", + "command-not-supported", + "command-output", + 
"command-output-1", + "command-output-2", + "command-output-and-interactive-prompts", + "commands", + "comment", + "comments", + "commercial-license", + "commit-timeout", + "common--import-errors", + "common-bigquery-url-parameters", + "common-cache-operations", + "common-data-processing-tasks", + "common-issues", + "common-issues-with-advanced-syntax", + "common-issues-with-basic-syntax", + "common-issues-with-bottom", + "common-issues-with-count", + "common-issues-with-distinct", + "common-issues-with-elapsed", + "common-issues-with-fill", + "common-issues-with-functions", + "common-issues-with-holt_winters", + "common-issues-with-mathematical-operators", + "common-issues-with-percentile", + "common-issues-with-restore", + "common-issues-with-sample", + "common-issues-with-show-field-keys", + "common-issues-with-subqueries", + "common-issues-with-the-into-clause", + "common-issues-with-the-select-statement", + "common-issues-with-the-where-clause", + "common-issues-with-time-syntax", + "common-issues-with-top", + "common-log-format-clf", + "common-pitfalls", + "common-queries", + "common-tag-issues", + "common-variable-queries", + "community", + "community-templates-added-to-influxdb-ui", + "compact-a-series-file-offline", + "compact-full-write-cold-duration", + "compact-full-write-cold-duration--4h", + "compact-series-file", + "compact-series-file--false", + "compact-throughput", + "compact-throughput--48m", + "compact-throughput-burst", + "compact-throughput-burst--48m", + "compaction", + "compaction-cleanup-wait", + "compaction-cpu-utilization", + "compaction-gen2-duration", + "compaction-max-num-files-per-plan", + "compaction-memory-usage", + "compaction-multipliers", + "compaction-row-limit", + "compaction-tier-cpumem", + "compactions", + "compactions-active", + "compactions-failed", + "compactions-queued", + "compactions-since-startup", + "compactor", + "compactor-l0-file-counts-5m-bucket-width", + "compactor-scaling-strategies", + 
"companion-relational-sensor-data", + "comparable-constraint", + "compare-arrays", + "compare-dense_rank-rank-and-row_number-functions", + "compare-float-values", + "compare-influxdb-to-sql-databases", + "compare-integers", + "compare-month-over-month-values", + "compare-queries", + "compare-queries-1", + "compare-records", + "compare-schemas", + "compare-schemas-1", + "compare-the-lexicographical-order-of-column-values", + "compare-uintegers", + "compare-usage-metrics-to-organization-usage-limits", + "compared-to-similar-functions", + "comparing-measurements-and-creating-an-alert", + "comparing-values-from-different-buckets", + "comparison-operators", + "compatibility-apis-differ-from-native-apis", + "compatibility-endpoints", + "compiling-active", + "compiling-duration-seconds", + "complete-example", + "complete-example-query-script", + "complete-example-write-script", + "complete-list-of-flux-functions", + "completion-snippets-in-bashrc-or-zshrc", + "compliance-and-auditing", + "component", + "components-of-a-task", + "composite-types", + "compression", + "computation-methods-and-behavior", + "compute-the-099-quantile-of-a-prometheus-histogram", + "compute-the-099-quantile-of-a-prometheus-histogram-parsed-with-metric-version-1", + "compute-the-90th-percentile-of-a-histogram", + "compute-the-90th-quantile-of-a-histogram", + "compute-the-kaufmans-efficiency-ratio", + "compute-the-mode-of-input-tables", + "compute-the-product-of-all-values", + "compute-the-sum-and-count-in-a-single-reducer", + "compute-the-sum-of-the-value-column", + "concat", + "concat_ws", + "concatenate-strings", + "concepts", + "conda-install", + "conditional-expression-example", + "conditional-expression-syntax", + "conditional-expressions", + "conditional-functions", + "conditional-logic", + "conditionally-assign-a-state", + "conditionally-increment-a-count-with-reduce", + "conditionally-rename-columns-using-a-function", + "conditionally-set-the-value-of-a-variable", + 
"conditionally-transform-column-values-with-map", + "config", + "config-coordinator", + "config-cqs", + "config-data", + "config-example", + "config-examples", + "config-httpd", + "config-meta", + "config-monitor", + "config-override", + "config-precreator", + "config-retention", + "config-subscriber", + "configs-path", + "configurable-security-controls", + "configurable-settings", + "configuration", + "configuration-1", + "configuration-and-operational-considerations-on-a-cluster", + "configuration-batch", + "configuration-changes", + "configuration-changes-1", + "configuration-changes-2", + "configuration-changes-3", + "configuration-changes-4", + "configuration-changes-5", + "configuration-data", + "configuration-file", + "configuration-file-1", + "configuration-file-10", + "configuration-file-11", + "configuration-file-12", + "configuration-file-13", + "configuration-file-14", + "configuration-file-15", + "configuration-file-16", + "configuration-file-17", + "configuration-file-18", + "configuration-file-19", + "configuration-file-2", + "configuration-file-20", + "configuration-file-21", + "configuration-file-22", + "configuration-file-23", + "configuration-file-24", + "configuration-file-25", + "configuration-file-26", + "configuration-file-27", + "configuration-file-28", + "configuration-file-29", + "configuration-file-3", + "configuration-file-30", + "configuration-file-31", + "configuration-file-32", + "configuration-file-33", + "configuration-file-34", + "configuration-file-35", + "configuration-file-36", + "configuration-file-37", + "configuration-file-38", + "configuration-file-39", + "configuration-file-4", + "configuration-file-40", + "configuration-file-41", + "configuration-file-42", + "configuration-file-43", + "configuration-file-44", + "configuration-file-45", + "configuration-file-46", + "configuration-file-47", + "configuration-file-48", + "configuration-file-49", + "configuration-file-5", + "configuration-file-50", + "configuration-file-51", + 
"configuration-file-52", + "configuration-file-53", + "configuration-file-54", + "configuration-file-55", + "configuration-file-56", + "configuration-file-57", + "configuration-file-58", + "configuration-file-59", + "configuration-file-6", + "configuration-file-60", + "configuration-file-61", + "configuration-file-62", + "configuration-file-63", + "configuration-file-64", + "configuration-file-65", + "configuration-file-66", + "configuration-file-67", + "configuration-file-68", + "configuration-file-69", + "configuration-file-7", + "configuration-file-70", + "configuration-file-71", + "configuration-file-72", + "configuration-file-8", + "configuration-file-9", + "configuration-file-locations", + "configuration-help", + "configuration-management", + "configuration-options", + "configuration-overview", + "configuration-precedence", + "configuration-settings", + "configuration-settings-for-flux-query-management", + "configuration-settings-for-query-management", + "configuration-simplification", + "configure", + "configure-a-deadman-check", + "configure-a-jdbc-server-connection", + "configure-a-replication-stream", + "configure-a-threshold-check", + "configure-a-watcher-of-watchers-system-to-monitor-influxdb-1x-servers", + "configure-access-to-the-influxdb-container-registry", + "configure-apache-jmeter", + "configure-apache-nifi", + "configure-apache-pulsar", + "configure-auth0-authentication", + "configure-authentication", + "configure-authentication-credentials", + "configure-authentication-duration", + "configure-authentication-using-jwt-tokens", + "configure-azure-active-directory-authentication", + "configure-backup-and-restore-services", + "configure-bitbucket-authentication", + "configure-chronograf", + "configure-chronograf-alert-endpoints", + "configure-chronograf-to-authenticate-with-a-username-and-password", + "configure-chronograf-to-authenticate-with-oauth-20", + "configure-chronograf-to-use-any-oauth-20-provider", + "configure-connection-profiles", + 
"configure-credentials", + "configure-dashboard-wide-settings", + "configure-data-nodes", + "configure-distributed-environments", + "configure-dns-routing", + "configure-error-handling", + "configure-error-handling-for-a-trigger", + "configure-event-handlers", + "configure-fluentd", + "configure-github-authentication", + "configure-gitlab-authentication", + "configure-google-authentication", + "configure-grafana-to-use-flux", + "configure-grafana-to-use-influxql", + "configure-gzip-compression", + "configure-heroku-authentication", + "configure-https-over-tls", + "configure-influxctl", + "configure-influxctl-to-connect-to-your-cluster", + "configure-influxctl-to-use-the-admin-token", + "configure-influxdb-clustered-to-use-keycloak", + "configure-influxdb-clustered-to-use-microsoft-entra-id", + "configure-influxdb-enterprise-clusters", + "configure-influxdb-enterprise-data-nodes", + "configure-influxdb-enterprise-meta-nodes", + "configure-influxdb-oss", + "configure-influxdb-subscriptions", + "configure-influxdb-to-use-tls", + "configure-ingress", + "configure-kapacitor", + "configure-kapacitor-flux-tasks", + "configure-kapacitor-flux-tasks-for-influxdb-cloud-or-2x", + "configure-kapacitor-subscriptions", + "configure-kapacitor-to-connect-to-influxdb", + "configure-kapacitor-to-talk-to-the-udf", + "configure-ldap-authentication", + "configure-ldap-for-an-influxdb-enterprise-cluster", + "configure-local-storage-for-ingesters", + "configure-meta-nodes", + "configure-object-storage-permissions", + "configure-okta-authentication", + "configure-openhab", + "configure-organizations", + "configure-password-hashing", + "configure-registry-access-in-valuesyaml", + "configure-security", + "configure-security-headers", + "configure-snapshots", + "configure-tcp-and-udp-ports-used-in-influxdb-enterprise", + "configure-telegraf", + "configure-telegraf-for-influxdb", + "configure-telegraf-input-and-output-plugins", + "configure-telegraf-input-plugins", + 
"configure-telegraf-to-read-csv-files", + "configure-telegraf-to-write-to-influxdb", + "configure-telegraf-to-write-to-influxdb-3-core", + "configure-telegraf-to-write-to-influxdb-3-enterprise", + "configure-the-catalog-database", + "configure-the-check", + "configure-the-check-query", + "configure-the-client-library", + "configure-the-column-display-format", + "configure-the-csv-display-format", + "configure-the-http-input-plugin-in-your-telegraf-configuration-file", + "configure-the-json-display-format", + "configure-the-log-viewer", + "configure-the-migration", + "configure-the-object-store", + "configure-the-port-or-address", + "configure-the-table-display-format", + "configure-timestamps-in-the-influxql-shell", + "configure-tls-for-chronograf", + "configure-tls-transport-layer-security-and-https", + "configure-vector", + "configure-with-the-http-api", + "configure-your-appinstance", + "configure-your-cluster", + "configure-your-cluster-to-connect-to-your-identity-provider", + "configure-your-dashboard", + "configure-your-influxdb-appinstance-resource-directly", + "configure-your-influxdb-cluster-using-helm", + "configure-your-influxdb-connection", + "configure-your-influxdb-log-location", + "configure-your-log-level", + "configure-your-server", + "configure-your-token-as-an-environment-variable", + "configure-your-triggers", + "configuring-and-using-multiple-http-post-endpoints", + "configuring-chronograf-to-work-with-kapacitor", + "configuring-cq-time-ranges-and-filling-empty-results", + "configuring-diamond-to-send-metrics-to-influxdb", + "configuring-execution-intervals", + "configuring-execution-intervals-and-cq-time-ranges", + "configuring-influxdb-oss", + "configuring-influxdb-oss-instances", + "configuring-kapacitor-for-our-udf", + "configuring-returned-timestamps", + "configuring-scrapers-and-discoverers", + "configuring-the-returned-timestamps", + "configuring-time-ranges-for-resampling", + "configuring-write-client-options", + 
"confirm-the-data-nodes-upgrade", + "confirm-the-meta-nodes-upgrade", + "congratulations", + "connect-chronograf-to-kapacitor", + "connect-chronograf-to-your-influxdb-instance-or-influxdb-enterprise-cluster", + "connect-grafana-to-your-influxdb-instance", + "connect-telegraf-to-a-secured-influxdb-enterprise-instance", + "connect-telegraf-to-a-secured-influxdb-instance", + "connect-your-identity-provider-to-auth0", + "connecting-chronograf-to-influxdb-enterprise-clusters", + "connection-configuration-examples", + "conntrack", + "cons-of-external-monitoring", + "cons-of-internal-monitoring", + "consensus-timeout", + "consider-cache-limitations", + "consider-using-influxdb-tasks", + "consider-when-upgrading", + "consider-when-using-query-string-parameters", + "considerations-for-monitoring-the-1x-tick-stack", + "consistency-level--one", + "consistency-level--one-1", + "consoletty", + "consolidated-authentication", + "constant", + "constants", + "construct-line-protocol", + "construct-points-and-write-line-protocol", + "constructor", + "consul", + "consul-telemetry", + "consul_agent", + "contact-influxdata-sales-to-enable-sso", + "containers", + "contains", + "content", + "contents", + "contents-toggle-btn", + "continuous-queries", + "continuous-queries-and-kapacitor-tasks-may-produce-different-results", + "continuous-queries-settings", + "continuous-query", + "continuous-query-cq", + "continuous-query-execution-if-logging-enabled", + "continuous-query-management", + "continuous-query-statistics", + "continuous-query-use-cases", + "continuous_queries", + "continuous_queries-section", + "continuously-run-a-notebook", + "contour-ingress-support", + "contrib-package", + "contribute-an-external-plugin", + "contribute-code", + "contribute-to-chronograf", + "contribute-to-influxdb-oss", + "contributing-non-output-node", + "contributions-and-licenses", + "control-a-dashboard", + "control-at-the-dashboard-level", + "control-trigger-execution", + "control-your-dashboard-cell", 
+ "convert-a-boolean-_value-column-to-floats", + "convert-a-boolean-_value-column-to-integers", + "convert-a-boolean-_value-column-to-uintegers", + "convert-a-boolean-to-a-hexadecimal-string-value", + "convert-a-duration-to-a-hexadecimal-string-value", + "convert-a-duration-to-a-sql-interval", + "convert-a-float-_value-column-to-integers", + "convert-a-float-_value-column-to-uintegers", + "convert-a-float-to-a-hexadecimal-string-value", + "convert-a-hex-color-code-to-a-name", + "convert-a-hexadecimal-string-into-bytes", + "convert-a-hexadecimal-string-to-a-uinteger", + "convert-a-hexadecimal-string-to-an-integer", + "convert-a-hexadecimal-string-to-an-unsigned-integer", + "convert-a-hexadecimal-string-to-bytes", + "convert-a-influxdb-1x-json-query-output-file-to-a-stream-of-tables", + "convert-a-influxdb-1x-json-query-output-string-to-a-stream-of-tables", + "convert-a-json-array-of-objects-to-a-flux-array", + "convert-a-json-array-of-scalar-values-to-a-flux-array", + "convert-a-json-array-to-a-flux-table", + "convert-a-json-object-to-a-flux-record", + "convert-a-monitoring-level-to-a-pagerduty-action", + "convert-a-query-builder-to-flux", + "convert-a-scientific-notation-string-to-a-float", + "convert-a-series-of-geographic-points-into-linestring", + "convert-a-severity-to-a-pagerduty-action", + "convert-a-status-level-to-a-pagerduty-severity", + "convert-a-string-and-key-to-a-base64-signed-hash", + "convert-a-string-into-a-regular-expression", + "convert-a-string-into-a-sensu-name", + "convert-a-string-to-64-bit-hash-using-xxhash", + "convert-a-string-to-a-64-bit-hash-using-cityhash64", + "convert-a-string-to-a-base64-string", + "convert-a-string-to-a-duration", + "convert-a-string-to-a-float", + "convert-a-string-to-a-regular-expression", + "convert-a-string-to-a-sha-1-hash", + "convert-a-string-to-a-sha-256-hash", + "convert-a-string-to-a-time-value", + "convert-a-string-to-an-md5-hash", + "convert-a-string-to-bytes", + 
"convert-a-time-to-a-hexadecimal-string-value", + "convert-a-uinteger-_value-column-to-an-integers", + "convert-a-uinteger-_value-column-to-an-uintegers", + "convert-a-uinteger-to-a-hexadecimal-string-value", + "convert-a-utc-timestamp-to-a-specified-timezone", + "convert-alert-levels-in-a-stream-of-tables-to-bigpanda-statuses", + "convert-all-values-in-a-column-to-booleans", + "convert-all-values-in-a-column-to-floats", + "convert-all-values-in-a-column-to-hexadecimal-string-values", + "convert-all-values-in-a-column-to-integers", + "convert-all-values-in-a-column-to-strings", + "convert-all-values-in-a-column-to-time", + "convert-all-values-in-a-column-to-unsigned-integers", + "convert-all-values-of-a-column-to-lower-case", + "convert-all-values-of-a-column-to-title-case", + "convert-all-values-of-a-column-to-upper-case", + "convert-an-alert-level-to-a-bigpanda-status", + "convert-an-array-of-floats-to-integers", + "convert-an-array-of-floats-to-strings", + "convert-an-array-of-floats-to-unsigned-integers", + "convert-an-array-of-integers-to-an-array-of-records", + "convert-an-array-of-integers-to-booleans", + "convert-an-array-of-integers-to-durations", + "convert-an-array-of-integers-to-floats", + "convert-an-array-of-integers-to-time-values", + "convert-an-array-of-strings-to-floats", + "convert-an-integer-_value-column-to-booleans", + "convert-an-integer-_value-column-to-floats", + "convert-an-integer-_value-column-to-times", + "convert-an-integer-to-a-float", + "convert-an-integer-to-a-hexadecimal-string-value", + "convert-an-integer-to-a-time-value", + "convert-basic-types-to-integers", + "convert-basic-types-to-strings", + "convert-basic-types-to-unsigned-integers", + "convert-batch-data-to-stream-data", + "convert-bits-into-a-float-value", + "convert-bytes-to-a-hexadecimal-string-value", + "convert-bytes-to-gigabytes", + "convert-characters-in-a-string-to-title-case", + "convert-columns-to-booleans", + "convert-columns-to-floats", + 
"convert-columns-to-integers", + "convert-columns-to-strings", + "convert-columns-to-time", + "convert-columns-to-uintegers", + "convert-continuous-queries-to-flux-queries", + "convert-data-types-to-booleans", + "convert-data-types-to-durations", + "convert-data-types-to-floats", + "convert-data-types-to-hexadecimal-strings", + "convert-data-types-to-integers", + "convert-data-types-to-strings", + "convert-data-types-to-time", + "convert-data-types-to-uintegers", + "convert-dynamic-types-to-flux-types", + "convert-hexadecimal-string-to-integer", + "convert-influxql-continuous-queries-to-flux", + "convert-integer-to-hexadecimal-string", + "convert-numeric-types-to-durations", + "convert-numeric-values-to-booleans", + "convert-regular-expressions-to-strings", + "convert-results-to-json", + "convert-strings-to-booleans", + "convert-strings-to-bytes", + "convert-the-_value-column-to-strings", + "convert-timestamp-format", + "convert-timestamps-into-seconds-since-the-unix-epoch", + "convert-values-in-a-column-to-durations", + "convert-windowed-data-to-stream-data", + "converter", + "converting-a-specific-shard", + "converting-all-shards-for-a-database", + "converting-all-shards-on-a-node", + "coordinator", + "copy-the-images-to-your-private-registry", + "copyright", + "copyshard", + "copyshardreq", + "core", + "core-1", + "core-2", + "core-3", + "core-4", + "core-5", + "core-6", + "core-commands", + "core-is-optimized-for-recent-data", + "core-service-dsn-parsing-errors", + "core-urls", + "corr", + "correlate", + "correlated-subqueries", + "correlated-subquery-performance", + "cors", + "cos", + "cosh", + "cot", + "couchbase", + "couchdb", + "count", + "count-and-fill", + "count-distinct-values-for-a-field", + "count-instances-of-a-substring-within-a-string", + "count-the-distinct-field-values-associated-with-a-field-key", + "count-the-distinct-field-values-associated-with-a-field-key-1", + "count-the-field-values-associated-with-a-field-key", + 
"count-the-field-values-associated-with-a-field-key-and-include-several-clauses", + "count-the-field-values-associated-with-each-field-key-in-a-measurement", + "count-the-field-values-associated-with-each-field-key-that-matches-a-regular-expression", + "count-the-number-of-consecutive-states", + "count-the-number-of-non-null-field-values-within-time-windows-grouped-by-time", + "count-the-number-of-non-null-values-in-a-field", + "count-the-number-of-non-null-values-in-each-field", + "count-the-number-of-non-null-values-in-fields-where-the-field-key-matches-a-regular-expression", + "count-the-number-of-points-reported-per-room-across-all-fields", + "count-the-number-of-records-in-each-input-table", + "count-the-number-of-records-with-a-specific-value", + "count-the-number-of-rows-in-a-table", + "count-the-number-rows-in-a-specific-state", + "count-the-values-that-match-a-regular-expression", + "count-unique-tag-values", + "count-unique-values", + "count-values-for-a-field", + "count-values-for-each-field-in-a-measurement", + "countcolumn", + "counter", + "counts-on-empty-tables", + "covar", + "covar_pop", + "covar_samp", + "covariance", + "covariance-between-two-columns", + "covariance-between-two-streams-of-data", + "cpu", + "cpu-accounting", + "cpu-limit", + "cpu-utilization-ingesters-k8s", + "cpu-utilization-k8s", + "cpu-utilization-routers-k8s", + "cpu_alert_batchtick", + "cpu_alert_streamtick", + "cpu_alerttick", + "cq", + "cqminute", + "cratedb", + "create", + "create-a-1x-compatible-authorization", + "create-a-basic-last-value-cache-for-one-column", + "create-a-bucket", + "create-a-bucket-and-dbrp-mapping", + "create-a-bucket-from-the-load-data-menu", + "create-a-bucket-in-the-data-explorer", + "create-a-bucket-schema", + "create-a-bucket-schema-using-the-influx-cli", + "create-a-bucket-schema-using-the-influxdb-http-api", + "create-a-bucket-that-enforces-explicit-schemas", + "create-a-bucket-that-retains-data-for-30-days", + 
"create-a-bucket-through-the-influxdb-cloud-api", + "create-a-bucket-using-the-influx-cli", + "create-a-bucket-with-a-custom-shard-group-duration", + "create-a-bucket-with-a-description", + "create-a-bucket-with-an-explicit-schema", + "create-a-bucket-with-infinite-data-retention", + "create-a-check", + "create-a-chronograf-ha-configuration", + "create-a-cluster-configuration-file", + "create-a-configuration-file", + "create-a-configuration-file-with-default-input-and-output-plugins", + "create-a-configuration-file-with-specific-input-and-output-plugins", + "create-a-configuration-with-default-input-and-output-plugins", + "create-a-configuration-with-specific-sections-and-plugins", + "create-a-connection-configuration-and-set-it-active", + "create-a-connection-configuration-that-uses-a-username-and-password", + "create-a-connection-configuration-without-setting-it-active", + "create-a-cumulative-histogram", + "create-a-cumulative-histogram-with-dynamically-generated-bins", + "create-a-custom-average-function", + "create-a-custom-dashboard-variable", + "create-a-custom-plugin", + "create-a-custom-telegraf-configuration", + "create-a-custom-token", + "create-a-custom-transformation", + "create-a-dashboard", + "create-a-data-write-plugin", + "create-a-database", + "create-a-database-connection-for-influxdb", + "create-a-database-default", + "create-a-database-or-table", + "create-a-database-token", + "create-a-database-using-basic-authentication", + "create-a-database-using-http-authentication", + "create-a-database-using-the-influxdb-api", + "create-a-database-with-a-30-day-retention-period", + "create-a-database-with-a-custom-partition-template", + "create-a-database-with-a-specific-retention-policy", + "create-a-database-with-an-authentication-token", + "create-a-database-with-an-infinite-retention-period", + "create-a-database-with-custom-partitioning", + "create-a-database-with-non-default-table-and-column-limits", + "create-a-dbrp-mapping", + 
"create-a-default-retention-policy", + "create-a-dictionary-from-a-list", + "create-a-dictionary-from-a-list-of-records", + "create-a-disabled-trigger", + "create-a-distinct-cache-for-one-column", + "create-a-distinct-value-cache", + "create-a-file-index", + "create-a-flux-task", + "create-a-flux-task-v2", + "create-a-full-configuration", + "create-a-full-configuration-as-save-it-to-a-file", + "create-a-go-module-directory", + "create-a-hierarchical-cache-with-constraints", + "create-a-histogram-from-input-data", + "create-a-kapacitor-flux-task-using-a-file", + "create-a-kapacitor-flux-task-via-stdin", + "create-a-keycloak-client-with-device-flow-enabled", + "create-a-keycloak-realm", + "create-a-label", + "create-a-last-value-cache", + "create-a-last-value-cache-with-multiple-keys-and-values", + "create-a-management-token", + "create-a-management-token-with-an-expiration-and-description", + "create-a-management-token-with-no-expiration", + "create-a-named-admin-token", + "create-a-new-account-in-a-new-region", + "create-a-new-dashboard", + "create-a-new-database", + "create-a-new-file-index-for-a-database", + "create-a-new-file-index-for-a-specific-table", + "create-a-new-influxdb-enterprise-role", + "create-a-new-influxdb-enterprise-user", + "create-a-new-influxdb-enterprise-user-role", + "create-a-new-non-admin-user", + "create-a-new-notification-rule", + "create-a-new-organization-directly-on-disk", + "create-a-new-remote-with-influxdb-cloud", + "create-a-new-table-with-new-columns", + "create-a-new-task-from-a-template", + "create-a-new-task-from-a-tickscript", + "create-a-new-template-for-batch-tasks", + "create-a-new-template-for-stream-tasks", + "create-a-new-tenant-in-microsoft-entra-id", + "create-a-new-user-directly-on-disk", + "create-a-notebook", + "create-a-notebook-from-a-preset", + "create-a-notification-endpoint", + "create-a-notification-endpoint-and-rule", + "create-a-notification-rule", + "create-a-pull-request", + 
"create-a-python-virtual-environment", + "create-a-query-client", + "create-a-read-only-1x-user", + "create-a-read-only-api-token", + "create-a-recording", + "create-a-replication-stream", + "create-a-resource-token", + "create-a-retention-policy", + "create-a-role", + "create-a-scheduled-plugin", + "create-a-schema-and-print-column-information", + "create-a-schema-using-the-influx-cli", + "create-a-schema-with-columns-format", + "create-a-scraper", + "create-a-scraper-in-the-influxdb-ui", + "create-a-script-from-a-file", + "create-a-script-using-raw-flux", + "create-a-slack-app", + "create-a-slack-client", + "create-a-stream-of-tables-from-an-array", + "create-a-system-token", + "create-a-system-token-for-health-information", + "create-a-table", + "create-a-table-with-a-custom-partition-template", + "create-a-table-with-custom-partitioning", + "create-a-table-with-tag-and-field-columns", + "create-a-target-database", + "create-a-task", + "create-a-task-from-a-file", + "create-a-task-from-a-template", + "create-a-task-from-the-data-explorer", + "create-a-task-in-the-influxdb-ui", + "create-a-task-in-the-task-ui", + "create-a-task-template-script", + "create-a-task-that-contains-a-flux-script", + "create-a-task-that-references-a-script", + "create-a-task-using-a-file", + "create-a-task-using-an-invokable-script", + "create-a-task-using-raw-flux", + "create-a-task-using-the-influx-cli", + "create-a-task-using-the-influxdb-api", + "create-a-telegraf-configuration", + "create-a-telegraf-configuration-via-stdin", + "create-a-telegram-bot", + "create-a-template", + "create-a-token", + "create-a-token-in-the-influxdb-ui", + "create-a-token-scoped-to-a-user", + "create-a-token-scoped-to-a-user-and-with-specified-read-and-write-permissions", + "create-a-token-that-expires-in-seven-days", + "create-a-token-using-the-influx-cli", + "create-a-token-using-the-influxdb-api", + "create-a-token-with-access-to-all-databases", + 
"create-a-token-with-access-to-all-system-information", + "create-a-token-with-access-to-multiple-databases", + "create-a-token-with-mixed-permissions-to-multiple-databases", + "create-a-token-with-multiple-permissions", + "create-a-token-with-read-and-write-access-to-a-database", + "create-a-token-with-read-and-write-access-to-all-databases", + "create-a-token-with-read-only-access-to-a-database", + "create-a-token-with-read-only-access-to-multiple-databases", + "create-a-token-with-specific-permissions", + "create-a-token-with-specified-permissions", + "create-a-token-with-specified-read-permissions", + "create-a-topic-handler", + "create-a-topic-handler-with-a-handler-file", + "create-a-trigger-for-a-specific-table", + "create-a-trigger-for-all-tables", + "create-a-user", + "create-a-user-against-a-follower-node", + "create-a-user-against-the-lead-node", + "create-a-user-for-recovery-purposes", + "create-a-v1-authorization", + "create-a-v1-authorization-with-read-and-write-permissions", + "create-a-valuesyaml-file", + "create-a-variable", + "create-a-variable-in-the-data-explorer", + "create-a-variable-in-the-settings-section", + "create-an-admin-token", + "create-an-alert", + "create-an-alert-email-task", + "create-an-alert-in-kapacitor-based-on-that-data", + "create-an-alert-rule", + "create-an-all-access-api-token", + "create-an-all-access-token", + "create-an-all-access-token-1", + "create-an-all-access-token-cli", + "create-an-all-access-token-in-the-influx-cli", + "create-an-annotation", + "create-an-api-token-with-read-and-write-access-to-specific-buckets", + "create-an-api-token-with-specified-read-and-write-permissions", + "create-an-authorization-for-the-device", + "create-an-empty-table", + "create-an-explicit-bucket-and-schema", + "create-an-http-request-plugin", + "create-an-influxdb-2x-connection-configuration", + "create-an-influxdb-client", + "create-an-influxdb-cloud-account", + "create-an-influxdb-cloud-connection-configuration", + 
"create-an-influxdb-connection", + "create-an-influxdb-data-source", + "create-an-influxdb-dbrp-mapping", + "create-an-influxdb-enterprise-user-or-role-with-kapacitor-permissions", + "create-an-influxdb-template", + "create-an-invokable-flux-script", + "create-an-invokable-script", + "create-an-operator-api-token", + "create-an-operator-token", + "create-an-operator-token-in-the-influx-cli", + "create-an-organization", + "create-an-organization-in-the-influxdb-ui", + "create-an-organization-using-the-influx-cli", + "create-an-organization-with-a-description", + "create-and-remove-topics", + "create-and-use-dashboard-variables", + "create-another-admin-user", + "create-authorizations", + "create-checks", + "create-chronograf-alert-rules", + "create-chronograf-dashboards", + "create-conditional-filters", + "create-continuous-query", + "create-custom-checks", + "create-custom-template-variables", + "create-dashboard-cells", + "create-database", + "create-dbrp-mappings", + "create-dbrp-mappings-for-unmapped-buckets", + "create-influxdb-and-kapacitor-connections", + "create-influxdb-clients", + "create-iot-virtual-device", + "create-kapacitor-flux-tasks", + "create-kapacitor-monitoring-alerts", + "create-multiple-restrictions-at-a-time", + "create-new-influxdb-tasks", + "create-notification-endpoints", + "create-notification-rules", + "create-or-edit-dashboards", + "create-organizations", + "create-retention-policies-with-create-retention-policy", + "create-retention-policy", + "create-sample-data-dashboards", + "create-scrapable-endpoints", + "create-subscription", + "create-subscriptions", + "create-the-api-to-list-devices", + "create-the-api-to-register-devices", + "create-the-application", + "create-the-consumer", + "create-the-downsampling-logic", + "create-the-flightquery-class", + "create-the-producer", + "create-the-producer-and-consumer-clients", + "create-the-producer-client", + "create-user", + "create-users", + 
"create-verify-and-upload-the-ldap-configuration-file", + "create-your-plugin-file", + "create-your-query", + "createdatabase", + "createempty", + "createfailures", + "createiteratorreq", + "createuserandrole", + "creating-a-database", + "creating-alerts-in-chronograf", + "credential-precedence", + "credentials-as-query-parameters", + "credentials-in-the-request-body", + "crit", + "critreset", + "crits_triggered", + "cron", + "cross-measurement-correlation", + "cross-organization-superadmin-permission", + "csgo", + "csv", + "csv-annotations", + "csv-data-with-ignored-column", + "csv-formatted-results", + "csv-package", + "csv-parsing-modes", + "csv-response-format", + "csv-variable-examples", + "csv-variable-use-cases", + "csv-with-annotation-shorthand", + "csv-with-constants", + "csv-with-non-default-boolean-values", + "csv-with-non-default-float-values", + "csv-with-non-default-integer-values", + "csv-with-non-default-timestamps", + "csv-with-non-default-uinteger-values", + "csv_timestamp_column", + "csv_timestamp_format", + "ctrlx_datalayer", + "cume_dist", + "cumulative-sum", + "cumulative_sum", + "cumulativesum", + "curl--k", + "curl-request", + "curl-request-1", + "curl-request-2", + "curl-the-ping-endpoint", + "current-limitations", + "current-row", + "current-system-time", + "current-utc-time", + "current_date", + "current_time", + "current_timestamp", + "currentfield", + "currentsegmentdiskbytes", + "cursor-type", + "cursors", + "custom", + "custom-aggregate-function-examples", + "custom-anomaly-detection-using-kapacitor", + "custom-api-token", + "custom-backup-window-_with_-object-storage-versioning", + "custom-backup-window-_without_-object-storage-versioning", + "custom-ca-certificates", + "custom-compile-telegraf", + "custom-dashboard-variables", + "custom-data-retention-periods", + "custom-date-selector", + "custom-function-examples", + "custom-functions", + "custom-mathematic-functions", + "custom-multiplication-function", + "custom-partition-data", 
+ "custom-partitioning", + "custom-partitioning-not-supported", + "custom-percentage-function", + "custom-port-mapping", + "custom-task-http-endpoints", + "custom-template-variables", + "custom-timestamp-formats", + "custom-transformation-examples", + "custom-url", + "custom-url-field", + "custom-values-for-templates", + "customdetails", + "customize-cells", + "customize-column-names", + "customize-measurement-tag-and-field-columns-in-the-to-operation", + "customize-scale-settings", + "customize-single-stat", + "customize-the-deadman-check", + "customize-your-influxdb-oss-url", + "customized-config", + "cutset", + "d", + "daemon", + "dashboard", + "dashboard-id-in-the-cli", + "dashboard-id-in-the-ui", + "dashboard-sections-and-cells", + "dashboard-templates", + "dashboard-variable", + "dashboards", + "dashboards-total", + "dashboardtime", + "data", + "data-1", + "data-and-wal-directory-permissions", + "data-deletion", + "data-dir", + "data-directory", + "data-durability", + "data-encryption", + "data-exploration", + "data-explorer", + "data-flow", + "data-flow-for-writes", + "data-grouped-by-time-may-return-unexpected-timestamps", + "data-ingest", + "data-ingest-lifecycle-best-practices", + "data-insecure-tls", + "data-integrity", + "data-is-queryable-until-deleted", + "data-management", + "data-migration-guides", + "data-model", + "data-node", + "data-node-configuration", + "data-node-configuration-files-influxdbconf", + "data-node-configuration-settings", + "data-node-file-system-layout", + "data-node-file-system-overview", + "data-nodes", + "data-nodes-1", + "data-organization", + "data-points-older-than-retention-policy", + "data-replication", + "data-requirements", + "data-retention", + "data-retention-in-influxdb", + "data-retention-in-influxdb-cloud", + "data-retention-in-influxdb-cloud-dedicated", + "data-retention-in-influxdb-cloud-serverless", + "data-retention-in-influxdb-clustered", + "data-rows", + "data-sampling", + "data-scan-output", + 
"data-scanning-nodes-parquetexec-and-recordbatchesexec", + "data-section", + "data-section-1", + "data-service", + "data-set", + "data-settings", + "data-settings-for-the-tsm-engine", + "data-source", + "data-source-names", + "data-sources-and-things-to-note", + "data-sources-determine-data-structure", + "data-storage", + "data-stores", + "data-structure", + "data-type", + "data-type-conversion", + "data-type-examples", + "data-types", + "data-types-1", + "data-types-and-cast-operations", + "data-types-and-casting-operations", + "data-types-and-format", + "data-types-compatible-with-parameters", + "data-use-tls", + "data-variable", + "data-verification", + "data-visualization", + "data-write-example", + "data-written-just-before-a-snapshot-may-not-be-present-after-restoring", + "data_format", + "database", + "database--collectd", + "database--graphite", + "database--opentsdb", + "database--udp", + "database-and-cli-improvements", + "database-and-retention-policy-mapping", + "database-attributes", + "database-context-keys", + "database-engine", + "database-engine-1", + "database-engine-10", + "database-engine-11", + "database-engine-12", + "database-engine-13", + "database-engine-14", + "database-engine-15", + "database-engine-16", + "database-engine-17", + "database-engine-18", + "database-engine-19", + "database-engine-2", + "database-engine-20", + "database-engine-21", + "database-engine-3", + "database-engine-4", + "database-engine-5", + "database-engine-6", + "database-engine-7", + "database-engine-8", + "database-engine-9", + "database-limit", + "database-management", + "database-management-tools", + "database-management-with-influxql-not-supported", + "database-names-cant-be-updated", + "database-naming-examples", + "database-naming-restrictions", + "database-not-found", + "database-preparation", + "database-retention-period", + "database-table-and-column-limits", + "database-tokens", + "database-variable-use-cases", + "databases", + "datadog", + 
"datafusion", + "datafusion-config", + "datafusion-max-parquet-fanout", + "datafusion-num-threads", + "datafusion-query-plans", + "datafusion-runtime-disable-lifo-slot", + "datafusion-runtime-event-interval", + "datafusion-runtime-global-queue-interval", + "datafusion-runtime-max-blocking-threads", + "datafusion-runtime-max-io-events-per-tick", + "datafusion-runtime-thread-keep-alive", + "datafusion-runtime-thread-priority", + "datafusion-runtime-type", + "datafusion-use-cached-parquet-loader", + "datasets-over-individual-points", + "datasourcename", + "datatype", + "date", + "date-and-time-data-types", + "date-and-time-functions", + "date-and-time-literals", + "date-package", + "date-specifiers", + "date-time", + "date_bin", + "date_bin_gapfill", + "date_bin_wallclock", + "date_bin_wallclock_gapfill", + "date_format", + "date_part", + "date_trunc", + "datepart", + "datepart-like-queries", + "dates--times", + "datetime", + "datetimecurrent-timestamp-shortcode", + "datetrunc", + "db", + "db2", + "dbrp-http-api-now-matches-swagger-documentation", + "dbrps-map-to-influxdb-buckets", + "dbrps-map-to-influxdb-databases", + "dcos", + "deactivate-a-v1-authorization", + "deadman", + "deadman-check", + "debian-package-upgrade", + "debug-package", + "debug-pprof-enabled--false", + "debugpprof-http-endpoint", + "debugpprof-http-endpoints", + "debugpprofall-http-endpoint", + "debugrequests-http-endpoint", + "debugvars-http-endpoint", + "debugvars-removed", + "december-2021", + "decide-on-your-query-language", + "declarations", + "declined-or-late-payments", + "decode", + "decoder", + "decreasecooldown", + "dedicated-url-field", + "dedicated-urls", + "dedup", + "dedupkey", + "deduplicateexec", + "deep-linking", + "default", + "default-connection-profile-store-location", + "default-dbrp", + "default-querier-count-increased", + "default-query-concurrency-changed", + "default-retention-policies", + "default-scale-settings", + "default-storage-class", + "default-tags", + 
"default-templates", + "default-time-range", + "default-to-partial-write-semantics", + "default-usage", + "default-value", + "defaultapi", + "defaultconfig", + "defaultdisablewebpagepreview", + "defaultparsemode", + "defaults", + "defaultsilent", + "defaulttokenprefix", + "defaulturl", + "defaultvalue", + "define", + "define-a-custom-now-time", + "define-a-destination", + "define-a-geographic-region", + "define-a-handler-using-the-slackyaml-file", + "define-a-new-task", + "define-a-query-that-performs-time-based-aggregations", + "define-a-straight-forward-task", + "define-a-task-from-a-template", + "define-a-task-from-a-template-with-a-descriptor-file", + "define-a-template", + "define-api-responses", + "define-constants", + "define-custom-column-separator", + "define-custom-functions", + "define-custom-partitions", + "define-data-stream-variables", + "define-distance-units", + "define-environment-references", + "define-etcd-endpoints-with-command-line-flags", + "define-etcd-endpoints-with-the-etcd_endpoints-environment-variable", + "define-etcd-endpoints-with-tls-enabled", + "define-functions", + "define-functions-with-scoped-variables", + "define-or-update-a-task", + "define-parameter-defaults", + "define-stream-variables", + "define-task-options", + "define-template", + "define-template-variables-in-the-url", + "define-the-sideload-field", + "define-the-sideload-order", + "define-the-sideload-source", + "define-topic-handler", + "define-variables", + "define-your-query-patterns", + "defining-configuration-options-with-environment-variables", + "defining-the-kapacitor-task", + "defining-value-columns", + "definition", + "definition-1", + "definition-2", + "definition-3", + "definition-4", + "definitions", + "degrees", + "delete", + "delete-1", + "delete-a-bucket", + "delete-a-bucket-by-id", + "delete-a-bucket-by-name", + "delete-a-bucket-in-the-influxdb-ui", + "delete-a-bucket-using-the-influx-cli", + "delete-a-connection-configuration", + "delete-a-dashboard", + 
"delete-a-database", + "delete-a-database-named-mydb", + "delete-a-database-while-specifying-the-token-inline", + "delete-a-database-with-drop-database", + "delete-a-dbrp-mapping", + "delete-a-distinct-value-cache", + "delete-a-distinct-values-cache", + "delete-a-file-index", + "delete-a-file-index-from-a-database", + "delete-a-file-index-from-a-specific-table", + "delete-a-flux-task", + "delete-a-handler", + "delete-a-label", + "delete-a-last-value-cache", + "delete-a-last-values-cache", + "delete-a-measurement-from-a-single-shard", + "delete-a-measurement-from-a-tsm-file", + "delete-a-measurement-from-all-shards-in-the-database", + "delete-a-notebook", + "delete-a-notification-endpoint", + "delete-a-notification-rule", + "delete-a-recording", + "delete-a-remote", + "delete-a-replay", + "delete-a-replication", + "delete-a-role", + "delete-a-scraper", + "delete-a-scraper-from-the-influxdb-ui", + "delete-a-script", + "delete-a-secret", + "delete-a-secret-using-the-influx-cli", + "delete-a-secret-using-the-influxdb-cloud-ui", + "delete-a-shard-with-drop-shard", + "delete-a-table", + "delete-a-task", + "delete-a-task-in-the-influxdb-ui", + "delete-a-task-using-the-influxdb-api", + "delete-a-task-with-the-influx-cli", + "delete-a-template", + "delete-a-token", + "delete-a-token-using-the-influx-cli", + "delete-a-token-using-the-influxdb-api", + "delete-a-trigger", + "delete-a-user", + "delete-a-user-from-the-influxdb-ui", + "delete-a-user-using-the-influx-cli", + "delete-a-v1-authorization", + "delete-a-variable", + "delete-alert-rules", + "delete-all-measurement-data", + "delete-all-points-in-a-measurement", + "delete-all-points-in-a-specified-time-range", + "delete-all-points-in-a-specified-time-range-1", + "delete-all-points-within-a-specified-time-frame", + "delete-an-annotation", + "delete-an-api-token", + "delete-an-invokable-script", + "delete-an-organization", + "delete-an-organization-using-the-influx-cli", + "delete-checks", + "delete-data", + 
"delete-data-before-or-after-specified-time", + "delete-data-in-a-measurement-that-has-a-specific-tag-value", + "delete-data-to-reduce-high-cardinality", + "delete-data-using-the-api", + "delete-data-using-the-influx-cli", + "delete-kapacitor-flux-tasks", + "delete-measurements-with-drop-measurement", + "delete-multiple-connection-configurations", + "delete-multiple-databases", + "delete-node-labels", + "delete-notification-endpoints", + "delete-notification-rules", + "delete-points-by-field", + "delete-points-by-measurement", + "delete-points-by-tag-set", + "delete-points-for-a-specific-field-in-a-specified-time-range", + "delete-points-for-a-specific-field-in-a-specified-time-range-1", + "delete-points-in-a-measurement-with-a-specific-tag-value", + "delete-points-in-a-specific-measurement-with-a-specific-tag-value", + "delete-points-in-a-specific-measurement-with-a-specific-tag-value-1", + "delete-predicate", + "delete-predicate-examples", + "delete-recordings", + "delete-replays", + "delete-retention-policies-with-drop-retention-policy", + "delete-secrets", + "delete-secrets-using-the-influxdb-api", + "delete-series-with-delete", + "delete-tasks", + "delete-tasks-matching-a-glob-pattern", + "delete-templates", + "delete-tokens-in-the-influxdb-ui", + "delete-topic-handlers", + "delete-topics", + "delete-with-predicate-api-not-implemented", + "delete-your-organization", + "deleteall", + "deletes", + "deletetsm", + "deleting-a-database-cannot-be-undone", + "deleting-continuous-queries", + "deleting-data", + "deleting-data-1", + "deleting-data-without-a-delete-predicate", + "delimiter", + "demo-package-contents", + "dense_rank", + "deny-specific-cidr-ranges", + "dependency-update", + "dependency-updates", + "dependency-updates-1", + "dependency-updates-10", + "dependency-updates-11", + "dependency-updates-12", + "dependency-updates-13", + "dependency-updates-14", + "dependency-updates-15", + "dependency-updates-16", + "dependency-updates-17", + 
"dependency-updates-18", + "dependency-updates-19", + "dependency-updates-2", + "dependency-updates-20", + "dependency-updates-21", + "dependency-updates-22", + "dependency-updates-23", + "dependency-updates-24", + "dependency-updates-25", + "dependency-updates-26", + "dependency-updates-27", + "dependency-updates-28", + "dependency-updates-29", + "dependency-updates-3", + "dependency-updates-30", + "dependency-updates-31", + "dependency-updates-32", + "dependency-updates-33", + "dependency-updates-34", + "dependency-updates-35", + "dependency-updates-36", + "dependency-updates-37", + "dependency-updates-38", + "dependency-updates-39", + "dependency-updates-4", + "dependency-updates-40", + "dependency-updates-41", + "dependency-updates-42", + "dependency-updates-43", + "dependency-updates-44", + "dependency-updates-45", + "dependency-updates-46", + "dependency-updates-47", + "dependency-updates-48", + "dependency-updates-49", + "dependency-updates-5", + "dependency-updates-50", + "dependency-updates-51", + "dependency-updates-52", + "dependency-updates-53", + "dependency-updates-54", + "dependency-updates-55", + "dependency-updates-6", + "dependency-updates-7", + "dependency-updates-8", + "dependency-updates-9", + "deploy-a-cluster", + "deploy-and-use-the-catalog-service-by-default", + "deploy-on-aws", + "deploy-on-google-cloud-platform", + "deploy-the-1x-tick-stack", + "deploy-the-template", + "deploy-the-tick-sandbox-in-docker", + "deploy-the-tick-stack-in-kubernetes", + "deploy-with-kubernetes", + "deploy-your-cluster", + "deploying-in-air-gapped-environments", + "deployment", + "deployment-1", + "deployment-10", + "deployment-11", + "deployment-12", + "deployment-13", + "deployment-14", + "deployment-15", + "deployment-16", + "deployment-17", + "deployment-18", + "deployment-19", + "deployment-2", + "deployment-20", + "deployment-21", + "deployment-3", + "deployment-4", + "deployment-5", + "deployment-6", + "deployment-7", + "deployment-8", + "deployment-9", + 
"deprecated", + "deprecated-enterprise-web-console", + "deprecated-functions", + "deprecated-key", + "deprecation-removals", + "deprecations", + "deprecations-1", + "derivative", + "derive-average-values-from-a-summary-metric", + "desc", + "description", + "description-1", + "description-2", + "description-of-advanced-syntax", + "description-of-basic-syntax", + "description-of-syntax", + "description-of-syntax-1", + "description-of-syntax-2", + "description-of-syntax-3", + "description-of-syntax-4", + "description-of-syntax-5", + "design-for-performance", + "design-for-query-simplicity", + "design-to-query", + "destination", + "destination-database-must-be-empty", + "detailed-output-in-json", + "details", + "detect-a-field-type-mismatch", + "detect-a-measurement-mismatch", + "detect-if-a-host-hasnt-reported-since-a-relative-time", + "detect-if-a-host-hasnt-reported-since-a-specific-time", + "detect-when-a-series-stops-reporting", + "detect-when-the-state-changes-to-critical", + "detecting-and-repairing-entropy", + "detecting-anomalies", + "detecting-entropy", + "determine-a-state-with-existing-values", + "determine-your-backups-format", + "determine-your-batch-interval", + "determine-your-migration-start-time", + "determine-your-task-interval", + "develop-templates", + "developer-guides", + "device", + "dial-timeout", + "dialect-options", + "dict", + "dict-package", + "dictionaries", + "dictionary", + "dictionary-literals", + "dictionary-syntax", + "dictionary-types", + "difference", + "differences-between-a-heatmap-and-a-scatter-plot", + "differences-between-a-scatter-plot-and-a-heatmap", + "differences-between-tz-and-at-time-zone", + "different-data-structures-for-scraped-prometheus-metrics", + "different-results-for-the-same-query", + "digest", + "digital-signatures", + "dir", + "dir--varlibinfluxdbdata", + "dir--varlibinfluxdbmeta", + "dir-1", + "dir-2", + "directory-permissions", + "directory_monitor", + "disable", + "disable-a-specific-task", + 
"disable-a-task", + "disable-a-token", + "disable-a-token-using-the-influx-cli", + "disable-alert-handlers", + "disable-all-tasks-with-ids-that-match-a-glob-pattern", + "disable-anti-entropy-ae-before-restoring-a-backup", + "disable-delete-with-predicate-api", + "disable-development-features", + "disable-influxdb-subscriptions", + "disable-notification", + "disable-notification-endpoint", + "disable-parquet-mem-cache", + "disable-specific-alert-handlers", + "disable-subscriptions", + "disable-swap", + "disable-the-_internal-database-in-production", + "disable-the-_internal-database-in-production-clusters", + "disable-web-page-preview", + "disabled-administrative-features", + "disabled-influxdb-insights-monitoring", + "disabled-ports", + "disablelogicalrules", + "disablephysicalrules", + "disablewebpagepreview", + "disadvantages", + "discard", + "discord", + "discord-package", + "discordurl", + "discovery-services", + "discussion", + "disk", + "disk-size-metrics-per-shard", + "diskbytes", + "diskbytes-1", + "diskbytes-2", + "diskio", + "diskless-architecture", + "display-a-composite-value", + "display-a-dictionary", + "display-a-record", + "display-a-value-as-part-of-a-table", + "display-an-array", + "display-bytes", + "display-long-form-help-for-all-commands", + "display-runtime-configuration-in-use-by-influxd", + "display-short-form-help-for-all-commands", + "display-syntax", + "display-the-default-configurations", + "disque", + "distance", + "distance-units", + "distinct", + "distinct-and-the-into-clause", + "distinct-cache-eviction-interval", + "distinct-value-caches-are-flushed-when-the-server-stops", + "distinct-values-cache", + "distinct_cache", + "divisible-constraint", + "division", + "dmcache", + "dns_query", + "dnsmasq", + "do-i-need-to-verify-that-data-is-deleted", + "do-not-include-leading-zeros-in-duration-literals", + "do-not-modify-group-key-columns", + "do-not-use-duplicate-names-for-tags-and-fields", + "do-not-use-in-production", + 
"do-not-use-lvm", + "do-not-use-nfs-or-nfs-based-services", + "do-template-based-scripts-use-less-resources-or-are-they-just-an-ease-of-use-tool", + "do-you-depend-on-a-specific-cloud-provider-or-region", + "do-you-get-better-performance-with-running-one-complex-script-or-having-multiple-scripts-running-in-parallel", + "do-you-want-better-influxql-performance", + "do-you-want-to-migrate-all-your-time-series-data", + "do-you-want-to-selectively-migrate-your-time-series-data", + "do-you-want-to-use-sql-to-query-your-data", + "docker", + "docker-container-cannot-read-host-files", + "docker-default-directories", + "docker-example-with-environment-variables", + "docker-hub", + "docker-hub-default-paths", + "docker-hub-file-system-overview", + "docker-image", + "docker-install", + "docker-services", + "docker_log", + "docs-notifications", + "document-your-data-schema", + "document-your-environment", + "document-your-test-process", + "documentation", + "documentation-1", + "documentation-2", + "documenting-your-new-node", + "does-influxdb-have-a-file-system-size-limit", + "does-the-order-of-the-timestamps-matter", + "does-the-order-timestamps-in-a-query-matter", + "does-the-precision-of-the-timestamp-matter", + "doesnt-work-with-influxdb-clustered", + "dont-use-explicit-schemas-with-influxdb-3", + "dot-notation", + "double", + "double-exponential-moving-average-rules", + "double-quotes", + "double_exponential_moving_average", + "dovecot", + "downgrade", + "downgrade-influxdb", + "downgrade-to-influxdb-20", + "download-and-install", + "download-and-install-ca-certificate-files", + "download-and-install-chronograf", + "download-and-install-influxctl", + "download-and-install-influxdb-v2", + "download-and-install-instructions", + "download-and-install-on-linux-amd64", + "download-and-install-on-linux-armv8", + "download-and-install-telegraf", + "download-and-install-the-data-service", + "download-and-install-the-flight-sql-jdbc-driver", + 
"download-and-install-the-influx-cli", + "download-and-install-the-meta-service", + "download-and-run-telegraf-as-a-windows-service", + "download-and-run-the-sample-data-generator", + "download-and-run-the-sandbox", + "download-and-set-up-the-influx-cli", + "download-and-verify-the-configuration-file", + "download-and-write-the-data-to-influxdb", + "download-chronograf", + "download-from-the-command-line", + "download-from-your-browser", + "download-influxdb-3-core-binaries", + "download-influxdb-3-enterprise-binaries", + "download-influxdb-oss", + "download-install-and-configure-the-influx-cli", + "download-install-and-configure-the-influxctl-cli", + "download-kapacitor", + "download-or-generate-certificate-files", + "download-queries-to-a-csv-file", + "download-sample-air-sensor-data", + "download-sample-data", + "download-the-data-node-package", + "download-the-meta-node-package", + "download-the-tick-stack-components", + "downsample", + "downsample-and-retain-data", + "downsample-and-write-data-to-influxdb-cloud", + "downsample-by-calendar-month", + "downsample-by-calendar-months-and-years", + "downsample-by-calendar-week-starting-on-monday", + "downsample-by-week", + "downsample-data", + "downsample-data-by-applying-interval-based-aggregates", + "downsample-data-into-time-based-intervals", + "downsample-data-stored-in-influxdb", + "downsample-data-stored-in-influxdb-using-quix-streams", + "downsample-data-using-the-rdp-algorithm", + "downsample-data-using-the-rdp-algorithm-with-a-retention-rate-of-90", + "downsample-data-using-the-rdp-algorithm-with-an-epsilon-of-15", + "downsample-data-with-client-libraries", + "downsample-data-with-influxdb", + "downsample-data-with-notebooks", + "downsample-data-with-python-and-quix-streams", + "downsample-data-with-quix-streams", + "downsample-time-series", + "downsampling-and-data-retention", + "downsampling-process", + "dpdk", + "drivername", + "drivers", + "drop-a-list-of-columns", + "drop-a-user", + 
"drop-all-subscriptions", + "drop-columns-matching-a-predicate", + "drop-columns-matching-a-regex", + "drop-continuous-query", + "drop-database", + "drop-measurement", + "drop-retention-policy", + "drop-series", + "drop-series-from-the-index-with-drop-series", + "drop-shard", + "drop-subscription", + "drop-user", + "dropdata", + "dropdatabase", + "dropdown-items", + "droporiginalfieldname", + "dropped-columns", + "dropwizard", + "dry-run-prepending-csv-data-with-annotation-headers", + "dry-run-writing-annotated-csv-data-from-a-file", + "dry-run-writing-annotated-csv-data-from-a-url", + "dry-run-writing-annotated-csv-data-from-multiple-files", + "dry-run-writing-annotated-csv-data-from-multiple-sources", + "dry-run-writing-annotated-csv-data-from-multiple-urls", + "dry-run-writing-annotated-csv-data-via-stdin", + "dry-run-writing-line-protocol-from-a-file", + "dry-run-writing-line-protocol-from-a-url", + "dry-run-writing-line-protocol-from-multiple-files", + "dry-run-writing-line-protocol-from-multiple-sources", + "dry-run-writing-line-protocol-from-multiple-urls", + "dry-run-writing-line-protocol-via-stdin", + "ds389", + "dual-write-into-both-organizations", + "dual-write-to-influxdb-1x-and-influxdb-cloud", + "dual-write-to-influxdb-2x-and-influxdb-cloud", + "dual-write-to-influxdb-oss-and-influxdb-cloud", + "dual-write-to-influxdb-oss-and-influxdb-clustered", + "dumptsi", + "dumptsm", + "dumptsmwal", + "duplicate-a-column", + "duplicate-data", + "duplicate-data-points", + "duplicate-dbrp-mappings-per-database", + "duplicate-kapacitor-subscriptions", + "duplicate-keys", + "duplicate-output-tables", + "duplicate-points", + "duration", + "duration-literals", + "duration-syntax", + "duration-types", + "duration-units", + "duration_literal", + "durationcolumn", + "durationfield", + "durations", + "durationunit", + "dvc-size-and-persistence", + "dynamic-package", + "dynamic-queries", + "dynamic-sources", + "dynamic-type-syntax", + "dynamic-types", + 
"dynamic-types-are-not-supported-in-tables", + "dynamically-return-a-value-from-a-record", + "dynamically-set-a-column-value-using-existing-row-data", + "dynatrace", + "e", + "e2e-testing", + "east-us-virginia", + "ec2autoscale", + "ecs", + "edit-a-label", + "edit-an-annotation", + "edit-the-configuration-file", + "edit-the-configuration-file-directly-in-the-ui", + "edit-the-data-node-configuration-files", + "edit-token-permissions", + "editors-roleeditor", + "eks-irsa", + "elapsed", + "elapsed-and-units-greater-than-the-elapsed-time", + "elapsed-with-group-by-time-clauses", + "elasticsearch", + "elasticsearch_query", + "election-timeout", + "elements-of-line-protocol", + "elixir", + "elliptic-curve-cryptography", + "email", + "email-alerts-from-a-defined-handler", + "email-alerts-from-a-tickscript", + "embed-title", + "emitted", + "empty-tables", + "emptyexec", + "enable", + "enable-a-specific-task", + "enable-a-task", + "enable-a-token-using-the-influx-cli", + "enable-all-tasks-with-ids-that-match-a-glob-pattern", + "enable-and-configure-kapacitor-authentication", + "enable-and-configure-the-influxdb-v2-output-plugin", + "enable-and-disable-alert-rules", + "enable-and-disable-configuration-overrides", + "enable-authentication", + "enable-flux", + "enable-gzip-compression-in-influxdb-client-libraries", + "enable-gzip-compression-in-telegraf", + "enable-gzip-compression-in-the-telegraf-influxdb-output-plugin", + "enable-https", + "enable-https-with-influxdb", + "enable-object-versioning", + "enable-or-disable-a-check", + "enable-or-disable-a-flux-task", + "enable-or-disable-a-flux-task-api", + "enable-or-disable-a-token-in-the-influxdb-ui", + "enable-or-disable-notification-rules", + "enable-security-features", + "enable-shell-completion-optional", + "enable-short-term-object-versioning", + "enable-the-alert-stream", + "enable-the-batch-alert", + "enable-the-flux-query-log", + "enable-the-smtp-configuration", + "enable-tls-encryption", + "enable-tlsssl-https", + 
"enable-tsi", + "enabled", + "enabled--false", + "enabled--false-1", + "enabled--false-2", + "enabled--false-3", + "enabled--true", + "enabled--true-1", + "enabled--true-2", + "enabled--true-3", + "enabled--true-4", + "enabled-1", + "enabled-2", + "enabled-3", + "enabled-4", + "enabled-5", + "enabled-6", + "enabled-7", + "enabled-ports", + "enabledprofilers", + "encode", + "encode-a-dynamic-array-of-different-basic-types-as-json", + "encode-a-dynamic-record-as-json", + "encode-a-dynamic-type-as-json", + "encode-a-value-as-json-bytes", + "encode-credentials-with-curl", + "encode-credentials-with-flux", + "encode-credentials-with-javascript", + "encrypt-private-keys", + "end", + "end-of-life", + "end-of-life-of-native-collector---mqtt", + "endianness-optional", + "endpoint", + "endpoint-source", + "endpoints-available-in-influxdb-cloud", + "ends_with", + "engine-path", + "enqueued-write-timeout", + "enqueued-write-timeout--0", + "ensure-a-dynamic-type-contains-a-non-null-value", + "ensure-connectivity-between-machines", + "ensure-fips", + "ensure-read-access", + "enter-an-interactive-flux-repl", + "enter-cell-editor-mode", + "enter-presentation-mode-manually", + "enterprise", + "enterprise-1", + "enterprise-2", + "enterprise-3", + "enterprise-4", + "enterprise-5", + "enterprise-features", + "enterprise-license-settings", + "enterprise-overview", + "enterprise-parameter-changes", + "enterprise-urls", + "enterprise-users-and-permissions-reference", + "enterprise-web", + "enterprise-web-1", + "entitlements", + "entity", + "entitydisplayname", + "entityid", + "entityname", + "entries-definitions", + "enum", + "environment", + "environment-reference-substitution-not-supported", + "environment-variable", + "environment-variable-1", + "environment-variable-10", + "environment-variable-11", + "environment-variable-12", + "environment-variable-13", + "environment-variable-14", + "environment-variable-15", + "environment-variable-16", + "environment-variable-17", + 
"environment-variable-18", + "environment-variable-19", + "environment-variable-2", + "environment-variable-20", + "environment-variable-21", + "environment-variable-22", + "environment-variable-23", + "environment-variable-24", + "environment-variable-25", + "environment-variable-26", + "environment-variable-27", + "environment-variable-28", + "environment-variable-29", + "environment-variable-3", + "environment-variable-30", + "environment-variable-31", + "environment-variable-32", + "environment-variable-33", + "environment-variable-34", + "environment-variable-35", + "environment-variable-36", + "environment-variable-37", + "environment-variable-38", + "environment-variable-39", + "environment-variable-4", + "environment-variable-40", + "environment-variable-41", + "environment-variable-42", + "environment-variable-43", + "environment-variable-44", + "environment-variable-45", + "environment-variable-46", + "environment-variable-47", + "environment-variable-48", + "environment-variable-49", + "environment-variable-5", + "environment-variable-50", + "environment-variable-51", + "environment-variable-52", + "environment-variable-53", + "environment-variable-54", + "environment-variable-55", + "environment-variable-56", + "environment-variable-57", + "environment-variable-58", + "environment-variable-59", + "environment-variable-6", + "environment-variable-60", + "environment-variable-61", + "environment-variable-62", + "environment-variable-63", + "environment-variable-64", + "environment-variable-65", + "environment-variable-66", + "environment-variable-67", + "environment-variable-68", + "environment-variable-69", + "environment-variable-7", + "environment-variable-70", + "environment-variable-71", + "environment-variable-72", + "environment-variable-8", + "environment-variable-9", + "environment-variable-example", + "environment-variable-example-1", + "environment-variable-example-2", + "environment-variable-example-3", + "environment-variable-example-4", + 
"environment-variable-example-5", + "environment-variable-example-6", + "environment-variables", + "environment-variables-in-kap-files", + "environment-variables-in-src-files", + "environment-variables-not-in-configuration-file", + "epoch", + "epoch_time", + "epsilon", + "equal-to", + "equatable-constraint", + "equivalent-flux-task", + "erlang", + "erro0053", + "error-database-name-required", + "error-examples", + "error-max-series-per-database-exceeded--", + "error-message", + "error-message-1", + "error-message-2", + "error-message-3", + "error-messages-and-their-meaning", + "error-messages-in-the-ui", + "error-messages-when-exceeding-quotas-or-limits", + "error-messaging", + "error-messaging-1", + "error-parsing-query-found---expected-identifier-at-line---char--", + "error-parsing-query-found---expected-string-at-line---char--", + "error-parsing-query-found--expected-identifier-at-", + "error-parsing-query-mixing-aggregate-and-non-aggregate-queries-is-not-supported", + "errors", + "errors-1", + "errors-updates", + "escape-regular-expression-metacharacters-in-a-string", + "escaping-backslashes", + "essential-configuration-groups", + "establish-baseline-single-query-performance", + "establish-query-performance-degradation-conditions", + "estimate-query-cost", + "estimate-the-size-of-a-backup", + "estimate-the-total-partition-count", + "estimate_tdigest", + "etcd-flags", + "etcd-options", + "etcinfluxdbinfluxdb-metaconf", + "ethtool", + "eu-frankfurt", + "eval", + "evaluating-conditional-expressions", + "event", + "event-handlers", + "event-processing-flow", + "event_hubs", + "eventaction", + "eventclass", + "eventclasskey", + "eventhub_consumer", + "events", + "events-package", + "eventual-consistency", + "every", + "everycount", + "exact_mean", + "exact_selector", + "example", + "example-1", + "example-10", + "example-100", + "example-101", + "example-102", + "example-103", + "example-104", + "example-105", + "example-106", + "example-107", + "example-108", + 
"example-109", + "example-11", + "example-110", + "example-111", + "example-112", + "example-113", + "example-114", + "example-115", + "example-116", + "example-117", + "example-118", + "example-119", + "example-12", + "example-120", + "example-121", + "example-122", + "example-123", + "example-124", + "example-125", + "example-126", + "example-127", + "example-128", + "example-129", + "example-13", + "example-130", + "example-131", + "example-132", + "example-133", + "example-134", + "example-135", + "example-136", + "example-137", + "example-138", + "example-139", + "example-14", + "example-15", + "example-16", + "example-17", + "example-18", + "example-19", + "example-2", + "example-20", + "example-21", + "example-22", + "example-23", + "example-24", + "example-25", + "example-26", + "example-27", + "example-28", + "example-29", + "example-3", + "example-30", + "example-31", + "example-32", + "example-33", + "example-34", + "example-35", + "example-36", + "example-37", + "example-38", + "example-39", + "example-4", + "example-40", + "example-41", + "example-42", + "example-43", + "example-44", + "example-45", + "example-46", + "example-47", + "example-48", + "example-49", + "example-5", + "example-50", + "example-51", + "example-52", + "example-53", + "example-54", + "example-55", + "example-56", + "example-57", + "example-58", + "example-59", + "example-6", + "example-60", + "example-61", + "example-62", + "example-63", + "example-64", + "example-65", + "example-66", + "example-67", + "example-68", + "example-69", + "example-7", + "example-70", + "example-71", + "example-72", + "example-73", + "example-74", + "example-75", + "example-76", + "example-77", + "example-78", + "example-79", + "example-8", + "example-80", + "example-81", + "example-82", + "example-83", + "example-84", + "example-85", + "example-86", + "example-87", + "example-88", + "example-89", + "example-9", + "example-90", + "example-91", + "example-92", + "example-93", + "example-94", + 
"example-95", + "example-96", + "example-97", + "example-98", + "example-99", + "example-absolute-time-range", + "example-aggregate-queries", + "example-aggregaterate-output", + "example-aggregaterate-output-1", + "example-aggregaterate-output-2", + "example-alert-on-batch-data", + "example-alert-on-cpu-usage", + "example-alert-task-script", + "example-alerts", + "example-annotation-shorthand", + "example-api-requests", + "example-arrays", + "example-attributes", + "example-authentication-settings-in-the-kapacitorconf", + "example-barrier-based-on-idle-time", + "example-boolean-literals", + "example-box-shaped-region", + "example-changedetect-node", + "example-circular-region", + "example-close-a-client", + "example-configuration", + "example-configuration-file", + "example-connection-url", + "example-consoletty-format", + "example-continuous-query", + "example-counter-metric-in-prometheus-format", + "example-credentials-url-parameter", + "example-data", + "example-data-grouping", + "example-data-returned-by-from", + "example-data-variable", + "example-delete-predicate", + "example-derivative-output-1", + "example-derivative-output-2", + "example-dictionaries", + "example-difference-output", + "example-difference-output-1", + "example-difference-output-2", + "example-downsampling-task", + "example-downsampling-task-script", + "example-dual-write-telegraf-configuration", + "example-ec2-discovery-service-configuration", + "example-endpoints-in-prometheus-configuration-file", + "example-environment-variable-mappings", + "example-environment-variables", + "example-error-cannot-validate-certificate-for-ip_address", + "example-explain", + "example-explain-analyze", + "example-explain-analyze-statement", + "example-explain-analyze-verbose", + "example-explain-report-for-an-empty-result-set", + "example-explain-statement", + "example-export-and-import-for-disaster-recovery", + "example-filtering-show-tag-keys-by-time", + "example-float-literals", + "example-flux-script", + 
"example-function-type-signatures", + "example-functions", + "example-functions-with-defaults", + "example-functions-with-scoped-variables", + "example-gauge-data", + "example-gauge-metric-in-prometheus-data", + "example-github-oauth-configuration", + "example-group-key", + "example-group-key-instances", + "example-grouping-operations", + "example-handler-file", + "example-handler-file---defining-post-options-inline", + "example-handler-file---using-a-pre-configured-endpoint", + "example-helper-functions", + "example-hierarchical-alert-suppression", + "example-histogram-metric-in-prometheus-data", + "example-home-schema", + "example-import-data-from-a-file", + "example-influx-write-commands", + "example-influxdb-query-results", + "example-influxdb3-serve-command-options", + "example-influxdb_v2-configuration", + "example-initialize-a-client-using-batch-defaults-and-callbacks", + "example-initialize-a-client-with-synchronous-non-batch-defaults", + "example-initialize-with-credential-parameters", + "example-input", + "example-input-1", + "example-input-2", + "example-instantiate-options-for-batch-writing", + "example-instantiate-options-for-synchronous-writing", + "example-integer-literals", + "example-interpolation", + "example-interval-literals", + "example-joining-three-or-more-measurements", + "example-joining-two-measurements", + "example-line-protocol-elements-in-datatype-annotation", + "example-line-protocol-schemas", + "example-list-services-test-output", + "example-logfmt-format", + "example-logical-and-physical-plan", + "example-monitor-failed-tasks", + "example-multiply-function", + "example-normalized-counter-data", + "example-normalized-counter-data-1", + "example-normalized-counter-data-1-1", + "example-normalized-counter-data-1-2", + "example-normalized-counter-data-2", + "example-normalized-counter-data-2-1", + "example-normalized-counter-data-2-2", + "example-of-filling-gaps-in-data", + "example-of-mixing-data-types-and-line-protocol-elements", + 
"example-of-overlapping-data", + "example-of-running-service-tests", + "example-of-selecting-a-measurement-without-a-time-range", + "example-of-show-diagnostics-output", + "example-oidc-with-ad-fs", + "example-output", + "example-output-1", + "example-output-2", + "example-output-record", + "example-parse-prometheus-to-influxdb", + "example-partition-templates", + "example-physical-plan-for-a-select---order-by-query", + "example-pivoted-data", + "example-point-region", + "example-polygonal-region", + "example-predicate-function", + "example-prometheus-query-results", + "example-prometheus-query-results-1", + "example-python-plugin-for-wal-rows", + "example-queries", + "example-query", + "example-query-passing-json-parameters", + "example-query-passing-json-parameters-1", + "example-query-passing-url-encoded-parameters", + "example-query-passing-url-encoded-parameters-1", + "example-query-request-with-jwt-authentication", + "example-query-show-tables-on-the-servers-database", + "example-query-the-cpu-table-limiting-to-10-rows", + "example-query-to-count-machine-state", + "example-query-using-flight-sql", + "example-query-using-influxql", + "example-query-using-sql", + "example-query-with-statecount", + "example-query-with-stateduration", + "example-read-all-data-from-the-stream-and-return-a-pandas-dataframe", + "example-records", + "example-reduce-function", + "example-relative-time-ranges", + "example-results", + "example-results-1", + "example-results-2", + "example-results-3", + "example-retention-period-values", + "example-retrieve-the-result-schema-and-no-data", + "example-scraper-configuration", + "example-show-columns-output", + "example-show-tables-output", + "example-show-task-output", + "example-show-template-output", + "example-sorting-system-uptime", + "example-specify-the-root-certificate-path", + "example-sql-queries", + "example-square-function", + "example-start-the-influxdb-3-enterprise-server-with-your-license-email", + 
"example-start-the-influxdb-3-enterprise-server-with-your-license-file", + "example-status-message-template", + "example-string-literals", + "example-summary-metric-in-prometheus-data", + "example-table", + "example-taskflux", + "example-telegraf-configuration", + "example-telegraf-environment-variables", + "example-telegrafconf", + "example-template-variable-query-parameter", + "example-test-create-and-run-a-plugin", + "example-testcase-extension-to-prevent-feature-regression", + "example-tickscript", + "example-tickscript---defining-post-options-inline", + "example-tickscript---using-a-pre-configured-endpoint", + "example-tickscript-alert-for-disk-usage", + "example-timestamp-literals", + "example-tls-error", + "example-udf-configuration", + "example-unsigned-integer-literals", + "example-usage", + "example-use-batch-options-when-writing-file-data", + "example-using-influxdb-metrics-endpoint", + "example-values", + "example-view-the-schema-for-all-batches-in-the-stream", + "example-with-cli-options", + "example-with-environment-variables", + "example-write-a-line-protocol-string", + "example-write-and-query-data", + "example-write-command", + "example-write-data-to-a-bucket", + "example-write-data-using-a-dict", + "example-write-data-using-points", + "example-write-data-using-the-apiv3-http-api", + "example-write-data-using-the-influxdb3-cli", + "example-write-pivoted-data-to-influxdb", + "example_table", + "examplecsv", + "examples", + "examples-1", + "examples-10", + "examples-11", + "examples-12", + "examples-13", + "examples-14", + "examples-15", + "examples-16", + "examples-17", + "examples-18", + "examples-19", + "examples-2", + "examples-20", + "examples-21", + "examples-22", + "examples-23", + "examples-24", + "examples-25", + "examples-26", + "examples-27", + "examples-28", + "examples-29", + "examples-3", + "examples-30", + "examples-31", + "examples-32", + "examples-33", + "examples-34", + "examples-35", + "examples-36", + "examples-37", + 
"examples-38", + "examples-39", + "examples-4", + "examples-40", + "examples-41", + "examples-42", + "examples-43", + "examples-44", + "examples-45", + "examples-46", + "examples-47", + "examples-48", + "examples-49", + "examples-5", + "examples-50", + "examples-51", + "examples-52", + "examples-53", + "examples-54", + "examples-55", + "examples-56", + "examples-57", + "examples-58", + "examples-59", + "examples-6", + "examples-60", + "examples-61", + "examples-62", + "examples-63", + "examples-64", + "examples-65", + "examples-66", + "examples-67", + "examples-68", + "examples-69", + "examples-7", + "examples-8", + "examples-9", + "examples-how-to-apply-a-template-or-stack", + "examples-of-advanced-syntax", + "examples-of-basic-syntax", + "examples-of-call-expressions", + "examples-of-date-and-time-literals", + "examples-of-duration-literals", + "examples-of-duration-literals-1", + "examples-of-duration-types", + "examples-of-endpoints-with-authentication-enabled_", + "examples-of-expression-statements", + "examples-of-floating-point-literals", + "examples-of-function-literals", + "examples-of-geotemporal-line-protocol", + "examples-of-identifiers", + "examples-of-integer-literals", + "examples-of-log-output", + "examples-of-pipe-expressions", + "examples-of-regular-expression-literals", + "examples-of-short-notation-in-call-expressions", + "examples-of-special-characters-in-line-protocol", + "examples-of-string-literals", + "examples-of-variable-assignment", + "exceeded-limited_query-plan-limit", + "exceeded-limited_query_time-plan-limit", + "exceeded-limited_write-plan-limit", + "exceeded-rate-limits", + "exceptions", + "exclude", + "exec", + "exec-mem-pool-bytes", + "execd", + "execute-a-flux-query", + "execute-a-query", + "execute-a-task", + "execute-an-external-program-from-a-defined-handler", + "execute-an-external-program-from-a-tickscript", + "execute-an-influxql-command-and-quit-with--execute", + "execute-an-influxql-query", + "execute-an-sql-query", + 
"execute-flux-queries", + "execute-influxql-queries", + "execute-influxql-queries-from-a-file", + "execute-multiple-queries", + "execute-parameterized-influxql-queries", + "execute-parameterized-sql-queries", + "execute-queries", + "execute-query-example", + "execute-query-syntax", + "execute-the-query", + "executing-active", + "executing-duration-seconds", + "execution_time", + "executionplan-nodes", + "existence", + "existing-permissions-are-replaced-on-update", + "existing-users-are-admin-by-default", + "exists", + "exists-operator-with-a-subquery-in-the-where-clause", + "exit", + "exit-the-repl", + "exp", + "expandsourcesreq", + "expect-package", + "experimental-input-data", + "experimental-license-enforcement", + "experimental-output-line-protocol", + "experimental-package", + "experimental-packages-are-subject-to-change", + "expiration-behavior", + "explain", + "explain-analyze", + "explain-analyze-verbose", + "explain-report", + "explain-report-for-a-leading-edge-data-query", + "explain-report-for-the-leading-edge-data-query", + "explain-verbose", + "explanation-of-the-output", + "explicit-block", + "explicitly-map-column-names-to-new-column-names", + "explore-cell-data", + "explore-data-in-chronograf", + "explore-data-with-flux", + "explore-data-with-flux-and-the-data-explorer", + "explore-data-with-influxql", + "explore-metrics-with-influxdb", + "explore-sample-data", + "explore-your-schema", + "explore-your-schema-with-influxql", + "explore-your-schema-with-sql", + "exponential", + "exponential-moving-average-rules", + "exponential_moving_average", + "export", + "export-a-dashboard", + "export-a-database-and-all-its-retention-policies-to-a-file", + "export-a-specific-database-and-retention-policy-to-a-file", + "export-a-stack", + "export-a-stack-as-a-template", + "export-a-task", + "export-a-task-in-the-influxdb-ui", + "export-a-template", + "export-a-variable", + "export-all-bucket-or-dashboard-resources-with-label-foo", + 
"export-all-bucket-resources-and-with-label-foo", + "export-all-bucket-resources-as-a-template", + "export-all-data-from-_non-default_-data-and-wal-directories", + "export-all-data-in-a-bucket-as-line-protocol", + "export-all-data-in-a-database-and-retention-policy-to-a-file", + "export-all-data-to-a-compressed-file", + "export-all-data-to-a-file", + "export-all-databases-and-compress-the-output", + "export-all-resources", + "export-all-resources-associated-with-a-stack", + "export-all-resources-associated-with-label-foo", + "export-all-resources-in-an-organization-as-a-template", + "export-all-resources-to-a-template", + "export-an-updated-template", + "export-and-write-data", + "export-and-write-data-in-a-single-command", + "export-buckets-by-id", + "export-buckets-labels-and-dashboards-by-id", + "export-buckets-labels-and-dashboards-by-name", + "export-continuous-queries-before-upgrading", + "export-dashboards", + "export-data", + "export-data-and-other-artifacts", + "export-data-from-a-specific-database-and-retention-policy", + "export-data-in-measurements-as-line-protocol", + "export-data-in-specified-time-range-as-line-protocol", + "export-data-to-csv", + "export-data-within-time-bounds-to-a-file", + "export-only-dashboards-and-buckets-with-specific-labels", + "export-resources-both-associated-and-not-associated-with-a-stack", + "export-resources-filtered-by-labelname-or-resourcekind", + "export-resources-to-a-template", + "export-specific-resources", + "export-specific-resources-by-id", + "export-specific-resources-by-name", + "export-specific-resources-to-a-template", + "export-tasks", + "export-tsi-index-data-as-sql", + "exporting-and-importing-data", + "exporting-data", + "exposed-ports", + "expression", + "expression-statements", + "expressions", + "extend-a-record", + "extended-annotated-csv", + "extending-the-example", + "extending-tickscripts", + "extension-capabilities", + "external", + "external-1", + "external-2", + "external-3", + 
"external-plugin", + "extra-tips", + "extract", + "extract-a-column", + "extract-a-column-as-an-array", + "extract-a-column-from-the-table", + "extract-a-plugin-using-the-shim-wrapper", + "extract-a-row", + "extract-a-row-as-a-record", + "extract-a-row-from-the-table", + "extract-a-scalar-field-value", + "extract-a-table", + "extract-a-table-from-a-stream-of-tables", + "extract-an-array-of-column-values-and-display-them-in-a-table", + "extract-an-array-of-column-values-from-a-table", + "extract-scalar-row-data", + "extract-scalar-values", + "extract-the-correct-table", + "extract-the-first-row-from-a-table-as-a-record", + "extract-timestamps-from-a-time-column-using-rfc3339-format", + "f", + "f1", + "f2", + "factor", + "factorial", + "fail2ban", + "failed-to-add-to-replication-queue-metrics", + "failed-to-create-measurement", + "feature-flag-noprometheus", + "feature-flags", + "features", + "features-1", + "features-10", + "features-100", + "features-101", + "features-102", + "features-103", + "features-104", + "features-105", + "features-106", + "features-107", + "features-108", + "features-109", + "features-11", + "features-110", + "features-111", + "features-112", + "features-113", + "features-114", + "features-115", + "features-116", + "features-117", + "features-118", + "features-119", + "features-12", + "features-120", + "features-121", + "features-122", + "features-123", + "features-124", + "features-125", + "features-126", + "features-127", + "features-128", + "features-129", + "features-13", + "features-130", + "features-131", + "features-132", + "features-133", + "features-134", + "features-135", + "features-136", + "features-137", + "features-138", + "features-139", + "features-14", + "features-140", + "features-141", + "features-142", + "features-143", + "features-144", + "features-145", + "features-146", + "features-147", + "features-148", + "features-149", + "features-15", + "features-150", + "features-151", + "features-152", + "features-153", + 
"features-154", + "features-155", + "features-156", + "features-157", + "features-158", + "features-159", + "features-16", + "features-160", + "features-161", + "features-162", + "features-163", + "features-164", + "features-165", + "features-166", + "features-167", + "features-168", + "features-169", + "features-17", + "features-170", + "features-171", + "features-172", + "features-173", + "features-174", + "features-175", + "features-176", + "features-177", + "features-178", + "features-179", + "features-18", + "features-180", + "features-181", + "features-182", + "features-183", + "features-19", + "features-2", + "features-20", + "features-21", + "features-22", + "features-23", + "features-24", + "features-25", + "features-26", + "features-27", + "features-28", + "features-29", + "features-3", + "features-30", + "features-31", + "features-32", + "features-33", + "features-34", + "features-35", + "features-36", + "features-37", + "features-38", + "features-39", + "features-4", + "features-40", + "features-41", + "features-42", + "features-43", + "features-44", + "features-45", + "features-46", + "features-47", + "features-48", + "features-49", + "features-5", + "features-50", + "features-51", + "features-52", + "features-53", + "features-54", + "features-55", + "features-56", + "features-57", + "features-58", + "features-59", + "features-6", + "features-60", + "features-61", + "features-62", + "features-63", + "features-64", + "features-65", + "features-66", + "features-67", + "features-68", + "features-69", + "features-7", + "features-70", + "features-71", + "features-72", + "features-73", + "features-74", + "features-75", + "features-76", + "features-77", + "features-78", + "features-79", + "features-8", + "features-80", + "features-81", + "features-82", + "features-83", + "features-84", + "features-85", + "features-86", + "features-87", + "features-88", + "features-89", + "features-9", + "features-90", + "features-91", + "features-92", + "features-93", + 
"features-94", + "features-95", + "features-96", + "features-97", + "features-98", + "features-99", + "february-2022", + "fga-does-not-apply-to-flux", + "fibaro", + "field", + "field-key", + "field-key-variable-use-cases", + "field-keys", + "field-set", + "field-specification", + "field-templates", + "field-type-discrepancies", + "field-types", + "field-value", + "field_name_expansion-optional", + "field_selection-field_name-field_value-optional", + "fielddimensionsreq", + "fieldfn", + "fields", + "fields-and-tags-with-spaces", + "fields-arent-indexed", + "fields-sub-section", + "fields_int-sub-section", + "fieldscreate", + "figure-1-explain-report", + "file", + "file-block", + "file-bug-reports", + "file-index-settings", + "file-organization", + "file-source", + "file-system-layout", + "file-system-mounts", + "file_groups", + "filecount", + "filepath", + "fileset", + "filestat", + "filesystem-object-store", + "fill", + "fill-empty-tables", + "fill-empty-windows-of-time", + "fill-examples", + "fill-gaps-in-data", + "fill-null-values-with-a-specified-non-null-value", + "fill-null-values-with-the-previous-non-null-value", + "fill-with-a-specified-value", + "fill-with-linear-interpolation-if-there-are-not-two-values-to-interpolate-between", + "fill-with-no-data-in-the-queried-time-range", + "fill-with-previous-if-no-previous-value-exists", + "fill-with-the-previous-value", + "fill_option", + "fillfill_option", + "fillfill_option-1", + "filllinear-when-the-previous-or-following-result-falls-outside-the-querys-time-range", + "filllinear-when-the-previous-or-following-result-is-outside-the-queried-time-range", + "fillperiod", + "filter", + "filter-aggregate-values-based-on-a-threshold", + "filter-an-array", + "filter-array-of-integers", + "filter-based-on-influxdb-measurement-field-and-tag", + "filter-based-on-string-value-length", + "filter-based-on-the-presence-of-a-prefix-in-a-column-value", + "filter-based-on-the-presence-of-a-specific-characters-in-a-column-value", 
+ "filter-based-on-the-presence-of-a-substring-in-a-column-value", + "filter-based-on-the-presence-of-a-suffix-in-a-column-value", + "filter-by-business-hours", + "filter-by-columns-with-digits-as-values", + "filter-by-columns-with-single-letter-lowercase-values", + "filter-by-columns-with-single-letter-uppercase-values", + "filter-by-date", + "filter-by-fields", + "filter-by-fields-and-tags", + "filter-by-numeric-values", + "filter-by-partition-id", + "filter-by-partition-key", + "filter-by-rows-that-contain-matches-to-a-regular-expression", + "filter-by-stack-id", + "filter-by-stack-name", + "filter-by-table-name", + "filter-by-value-type", + "filter-data-based-on-field-values", + "filter-data-based-on-specific-tag-and-field-values", + "filter-data-from-a-batch", + "filter-data-to-a-specified-region", + "filter-data-using-the-or-operator", + "filter-data-within-a-specific-time-period", + "filter-dbrp-mappings-by-bucket-id", + "filter-dbrp-mappings-by-database", + "filter-definitions", + "filter-for-admin-tokens", + "filter-geo-temporal-data-by-region", + "filter-meta-queries-by-time", + "filter-meta-query-results-using-template-variables", + "filter-null-values", + "filter-on-a-set-of-specific-fields", + "filter-out-rows-with-null-values", + "filter-stacks", + "filter-stacks-by-id", + "filter-stacks-by-name", + "filter-tasks-using-the-cli", + "filter-template-variables-with-other-template-variables", + "filter-templates", + "filter-the-list-of-tasks", + "filter-the-output-for-resource-tokens", + "filter-the-token-list", + "filter-values-based-on-thresholds", + "filterexec", + "filtering-examples", + "filters", + "final", + "final-output-record", + "final-output-record-and-table", + "final-processing", + "final-verification", + "find-and-count-unique-values-in-a-column", + "find-data-type-conflicts-and-schema-rejections", + "find-how-long-a-state-persists", + "find-out-instructions", + "find-out-toggle", + "find-parsing-errors", + 
"find-support-for-influxdb-3-core", + "find-support-for-influxdb-3-enterprise", + "find-supported-influx-cli-commands", + "find-the-average-of-values-closest-to-the-median", + "find-the-average-of-values-closest-to-the-quantile", + "find-the-first-regular-expression-match-in-each-row", + "find-the-index-of-the-first-occurrence-of-a-substring", + "find-the-index-of-the-first-occurrence-of-characters-from-a-string", + "find-the-index-of-the-last-occurrence-of-a-substring", + "find-the-index-of-the-last-occurrence-of-characters-from-a-string", + "find-the-point-with-the-median-value", + "find-the-point-with-the-quantile-value", + "find-the-value-representing-the-99th-percentile", + "find-the-value-that-represents-the-median", + "find-unique-values", + "find-user-ids-with-keycloak", + "find-user-ids-with-microsoft-entra-id", + "find_in_set", + "finding-all-starting-operation-log-entries", + "finding-all-trace-log-entries-for-an-influxdb-operation", + "fine-grained-authorization", + "fireboard", + "firehose", + "first", + "first-and-last", + "first-whitespace", + "first_value", + "fitted-model", + "fix-duplicate-tag-and-field-names", + "fix-measurements-with-more-than-200-total-columns", + "fix-measurements-with-more-than-250-total-columns", + "fixed-inconsistency-with-json-data-from-alerts", + "fixes", + "fixes-1", + "fixes-2", + "fixes-3", + "fixes-4", + "fixes-5", + "fixes-6", + "fixes-7", + "fixing-entropy-in-active-shards", + "flag", + "flag-input-types", + "flag-patterns-and-conventions", + "flags", + "flags-1", + "flags-2", + "flags-3", + "flags-4", + "flags-5", + "flags-6", + "flapping", + "flapping-dashboards", + "flatten", + "flight-queries-dont-use-dbrp-mappings", + "flight-sql-client", + "flight-sql-metadata-commands", + "flight-sql-query-execution-commands", + "flightsqlclientdo_get", + "flightsqlclientexecute", + "flightunavailableerror-could-not-get-default-pem-root-certs", + "float", + "float-field-value-examples", + "float-syntax", + 
"floating-point-literals", + "floats", + "floor", + "fluentd", + "flush-interval", + "flush-jitter", + "flushinterval", + "flux", + "flux---example-query-request", + "flux-1", + "flux-2", + "flux-3", + "flux-4", + "flux-5", + "flux-6", + "flux-7", + "flux-8", + "flux-advancement-highlights", + "flux-advancement-highlights-1", + "flux-builder-improvements", + "flux-controller", + "flux-data-model", + "flux-data-source-restrictions", + "flux-design-principles", + "flux-documentation", + "flux-enabled", + "flux-enabled--false", + "flux-example-to-query-schemas", + "flux-fixes", + "flux-functions", + "flux-group-keys-demo", + "flux-influxdb-versions", + "flux-log-enabled", + "flux-lsp-no-longer-maintained", + "flux-math-package", + "flux-must-have-access-to-the-file-system", + "flux-overview", + "flux-query", + "flux-query-basics", + "flux-query-guides", + "flux-query-management-settings", + "flux-query-results", + "flux-query-variable-use-cases", + "flux-script", + "flux-scripts-in-detail", + "flux-table-ops", + "flux-task-create", + "flux-task-list", + "flux-task-log-list", + "flux-task-retry-failed", + "flux-task-run-list", + "flux-task-run-retry", + "flux-task-update", + "flux-tasks", + "flux-to-bigquery-data-type-conversion", + "flux-to-cockroachdb-data-type-conversion", + "flux-to-mariadb-data-type-conversion", + "flux-to-mysql-data-type-conversion", + "flux-to-percona-data-type-conversion", + "flux-to-postgresql-data-type-conversion", + "flux-to-sap-hana-data-type-conversion", + "flux-to-snowflake-data-type-conversion", + "flux-to-sql-server-data-type-conversion", + "flux-to-sqlite-data-type-conversion", + "flux-to-vertica-data-type-conversion", + "flux-tools", + "flux-update", + "flux-updates", + "flux-updates-1", + "flux-updates-2", + "flux-updates-3", + "flux-v065-ready-for-production-use", + "flux-v07-technical-preview", + "flux-version-in-the-flux-repl", + "flux-vs-code-extension-no-longer-available", + "flux-vs-code-extension-no-longer-maintained", + 
"flux-vs-influxql", + "fluxqueryreq", + "fluxqueryreqdurationns", + "fn", + "footnote", + "for-alignment-with-the-influxdb-v1x-prometheus-remote-write-spec", + "for-automation-use-cases-only", + "force-a-script-to-exit-with-an-error-message", + "force-delete-an-active-trigger", + "force-snapshot-mem-threshold", + "forcefully-remove-an-unresponsive-meta-node-from-the-local-meta-node", + "forcefully-remove-an-unresponsive-meta-node-through-a-remote-meta-node", + "form-url-encoded", + "format", + "format--auto", + "format-the-output", + "format-with-americanew_york-timezone", + "format-with-utc-timezone", + "format-yyyy-mm-dd-hhmmss", + "formatting", + "forward-compatibility", + "frac", + "frame-boundaries", + "frame-clause", + "frame-units", + "free-plan", + "frees", + "frequently-asked-questions", + "frequently-overlooked-requirements", + "fritzbox", + "from", + "from-clause", + "from-clause-subqueries", + "from-does-not-require-a-package-import", + "from-subquery-examples", + "from-subquery-syntax", + "from_unixtime", + "fromlevel", + "fromnow", + "full-alerting-script", + "full-downsampling-script", + "full-example-flux-task-script", + "full-example-query", + "full-example-task-with-invokable-script", + "full-outer-join", + "full-outer-join-example", + "full-outer-join-result", + "full-outer-join-results", + "full-query", + "full-screen-mode", + "full-tickscript-example", + "fullscreen-code-placeholder", + "fully-qualified-reference", + "function", + "function-behavior", + "function-block", + "function-categories", + "function-clientquery", + "function-clientquerywithoptions", + "function-definition", + "function-definition-syntax", + "function-flight_client_optionskwargs", + "function-literals", + "function-new", + "function-operators", + "function-requirements", + "function-support", + "function-syntax", + "function-type-signature", + "function-type-signature-structure", + "function-types", + "function-with-parameters", + "function-without-parameters", + 
"function-write_client_optionskwargs", + "functionname", + "functionoperations", + "functionparameters", + "functions", + "functions-pane", + "further-information", + "future-of-flux-new", + "g", + "garbage-collector", + "garbage-collector-scaling-strategies", + "gather-debug-information", + "gather-system-information", + "gauge", + "gauge-behavior", + "gauge-controls", + "gauge-example", + "gauge-examples", + "gc-garbage-collection-duration-seconds", + "gc_sys_bytes", + "gcc_pu_fraction", + "gcd", + "gen-package", + "gen1-duration", + "general", + "general-admin-and-non-admin-user-management", + "general-aggregate-functions", + "general-authentication-flags", + "general-authentication-options", + "general-improvements", + "general-information", + "general-options", + "general-options-and-remarks", + "general-syntax-for-functions", + "general-updates", + "general-updates-1", + "general-updates-2", + "generate-a-configuration-file", + "generate-a-configuration-with-only-specific-plugins", + "generate-a-histogram-with-linear-bins", + "generate-a-histogram-with-logarithmic-bins", + "generate-a-jwt-token-that-expires-in-5-minutes", + "generate-a-list-of-exponentially-increasing-values", + "generate-a-list-of-linearly-increasing-values", + "generate-a-new-operator-token", + "generate-a-pprof-like-profile-from-trace", + "generate-a-telegraf-configuration-file", + "generate-a-token-secret", + "generate-influxdb-check-data", + "generate-package", + "generate-s2-cell-id-tokens", + "generate-s2-cell-id-tokens-language-specific-libraries", + "generate-s2-cell-id-tokens-with-flux", + "generate-s2-cell-id-tokens-with-telegraf", + "generate-sample-data", + "generate-self-signed-certificates", + "generate-your-jwt-token", + "generating-a-histogram-with-linear-bins", + "generating-a-histogram-with-logarithmic-bins", + "generating-test-data", + "generic-oauth-20-authentication-flags", + "generic-oauth-20-authentication-options", + "geo-package", + "geo-s2-cells-non-strict", + 
"geo-s2-cells-strict", + "geo-schema-requirements", + "geo-temporal-data", + "geoip", + "geometry", + "geotemporal", + "get-a-list-of-leaders", + "get-a-task", + "get-a-telegram-api-access-token", + "get-a-template", + "get-a-topic-handler", + "get-all-runtime-profiles", + "get-cluster-information", + "get-key-values-from-explicitly-defined-columns", + "get-more-accurate-base-e-exponentials-for-values-near-zero", + "get-rate-limits-for-a-different-influxdb-cloud-organization", + "get-rate-limits-for-your-influxdb-cloud-organization", + "get-request-format", + "get-started", + "get-started-home-sensor-data", + "get-started-using-go-to-query-influxdb", + "get-started-using-python-to-query-influxdb", + "get-started-using-the-java-flight-sql-client-to-query-influxdb", + "get-started-with-chronograf", + "get-started-with-examples", + "get-started-with-flux", + "get-started-with-flux-and-influxdb", + "get-started-with-influxdb-oss", + "get-started-with-kapacitor", + "get-started-with-tasks", + "get-started-with-the-example-app", + "get-started-with-the-shared-api", + "get-started-working-with-data", + "get-status", + "get-the-full-downsampling-code-files", + "get-the-length-of-an-array", + "get-the-status-of-an-influxdb-oss-instance", + "get-your-hipchat-api-access-token", + "get-your-telegram-chat-id", + "getting-data-in-the-database", + "getting-slightly-different-results", + "getting-started-with-influxdb-enterprise", + "getting-started-with-tick-and-docker-compose", + "gis-geometry-definitions", + "github", + "github-organizations-optional", + "github-specific-oauth-20-authentication-flags", + "github-specific-oauth-20-authentication-options", + "given-the-following-input-data", + "global", + "global-admin", + "global-flags", + "global-flags-1", + "global-flags-2", + "global-limits", + "global-options", + "global-settings", + "global-tags", + "glossary", + "gnmi", + "go", + "go-arrow-flight-client", + "go-flight-client", + "go-regex-testers", + 
"go-regular-expression-syntax", + "go-runtime-statistics", + "go-version", + "goal", + "going-further", + "gomaxprocs-environment-variable", + "good-measurements-schema", + "good-tags-schema", + "google-cloud-bigtable", + "google-cloud-platform-gcp", + "google-cloud-service", + "google-service-account", + "google-specific-oauth-20-authentication-flags", + "google-specific-oauth-20-authentication-options", + "google_cloud_storage", + "gopher", + "goroutines", + "gossip-frequency", + "got", + "gotcha---single-versus-double-quotes", + "gotchas", + "gpu-monitoring-example", + "grafana", + "grafana-103", + "grafana-dashboards-by-default", + "grammar", + "grant", + "grant-access-to-specific-series-in-a-measurement", + "grant-administrative-privileges-to-an-existing-user", + "grant-all-permission", + "grant-database-level-permissions-to-roles", + "grant-database-level-permissions-to-users", + "grant-kapacitor-permissions-to-the-new-role", + "grant-kapacitor-permissions-to-the-new-user", + "grant-measurement-level-permissions-to-users", + "grant-network-access", + "grant-permissions-by-database", + "grant-permissions-by-measurement-in-a-database", + "grant-permissions-by-series-in-a-database", + "grant-permissions-to-a-user-for-a-specific-database", + "grant-permissions-to-a-user-for-all-databases", + "grant-read-permission", + "grant-read-write-or-all-database-privileges-to-an-existing-user", + "grant-series-level-permissions-in-a-measurement-to-roles", + "grant-series-level-permissions-in-a-measurement-to-users", + "grant-series-level-permissions-to-roles", + "grant-series-level-permissions-to-users", + "grant-write-permission", + "graph", + "graph--single-stat", + "graph--single-stat-behavior", + "graph--single-stat-controls", + "graph--single-stat-examples", + "graph-behavior", + "graph-controls", + "graph-examples", + "graph-with-linear-interpolation-and-static-legend", + "graph-with-smooth-interpolation-and-hover-legend", + 
"graph-with-step-interpolation-and-no-visible-legend", + "graphing-the-percentage-difference-between-the-measurements", + "graphite", + "graphite-protocol-support-in-influxdb", + "graphite-settings", + "graphite_tag_support", + "graphs-and-dashboards", + "graylog", + "greater-than", + "greater-than-or-equal", + "grok", + "groundwork", + "group", + "group-and-aggregate-data", + "group-and-aggregate-query-results-into-1-hour-intervals-and-offset-time-boundaries-by--15-minutes", + "group-and-aggregate-query-results-into-1-hour-intervals-and-offset-time-boundaries-by-15-minutes", + "group-and-aggregate-query-results-into-1-hour-windows", + "group-and-aggregate-query-results-into-1-week-intervals-by-tag", + "group-and-aggregate-query-results-into-30-minute-intervals-and-fill-gaps-using-linear-interpolation", + "group-and-aggregate-query-results-into-30-minute-intervals-and-fill-gaps-with-0", + "group-and-aggregate-query-results-into-30-minute-intervals-and-fill-gaps-with-previous-values", + "group-by-aliases", + "group-by-any-column", + "group-by-clause", + "group-by-clause-behaviors", + "group-by-columns", + "group-by-cpu", + "group-by-cpu-and-time", + "group-by-cpu-and-time-output-tables", + "group-by-cpu-output-tables", + "group-by-everything-except-time", + "group-by-example", + "group-by-host-and-region", + "group-by-result-columns", + "group-by-specific-columns", + "group-by-tags", + "group-by-tags-examples", + "group-by-time", + "group-by-time-and-fill-gaps", + "group-by-time-and-fill-gaps-1", + "group-by-time-examples", + "group-by-time-intervals", + "group-by-time-intervals-and-fill", + "group-by-time-output-tables", + "group-by-time-with-offset", + "group-data", + "group-data-by-a-single-tag", + "group-data-by-all-tags", + "group-data-by-area", + "group-data-by-more-than-one-tag", + "group-data-by-specific-columns", + "group-data-by-tag-keys-that-match-a-regular-expression", + "group-data-by-tag-values", + "group-data-by-track-or-route", + 
"group-data-into-15-minute-time-intervals-by-tag", + "group-example", + "group-examples", + "group-function", + "group-geo-temporal-data", + "group-geotemporal-data-by-geographic-area", + "group-geotemporal-data-into-tracks", + "group-geotemporal-data-into-tracks-and-sort-by-specified-columns", + "group-key", + "group-key-demo", + "group-keys", + "group-query-results-by-a-single-tag", + "group-query-results-by-all-tags", + "group-query-results-by-more-than-one-tag", + "group-query-results-by-tags-that-start-with-l", + "group-query-results-into-12-minute-intervals", + "group-query-results-into-12-minute-intervals-and-by-a-tag-key", + "group-query-results-into-12-minute-intervals-and-shift-the-preset-time-boundaries-back", + "group-query-results-into-12-minute-intervals-and-shift-the-preset-time-boundaries-forward", + "group-query-results-into-12-minutes-intervals-and-by-a-tag-key", + "group-query-results-into-18-minute-intervals-and-shift-the-preset-time-boundaries-forward", + "groupby", + "groupby-with-aggregated-data", + "groupbymeasurement", + "groupcolumns", + "grouped-tables", + "grouping", + "grouping-by-tag-and-no-time-range-returns-unexpected-timestamps", + "grouping-modes", + "groupname", + "groupnametag", + "groups", + "grpc-requests-not-ok", + "grpc-requests-ok", + "guidelines-for-in-memory-caching", + "guidelines-per-cluster", + "guidelines-per-node", + "gzip", + "h", + "h2-this-is-a-header2", + "h2-this-is-a-header2-1", + "h2-this-is-a-header2-2", + "h2-this-is-a-header2-3", + "h2-this-is-a-header2-4", + "h2-this-is-a-header2-5", + "h3-this-is-a-header3", + "h3-this-is-a-header3-1", + "h3-this-is-a-header3-2", + "h3-this-is-a-header3-3", + "h3-this-is-a-header3-4", + "h3-this-is-a-header3-5", + "h4-this-is-a-header4", + "h4-this-is-a-header4-1", + "h4-this-is-a-header4-2", + "h4-this-is-a-header4-3", + "h4-this-is-a-header4-4", + "h4-this-is-a-header4-5", + "h5-this-is-a-header5", + "h5-this-is-a-header5-1", + "h5-this-is-a-header5-2", + 
"h5-this-is-a-header5-3", + "h5-this-is-a-header5-4", + "h5-this-is-a-header5-5", + "h6-this-is-a-header6", + "h6-this-is-a-header6-1", + "h6-this-is-a-header6-2", + "h6-this-is-a-header6-3", + "h6-this-is-a-header6-4", + "h6-this-is-a-header6-5", + "handle-duplicate-data-points", + "handle-kapacitor-alerts-during-scheduled-downtime", + "handle-late-arriving-data", + "handle-read-and-write-queries-first", + "handle-requests-for-device-information", + "handle-write-and-delete-responses", + "handle-write-responses", + "handler-file-options", + "handlers", + "handling-null-fill-values-in-outer-joins", + "handling-time-intervals-with-no-data", + "haproxy", + "hard-delete", + "hardening-enabled", + "hardware-requirements-for-influxdb", + "hardware-separation", + "hardware-sizing-guidelines", + "hash-package", + "hashing-functions", + "haskell", + "hatch", + "having-clause", + "having-clause-subqueries", + "having-clause-with-correlated-subquery", + "having-clause-with-non-scalar-subquery", + "having-clause-with-scalar-subquery", + "having-subquery-examples", + "having-subquery-syntax", + "hddtemp", + "header", + "header-row", + "headers", + "headers-1", + "headers-2", + "headers-3", + "headerscsv", + "heading", + "heading-1", + "health", + "health-endpoint-responses", + "health-http-endpoint", + "healthy-upstream-ingesters-per-router", + "heap_alloc_bytes", + "heap_idle_bytes", + "heap_in_use_bytes", + "heap_objects", + "heap_released_bytes", + "heap_sys_bytes", + "heapalloc", + "heapidle", + "heapinuse", + "heapobjects", + "heapreleased", + "heapsys", + "heartbeat-timeout", + "heatmap", + "heatmap-behavior", + "heatmap-controls", + "heatmap-examples", + "helm-air-gapped-deployment", + "helm-standard-deployment-with-internet-access", + "help", + "help-bar", + "help-bar-updates", + "helper-functions", + "helpful", + "helpful-links", + "helpful-links-1", + "helpful-links-2", + "heres-a-codeblock-with-a-title", + "heres-a-title-for-this-codeblock", + 
"heroku-organizations-optional", + "heroku-specific-oauth-20-authentication-flags", + "heroku-specific-oauth-20-authentication-options", + "hex-package", + "hex_encoding-optional", + "hh-enterprise-only", + "hh_database-enterprise-only", + "hh_node-enterprise-only", + "hh_processor-enterprise-only", + "high-availability", + "high-availability-with-a-dedicated-compactor", + "high-availability-with-read-replicas-and-a-dedicated-compactor", + "high-cardinality-key-columns", + "high-cardinality-limits", + "high-disk-usage", + "highlights", + "highlights-1", + "highlights-10", + "highlights-11", + "highlights-12", + "highlights-13", + "highlights-14", + "highlights-15", + "highlights-16", + "highlights-17", + "highlights-18", + "highlights-19", + "highlights-2", + "highlights-3", + "highlights-4", + "highlights-5", + "highlights-6", + "highlights-7", + "highlights-8", + "highlights-9", + "hinted-handoff", + "hinted-handoff-1", + "hinted-handoff-directory", + "hinted-handoff-improvements", + "hinted-handoff-settings", + "hipchat", + "hipchat-api-access-token", + "hipchat-setup", + "histogram", + "histogram-behavior", + "histogram-controls", + "histogram-examples", + "histogram-function", + "histogram-settings", + "histogram-visualization", + "histogram-visualization-data-structure", + "histograms", + "history", + "hold_period", + "holt_winters", + "holt_winters-and-receiving-fewer-than-n-points", + "holtwinters", + "holtwinterswithfit", + "home-sensor-actions-data", + "home-sensor-data", + "home-sensor-data-line-protocol", + "homebrew-default-directories", + "horizontal-scaling", + "horizontally-scale-a-component", + "host", + "host-renaming", + "host1yml", + "hosting-influxdb-oss-on-aws", + "hostname", + "hot-shards", + "hover-legend", + "how-a-query-plan-distributes-data-for-scanning", + "how-authentication-works", + "how-can-a-non-admin-user-use-a-database-in-the-influxdb-cli", + "how-can-i-derive-a-state-from-multiple-field-values", + 
"how-can-i-identify-my-influxdb-version", + "how-can-i-identify-my-version-of-influxdb", + "how-can-i-identify-the-version-of-flux-im-using-in-influxdb", + "how-can-i-optimize-kapacitor-tasks", + "how-can-i-remove-series-from-the-index", + "how-can-i-tell-what-type-of-data-is-stored-in-a-field", + "how-data-flows-through-influxdb-3-core", + "how-data-flows-through-influxdb-3-enterprise", + "how-do-i-cancel-a-long-running-query", + "how-do-i-connect-chronograf-to-an-influxdb-enterprise-cluster", + "how-do-i-include-a-single-quote-in-a-password", + "how-do-i-increase-my-organizations-rate-limits-and-quotas", + "how-do-i-manage-payment-methods", + "how-do-i-perform-mathematical-operations-in-an-influxql-function", + "how-do-i-perform-mathematical-operations-within-a-function", + "how-do-i-query-data-across-measurements", + "how-do-i-query-data-by-a-tag-with-a-null-value", + "how-do-i-query-data-with-an-identical-tag-key-and-field-key", + "how-do-i-remove-series-from-the-index", + "how-do-i-reset-my-password", + "how-do-i-select-data-with-a-tag-that-has-no-value", + "how-do-i-structure-fields-as-columns-like-influxql", + "how-do-i-switch-between-influxdb-cloud-accounts", + "how-do-i-use-influxql-with-influxdb-cloud", + "how-do-i-use-influxql-with-influxdb-v2x", + "how-do-i-use-the-influxdb-cli-to-return-human-readable-timestamps", + "how-do-i-verify-that-kapacitor-is-receiving-data-from-influxdb", + "how-do-i-view-data-my-data-usage", + "how-do-i-write-integer-and-unsigned-integer-field-values", + "how-do-i-write-integer-field-values", + "how-do-i-write-to-a-non-default-retention-policy-with-the-influxdb-cli", + "how-do-selector-functions-work", + "how-do-you-use-influxdb-3", + "how-does-influxdb-handle-duplicate-points", + "how-does-influxdb-handle-field-type-discrepancies-across-shards", + "how-does-kapacitor-handle-high-load", + "how-does-retrying-a-task-affect-relative-time-ranges", + "how-is-the-influxdata-platform-tick-different-from-prometheus", + 
"how-it-works", + "how-join-functions-work", + "how-kapacitor-subscriptions-work", + "how-partitioning-works", + "how-prometheus-metrics-are-parsed-in-influxdb", + "how-reduce-works", + "how-should-i-run-influxdb-in-kubernetes", + "how-subscriptions-work", + "how-write-requests-work-in-the-influxdb-cloud-api", + "http", + "http-access-log", + "http-access-log-format", + "http-access-logging", + "http-api", + "http-api-statistics", + "http-bind", + "http-bind-address", + "http-endpoint-settings", + "http-endpoints-settings", + "http-headers", + "http-idle-timeout", + "http-input-plugin", + "http-package", + "http-read-header-timeout", + "http-read-timeout", + "http-request-error-rate-servers-pov-at-router", + "http-requests-example", + "http-response-codes", + "http-response-summary", + "http-section", + "http-section-1", + "http-timeout", + "http-timeout--30s", + "http-write-timeout", + "http_listener", + "http_listener_v2", + "http_proxy", + "http_response", + "httpd", + "httpheaders", + "httpout", + "httppost", + "https-certificate", + "https-certificate--etcsslinfluxdbpem", + "https-certificate-1", + "https-enabled", + "https-enabled--false", + "https-enabled-1", + "https-insecure-tls", + "https-private-key", + "https-private-key--", + "https-private-key-1", + "https_proxy", + "huebridge", + "hugepages", + "human-readable-cluster-metrics", + "human-string-functions", + "humanbytes", + "hutils", + "i", + "icinga2", + "id", + "id-1", + "ideal-use-cases-for-influxdb-stacks", + "idealcycletime", + "identifier", + "identifier-types", + "identifiers", + "identify-and-update-unoptimized-queries", + "identify-performance-requirements", + "identify-the-version-to-upgrade-to", + "identify-your-current-influxdb-clustered-package-version", + "identity", + "identity-provider-credentials", + "identity-provider-requirements", + "idfield", + "idle", + "idle-timeout", + "idlestreams", + "idp", + "ids", + "idtag", + "idx", + "if", + "if-duplicating-data-is-not-feasible", + 
"if-possible-provide-a-synthetic-dataset", + "if-the-every-interval-is-greater-than-the-group-by-time-interval", + "if-the-for-interval-is-less-than-the-execution-interval", + "ifname", + "ifnull", + "ignore-case-when-comparing-two-strings", + "ignore-columns", + "ignore-null-values-in-a-custom-aggregate-function", + "ignored", + "iis", + "immediate-write-using-the-http-api", + "implementing-the-housedb-output", + "implicit-block", + "import-a-dashboard", + "import-a-task", + "import-a-variable", + "import-and-export-chronograf-dashboards", + "import-and-generate-sample-sensor-data", + "import-dashboard-templates", + "import-data-from-a-file", + "import-declaration", + "import-monitoring-dashboards", + "import-the-sample-data-dashboard", + "import-the-sample-sensor-information", + "important-changes", + "important-changes-1", + "important-changes-10", + "important-changes-11", + "important-changes-2", + "important-changes-3", + "important-changes-4", + "important-changes-5", + "important-changes-6", + "important-changes-7", + "important-changes-8", + "important-changes-9", + "important-compaction-configuration-settings", + "important-considerations", + "important-considerations-before-you-begin", + "important-definitions", + "important-if-using-windows-specify-the-windows-certificate-path", + "important-information-about-high-write-loads", + "important-notes", + "important-notes-about-providing-user-credentials", + "important-notes-about-timestamps", + "important-things-to-know-about-the-distinct-value-cache", + "important-things-to-know-about-the-last-value-cache", + "important-things-to-note", + "important-things-to-note-about-variable-queries", + "important-to-know", + "important-to-note", + "important-update", + "importing-data", + "importing-the-client", + "importing-the-module", + "importing-the-package", + "improve-performance-of-time-bound-subqueries", + "improve-your-schema", + "in", + "in-conclusion", + "in-memory-cache", + 
"in-memory-indexing-and-the-time-structured-merge-tree-tsm", + "in-memory-indexing-option", + "in-memory-inmem-index-settings", + "in-operator-with-a-list-in-the-where-clause", + "in-operator-with-a-subquery-in-the-where-clause", + "in-progress-features", + "in-this-section", + "inaccessible-or-decommissioned-subscription-endpoints", + "inactivate-an-api-token", + "incident-response", + "include-a-from-clause", + "include-a-secret-when-installing-a-template", + "include-a-where-clause", + "include-authentication-credentials-in-the-kapacitor-url", + "include-dynamic-types-in-a-table", + "include-null-values-in-an-ad-hoc-stream-of-tables", + "include-partial-gigabytes", + "include-tag-and-field-counts-in-your-cardinality-summary", + "include-tagpass-and-tagdrop-at-the-end-of-your-plugin-definition", + "include-the-string-representation-of-a-dictionary-in-a-table", + "include-the-string-representation-of-a-record-in-a-table", + "include-the-string-representation-of-an-array-in-a-table", + "include-the-string-representation-of-bytes-in-a-table", + "include-the-token-in-http-requests", + "include-user-definable-resource-names", + "includenull", + "including-multiple-template-variables-in-the-url", + "inconsistent-data-types", + "increase", + "increasecooldown", + "increment-the-timestamp", + "incremental-and-full-backups", + "incremental-backups", + "index-expressions", + "index-file-structure", + "index-the-bounds-of-first-regular-expression-match-in-each-row", + "index-version", + "index-version--inmem", + "index-versions-and-startup-times", + "indextype", + "inequalities", + "infiniband", + "infinity", + "influx", + "influx---influxdb-command-line-interface", + "influx-arguments", + "influx-cli", + "influx-cli-moved-to-separate-repository", + "influx-cli-not-supported", + "influx-cli-release-notes", + "influx-command-line-interface-cli", + "influx-commands", + "influx-header", + "influx-inspect-disk-shard-utility", + "influx-inspect-disk-utility", + 
"influx-query-language-influxql-2x-specification", + "influx-stacks---json-output-conventions", + "influx-transpile-removed", + "influx-write-command", + "influx-write-skip-header-parsing", + "influx3-data-cli", + "influx_inspect", + "influx_inspect-buildtsi", + "influx_inspect-commands", + "influx_inspect-dumptsi", + "influx_inspect-utility", + "influxctl", + "influxctl-admin-cli", + "influxctl-cli", + "influxd", + "influxd---influxdb-daemon", + "influxd---influxdb-service", + "influxd-command", + "influxd-configuration", + "influxd-ctl--bind-tls", + "influxd-ctl-cli", + "influxd-ctl-cli-improvements", + "influxd-ctl-global-flags", + "influxd-ctl-updates", + "influxd-downgrade", + "influxd-flag", + "influxd-flag-1", + "influxd-flag-10", + "influxd-flag-11", + "influxd-flag-12", + "influxd-flag-13", + "influxd-flag-14", + "influxd-flag-15", + "influxd-flag-16", + "influxd-flag-17", + "influxd-flag-18", + "influxd-flag-19", + "influxd-flag-2", + "influxd-flag-20", + "influxd-flag-21", + "influxd-flag-22", + "influxd-flag-23", + "influxd-flag-24", + "influxd-flag-25", + "influxd-flag-26", + "influxd-flag-27", + "influxd-flag-28", + "influxd-flag-29", + "influxd-flag-3", + "influxd-flag-30", + "influxd-flag-31", + "influxd-flag-32", + "influxd-flag-33", + "influxd-flag-34", + "influxd-flag-35", + "influxd-flag-36", + "influxd-flag-37", + "influxd-flag-38", + "influxd-flag-39", + "influxd-flag-4", + "influxd-flag-40", + "influxd-flag-41", + "influxd-flag-42", + "influxd-flag-43", + "influxd-flag-44", + "influxd-flag-45", + "influxd-flag-46", + "influxd-flag-47", + "influxd-flag-48", + "influxd-flag-49", + "influxd-flag-5", + "influxd-flag-50", + "influxd-flag-51", + "influxd-flag-52", + "influxd-flag-53", + "influxd-flag-54", + "influxd-flag-55", + "influxd-flag-56", + "influxd-flag-57", + "influxd-flag-58", + "influxd-flag-59", + "influxd-flag-6", + "influxd-flag-60", + "influxd-flag-61", + "influxd-flag-62", + "influxd-flag-63", + "influxd-flag-64", + 
"influxd-flag-65", + "influxd-flag-66", + "influxd-flag-67", + "influxd-flag-68", + "influxd-flag-69", + "influxd-flag-7", + "influxd-flag-70", + "influxd-flag-71", + "influxd-flag-72", + "influxd-flag-8", + "influxd-flag-9", + "influxd-inspect", + "influxd-updates", + "influxd-upgrade", + "influxdata-1x", + "influxdata-1x-enterprise-versions", + "influxdata-1x-tick-stack", + "influxdata-blog", + "influxdata-contributor-license-agreement-cla", + "influxdata-how-to-guides", + "influxdata-package", + "influxdata-sandbox", + "influxdata-videos", + "influxdb", + "influxdb-1x-and-2x-clis-are-unique", + "influxdb-1x-http-endpoints", + "influxdb-1x2x-compatibility", + "influxdb-20", + "influxdb-20-beta-16-or-earlier", + "influxdb-2x-api-compatibility-endpoints", + "influxdb-2x-initialization-credentials", + "influxdb-2x1x-compatibility", + "influxdb-3", + "influxdb-3-and-flux", + "influxdb-3-api-client-libraries", + "influxdb-3-cloud-regions", + "influxdb-3-compatibility", + "influxdb-3-core-and-enterprise-relationship", + "influxdb-3-core-authentication-and-authorization", + "influxdb-3-core-configuration-options", + "influxdb-3-core-internals", + "influxdb-3-core-or-enterprise-v310-or-later-required", + "influxdb-3-enterprise-authentication-and-authorization", + "influxdb-3-enterprise-configuration-options", + "influxdb-3-enterprise-internals", + "influxdb-3-storage-engine-architecture", + "influxdb-_internal-1x-measurements-and-fields", + "influxdb-_internal-measurements-and-fields", + "influxdb-and-kapacitor-users-within-chronograf", + "influxdb-and-the-influx-cli-are-separate-packages", + "influxdb-anti-entropy-api", + "influxdb-api", + "influxdb-api-client-libraries", + "influxdb-api-reference", + "influxdb-aws-module-terraform", + "influxdb-catalog", + "influxdb-client-libraries", + "influxdb-cloud", + "influxdb-cloud-api-documentation", + "influxdb-cloud-data-durability", + "influxdb-cloud-dedicated-data-durability", + "influxdb-cloud-dedicated-security", + 
"influxdb-cloud-free-plan-resource-limits", + "influxdb-cloud-internals", + "influxdb-cloud-plans", + "influxdb-cloud-rate-limits", + "influxdb-cloud-regions", + "influxdb-cloud-security", + "influxdb-cloud-serverless-data-durability", + "influxdb-cloud-serverless-does-not-support-data-deletion", + "influxdb-cloud-serverless-internals", + "influxdb-cloud-serverless-regions", + "influxdb-cloud-service-health", + "influxdb-cloud-service-health-1", + "influxdb-cloud-status", + "influxdb-cloud-system-buckets", + "influxdb-cloud-tsm-service-notices", + "influxdb-cloud-ttbrs", + "influxdb-cloud-updates", + "influxdb-cloud-write-limits", + "influxdb-cloud-write-rate-limits", + "influxdb-clustered-license", + "influxdb-clustered-release-notes", + "influxdb-community-templates", + "influxdb-configuration-file", + "influxdb-configuration-files", + "influxdb-configuration-options", + "influxdb-connection-configuration", + "influxdb-connection-flags", + "influxdb-connection-options", + "influxdb-data-elements", + "influxdb-data-schema", + "influxdb-data-source", + "influxdb-data-structure", + "influxdb-design-insights-and-tradeoffs", + "influxdb-design-principles", + "influxdb-enterprise", + "influxdb-enterprise-111-release-notes", + "influxdb-enterprise-and-fips-compliance", + "influxdb-enterprise-cluster-features", + "influxdb-enterprise-cluster-guidelines", + "influxdb-enterprise-configuration-files", + "influxdb-enterprise-does-not-recognize-a-new-ldap-server", + "influxdb-enterprise-downloads", + "influxdb-enterprise-file-structure", + "influxdb-enterprise-frequently-asked-questions", + "influxdb-enterprise-user-management-using-the-ui", + "influxdb-environment-variables-influxdb_", + "influxdb-error-messages", + "influxdb-file-structure", + "influxdb-file-system-layout", + "influxdb-flight-responses", + "influxdb-frequently-asked-questions", + "influxdb-glossary", + "influxdb-gs-date-select", + "influxdb-http-api", + "influxdb-http-write-apis", + "influxdb-inch-tool", + 
"influxdb-internals", + "influxdb-is-not-crud", + "influxdb-key-concepts", + "influxdb-line-protocol", + "influxdb-line-protocol-in-practice", + "influxdb-line-protocol-reference", + "influxdb-line-protocol-syntax", + "influxdb-line-protocol-tutorial", + "influxdb-management-api-reference-documentation", + "influxdb-metrics-http-endpoint", + "influxdb-object-and-query-statistics", + "influxdb-only-deletes-cold-shards", + "influxdb-open-source-oss", + "influxdb-open-source-tools", + "influxdb-oss", + "influxdb-oss-guidelines", + "influxdb-oss-installation-requirements", + "influxdb-oss-metrics", + "influxdb-oss-networking-ports", + "influxdb-oss-permissions", + "influxdb-oss-setup", + "influxdb-oss-urls", + "influxdb-oss-user-management", + "influxdb-oss-v2-and-influx-cli-versions", + "influxdb-oss-v2-for-creating-templates", + "influxdb-oss-v2-release-notes", + "influxdb-package", + "influxdb-ports", + "influxdb-release-artifacts-affected", + "influxdb-runtime", + "influxdb-schema-design", + "influxdb-schema-design-and-data-layout", + "influxdb-schema-design-recommendations", + "influxdb-service-health", + "influxdb-service-health-1", + "influxdb-service-statistics", + "influxdb-shards-and-shard-groups", + "influxdb-stacks", + "influxdb-status-and-error-codes", + "influxdb-storage-engine", + "influxdb-storage-statistics", + "influxdb-syntaxes", + "influxdb-system-buckets", + "influxdb-task-statistics", + "influxdb-tasks", + "influxdb-tasks-1", + "influxdb-templates", + "influxdb-templates-in-cloud", + "influxdb-ui", + "influxdb-url-list", + "influxdb-user-interface", + "influxdb-user-interface-ui", + "influxdb-user-must-have-admin-privileges", + "influxdb-v1-api-compatibility", + "influxdb-v1-client-libraries", + "influxdb-v1-compatibility", + "influxdb-v1-compatibility-api-documentation", + "influxdb-v1-compatibility-api-reference-documentation", + "influxdb-v1-release-notes", + "influxdb-v1-to-influxdb-3-data-model", + "influxdb-v18", + 
"influxdb-v1x-bucket-naming-convention", + "influxdb-v2-api-client-libraries", + "influxdb-v2-api-compatibility", + "influxdb-v2-api-documentation", + "influxdb-v2-api-root-endpoint", + "influxdb-v2-compatibility", + "influxdb-v2-compatibility-api-reference-documentation", + "influxdb-v2-packaging", + "influxdb-v3-storage-architecture", + "influxdb3", + "influxdb3-ga", + "influxdb3-not-found", + "influxdb_iox_create_catalog_backup_data_snapshot_files", + "influxdb_iox_delete_using_catalog_backup_data_snapshot_files", + "influxdb_iox_gc_objectstore_cutoff", + "influxdb_iox_keep_daily_catalog_backup_file_lists", + "influxdb_iox_keep_hourly_catalog_backup_file_lists", + "influxdb_listener", + "influxdb_v2", + "influxdb_v2_listener", + "influxdbclient-instance-methods", + "influxdbclient-interface", + "influxdbclient3-instance-methods", + "influxdbclient3close", + "influxdbclient3query", + "influxdbclient3write", + "influxdbclient3write_file", + "influxdbclientquery", + "influxdbclientwritepoint", + "influxdbconf", + "influxdbexcluded-subscriptions", + "influxdbout", + "influxdbsubscriptions", + "influxdbu", + "influxdbv1", + "influxdbv2", + "influxql", + "influxql---example-query-request", + "influxql-1", + "influxql-2", + "influxql-3", + "influxql-4", + "influxql-5", + "influxql-6", + "influxql-7", + "influxql-8", + "influxql-and-flux-parity", + "influxql-and-flux-parity-1", + "influxql-continuous-queries", + "influxql-data-retention-policy-mapping-differences", + "influxql-dbrp-naming-convention", + "influxql-example-to-query-schemas", + "influxql-example-to-query-schemas-1", + "influxql-feature-support", + "influxql-functions", + "influxql-functions-by-type", + "influxql-in-tickscript", + "influxql-internals-reference", + "influxql-keywords", + "influxql-mathematical-operators", + "influxql-max-select-buckets", + "influxql-max-select-point", + "influxql-max-select-series", + "influxql-meta-query", + "influxql-meta-query-variable-use-cases", + "influxql-package", + 
"influxql-query-basics", + "influxql-query-management", + "influxql-query-results", + "influxql-reference", + "influxql-reference-documentation", + "influxql-retention-policies", + "influxql-shell-helper-commands", + "influxql-subscription-statements", + "influxql-support", + "influxql-syntax", + "influxql-tutorial", + "influxql-window-diagram", + "info", + "info-1", + "inforeset", + "infos_triggered", + "infrastructure-monitoring", + "ingest-blocked-time-24h", + "ingest-tier", + "ingest-tier-cpumem", + "ingester", + "ingester-disk-data-directory-usage", + "ingester-scaling-strategies", + "ingester-storage-volume", + "ingestor-catalog-operations", + "ingress-configuration", + "ingress-improvements", + "ingress-metric-by-login-enabled", + "ingress-metric-by-measurement-enabled", + "ingress-templating", + "inherent-rounding-errors-in-floating-point-arithmetic", + "inhibit", + "initcap", + "initial-data", + "initial-opening-of-data-files", + "initialize-a-client", + "initialize-a-stack", + "initialize-a-stack-when-applying-a-template", + "initialize-a-stack-with-a-name-and-description", + "initialize-a-stack-with-a-name-and-urls-to-associate-with-the-stack", + "initialize-using-a-database-connection-string", + "initialize-with-credential-parameters", + "initialzero", + "inject-annotation-headers", + "inline-alert-template", + "inline-row-template", + "inline-styles", + "inline-styles-1", + "inline-styles-2", + "inline-styles-3", + "inline-styles-4", + "inline-styles-5", + "inmem-index-option-removed", + "inner-join", + "inner-join-example", + "inner-join-result", + "inner-join-results", + "inner-joins", + "inner-query-result", + "inner-query-results", + "input", + "input-1", + "input-activemq", + "input-aerospike", + "input-aliyuncms", + "input-amd_rocm_smi", + "input-amqp_consumer", + "input-and-output-data-for-to-functions", + "input-apache", + "input-apcupsd", + "input-aurora", + "input-awsalarms", + "input-azure_monitor", + "input-azure_storage_queue", + 
"input-bcache", + "input-beanstalkd", + "input-beat", + "input-bigbluebutton", + "input-bind", + "input-bond", + "input-burrow", + "input-ceph", + "input-cgroup", + "input-chrony", + "input-cisco_telemetry_gnmi", + "input-cisco_telemetry_mdt", + "input-clickhouse", + "input-cloud_pubsub", + "input-cloud_pubsub_push", + "input-cloudwatch", + "input-cloudwatch_metric_streams", + "input-config-fieldpass-and-fielddrop", + "input-config-namepass-and-namedrop", + "input-config-prefix-suffix-and-override", + "input-config-taginclude-and-tagexclude", + "input-config-tagpass-and-tagdrop", + "input-config-tags", + "input-configuration", + "input-configuration-examples", + "input-conntrack", + "input-consul", + "input-consul_agent", + "input-couchbase", + "input-couchdb", + "input-cpu", + "input-csgo", + "input-ctrlx_datalayer", + "input-data", + "input-data-formats", + "input-db2", + "input-dcos", + "input-directory_monitor", + "input-disk", + "input-diskio", + "input-disque", + "input-dmcache", + "input-dns_query", + "input-dnsmasq", + "input-docker", + "input-docker_log", + "input-dovecot", + "input-dpdk", + "input-ds389", + "input-ecs", + "input-elasticsearch", + "input-elasticsearch_query", + "input-ethtool", + "input-eventhub_consumer", + "input-exec", + "input-execd", + "input-fail2ban", + "input-fibaro", + "input-file", + "input-filecount", + "input-filestat", + "input-fireboard", + "input-firehose", + "input-fluentd", + "input-fritzbox", + "input-github", + "input-gnmi", + "input-google_cloud_storage", + "input-graylog", + "input-haproxy", + "input-hddtemp", + "input-http", + "input-http_listener", + "input-http_listener_v2", + "input-http_response", + "input-huebridge", + "input-hugepages", + "input-icinga2", + "input-infiniband", + "input-influxdb", + "input-influxdb_listener", + "input-influxdb_v2_listener", + "input-intel_baseband", + "input-intel_dlb", + "input-intel_pmt", + "input-intel_pmu", + "input-intel_powerstat", + "input-intel_rdt", + "input-internal", + 
"input-internet_speed", + "input-interrupts", + "input-ipmi_sensor", + "input-ipset", + "input-iptables", + "input-ipvs", + "input-jenkins", + "input-jolokia2_agent", + "input-jolokia2_proxy", + "input-jti_openconfig_telemetry", + "input-kafka_consumer", + "input-kapacitor", + "input-kernel", + "input-kernel_vmstat", + "input-kibana", + "input-kinesis_consumer", + "input-knot", + "input-knx_listener", + "input-kube_inventory", + "input-kubernetes", + "input-lanz", + "input-ldap", + "input-ldap_org", + "input-leofs", + "input-libvirt", + "input-linux_cpu", + "input-linux_sysctl_fs", + "input-logparser", + "input-logstash", + "input-lustre2", + "input-lvm", + "input-mailchimp", + "input-marklogic", + "input-mcrouter", + "input-mdstat", + "input-mem", + "input-memcached", + "input-mesos", + "input-methods", + "input-minecraft", + "input-mock", + "input-modbus", + "input-mongodb", + "input-monit", + "input-mqtt_consumer", + "input-multifile", + "input-mysql", + "input-nats", + "input-nats_consumer", + "input-neptune_apex", + "input-net", + "input-net_response", + "input-netflow", + "input-netstat", + "input-nfsclient", + "input-nginx", + "input-nginx_plus", + "input-nginx_plus_api", + "input-nginx_sts", + "input-nginx_upstream_check", + "input-nginx_vts", + "input-nomad", + "input-nsd", + "input-nsdp", + "input-nsq", + "input-nsq_consumer", + "input-nstat", + "input-ntpq", + "input-nvidia_smi", + "input-octoprint", + "input-opcua", + "input-opcua_listener", + "input-openldap", + "input-openntpd", + "input-opensearch_query", + "input-opensmtpd", + "input-openstack", + "input-opentelemetry", + "input-openweathermap", + "input-oracle", + "input-p4runtime", + "input-parser-plugins", + "input-passenger", + "input-pf", + "input-pgbouncer", + "input-phpfpm", + "input-ping", + "input-plex", + "input-plugin", + "input-plugin-updates", + "input-plugin-updates-1", + "input-plugin-updates-10", + "input-plugin-updates-11", + "input-plugin-updates-12", + "input-plugin-updates-13", + 
"input-plugin-updates-14", + "input-plugin-updates-15", + "input-plugin-updates-16", + "input-plugin-updates-17", + "input-plugin-updates-18", + "input-plugin-updates-19", + "input-plugin-updates-2", + "input-plugin-updates-20", + "input-plugin-updates-21", + "input-plugin-updates-22", + "input-plugin-updates-23", + "input-plugin-updates-24", + "input-plugin-updates-25", + "input-plugin-updates-26", + "input-plugin-updates-27", + "input-plugin-updates-28", + "input-plugin-updates-29", + "input-plugin-updates-3", + "input-plugin-updates-30", + "input-plugin-updates-31", + "input-plugin-updates-32", + "input-plugin-updates-33", + "input-plugin-updates-34", + "input-plugin-updates-35", + "input-plugin-updates-4", + "input-plugin-updates-5", + "input-plugin-updates-6", + "input-plugin-updates-7", + "input-plugin-updates-8", + "input-plugin-updates-9", + "input-plugins", + "input-plugins-1", + "input-postfix", + "input-postgresql", + "input-postgresql_extensible", + "input-powerdns", + "input-powerdns_recursor", + "input-processes", + "input-procstat", + "input-prometheus", + "input-proxmox", + "input-psi", + "input-puppetagent", + "input-rabbitmq", + "input-radius", + "input-raindrops", + "input-ras", + "input-ravendb", + "input-records", + "input-redfish", + "input-redis", + "input-redis_sentinel", + "input-rethinkdb", + "input-riak", + "input-riemann_listener", + "input-s7comm", + "input-salesforce", + "input-sensors", + "input-sflow", + "input-slab", + "input-slurm", + "input-smart", + "input-smartctl", + "input-snmp", + "input-snmp_trap", + "input-socket_listener", + "input-socketstat", + "input-solr", + "input-sql", + "input-sqlserver", + "input-stackdriver", + "input-statsd", + "input-streams", + "input-supervisor", + "input-suricata", + "input-swap", + "input-synproxy", + "input-syslog", + "input-sysstat", + "input-system", + "input-systemd_timings", + "input-systemd_units", + "input-tacacs", + "input-tail", + "input-teamspeak", + "input-telegraf-apt", + 
"input-temp", + "input-tengine", + "input-tomcat", + "input-trig", + "input-twemproxy", + "input-unbound", + "input-upsd", + "input-uwsgi", + "input-varnish", + "input-vault", + "input-vsphere", + "input-webhooks", + "input-win_eventlog", + "input-win_perf_counters", + "input-win_services", + "input-win_wmi", + "input-wireguard", + "input-wireless", + "input-x509_cert", + "input-x509_crl", + "input-xtremio", + "input-youtube", + "input-zfs", + "input-zipkin", + "input-zookeeper", + "inputs", + "inputs-1", + "inputs-10", + "inputs-11", + "inputs-12", + "inputs-13", + "inputs-2", + "inputs-3", + "inputs-4", + "inputs-5", + "inputs-6", + "inputs-7", + "inputs-8", + "inputs-9", + "inputstring", + "insecure-skip-verify", + "insecure-skip-verify--false", + "insecureskipverify", + "insert-a-key-value-pair-into-a-dictionary", + "insert-a-new-key-value-pair-into-the-a-dictionary", + "insert-data-into-the-a-specific-database-and-retention-policy", + "insert-data-into-the-a-the-default-retention-policy-of-a-database", + "insert-data-into-the-currently-used-database", + "inspect-cluster-pods", + "inspect-influxdb-internal-metrics", + "inspect-the-response-of-an-http-request", + "inspect-tsi-indexes", + "install-a-monitor", + "install-a-self-signed-certificate-on-debian", + "install-an-influxdb-enterprise-cluster", + "install-and-configure-your-influxdb-cluster", + "install-and-customize-a-template-in-the-cloud-ui", + "install-and-customize-influxdb-community-templates-in-the-cloud-ui", + "install-and-run-the-ui", + "install-and-set-up-influxdb-in-a-container", + "install-and-start-etcd", + "install-and-startup", + "install-and-use-a-client-library", + "install-and-use-the-influx-cli", + "install-and-use-the-python-client-library", + "install-chronograf", + "install-chronograf-last", + "install-completion-scripts", + "install-dependencies", + "install-from-a-deb-file", + "install-from-the-influxdata-repository", + "install-go", + "install-grafana-or-login-to-grafana-cloud", + 
"install-helm", + "install-influxdb", + "install-influxdb-as-a-service-with-systemd", + "install-influxdb-client-library", + "install-influxdb-hahahugoshortcode1644s2hbhb", + "install-influxdb-in-a-kubernetes-cluster", + "install-influxdb-inch", + "install-influxdb-oss", + "install-kapacitor", + "install-kubectl", + "install-linux", + "install-linux-binaries", + "install-nodejs", + "install-pandas", + "install-postman", + "install-prerequisites", + "install-prerequisites-for-superset-and-flight-sql", + "install-python", + "install-python-dependencies", + "install-tableau-desktop", + "install-telegraf", + "install-telegraf-on-each-node", + "install-the-data-node-package", + "install-the-flux-plugin", + "install-the-influx-cli", + "install-the-influxdata-platform", + "install-the-influxdb-enterprise-monitoring-template", + "install-the-influxdb-javascript-client-library", + "install-the-influxdb-oss-monitoring-template", + "install-the-influxdb-v2-javascript-client-library", + "install-the-influxdb3-python-library", + "install-the-kubecfg-kubit-operator", + "install-the-meta-node-package", + "install-typescript", + "install-using-homebrew", + "install-with-vim-coc", + "install-with-vim-lsp", + "install-yarn", + "install-your-influxdb-license", + "installation", + "installation-documentation-moved-to-the-documentation-site", + "installation-requirements", + "installed-a-new-influxdb-instance", + "installed-as-a-package", + "installed-as-a-standalone-binary", + "installing-influxdb-oss", + "instance", + "instance-id", + "instance-methods", + "instance-owner", + "instr", + "instrumental", + "int", + "integer", + "integer-field-value-examples", + "integer-literals", + "integer-syntax", + "integers", + "integral", + "integrate-influxdb-20-applications-with-influxdb-enterprise-18", + "integrate-with-external-plugins", + "intel_baseband", + "intel_dlb", + "intel_pmt", + "intel_pmu", + "intel_powerstat", + "intel_rdt", + "interacting-with-the-sandbox-tick-stack", + 
"interactive-clients", + "interactive-shell", + "intermediate-verification", + "internal", + "internal-access-controls", + "internal-configuration-groups", + "internal-error-received-rst_stream", + "internal-error-stream-terminated-by-rst_stream-with-no_error", + "internal-monitoring", + "internal-package", + "internal-shared-secret", + "internet_speed", + "interpolate", + "interpolate-missing-data-by-day", + "interpolate-package", + "interpolate-strings", + "interrupts", + "interval", + "into-clause", + "into-query-with-group-by-", + "into-query-without-group-by-", + "introducing-iot-starter", + "introduction", + "introduction-to-authorization-in-influxdb-enterprise", + "invalid-api-token", + "invalid-argument-error-bucket-bucket_id-not-found", + "invalid-argument-invalid-ticket", + "invalid-line-protocol---double-quote-the-timestamp", + "invalid-operation-time-and-influxqlvarref-are-not-compatible", + "invalid-thumbprint", + "invert-bits-in-an-integer", + "invert-bits-in-an-unsigned-integer", + "invert-bits-in-integers-in-a-stream-of-tables", + "invert-bits-in-unsigned-integers-in-a-stream-of-tables", + "invite-a-user-to-your-account", + "invite-a-user-to-your-organization", + "invoke-a-script", + "invoke-a-script-with-parameters", + "iot", + "iot-sensor-common-queries", + "iotdb", + "iox", + "iox-package", + "ipmi_sensor", + "ipset", + "iptables", + "ipvs", + "is-distinct-from", + "is-flux-going-to-end-of-life", + "is-not-distinct-from", + "is-the-alert-state-and-alert-data-lost-happen-when-updating-a-script", + "is-there-a-limit-on-the-number-of-scripts-kapacitor-can-handle", + "iscounter", + "isnan", + "ispresent", + "israte", + "issue-1-mathematical-operators-with-wildcards-and-regular-expressions", + "issue-2-mathematical-operators-with-functions", + "issues-solved-by-tsi-and-remaining-to-be-solved", + "istio-support", + "iszero", + "iterate-over-an-array", + "iterator-type", + "iteratorcostreq", + "j", + "jaeger", + "january-2021", + "january-2022", + 
"java", + "java-flight-sql-package", + "javascript", + "javascript-for-browsers", + "javascriptnodejs", + "jenkins", + "jobs", + "jobsactive", + "join", + "join-a-list-of-strings-into-a-single-string", + "join-a-meta-and-data-node-into-a-cluster", + "join-a-meta-and-data-node-to-an-existing-cluster", + "join-a-meta-node-to-an-existing-cluster", + "join-clause", + "join-cpu-and-memory-usage", + "join-data-from-separate-data-sources", + "join-diagram", + "join-meta-nodes-to-the-cluster", + "join-multiple-data-sources-for-mathematic-calculations", + "join-on-time", + "join-output-function-as", + "join-package", + "join-predicate-function-on", + "join-sample-tables", + "join-sql-data-with-data-in-influxdb", + "join-the-data-nodes-to-the-cluster", + "join-the-two-data-streams", + "join-two-streams-of-data", + "join-two-streams-of-tables", + "join-two-streams-of-tables-with-different-fields-and-measurements", + "join-two-tables-by-timestamp", + "join-types", + "join-vs-union", + "joined-output-table", + "joining-with-batch-data", + "joins", + "jolokia2_agent", + "jolokia2_proxy", + "json", + "json-array-of-objects", + "json-array-of-scalar-values", + "json-data", + "json-formatted-results", + "json-object", + "json-package", + "json-v2", + "json_query", + "json_string_fields", + "json_time_format", + "json_time_key", + "json_time_key-json_time_format-json_timezone", + "json_timezone", + "jsternberg-package", + "jti_openconfig_telemetry", + "july", + "july-2021", + "june-2022", + "jwks-signature-verification-optional", + "jwt", + "k", + "k8sautoscale", + "kafka", + "kafka-1", + "kafka-message-partitioning", + "kafka-package", + "kafka_consumer", + "kafka_cpu_handleryaml", + "kafkasasl-oauth-parameters", + "kapacitor", + "kapacitor-alerts-and-the-tickscript", + "kapacitor-and-influxdb-authentication", + "kapacitor-and-influxdb-https", + "kapacitor-as-a-continuous-query-engine", + "kapacitor-authentication-and-authorization", + 
"kapacitor-authentication-configuration-options", + "kapacitor-cli", + "kapacitor-cluster-management", + "kapacitor-command-line-client-with-https", + "kapacitor-command-line-tools", + "kapacitor-configuration-file", + "kapacitor-configuration-file-location", + "kapacitor-connection-flags", + "kapacitor-connection-options", + "kapacitor-enterprise", + "kapacitor-environment-variables", + "kapacitor-event-handlers", + "kapacitor-event-handlers-supported-in-chronograf", + "kapacitor-flags", + "kapacitor-flux-task-configuration-example", + "kapacitor-frequently-asked-questions", + "kapacitor-ids-in-containerized-or-ephemeral-filesystems", + "kapacitor-integration", + "kapacitor-integration-improvements", + "kapacitor-measurements--fields", + "kapacitor-measurements-and-fields", + "kapacitor-over-tls", + "kapacitor-release-notes", + "kapacitor-security", + "kapacitor-tasks-and-chronograf", + "kapacitor-to-influxdb-tls-configuration-over-http-api", + "kapacitor-user-types-and-permissions", + "kapacitor_edges", + "kapacitor_ingress", + "kapacitor_load", + "kapacitor_memstats", + "kapacitor_nodes", + "kapacitor_topics", + "kapacitorapi", + "kapacitorconfigapi", + "kapacitord", + "kapacitorloopback", + "kaufmans_adaptive_moving_average", + "kaufmans_efficiency_ratio", + "keep", + "keep-a-list-of-columns", + "keep-client-secrets-secure", + "keep-columns-matching-a-predicate", + "keep-empty-tables-when-filtering", + "keep-keys-simple", + "keep-measurement-names-tags-and-fields-simple", + "keep-measurements-and-keys-simple", + "keep-table-names-tags-and-fields-simple", + "keep-tags-simple", + "keep-test-and-production-data-separate", + "keep-the-first-value-when-calculating-the-difference-between-values", + "keepfirst", + "kernel", + "kernel_vmstat", + "key", + "key-components", + "key-concepts", + "key-concepts-before-you-get-started", + "key-differences-between-influxdb-cloud-serverless-and-cloud-dedicated", + "key-features", + "key-mappings", + "keyboard-shortcuts", + 
"keycloak", + "keycloak-admin-console", + "keycloak-rest-api", + "keycolumns", + "keyword", + "keywords", + "kibana", + "kill-a-running-query", + "kill-query", + "kill-repair", + "kind", + "kindtag", + "kinesis", + "kinesis_consumer", + "knot", + "known-bugs", + "known-issues", + "knx_listener", + "kube-influxdb-kubernetes-monitoring-project", + "kube_inventory", + "kubecfg-kubit-operator", + "kubectl-kubit-helm", + "kubectl-requires-cluster-wide-permissions", + "kubectl-standard-deployment-with-internet-access", + "kubernetes", + "kubernetes-dashboard-template", + "kubernetes-default-paths", + "kubernetes-file-system-overview", + "kubernetes-node", + "kubernetes-overview", + "kubernetes-pod", + "kubernetes-specific-information", + "kubits-role-in-air-gapped-environments", + "l", + "labelsannotations", + "lag", + "lambda-expressions", + "lambda-expressions-as-literals", + "landing-hive", + "lanz", + "large-udp-packets", + "last", + "last-cache-eviction-interval", + "last-value-caches-are-flushed-when-the-server-stops", + "last-values-cache", + "last_cache", + "last_gc_ns", + "last_value", + "lastsuccesstime", + "latfield", + "latitude-and-longitude-values", + "launch-influx", + "launch-the-influxdb-command-line-interface", + "launch-the-process-with-a-configuration-file", + "launched-as-a-service", + "lcm", + "ldap", + "ldap-allowed", + "ldap_org", + "lead", + "leader-lease-timeout", + "leading-edge-data-with-no-backups", + "learn-more", + "learn-more-about-how-filter-works", + "learn-more-about-how-map-works", + "learn-more-about-tokens-and-permissions", + "learn-the-causes-of-high-series-cardinality", + "learn-to-use-apis-for-your-workloads", + "lease-duration", + "leave-out-the-option-tasks-assignment", + "left", + "left-input", + "left-outer-join", + "left-outer-join-example", + "left-outer-join-result", + "left-outer-join-results", + "legacy-backup-examples", + "legacy-format-directory-structure", + "legacy-slack-apps", + "legend", + "length", + 
"length-and-length_min-options", + "leofs", + "less-than", + "less-than-or-equal", + "letters-and-digits", + "level", + "level--info", + "leveldb-and-log-structured-merge-trees", + "levelfield", + "leveltag", + "levenshtein", + "lexical-elements", + "librato", + "libvirt", + "license-detection", + "license-email", + "license-enforcement", + "license-expiry-logs", + "license-feature-comparison", + "license-file", + "license-grace-periods", + "license-key", + "license-key-or-file", + "license-now-required", + "license-path", + "licensing", + "lifecycle", + "like", + "like-operator-in-the-where-clause", + "limit", + "limit-and-slimit", + "limit-and-slimit-clauses", + "limit-clause", + "limit-examples", + "limit-results-to-a-maximum-of-five-rows", + "limit-results-to-the-first-three-rows-in-each-input-table-after-the-first-two", + "limit-results-to-the-first-three-rows-in-each-table", + "limit-syntax", + "limit-the-number-of-failed-runs-to-retry-for-each-task", + "limit-the-number-of-invokable-scripts-returned-to-20", + "limit-the-number-of-kapacitor-flux-tasks-returned", + "limit-the-number-of-partition-files", + "limit-the-number-of-points-and-series-returned", + "limit-the-number-of-points-and-series-returned-and-include-a-group-by-time-clause", + "limit-the-number-of-points-returned", + "limit-the-number-of-points-returned-and-include-a-group-by-clause", + "limit-the-number-of-returned-task-runs-to-20", + "limit-the-number-of-rows-returned", + "limit-the-number-of-rows-returned-from-each-influxql-group", + "limit-the-number-of-series-returned", + "limit-the-number-of-series-returned-and-include-a-group-by-time-clause", + "limit-the-number-of-tasks-returned-to-20", + "limit-the-number-of-tasks-to-retry-failed-runs-for", + "limit-the-number-points-returned-and-include-a-group-by-clause", + "limit-update", + "limitations", + "limited-influxql-feature-support", + "limited-influxql-support", + "limited-influxql-support-in-influxdb-cloud-and-oss-2x", + 
"limits-and-adjustable-quotas", + "line-graph", + "line-graph--single-stat", + "line-graph--single-stat-controls", + "line-graph--single-stat-example", + "line-graph-controls", + "line-graph-example", + "line-protocol", + "line-protocol-anatomy", + "line-protocol-element-parsing", + "line-protocol-elements", + "line-protocol-format", + "line-protocol-lp", + "line-protocol-reference", + "line-protocol-syntax", + "linearbins", + "linestring", + "link-to-dashboards-with-variables-defined-in-the-url", + "links", + "linux", + "linux---systemd-systems", + "linux---sysv-or-upstart-systems", + "linux-and-macos", + "linux-browser-download", + "linux-command-line-download", + "linux-default-directories-package", + "linux-default-directories-standalone-binary", + "linux-default-paths-package", + "linux-default-paths-standalone-binary", + "linux-file-system-overview-package", + "linux-file-system-overview-standalone-binary", + "linux-systemd-installations", + "linux_cpu", + "linux_sysctl_fs", + "lisp", + "list", + "list-a-bucket-by-id", + "list-a-bucket-by-name", + "list-a-limited-number-of-flux-task-runs", + "list-a-limited-number-of-flux-tasks", + "list-a-limited-number-of-flux-tasks-api", + "list-a-limited-number-of-runs-for-a-flux-task", + "list-a-replication-stream-by-name", + "list-a-specific-flux-task", + "list-a-specific-flux-task-by-name", + "list-a-specific-flux-task-by-name-api", + "list-a-specific-handler-in-a-topic", + "list-a-specific-kapacitor-flux-task", + "list-a-specific-organization-by-id", + "list-a-specific-organization-by-name", + "list-a-specific-recording", + "list-a-specific-remote-by-name", + "list-a-specific-run-of-a-task", + "list-a-specific-task", + "list-a-specific-user-by-id", + "list-a-specific-user-by-username", + "list-a-specific-v1-authorization-by-id", + "list-a-telegraf-configuration-with-the-specified-id", + "list-admin-tokens", + "list-all-api-tokens", + "list-all-buckets", + "list-all-columns-per-input-table", + "list-all-dashboards", + 
"list-all-databases", + "list-all-databases-including-deleted-databases", + "list-all-dbrp-mappings-in-your-organization", + "list-all-flux-tasks", + "list-all-flux-tasks-api", + "list-all-handlers-in-a-topic", + "list-all-invokable-scripts", + "list-all-kapacitor-flux-tasks", + "list-all-organizations", + "list-all-remotes", + "list-all-replication-streams", + "list-all-runs-for-a-flux-task", + "list-all-runs-of-a-task", + "list-all-schemas-of-a-bucket-and-print-column-information", + "list-all-secret-keys", + "list-all-stacks", + "list-all-tasks", + "list-all-tasks-created-by-a-specific-user", + "list-all-tasks-matching-a-glob-pattern", + "list-all-telegraf-configurations", + "list-all-tokens", + "list-all-users", + "list-all-v1-authorizations", + "list-api-tokens-associated-with-a-user", + "list-available-aggregator-plugins", + "list-available-influxdb-sample-datasets", + "list-available-input-plugins", + "list-available-output-plugins", + "list-available-parser-plugins", + "list-available-processor-plugins", + "list-available-secretstore-plugins", + "list-available-serializer-plugins", + "list-buckets", + "list-buckets-in-an-influxdb-organization", + "list-clusters", + "list-columns-in-a-measurement", + "list-columns-in-a-table", + "list-currently-running-queries-with-show-queries", + "list-database-tokens", + "list-databases", + "list-databases-from-an-influxdb-instance", + "list-databases-in-json-formatted-output", + "list-databases-in-parquet-formatted-output", + "list-dbrp-mappings", + "list-dbrp-mappings-for-specific-buckets", + "list-dbrp-mappings-with-a-specific-database", + "list-dbrp-mappings-with-a-specific-retention-policy", + "list-deleted-databases", + "list-deprecated-input-plugins", + "list-deprecated-output-plugins", + "list-deprecated-parser-plugins", + "list-deprecated-serializer-plugins", + "list-docker-containers", + "list-existing-users-in-the-influxdb-instance", + "list-field-keys", + "list-field-keys-in-a-measurement", + 
"list-fields-in-a-measurement", + "list-filters", + "list-flux-task-runs-after-a-specific-run-id", + "list-flux-task-runs-that-occurred-in-a-time-range", + "list-flux-task-runs-that-occurred-in-a-time-range-api", + "list-flux-tasks", + "list-flux-tasks-after-a-specific-task-id", + "list-invokable-scripts", + "list-kapacitor-flux-task-runs", + "list-kapacitor-flux-tasks", + "list-kapacitor-flux-tasks-owned-by-a-specific-user", + "list-kapacitor-flux-tasks-runs", + "list-keys-in-a-record", + "list-kubernetes-nodes", + "list-kubernetes-pods", + "list-logs-from-a-specific-task-execution", + "list-logs-from-all-task-executions", + "list-management-tokens", + "list-measurements", + "list-measurements-in-a-bucket", + "list-measurements-in-a-database", + "list-measurements-that-contain-specific-tag-key-value-pairs", + "list-measurements-that-match-a-regular-expression", + "list-members-of-an-organization", + "list-of-handlers", + "list-only-active-flux-tasks", + "list-only-specific-dashboards", + "list-recordings", + "list-replays", + "list-resource-tokens", + "list-roles", + "list-runs-for-a-flux-task", + "list-secrets-and-secret-stores-using-a-non-default-configuration-location", + "list-secrets-and-secret-stores-using-the-default-configuration-location", + "list-secrets-from-a-specific-secret-store", + "list-service-tests", + "list-system-tables", + "list-system-tables-in-json-formatted-output", + "list-tables", + "list-tables-in-a-database", + "list-tables-with-the-influxctl-cli", + "list-tag-keys", + "list-tag-keys-in-a-measurement", + "list-tag-keys-in-measurements-that-contain-a-specific-tag-key-value-pair", + "list-tag-values", + "list-tag-values-associated-with-a-specific-tag-key-value-pair", + "list-tag-values-for-a-specific-tag-key", + "list-tag-values-for-multiple-tags", + "list-tag-values-for-tags-that-match-a-regular-expression", + "list-tag-values-in-a-measurement", + "list-tasks", + "list-templates", + "list-testable-services", + 
"list-the-distinct-field-values", + "list-the-distinct-field-values-associated-with-a-field-key", + "list-the-distinct-field-values-associated-with-a-field-key-and-include-several-clauses", + "list-the-distinct-field-values-associated-with-each-field-key-in-a-measurement", + "list-tokens-in-different-output-formats", + "list-topic-events", + "list-topic-handlers", + "list-topics", + "list-unique-tag-values", + "list-users", + "list-v1-authorizations-associated-with-a-user-id", + "list-v1-authorizations-associated-with-a-username", + "listing-continuous-queries", + "lists", + "literal", + "literal-constructors", + "literal-values", + "literals", + "literals-versus-field-values", + "live-datasets", + "live-leaderboard-of-game-scores", + "live-replay-batch-queries-in-a-relative-time-range", + "live-replay-batch-queries-in-an-absolute-time-range", + "liveconnections", + "livestreams", + "ln", + "lnav-log-file-navigator", + "load", + "load-a-dashboard-as-a-resource", + "load-a-raw-stream-of-tables-in-a-test-case", + "load-annotated-csv-sample-data", + "load-balancer", + "load-balancing", + "load-bird-migration-sample-data", + "load-csv-or-line-protocol-in-ui", + "load-data-from-a-client-library-in-the-ui", + "load-data-from-a-telegraf-plugin-in-the-ui", + "load-data-redesign", + "load-data-source-in-ui", + "load-data-updates", + "load-directory-service", + "load-tasks-with-kapacitor", + "load-testing-tools", + "loading", + "loading-the-stack-with-docker-compose", + "loc", + "local-or-utc-timezone", + "localhost8086", + "localhost8181", + "locate-the-physical-plan", + "location", + "location-package", + "locf", + "log", + "log-and-trace-influxdb-enterprise-operations", + "log-and-trace-with-influxdb", + "log-data-not-being-dropped", + "log-data-retention-policies", + "log-destination", + "log-enabled", + "log-enabled--true", + "log-enabled--true-1", + "log-enabled-1", + "log-file-structure", + "log-filter", + "log-format", + "log-formats", + "log-identifier-context-key", 
+ "log-in-and-log-out", + "log-in-to-superset", + "log-level", + "log-location-on-macos", + "log-messages-for-monitoring-and-debugging", + "log-messages-from-a-defined-handler", + "log-messages-from-a-tickscript", + "log-point-errors", + "log-point-errors--true", + "log-queries-after", + "log-queries-after--0s", + "log-timedout-queries--false", + "log10", + "log2", + "logarithmicbins", + "logfmt", + "logging", + "logging-1", + "logging-and-troubleshooting", + "logging-enabled", + "logging-enabled--true", + "logging-enhancements", + "logging-formats", + "logging-keys-used-in-tracing", + "logging-levels", + "logging-locations", + "logging-settings", + "logical-operators", + "logical-plan", + "logicalplan-nodes", + "logo-suppression", + "logparser", + "logql-package", + "logrotate", + "logs", + "logs-as-json", + "logs-as-logfmt", + "logs-in-dashboards", + "logs-when-running-influxdb-as-a-service", + "logstash", + "logzio", + "loki", + "lonfield", + "long", + "long-shard-group-duration", + "lookup", + "lookups", + "lower", + "lower-defaults-for-garbage-collection", + "lp-ingest-at-router-bytes", + "lp-ingest-at-router-lines", + "lpad", + "ltrim", + "lustre2", + "lvc-size-and-persistence", + "lvm", + "m", + "machine-production-sample-data", + "macos", + "macos-browser-download", + "macos-catalina-and-newer-authorize-the-influxd-binary", + "macos-command-line-download", + "macos-default-directories", + "macos-default-paths", + "macos-file-system-overview", + "macos-homebrew", + "macos-using-homebrew", + "mailchimp", + "maintain-state-with-the-in-memory-cache", + "maintain-the-original-timestamp", + "maintenance", + "maintenance-1", + "maintenance-10", + "maintenance-11", + "maintenance-12", + "maintenance-13", + "maintenance-2", + "maintenance-3", + "maintenance-4", + "maintenance-5", + "maintenance-6", + "maintenance-7", + "maintenance-8", + "maintenance-9", + "maintenance-updates", + "maintenance-updates-1", + "maintenance-updates-2", + "maintenance-updates-3", + 
"maintenance-updates-4", + "maintenance-xyz", + "major", + "make-a-get-request", + "make-a-get-request-and-decode-the-json-response", + "make-a-get-request-that-needs-authorization", + "make-a-get-request-with-query-parameters", + "make-a-post-request-with-a-json-body-and-decode-json-response", + "make-a-post-request-with-query-parameters", + "make-host-machine-files-readable-to-docker", + "make_date", + "mallocs", + "manage", + "manage-admin-and-non-admin-users", + "manage-admin-tokens", + "manage-admin-users", + "manage-alerts", + "manage-api-tokens", + "manage-authorization-with-influxql", + "manage-authorization-with-the-influxdb-enterprise-meta-api", + "manage-billing", + "manage-bucket-schemas", + "manage-buckets", + "manage-checks", + "manage-chronograf-alert-rules", + "manage-chronograf-organizations", + "manage-chronograf-security", + "manage-chronograf-users", + "manage-chronograf-users-and-roles", + "manage-clusters", + "manage-continuous-queries", + "manage-credentials", + "manage-data-partitioning", + "manage-database-tables", + "manage-database-tokens", + "manage-databases", + "manage-dbrps", + "manage-enterprise-permissions-with-chronograf", + "manage-enterprise-permissions-with-the-meta-api", + "manage-environment-variables-in-your-influxdb-cluster", + "manage-explicit-bucket-schemas", + "manage-files-in-mounted-volumes", + "manage-fine-grained-authorization", + "manage-flux-tasks", + "manage-grants", + "manage-influxdb-connections-using-src-files", + "manage-influxdb-connections-using-the-chronograf-ui", + "manage-influxdb-dashboards", + "manage-influxdb-enterprise-clusters", + "manage-influxdb-internal-systems", + "manage-influxdb-logs", + "manage-influxdb-scrapers", + "manage-influxdb-security", + "manage-influxdb-stacks", + "manage-influxdb-users-in-chronograf", + "manage-kapacitor-alerts", + "manage-kapacitor-connections-using-kap-files", + "manage-kapacitor-connections-using-the-chronograf-ui", + "manage-kapacitor-flux-task-runs", + 
"manage-kapacitor-flux-tasks", + "manage-kapacitor-subscriptions", + "manage-kapacitor-tasks", + "manage-kapacitor-tickscripts", + "manage-labels-in-the-influxdb-ui", + "manage-management-tokens", + "manage-node-labels", + "manage-non-admin-users", + "manage-notebooks", + "manage-notification-endpoints", + "manage-notification-rules", + "manage-organization-members", + "manage-organizations", + "manage-queries", + "manage-queries-in-chronograf", + "manage-read-and-write-privileges-with-influxql", + "manage-recordings", + "manage-replays", + "manage-resource-tokens", + "manage-restrictions", + "manage-retention-policies", + "manage-roles", + "manage-secrets", + "manage-secrets-through-the-influxdb-api", + "manage-security-and-authorization", + "manage-storage", + "manage-subscriptions-in-influxdb", + "manage-tables", + "manage-tasks", + "manage-telegraf-configurations", + "manage-templates", + "manage-the-distinct-value-cache", + "manage-the-influxdb-time-series-index-tsi", + "manage-the-last-value-cache", + "manage-tokens", + "manage-tokens-in-the-influxdb-ui", + "manage-user-authorization", + "manage-users", + "manage-users-and-permissions", + "manage-users-in-your-identity-provider", + "manage-users-in-your-influxdb-cluster", + "manage-your-influxdb-3-enterprise-license", + "manage-your-influxdb-cloud-serverless-account", + "manage-your-influxdb-clustered-license", + "manage-your-monitoring-and-alerting-pipeline", + "managecontinuousquery", + "management-token-strings-are-not-retrievable", + "management-tokens", + "management-tokens-and-the-management-api", + "management-tokens-in-influxctl-cli", + "managequery", + "manageshard", + "managesubscription", + "managing-catalog-components", + "managing-kapacitor-from-chronograf", + "managing-tasks-through-chronograf", + "manifest", + "manipulate-timestamps", + "manual-upgrade", + "manual-upgrade-required", + "manually-add-telegraf-plugins", + "manually-configure-telegraf", + "manually-download-and-install", + 
"manually-download-and-install-for-macos", + "manually-download-and-install-the-influxctl-binary", + "manually-download-and-install-the-influxd-binary", + "manually-download-the-package", + "manually-initialize-a-new-stack", + "manually-migrated-from-influxdb-1x-to-2x", + "manually-refresh-a-dashboard", + "manually-refresh-a-single-dashboard-cell", + "manually-refresh-dashboard", + "manually-upgrade", + "map", + "map-configuration-properties-to-environment-variables", + "map-database-and-retention-policies-to-buckets", + "map-databases-and-retention-policies-to-buckets", + "map-examples", + "map-new-values", + "map-organizations", + "map-v1-databases-and-retention-policies-to-buckets", + "map-values-based-on-existence", + "map-variable-csv-example", + "map-variable-example", + "map-variable-use-cases", + "mapfn", + "mapped-environment-variables", + "mapped-table", + "march-2022", + "mariadb-bool-types", + "mariadb-data-source-name", + "mariadb-to-flux-data-type-conversion", + "markdown", + "marklogic", + "match-conditions", + "match-expressions", + "match-parameter-names", + "matching-methods", + "math-across-measurements", + "math-functions", + "math-package", + "mathematical-operators", + "mathematical-operators-with-functions", + "mathematical-operators-with-wildcards-and-regular-expressions", + "matlab", + "max", + "max-age", + "max-body-size", + "max-body-size--25000000", + "max-cache-size", + "max-concurrent-compactions", + "max-concurrent-compactions--0", + "max-concurrent-deletes", + "max-concurrent-deletes--1", + "max-concurrent-queries", + "max-concurrent-queries--0", + "max-concurrent-write-limit", + "max-concurrent-write-limit--0", + "max-connection-limit", + "max-connection-limit--0", + "max-enqueued-write-limit", + "max-enqueued-write-limit--0", + "max-fetch", + "max-http-request-size", + "max-index-log-file-size", + "max-index-log-file-size--1m", + "max-log-file-size", + "max-persist-queue-depth", + "max-row-limit", + "max-row-limit--0", + 
"max-select-buckets", + "max-select-buckets--0", + "max-select-point", + "max-select-point--0", + "max-select-series", + "max-select-series--0", + "max-series-per-database", + "max-series-per-database--1000000", + "max-size", + "max-sync", + "max-values-per-tag", + "max-values-per-tag--100000", + "max-version", + "max-version--tls13", + "max-writes-pending", + "max_bytes", + "maximum-number-of-columns-per-measurement", + "maximum-number-of-columns-per-table", + "maximum-number-of-columns-reached", + "maximum-number-of-tables-reached", + "maxlevel", + "maxsize", + "may-2021", + "may-2022", + "may-adversely-affect-query-performance", + "may-adversely-affect-system-performance", + "may-impact-cluster-performance", + "may-impact-cluster-performance-1", + "may-improve-query-performance-view-more-info", + "mcache_in_use_bytes", + "mcache_sys_bytes", + "mcrouter", + "md5", + "mdstat", + "mean", + "mean-output-tables", + "measure-query-performance-with-flux-profilers", + "measure-series-cardinality", + "measurement", + "measurement-and-tag-templates", + "measurement-schemas-should-be-homogenous", + "measurement-specification", + "measurement-variable-use-cases", + "measurement_expression", + "measurementcolumn", + "measurements", + "measurements-per-db", + "measurements-tags-and-fields", + "median", + "meet-the-developer-series", + "meetup-videos", + "mem", + "member", + "member-expressions", + "member-user-group", + "members-page", + "members-rolemember", + "membytes", + "memcached", + "memcached-memcached", + "memory", + "memory-allocated-bytes", + "memory-allocated-bytes-total", + "memory-allocations-percentage-visualization-with-static-legend", + "memory-allocations-total", + "memory-bucket-hash-system-bytes", + "memory-frees-total", + "memory-gc-garbage-collection-cpu-fraction", + "memory-gc-garbage-collection-system-bytes", + "memory-heap-allocated-bytes", + "memory-heap-idle-bytes", + "memory-heap-in-use-bytes", + "memory-heap-objects", + 
"memory-heap-released-bytes", + "memory-heap-system-bytes", + "memory-last-gc-garbage-collection-time-seconds", + "memory-lookups-total", + "memory-management", + "memory-mcache-in-use-bytes", + "memory-mcache-system-bytes", + "memory-mspan-in-use-bytes", + "memory-mspan-system-bytes", + "memory-next-gc-garbage-collection-bytes", + "memory-object-store", + "memory-other-system-bytes", + "memory-stack-in-use-bytes", + "memory-stack-system-bytes", + "memory-system-bytes", + "memory-unused-bytes", + "memory-usage-as-a-single-stat", + "memory-usage-ingesters-k8s", + "memory-usage-k8s", + "memory-usage-routers-k8s", + "memory-used-variable", + "memused-data-output", + "memused-stream-definition", + "merge", + "merge-behavior", + "merge-lines-to-optimize-memory-and-bandwidth", + "merge-schema", + "merge-two-arrays", + "mergekey", + "mesos", + "message", + "message-a-definition", + "message-b-definition", + "message-c-definition", + "messagefield", + "messagefn", + "messagekey", + "messagepack", + "messagetype", + "messaging", + "meta", + "meta-and-data-nodes-are-fully-independent", + "meta-auth-enabled", + "meta-insecure-tls", + "meta-internal-shared-secret", + "meta-node", + "meta-node-configuration", + "meta-node-configuration-files-meta-influxdbconf", + "meta-node-configuration-settings", + "meta-node-file-system-layout", + "meta-node-file-system-overview", + "meta-node-settings", + "meta-node-setup-and-requirements", + "meta-nodes", + "meta-nodes-1", + "meta-only-overwrite-force", + "meta-service", + "meta-tls-enabled", + "metadata", + "metadata-cache-hierarchy", + "metaqueries", + "metaquery-templates", + "metastore", + "metastore-directory", + "metastore-settings", + "method", + "metric", + "metric-buffer", + "metric-filtering", + "metric-version-1", + "metric_name-optional", + "metric_selection-optional", + "metricname", + "metrics", + "metrics-disabled", + "metrics-version-2", + "metrictype", + "metricversion", + "microsoft-azure", + "microsoft-entra-id", + 
"microsoft-social-sign-on", + "migrate-a-configuration-directory", + "migrate-a-single-configuration-file", + "migrate-an-oss-instance-to-influxdb-enterprise", + "migrate-configuration-file-values", + "migrate-configuration-file-values-1", + "migrate-configuration-file-values-2", + "migrate-continuous-queries", + "migrate-continuous-queries-to-tasks", + "migrate-custom-configuration-settings", + "migrate-data", + "migrate-data-between-influxdb-cloud-organizations", + "migrate-data-from-influxdb-1x-to-influxdb-cloud-dedicated", + "migrate-data-from-influxdb-1x-to-influxdb-cloud-serverless", + "migrate-data-from-influxdb-1x-to-influxdb-clustered", + "migrate-data-from-influxdb-cloud-to-influxdb-cloud-dedicated", + "migrate-data-from-influxdb-cloud-to-influxdb-clustered", + "migrate-data-from-influxdb-cloud-to-influxdb-oss", + "migrate-data-from-influxdb-oss-to-influxdb-cloud", + "migrate-data-from-influxdb-oss-to-other-influxdb-instances", + "migrate-data-from-tsm-to-influxdb-cloud-serverless", + "migrate-data-step-1", + "migrate-data-to-influxdb-cloud-dedicated", + "migrate-data-to-influxdb-cloud-serverless", + "migrate-data-to-influxdb-clustered", + "migrate-dbrp-mappings", + "migrate-dbrp-mappings-to-influxdb-cloud", + "migrate-from-influxctl-1x-to-20", + "migrate-influxdb-oss-instances-to-influxdb-enterprise-clusters", + "migrate-resources", + "migrate-system-buckets", + "migrate-time-series-data", + "migrate-to-a-chronograf-ha-configuration", + "migrate-to-influxdb-cloud-serverless", + "migration", + "migration-flux-script", + "migration-task", + "min", + "min-version", + "min-version--tls10", + "minecraft", + "minimal-config", + "minimal-upgrade", + "minimum-influxctl-version", + "minmax", + "minor-xyz", + "minsize", + "minvalue", + "mirror-influxdb-images", + "mirror-kubit-operator-images", + "miscellaneous", + "miscellaneous-1", + "miscellaneous-2", + "miscellaneous-3", + "miscellaneous-event-updates", + "miscellaneous-fixes-and-updates", + 
"miscellaneous-functions", + "miscellaneous-operational-fixes", + "miscellaneous-operators", + "missing-data", + "missing-influxql-functions", + "missing-tags-in-the-cq-results", + "missing-values", + "missing-values-null", + "mixing-aggregation-functions-with-non-aggregates", + "mixing-data-types-and-line-protocol-elements", + "mock", + "modal-close", + "modbus", + "mode", + "mode-types", + "models", + "modify-configuration-sections", + "modify-retention-policies-with-alter-retention-policy", + "modify-the-configuration-file-to-point-to-prerequisites", + "modify-the-retention-policy-of-the-chronograf-database", + "modulo", + "mongodb", + "monit", + "monitor", + "monitor-32-bit-raspberry-pi-systems", + "monitor-amazon-web-services-aws", + "monitor-containers-using-the-influxdata-1x-platform", + "monitor-docker", + "monitor-haproxy", + "monitor-influxdb-disk-usage-collected-by-telegraf", + "monitor-influxdb-enterprise", + "monitor-influxdb-enterprise-clusters", + "monitor-influxdb-enterprise-using-a-template", + "monitor-influxdb-enterprise-with-influxdb-cloud", + "monitor-influxdb-enterprise-with-influxdb-oss", + "monitor-influxdb-oss", + "monitor-influxdb-oss-using-a-template", + "monitor-influxdb-performance-metrics", + "monitor-influxdb-servers", + "monitor-infrastructure", + "monitor-kapacitor", + "monitor-kapacitor-performance-metrics", + "monitor-kubernetes", + "monitor-kubernetes-using-the-influxdata-1x-platform", + "monitor-networks", + "monitor-package", + "monitor-raspberry-pi", + "monitor-settings", + "monitor-states", + "monitor-the-influxdata-platform", + "monitor-the-migration-progress", + "monitor-vsphere", + "monitor-windows", + "monitor-with-influxdb-insights", + "monitor-with-templates", + "monitor-your-cluster", + "monitor-your-raspberry-pi", + "monitoring-log-messages-of-level-debug-and-above-for-the-http-service", + "monitoring-logging-and-alerting", + "monitoring-settings", + "monitoringtool", + "month_offset", + 
"more-puts-into-object-storage-view-more-info", + "more-work-for-the-compactor-_enterprise-only_-view-more-info", + "more-work-for-the-compactor-view-more-info", + "mosaic", + "mosaic-behavior", + "mosaic-controls", + "mosaic-visualization", + "move-large-amounts-of-data-with-sequential-queries", + "moving-average", + "moving-average-rules", + "moving_average", + "movingaverage", + "mqtt", + "mqtt-connector", + "mqtt-package", + "mqtt_consumer", + "msg", + "mspan_in_use_bytes", + "mspan_sys_bytes", + "multi-account-support", + "multi-line-entries", + "multi-node-improvements", + "multi-node-selection", + "multi-organization-and-multi-user-support", + "multi-server-setup", + "multifile", + "multiple-inputs-of-the-same-type", + "multiple-measurement--tags-matching", + "multiple-select-statements-in-a-subquery", + "multiple-statements", + "multiple-statements-in-a-subquery", + "multiple-tables-and-results", + "multiple-templates", + "multiple-two-values", + "multiple-versions-of-influxdb-with-homebrew", + "multiplication", + "multiply-row-values-by-x", + "multiply-values-by-x", + "must-be-done-on-the-same-machine-as-influxdb", + "must-query-at-least-one-field", + "must-use-aggregate-or-selector-functions-when-grouping-by-time", + "myclass", + "myfield", + "mymeasurement", + "mysql", + "mysql-bool-types", + "mysql-dashboard-template", + "mysql-data-source-name", + "mysql-to-flux-data-type-conversion", + "n", + "nagios", + "naivebayesclassifier-package", + "name", + "name-1", + "name-syntax-for-mapped-buckets", + "name-tags-and-string-fields", + "name-your-dashboard", + "namecolumn", + "namespace", + "namespacetag", + "naming-restrictions", + "naming-variables", + "nansequal", + "nanvl", + "nats", + "nats-max-payload-bytes", + "nats-port", + "nats_consumer", + "nav-tree", + "navigate-organizations", + "nebius_cloud_monitoring", + "negatable-constraint", + "negate-boolean-values", + "neptune_apex", + "nested-syntax", + "nested-syntax-1", + "nesting-functions", + "net", + 
"net_response", + "netflow", + "netstat", + "network", + "network-bandwidth", + "network-time-protocol-ntp", + "networking", + "never-directly-modify-the-catalog", + "new", + "new-aggregator-plugins", + "new-aggregator-plugins-1", + "new-aggregators", + "new-bucket-shorthand-for-influx-delete", + "new-configuration-options", + "new-data-sources", + "new-event-handler", + "new-external-plugins", + "new-external-plugins-1", + "new-features", + "new-features-1", + "new-influxdb-cloud-signups-use-influxdb-3", + "new-influxdb-cloud-signups-use-influxdb-3-1", + "new-input-data-formats-parsers", + "new-input-plugins", + "new-input-plugins-1", + "new-input-plugins-2", + "new-input-plugins-3", + "new-input-plugins-4", + "new-input-plugins-5", + "new-inputs", + "new-inputs-1", + "new-output-data-formats-serializers", + "new-output-data-formats-serializers-1", + "new-output-data-formats-serializers-2", + "new-output-plugins", + "new-output-plugins-1", + "new-output-plugins-2", + "new-output-plugins-3", + "new-output-plugins-4", + "new-outputs", + "new-parsers", + "new-parsers-1", + "new-plugins", + "new-plugins-1", + "new-plugins-10", + "new-plugins-11", + "new-plugins-12", + "new-plugins-13", + "new-plugins-14", + "new-plugins-15", + "new-plugins-16", + "new-plugins-17", + "new-plugins-18", + "new-plugins-19", + "new-plugins-2", + "new-plugins-20", + "new-plugins-21", + "new-plugins-22", + "new-plugins-23", + "new-plugins-3", + "new-plugins-4", + "new-plugins-5", + "new-plugins-6", + "new-plugins-7", + "new-plugins-8", + "new-plugins-9", + "new-processor-plugins", + "new-processor-plugins-1", + "new-processor-plugins-2", + "new-processors", + "new-processors-1", + "new-repository", + "new-restore-options", + "new-slack-apps", + "new-socket-listener-and-socket-writer-plugins", + "new-telegraf-plugins-in-ui", + "new-ui-design", + "newcolumn", + "newrelic", + "next-steps", + "next_gc_ns", + "nfsclient", + "nginx", + "nginx_plus", + "nginx_plus_api", + "nginx_sts", + 
"nginx_upstream_check", + "nginx_vts", + "no-32-bit-builds", + "no-administrative-functionality", + "no-breaking-influxdb-api-changes", + "no-field-key-in-the-select-clause", + "no-hardware-separation", + "no-measurements-appear-in-my-bucket-even-though-theres-data-in-it", + "no-tasks", + "no-thanks", + "no-verify", + "no_proxy", + "noaa-bay-area-weather-data", + "noaa-ndbc-data", + "noaa-water-sample-data", + "node", + "node-creation", + "node-id", + "nodejs", + "nodes", + "noise", + "nomad", + "non-admin-user-management", + "non-admin-users", + "non-byte-aligned-value-extraction", + "non-correlated-subqueries", + "non-numeric-types", + "non-scalar-subqueries", + "non_negative_derivative", + "non_negative_difference", + "none", + "nonnegative", + "norecoveries", + "normalize", + "normalize-counter-resets", + "normalize-data-with-notebooks", + "normalize-irregular-timestamps", + "normalize-resets-in-counter-metrics", + "not", + "not-a-number", + "not-available-for-all-clusters", + "not-between", + "not-compatible-with-parameters", + "not-equal-to", + "not-exists", + "not-helpful", + "not-in", + "not-recommended", + "not-recommended-for-production", + "not-recommended-in-production-influxdb-enterprise-clusters", + "not-supported", + "not-supported-in-influxdb-cloud", + "not-supported-in-influxdb-cloud-serverless", + "notable-behaviors", + "notable-behaviors-1", + "notable-behaviors-10", + "notable-behaviors-11", + "notable-behaviors-12", + "notable-behaviors-13", + "notable-behaviors-14", + "notable-behaviors-15", + "notable-behaviors-16", + "notable-behaviors-17", + "notable-behaviors-18", + "notable-behaviors-19", + "notable-behaviors-2", + "notable-behaviors-20", + "notable-behaviors-21", + "notable-behaviors-22", + "notable-behaviors-23", + "notable-behaviors-24", + "notable-behaviors-3", + "notable-behaviors-4", + "notable-behaviors-5", + "notable-behaviors-6", + "notable-behaviors-7", + "notable-behaviors-8", + "notable-behaviors-9", + 
"notable-behaviors-of-mathematical-operators", + "notable-behaviors-of-selector-functions", + "notable-behaviors-of-the-group-by-clause", + "notable-behaviors-of-transformation-functions", + "notable-offset-clause-behaviors", + "notable-select-statement-behaviors", + "notable-subquery-behaviors", + "notation", + "note", + "note-controls", + "note-offline-restore", + "note-on-backslashes", + "note-on-http-api-configuration-and-restarting-kapacitor", + "note-view-example", + "notebook-cell-types", + "notebook-concepts", + "notebook-controls", + "notebooks", + "notebooks-annotations-and-visualization-updates", + "notes-about-examples", + "notification-endpoint", + "notification-endpoints", + "notification-rule", + "november-2021", + "now", + "now-vs-systemtime", + "nrows", + "nsd", + "nsdp", + "nsq", + "nsq_consumer", + "nstat", + "ntables", + "nth_value", + "ntile", + "ntp", + "ntpq", + "null", + "null-column-values", + "null-syntax", + "null-timestamps", + "null-types", + "null-values", + "nullable-constraint", + "nullif", + "nulls", + "num_enabled_tasks", + "num_gc", + "num_subscriptions", + "num_tasks", + "numeric-constraint", + "numeric-literals", + "numeric-types", + "numerical-precision", + "numerical-types", + "numfiles", + "numgc", + "numgoroutine", + "nummeasurements", + "numseries", + "nvidia_smi", + "nvl", + "nvl2", + "o", + "oauth-pkce", + "object-storage", + "object-storage-recommendations", + "object-store", + "object-store-cache-endpoint", + "object-store-configuration", + "object-store-connection-limit", + "object-store-custom-certificates", + "object-store-http2-max-frame-size", + "object-store-http2-only", + "object-store-max-retries", + "object-store-retry-timeout", + "object-store-scaling-strategies", + "obtain-requirements", + "octet_length", + "october-2021", + "october-2022", + "octoprint", + "oee-package", + "offline-legacy-restore", + "offline-legacy-restore-examples", + "offline-restores-overwrite-data", + "offset", + 
"offset-and-soffset-clauses", + "offset-clause", + "offset-examples", + "offset-following", + "offset-following-1", + "offset-interval", + "offset-preceding", + "offset-syntax", + "offset-time-windows", + "ok", + "oks_triggered", + "old-data-not-being-downsampled", + "oldsegmentdiskbytes", + "omitting-data", + "on", + "on-clause", + "on-first-write-sort-tags-by-query-priority", + "on-this-page", + "onboard-service-new-call-total", + "onboard-service-new-duration", + "onboarding-wizards-for-common-programming-languages", + "onempty", + "ongoing-maintenance", + "ongoing-support", + "online-legacy-restore", + "online-legacy-restore-examples", + "only-amd64-x86-architectures", + "only-partition-by-tags-that-always-have-a-value", + "only-use-the-appinstance-to-scale-component-replicas", + "only-works-with-sql", + "only-works-with-sql-1", + "onnonmonotonic", + "op_elapsed", + "op_event", + "op_name", + "opcua", + "opcua_listener", + "open-a-repl-session", + "open-feature-requests", + "open-github-issues", + "open-source-license-for-chronograf", + "open-source-license-for-influxdb", + "open-the-edit-database-token-dialog", + "openconnections", + "openldap", + "openntpd", + "opensearch", + "opensearch_query", + "openshift", + "openshift-route", + "opensmtpd", + "openstack", + "opentelemetry", + "opentsdb", + "opentsdb-input", + "opentsdb-protocol-support-in-influxdb", + "opentsdb-settings", + "opentsdb-telnet-put-api", + "openweathermap", + "operand", + "operands-and-primary-expressions", + "operands-must-be-the-same-type", + "operate-on-arrays", + "operate-on-columns", + "operate-on-dictionaries", + "operate-on-durations", + "operate-on-dynamic-types", + "operate-on-floats", + "operate-on-integers", + "operate-on-records", + "operate-on-strings", + "operate-on-tables", + "operate-on-time", + "operate-on-timestamps", + "operate-on-uintegers", + "operating-system", + "operation-keys", + "operation/ApplyTemplate", + "operation/CreateCheck", + 
"operation/CreateNotificationEndpoint", + "operation/CreateNotificationRule", + "operation/CreateStack", + "operation/DeleteAuthorizationsID", + "operation/DeleteBucketsID", + "operation/DeleteBucketsIDLabelsID", + "operation/DeleteBucketsIDMembersID", + "operation/DeleteBucketsIDOwnersID", + "operation/DeleteChecksID", + "operation/DeleteChecksIDLabelsID", + "operation/DeleteDBRPID", + "operation/DeleteDashboardsID", + "operation/DeleteDashboardsIDCellsID", + "operation/DeleteDashboardsIDLabelsID", + "operation/DeleteDashboardsIDMembersID", + "operation/DeleteDashboardsIDOwnersID", + "operation/DeleteLabelsID", + "operation/DeleteLegacyAuthorizationsID", + "operation/DeleteNotificationEndpointsID", + "operation/DeleteNotificationEndpointsIDLabelsID", + "operation/DeleteNotificationRulesID", + "operation/DeleteNotificationRulesIDLabelsID", + "operation/DeleteOrgsID", + "operation/DeleteOrgsIDMembersID", + "operation/DeleteOrgsIDOwnersID", + "operation/DeleteOrgsIDSecretsID", + "operation/DeleteRemoteConnectionByID", + "operation/DeleteReplicationByID", + "operation/DeleteScrapersID", + "operation/DeleteScrapersIDLabelsID", + "operation/DeleteScrapersIDMembersID", + "operation/DeleteScrapersIDOwnersID", + "operation/DeleteScriptsID", + "operation/DeleteSourcesID", + "operation/DeleteStack", + "operation/DeleteTasksID", + "operation/DeleteTasksIDLabelsID", + "operation/DeleteTasksIDMembersID", + "operation/DeleteTasksIDOwnersID", + "operation/DeleteTasksIDRunsID", + "operation/DeleteTelegrafsID", + "operation/DeleteTelegrafsIDLabelsID", + "operation/DeleteTelegrafsIDMembersID", + "operation/DeleteTelegrafsIDOwnersID", + "operation/DeleteUsersID", + "operation/DeleteVariablesID", + "operation/DeleteVariablesIDLabelsID", + "operation/ExportTemplate", + "operation/GetAuthorizations", + "operation/GetAuthorizationsID", + "operation/GetBackupKV", + "operation/GetBackupMetadata", + "operation/GetBackupShardId", + "operation/GetBuckets", + "operation/GetBucketsID", + 
"operation/GetBucketsIDLabels", + "operation/GetBucketsIDMembers", + "operation/GetBucketsIDOwners", + "operation/GetChecks", + "operation/GetChecksID", + "operation/GetChecksIDLabels", + "operation/GetChecksIDQuery", + "operation/GetConfig", + "operation/GetDBRPs", + "operation/GetDBRPsID", + "operation/GetDashboards", + "operation/GetDashboardsID", + "operation/GetDashboardsIDCellsIDView", + "operation/GetDashboardsIDLabels", + "operation/GetDashboardsIDMembers", + "operation/GetDashboardsIDOwners", + "operation/GetDebugPprofAllProfiles", + "operation/GetDebugPprofAllocs", + "operation/GetDebugPprofBlock", + "operation/GetDebugPprofCmdline", + "operation/GetDebugPprofGoroutine", + "operation/GetDebugPprofHeap", + "operation/GetDebugPprofMutex", + "operation/GetDebugPprofProfile", + "operation/GetDebugPprofThreadCreate", + "operation/GetDebugPprofTrace", + "operation/GetFlags", + "operation/GetHealth", + "operation/GetLabels", + "operation/GetLabelsID", + "operation/GetLegacyAuthorizations", + "operation/GetLegacyAuthorizationsID", + "operation/GetLegacyQuery", + "operation/GetMe", + "operation/GetMetrics", + "operation/GetNotificationEndpoints", + "operation/GetNotificationEndpointsID", + "operation/GetNotificationEndpointsIDLabels", + "operation/GetNotificationRules", + "operation/GetNotificationRulesID", + "operation/GetNotificationRulesIDLabels", + "operation/GetNotificationRulesIDQuery", + "operation/GetOrgLimitsID", + "operation/GetOrgUsageID", + "operation/GetOrgs", + "operation/GetOrgsID", + "operation/GetOrgsIDMembers", + "operation/GetOrgsIDOwners", + "operation/GetOrgsIDSecrets", + "operation/GetPing", + "operation/GetQuerySuggestions", + "operation/GetQuerySuggestionsName", + "operation/GetReady", + "operation/GetRemoteConnectionByID", + "operation/GetRemoteConnections", + "operation/GetReplicationByID", + "operation/GetReplications", + "operation/GetResources", + "operation/GetRoutes", + "operation/GetScrapers", + "operation/GetScrapersID", + 
"operation/GetScrapersIDLabels", + "operation/GetScrapersIDMembers", + "operation/GetScrapersIDOwners", + "operation/GetScripts", + "operation/GetScriptsID", + "operation/GetScriptsIDParams", + "operation/GetSetup", + "operation/GetSources", + "operation/GetSourcesID", + "operation/GetSourcesIDBuckets", + "operation/GetSourcesIDHealth", + "operation/GetTasks", + "operation/GetTasksID", + "operation/GetTasksIDLabels", + "operation/GetTasksIDLogs", + "operation/GetTasksIDMembers", + "operation/GetTasksIDOwners", + "operation/GetTasksIDRuns", + "operation/GetTasksIDRunsID", + "operation/GetTasksIDRunsIDLogs", + "operation/GetTelegrafPlugins", + "operation/GetTelegrafs", + "operation/GetTelegrafsID", + "operation/GetTelegrafsIDLabels", + "operation/GetTelegrafsIDMembers", + "operation/GetTelegrafsIDOwners", + "operation/GetUsers", + "operation/GetUsersID", + "operation/GetVariables", + "operation/GetVariablesID", + "operation/GetVariablesIDLabels", + "operation/HeadPing", + "operation/ListStacks", + "operation/PatchAuthorizationsID", + "operation/PatchBucketsID", + "operation/PatchChecksID", + "operation/PatchDBRPID", + "operation/PatchDashboardsID", + "operation/PatchDashboardsIDCellsID", + "operation/PatchDashboardsIDCellsIDView", + "operation/PatchLabelsID", + "operation/PatchLegacyAuthorizationsID", + "operation/PatchNotificationEndpointsID", + "operation/PatchNotificationRulesID", + "operation/PatchOrgsID", + "operation/PatchOrgsIDSecrets", + "operation/PatchRemoteConnectionByID", + "operation/PatchReplicationByID", + "operation/PatchScrapersID", + "operation/PatchScriptsID", + "operation/PatchSourcesID", + "operation/PatchTasksID", + "operation/PatchUsersID", + "operation/PatchVariablesID", + "operation/PostAuthorizations", + "operation/PostBuckets", + "operation/PostBucketsIDLabels", + "operation/PostBucketsIDMembers", + "operation/PostBucketsIDOwners", + "operation/PostChecksIDLabels", + "operation/PostDBRP", + "operation/PostDashboards", + 
"operation/PostDashboardsIDCells", + "operation/PostDashboardsIDLabels", + "operation/PostDashboardsIDMembers", + "operation/PostDashboardsIDOwners", + "operation/PostDelete", + "operation/PostLabels", + "operation/PostLegacyAuthorizations", + "operation/PostLegacyAuthorizationsIDPassword", + "operation/PostLegacyWrite", + "operation/PostNotificationEndpointIDLabels", + "operation/PostNotificationRuleIDLabels", + "operation/PostOrgs", + "operation/PostOrgsIDMembers", + "operation/PostOrgsIDOwners", + "operation/PostOrgsIDSecrets", + "operation/PostQuery", + "operation/PostQueryAnalyze", + "operation/PostQueryAst", + "operation/PostQueryV1", + "operation/PostRemoteConnection", + "operation/PostReplication", + "operation/PostRestoreBucketID", + "operation/PostRestoreBucketMetadata", + "operation/PostRestoreKV", + "operation/PostRestoreSQL", + "operation/PostRestoreShardId", + "operation/PostScrapers", + "operation/PostScrapersIDLabels", + "operation/PostScrapersIDMembers", + "operation/PostScrapersIDOwners", + "operation/PostScripts", + "operation/PostScriptsIDInvoke", + "operation/PostSetup", + "operation/PostSetupUser", + "operation/PostSignin", + "operation/PostSignout", + "operation/PostSources", + "operation/PostTasks", + "operation/PostTasksIDLabels", + "operation/PostTasksIDMembers", + "operation/PostTasksIDOwners", + "operation/PostTasksIDRuns", + "operation/PostTasksIDRunsIDRetry", + "operation/PostTelegrafs", + "operation/PostTelegrafsIDLabels", + "operation/PostTelegrafsIDMembers", + "operation/PostTelegrafsIDOwners", + "operation/PostUsers", + "operation/PostUsersIDPassword", + "operation/PostValidateReplicationByID", + "operation/PostVariables", + "operation/PostVariablesIDLabels", + "operation/PostWrite", + "operation/PostWriteV1", + "operation/PutChecksID", + "operation/PutDashboardsIDCells", + "operation/PutMePassword", + "operation/PutNotificationEndpointsID", + "operation/PutNotificationRulesID", + "operation/PutTelegrafsID", + 
"operation/PutUsersIDPassword", + "operation/PutVariablesID", + "operation/ReadStack", + "operation/UninstallStack", + "operation/UpdateStack", + "operation/createMeasurementSchema", + "operation/getMeasurementSchema", + "operation/getMeasurementSchemas", + "operation/updateMeasurementSchema", + "operational-enhancements", + "operational-improvements", + "operations", + "operator", + "operator-precedence", + "operator-token", + "operators", + "opsgenie", + "opsgenie-package", + "opsgenie-setup", + "opsgenie-v1", + "opsgenie-v2", + "opsgenie2", + "opt-out-of-telemetry-reporting", + "optimal-server-counts", + "optimize-flux-queries", + "optimize-queries", + "optimize-queries-to-reduce-impact-to-your-cluster", + "optimize-slow-or-expensive-queries", + "optimize-writes-to-influxdb", + "optimize-writes-to-influxdb-3-core", + "optimize-writes-to-influxdb-3-enterprise", + "optimize-your-queries", + "optimize-your-query", + "option", + "option-assignment", + "option-environment-variables", + "optional-configuration-groups", + "optional-download-install-and-use-the-influx-cli", + "optional-environment-variables", + "optional-google-domains", + "optional-namespace-monitoring-data", + "optional-sasl-configuration", + "optional-update-primary-hostnames", + "optional-variables", + "optional-verify-the-data-node-was-added-to-the-cluster", + "optional-verify-the-influxdb-meta-service-is-running", + "optional-verify-the-influxdb-service-is-running", + "optional-verify-the-meta-nodes-are-added-to-the-cluster", + "options", + "options-1", + "options-10", + "options-11", + "options-12", + "options-13", + "options-14", + "options-2", + "options-3", + "options-4", + "options-5", + "options-6", + "options-7", + "options-8", + "options-9", + "options-if-sasl-mechanism-is-oauthbearer", + "or", + "or-in-the-where-clause", + "oracle", + "order", + "order-by-clause", + "order-by-time-desc", + "orderby", + "org", + "organization", + "organization-1", + "organization-and-bucket-limits", + 
"organization-bound-users", + "organization-id-in-the-cli", + "organization-id-in-the-ui", + "organization-management-differences", + "organization-service-call-total", + "organization-service-duration", + "organization-service-new-call-total", + "organization-service-new-duration", + "organization-service-new-error-total", + "organizations-total", + "orgid", + "origin", + "ortime", + "oss-conversion", + "oss-urls", + "other", + "other-1", + "other-10", + "other-11", + "other-2", + "other-3", + "other-4", + "other-5", + "other-6", + "other-7", + "other-8", + "other-9", + "other-changes", + "other-columns", + "other-data", + "other-deployment-options", + "other-features", + "other-helpful-resources", + "other-highlights", + "other-influxdb-syntaxes", + "other-installation-options", + "other-operators", + "other-server-option-flags", + "other-service-options", + "other-telegraf-configuration-options", + "other-updates", + "other_sys_bytes", + "othercolumns", + "others", + "out-of-memory-loops", + "outer-joins", + "outer-query-result", + "outer-query-results", + "output", + "output-1", + "output-a-diff-between-two-streams-of-tables", + "output-all-influxdb-1x-continuous-queries", + "output-amon", + "output-amqp", + "output-application_insights", + "output-azure_data_explorer", + "output-azure_monitor", + "output-basic-sample-data-with-boolean-values", + "output-basic-sample-data-with-float-values", + "output-basic-sample-data-with-integer-values", + "output-basic-sample-data-with-numeric-boolean-values", + "output-basic-sample-data-with-string-values", + "output-basic-sample-data-with-unsigned-integer-values", + "output-bigquery", + "output-clarify", + "output-cloud_pubsub", + "output-cloudwatch", + "output-cloudwatch_logs", + "output-configuration", + "output-configuration-examples", + "output-cratedb", + "output-current-cardinality-with-your-cardinality-limit", + "output-data", + "output-data-formats", + "output-data-formats-serializers", + 
"output-data-requirements", + "output-data-schema", + "output-datadog", + "output-details", + "output-discard", + "output-dynatrace", + "output-elasticsearch", + "output-event_hubs", + "output-exec", + "output-execd", + "output-file", + "output-format", + "output-formats", + "output-graphite", + "output-graylog", + "output-groundwork", + "output-health", + "output-help-information-for-the-downgrade-command", + "output-http", + "output-influxdb", + "output-influxdb_v2", + "output-information-about-tsi-index-files", + "output-instrumental", + "output-iotdb", + "output-kafka", + "output-kinesis", + "output-librato", + "output-line-protocol", + "output-logzio", + "output-loki", + "output-measurement-data-stored-in-the-index", + "output-mongodb", + "output-mqtt", + "output-nats", + "output-nebius_cloud_monitoring", + "output-newrelic", + "output-nsq", + "output-opensearch", + "output-opentelemetry", + "output-opentsdb", + "output-organization-limits-in-a-table", + "output-parquet", + "output-plugin", + "output-plugin-updates", + "output-plugin-updates-1", + "output-plugin-updates-10", + "output-plugin-updates-11", + "output-plugin-updates-12", + "output-plugin-updates-13", + "output-plugin-updates-14", + "output-plugin-updates-15", + "output-plugin-updates-16", + "output-plugin-updates-17", + "output-plugin-updates-18", + "output-plugin-updates-19", + "output-plugin-updates-2", + "output-plugin-updates-20", + "output-plugin-updates-21", + "output-plugin-updates-22", + "output-plugin-updates-23", + "output-plugin-updates-24", + "output-plugin-updates-25", + "output-plugin-updates-26", + "output-plugin-updates-27", + "output-plugin-updates-3", + "output-plugin-updates-4", + "output-plugin-updates-5", + "output-plugin-updates-6", + "output-plugin-updates-7", + "output-plugin-updates-8", + "output-plugin-updates-9", + "output-plugins", + "output-plugins-1", + "output-postgresql", + "output-prometheus_client", + "output-query-results-to-a-parquet-file", + 
"output-raw-series-data-stored-in-the-index", + "output-record", + "output-redistimeseries", + "output-remotefile", + "output-riemann", + "output-scalar-values", + "output-schema", + "output-sensu", + "output-signalfx", + "output-socket_writer", + "output-sql", + "output-stackdriver", + "output-stomp", + "output-structure", + "output-sumologic", + "output-syslog", + "output-table", + "output-table-1", + "output-tables", + "output-the-last-three-rows-before-the-last-row-in-each-input-table", + "output-the-last-three-rows-in-each-input-table", + "output-timestream", + "output-to-a-parquet-file", + "output-tokens-to-a-parquet-file", + "output-warp10", + "output-wavefront", + "output-websocket", + "output-yandex_cloud_monitoring", + "output-zabbix", + "output_ordering", + "outputcolumn", + "outputgroupnametag", + "outputs", + "outputs-1", + "outputs-2", + "outputs-3", + "outputs-4", + "outputs-5", + "outputs-6", + "outputs-7", + "outputs-8", + "outputs-9", + "outputservicenametag", + "over-clause", + "overlapping-data-and-deduplication", + "overlay", + "override", + "override-configuration-values", + "override-flux-task-options", + "override-flux-task-options-api", + "override-kapacitor-configurations", + "overrides", + "overriding-default-settings-may-affect-overall-cluster-performance", + "overview", + "overview-of-go-runtime-profiles", + "overview-of-notebooks", + "overwrite-an-existing-key-value-pair-in-a-dictionary", + "overwrite-pid-file", + "owner", + "p", + "p4runtime", + "package-clause", + "package-fixes", + "package-initialization", + "package-main", + "package-manager", + "package-upgrade", + "package-upgrade-1", + "packages", + "page-feedback", + "pagefeedback", + "pagefeedbacktext", + "pagerduty", + "pagerduty-package", + "pagerduty-setup", + "pagerduty-v1", + "pagerduty-v2", + "pagerduty2", + "pagerdutyurl", + "paginate-points", + "paginate-points-and-include-several-clauses", + "paginate-results-by-3-and-return-the-2nd-page-of-results", + 
"paginate-results-from-each-series-by-3-and-return-the-2nd-page-of-each-series", + "paginate-series", + "paginate-series-and-include-all-clauses", + "pairs", + "pandas", + "parameter", + "parameter-changes", + "parameter-data-types", + "parameter-notation", + "parameterize-an-sql-query", + "parameters", + "parameters-1", + "parameters-2", + "parameters-3", + "parameters-4", + "parameters-5", + "parameters-6", + "parameters-only-supported-in-where-expressions", + "parametric-polymorphism", + "params", + "parquet", + "parquet-mem-cache-prune-interval", + "parquet-mem-cache-prune-percentage", + "parquet-mem-cache-query-path-duration", + "parquet-mem-cache-size", + "parquet-mem-cache-size-mb", + "parquet-storage", + "parquetexec", + "parquetexec-expressions", + "parquetexec_a", + "parquetexec_b", + "parquetexec_b-expressions", + "parse-and-use-json-data-to-restructure-tables", + "parse-json-and-use-array-functions-to-manipulate-into-a-table", + "parse-json-into-flux-types", + "parse-metadata-into-tags", + "parse-mode", + "parse-multivalue-plugin--split", + "parse-timestamp-abbreviations", + "parse-units-of-time-from-a-timestamp", + "parsemode", + "parser", + "parser-plugin-updates", + "parser-plugin-updates-1", + "parser-plugin-updates-10", + "parser-plugin-updates-2", + "parser-plugin-updates-3", + "parser-plugin-updates-4", + "parser-plugin-updates-5", + "parser-plugin-updates-6", + "parser-plugin-updates-7", + "parser-plugin-updates-8", + "parser-plugin-updates-9", + "parser-updates", + "parser-updates-1", + "parser-updates-2", + "parsers", + "parsers-1", + "parsers-2", + "parsers-3", + "parsers-4", + "parsing-failed-for-write_lp-endpoint", + "parsing-metrics", + "partevents", + "partial-write-of-line-protocol-occurred", + "partition-by-clause", + "partition-by-tag-buckets", + "partition-by-tags-that-you-commonly-query-for-a-specific-value", + "partition-guides", + "partition-key-size-limit", + "partition-keys", + "partition-related-queries", + 
"partition-template-parts", + "partition-template-requirements-and-guidelines", + "partition-templates", + "partition-templates-can-only-be-applied-on-create", + "partition-templates-cant-be-updated", + "partitioning-best-practices", + "partitioning-by-distinct-tag-values", + "partitions", + "partitions-in-the-query-life-cycle", + "parts-of-a-check", + "pass-arguments-to-plugins", + "pass-configuration-arguments", + "pass-configuration-options-to-the-service", + "pass-multiple-parameter-values-to-a-script", + "pass-raw-flux-via-stdin-pipe", + "pass-through-transformation", + "passenger", + "passive-node-experimental", + "passive-node-setup-experimental", + "password", + "password-hash", + "password-or-token", + "password-requirements", + "password-service-new-call-total", + "password-service-new-duration", + "password-service-new-error-total", + "path", + "path-1", + "path-2", + "path-3", + "path-parameters", + "path-parameters-1", + "path-parameters-2", + "path-parameters-3", + "path-parameters-4", + "path-parameters-5", + "pathtoexamplecsv", + "pattern-for-template-variable-query-parameters", + "pause_total_ns", + "pausetotalns", + "pbkdf2-sha256", + "pbkdf2-sha512", + "pearsonr", + "pending-key", + "percent-encode-special-symbols-in-postgresql-dsns", + "percent_rank", + "percentile", + "percentile-amp-quantile", + "percentile-compared-to-other-influxql-functions", + "percentile-versus-quantile", + "percona-bool-types", + "percona-data-source-name", + "percona-to-flux-data-type-conversion", + "perform-a-basic-influxql-query", + "perform-a-basic-sql-query", + "perform-a-bitwise-and-not-operation", + "perform-a-bitwise-and-not-operation-on-a-stream-of-tables", + "perform-a-bitwise-and-operation", + "perform-a-bitwise-and-operation-on-a-stream-of-tables", + "perform-a-bitwise-or-operation", + "perform-a-bitwise-or-operation-on-a-stream-of-tables", + "perform-a-bitwise-xor-operation", + "perform-a-bitwise-xor-operation-on-a-stream-of-tables", + 
"perform-a-full-backup", + "perform-a-full-outer-join", + "perform-a-left-outer-join", + "perform-a-linear-regression-on-a-dataset", + "perform-a-metadata-only-backup", + "perform-a-right-outer-join", + "perform-additional-aggregate-operations-on-aggregate-values", + "perform-an-incremental-backup", + "perform-an-incremental-backup-on-a-single-database", + "perform-an-inner-join", + "perform-an-ungrouped-aggregation", + "perform-arithmetic-operations-on-durations", + "perform-arithmetic-operations-on-floats", + "perform-arithmetic-operations-on-integers", + "perform-arithmetic-operations-on-uintegers", + "perform-basic-arithmetic-on-an-epoch-timestamp", + "perform-basic-arithmetic-on-an-rfc3339-like-date-time-string", + "perform-bitwise-operations-on-integers", + "perform-bitwise-operations-on-uintegers", + "perform-join-operations", + "perform-mathematical-operations", + "perform-the-calculation", + "perform-the-upgrade", + "performance", + "performance-1", + "performance-and-security-improvements", + "performance-characteristics", + "performance-differences", + "performance-enhancement", + "performance-enhancements", + "performance-improvements", + "performance-improvements-1", + "performance-improvements-to-dashboards-and-queries", + "performance-tips", + "period", + "periodcount", + "periodic-license-checks", + "periodic-tsm-snapshots", + "perl", + "permission-format", + "permission-required-to-create-mapped-buckets", + "permission-to-statement", + "permissions", + "permissions-in-influxdb-enterprise", + "permissions-scope", + "persist-queue-depth", + "persist-task-queue-duration", + "persistent-disk-storage", + "persistent-volume-fixes", + "pf", + "pgbouncer", + "phase-1-steps", + "phase-2-steps", + "phase-3-process", + "phase-4-process", + "php", + "phpfpm", + "physical-plan", + "physical-plan-data-flow", + "physical-plan-leaf-nodes", + "pi", + "pid-file", + "ping", + "ping-auth-enabled--false", + "ping-http-endpoint", + "pingreq", + "pipe", + 
"pipe-expressions", + "pipe-forward-operator", + "pipe-forwardable-function-example", + "pipeline-architecture", + "pipeline-validity", + "pipelines", + "pipelines-as-graphs", + "pivot", + "pivot-and-write-data-to-influxdb", + "pivot-data-into-a-relational-schema", + "pivot-fields-into-columns", + "pivot-fields-into-columns-for-mathematic-calculations", + "pivot-influxdb-fields-into-columns", + "pivot-is-more-performant", + "pivot-lat-and-lon-fields-into-columns", + "pivot-vs-join", + "pkg", + "pkger-service-call-total", + "pkger-service-duration", + "pkger-service-template-export", + "plan-for-custom-partitioning", + "plannedtime", + "planner-package", + "planning_time", + "platform-support", + "plex", + "plugin", + "plugin-dir", + "plugin-registry", + "plugin-updates", + "plugin-updates-1", + "plugins", + "plugins-1", + "plugins-10", + "plugins-11", + "plugins-12", + "plugins-13", + "plugins-14", + "plugins-15", + "plugins-16", + "plugins-17", + "plugins-18", + "plugins-19", + "plugins-2", + "plugins-20", + "plugins-21", + "plugins-22", + "plugins-23", + "plugins-24", + "plugins-25", + "plugins-3", + "plugins-4", + "plugins-5", + "plugins-6", + "plugins-7", + "plugins-8", + "plugins-9", + "point", + "pointreq", + "pointreqhh-enterprise-only", + "pointreqlocal-enterprise-only", + "pointreqremote-enterprise-only", + "points-dropped-due-to-partial-writes", + "points-in-shard-write-requests-with-errors", + "points-in-shard-writes", + "points-in-shard-writes-with-errors", + "points-in-successful-shard-write-requests", + "points-in-write-requests", + "points-per-second", + "points_received", + "points_written", + "pointswritten", + "pointswrittendropped", + "pointswrittenfail", + "pointswrittenok", + "pointswrittenok-1", + "policies-and-procedures", + "polygon", + "polyline-package", + "polymorphism", + "pool-max-idle-streams", + "pool-max-idle-time", + "populate-sensitive-credentials-with-secrets", + "population", + "port", + "port-update-to-8086", + "port_name", + 
"portable-format-directory-structure", + "ports", + "pos", + "position", + "possible-with-flux", + "post", + "post-alerts-from-a-defined-handler", + "post-alerts-from-a-tickscript", + "post-cancel-repair", + "post-repair", + "post-request-format", + "post-settings-in-kapacitorconf", + "post-templating", + "post-upgrade", + "postfix", + "postgresql", + "postgresql-compatible-database-requirements", + "postgresql-data-source-name", + "postgresql-instances-without-tls-or-ssl", + "postgresql-to-flux-data-type-conversion", + "postgresql_extensible", + "potential-causes", + "potential-solutions", + "pow", + "power", + "powerdns", + "powerdns_recursor", + "pprof-auth-enabled--false", + "pprof-disabled", + "pprof-enabled", + "pprof-enabled--true", + "pprof-endpoints", + "pre-configure-influxdb-connection-settings", + "pre-process-data-before-writing", + "prebuilt-dashboards", + "prebuilt-dashboards-in-chronograf", + "precalculating-expensive-queries", + "precision", + "precision--", + "precreator", + "predefined-dashboard-variables", + "predefined-template-variables", + "predicate", + "predicate-expression", + "predicate-expressions", + "predicate-function", + "predicate-functions", + "predicates-with-special-characters-or-keywords", + "predict", + "predict-field-values-associated-with-a-field-key", + "predictive-analysis", + "predictors", + "preemptive-cache-age", + "pref-cloud", + "pref-oss", + "pref-tabs", + "prefix", + "prefix-for-query-controller-metrics-changed", + "prep_pg_dumpawk", + "prepare-for-scheduled-downtime", + "prepare-influxdb-buckets", + "prepare-influxdb-databases", + "prepend-csv-data-with-annotation-headers", + "prerequisite", + "prerequisites", + "presentation-mode", + "preserve-columns", + "preserve-duplicate-points", + "preset-time-buckets", + "preset-time-windows", + "pretty", + "pretty-print-json-output", + "prevent-injection-attacks", + "preview-data-output", + "previewrun-mode", + "pricing-updates-and-azure-region", + "pricing-vectors", + 
"primary-key", + "primary-keys", + "print-column-details-for-a-single-measurement", + "print-information-about-runs-that-will-be-retried", + "printer", + "priority", + "private-influxdb-cloud-offering", + "private-ip-considerations", + "private-ip-validation", + "private-registry-air-gapped", + "problem", + "process", + "process-data-with-invokable-scripts", + "process-input-as-csv", + "process-or-transform-your-data", + "processes", + "processing", + "processing-engine", + "processing-engine-improvements", + "processing-engine-reference", + "processing-the-next-row", + "processor", + "processor-aws_ec2", + "processor-clone", + "processor-configuration", + "processor-converter", + "processor-date", + "processor-dedup", + "processor-defaults", + "processor-enum", + "processor-execd", + "processor-filepath", + "processor-filter", + "processor-geoip", + "processor-ifname", + "processor-lookup", + "processor-noise", + "processor-override", + "processor-parser", + "processor-pivot", + "processor-plugin", + "processor-plugin-updates", + "processor-plugin-updates-1", + "processor-plugin-updates-10", + "processor-plugin-updates-11", + "processor-plugin-updates-2", + "processor-plugin-updates-3", + "processor-plugin-updates-4", + "processor-plugin-updates-5", + "processor-plugin-updates-6", + "processor-plugin-updates-7", + "processor-plugin-updates-8", + "processor-plugin-updates-9", + "processor-plugins", + "processor-port_name", + "processor-printer", + "processor-regex", + "processor-rename", + "processor-reverse_dns", + "processor-s2geo", + "processor-scale", + "processor-snmp_lookup", + "processor-split", + "processor-starlark", + "processor-strings", + "processor-tag_limit", + "processor-template", + "processor-timestamp", + "processor-topk", + "processor-unpivot", + "processor-updates", + "processors", + "processors-1", + "processors-2", + "processors-3", + "processors-4", + "processors-5", + "processors-6", + "processors-7", + "processors-8", + "processors-9", + 
"procstat", + "proctotal-data-output", + "proctotal-stream-definition", + "prod_line", + "product-and-feature-end-of-life-procedures", + "product-dropdown", + "production-ready-replicate-data-remotely", + "productionevents", + "profile-all-memory-allocations", + "profile-blocking-operations", + "profile-cpu", + "profile-goroutines", + "profile-heap-memory-allocations", + "profile-mutual-exclusions-mutexes", + "profile-thread-creation", + "profiler-package", + "profiling", + "programmatically-access-node-labels", + "project", + "projection", + "projection-1", + "projection-2", + "projectionexec", + "prom-read-auth-enabled--false", + "prometheus", + "prometheus-client-libraries", + "prometheus-endpoints-support-in-influxdb", + "prometheus-exporters-and-integrations", + "prometheus-format", + "prometheus-metric-parsing-formats", + "prometheus-node-exporter", + "prometheus-package", + "prometheus-remote-read-and-write-api-support", + "prometheus-remote-read-and-write-support", + "prometheus-remote-write", + "prometheus-statefulset", + "prometheus_client", + "promql-package", + "promreadreq", + "promwritereq", + "properties", + "properties-of-time-series-data", + "property-methods", + "pros-of-external-monitoring", + "pros-of-internal-monitoring", + "protocol--tcp", + "protocol-buffers-additional-settings", + "prototype-your-app-on-influxdb-cloud-serverless", + "provide-a-custom-certificate-authority-bundle", + "provide-etcd-authentication-credentials", + "provide-etcd-tls-credentials", + "provide-influxdb-authentication-credentials", + "provide-influxdb-connection-credentials", + "provide-plugins-to-nodes-that-run-them", + "provide-required-authentication-credentials", + "provide-vault-server-address-and-token", + "provide-your-token", + "proxmox", + "pruning-predicate", + "psi", + "public-beta", + "public-registry", + "publish-and-subscribe", + "publish-to-multiple-topics-from-a-defined-handler", + "publish-your-plugin", + "puppetagent", + "purgatory", + 
"purge-interval", + "push", + "push-to-handler", + "pushbullet-package", + "pushdown-functions-and-function-combinations", + "pushdown-functions-in-use", + "pushing-vs-pulling-metrics", + "pushover", + "pushover-priority-levels", + "pushover-setup", + "pyarrow", + "python", + "python-flight-client", + "python-flight-sql-dbapi-client", + "python-plugins-and-the-processing-engine", + "q", + "qc-query-controller-statistics", + "qos", + "quantile", + "quantile-as-a-selector", + "quantile-as-an-aggregate", + "quay-default-paths", + "quay-file-system-overview", + "quayio", + "querier", + "querier-scaling-strategies", + "queries", + "queries-in-a-cluster", + "queries-should-return-one-table", + "queries-with-fill-when-no-data-fall-within-the-querys-time-range", + "queries-with-fillprevious-when-the-previous-result-falls-outside-the-querys-time-range", + "queries-with-fillprevious-when-the-previous-result-is-outside-the-queried-time-range", + "queries-with-no-data-in-the-queried-time-range", + "queriesactive", + "queriesexecuted", + "queriesfinished", + "query", + "query-1", + "query-2", + "query-a-distinct-value-cache", + "query-a-distinct-values-cache", + "query-a-last-value-cache", + "query-a-last-values-cache", + "query-a-mapped-bucket-with-influxql", + "query-a-non-default-retention-policy", + "query-a-raw-csv-string", + "query-a-remote-influxdb-cloud-instance", + "query-a-single-field", + "query-a-sql-data-source", + "query-a-system-table", + "query-a-system-table-and-order-by-a-specific-column", + "query-a-system-table-and-return-json-formatted-output", + "query-a-time-range-relative-to-now", + "query-all-data-from-the-last-month", + "query-all-fields-and-filter-by-tags", + "query-amazon-rds-postgresql-database", + "query-an-absolute-time-range", + "query-an-absolute-time-range-using-unix-timestamps", + "query-an-annotated-csv-string", + "query-analysis", + "query-and-storage-enhancements", + "query-and-visualize-data", + "query-annotated-csv-data-from-a-url", + 
"query-annotated-csv-data-from-a-url-using-the-requests-package", + "query-annotated-csv-data-from-file", + "query-annotated-csv-from-a-socket-connection", + "query-api", + "query-availability", + "query-brownout", + "query-builder", + "query-builder-or-script-editor", + "query-cardinality-of-data-written-in-the-last-4-hours", + "query-clickhouse", + "query-concurrency", + "query-concurrency--0", + "query-cpu-and-memory-usage", + "query-csv-data-from-a-file", + "query-csv-data-from-a-url", + "query-data", + "query-data-and-return-json-formatted-results", + "query-data-and-write-results-to-a-file", + "query-data-collected-last-friday", + "query-data-collected-last-monday", + "query-data-collected-last-saturday", + "query-data-collected-last-sunday", + "query-data-collected-last-thursday", + "query-data-collected-last-tuesday", + "query-data-collected-last-wednesday", + "query-data-from-a-remote-influxdb-cloud-instance", + "query-data-from-influxdb", + "query-data-from-influxdb-3-system-tables", + "query-data-from-influxdb-in-a-specific-measurement", + "query-data-from-influxdb-in-a-specified-time-range", + "query-data-from-influxdb-with-go", + "query-data-from-influxdb-with-python", + "query-data-from-last-month", + "query-data-from-last-week", + "query-data-from-the-current-week", + "query-data-from-this-calendar-month", + "query-data-from-this-month", + "query-data-from-this-year", + "query-data-from-today", + "query-data-from-yesterday", + "query-data-in-influxdb", + "query-data-in-the-data-explorer", + "query-data-using-a-time-zone-offset", + "query-data-using-influxql", + "query-data-using-sql", + "query-data-with-a-select-statement", + "query-data-with-a-select-statement-and-an-into-clause", + "query-data-with-a-select-statement-and-return-pretty-printed-json", + "query-data-with-a-select-statement-and-return-second-precision-epoch-timestamps", + "query-data-with-flux", + "query-data-with-flux-and-the-data-explorer", + "query-data-with-influxql", + 
"query-data-with-invalid-authentication-credentials", + "query-data-with-sql", + "query-data-with-sql-and-the-data-explorer", + "query-data-with-the-influxdb-api", + "query-data-with-the-influxdb-javascript-client-library", + "query-data-within-time-boundaries", + "query-data-without-time-boundaries", + "query-downsampled-usage-data-for-a-different-influxdb-cloud-organization", + "query-downsampled-usage-data-for-your-influxdb-cloud-organization", + "query-engine-internals", + "query-example", + "query-examples", + "query-field-keys-from-an-influxdb-bucket", + "query-field-keys-from-an-influxdb-measurement", + "query-fields-and-tags", + "query-fields-based-on-tag-values", + "query-file-limit", + "query-for-cardinality", + "query-for-errors-by-severity-code", + "query-google-cloud-bigtable", + "query-guidelines", + "query-http-endpoint", + "query-improvements-in-data-explorer-and-notebooks", + "query-in-data-explorer", + "query-in-the-flux-repl", + "query-influxdb", + "query-influxdb-1x", + "query-influxdb-3-and-return-results-in-json-format", + "query-influxdb-3-and-return-results-in-table-format", + "query-influxdb-3-and-return-results-with-unix-nanosecond-timestamps", + "query-influxdb-3-using-credentials-from-the-connection-profile", + "query-influxdb-3-with-influxql", + "query-influxdb-3-with-sql", + "query-influxdb-and-append-query-profile-data-to-results", + "query-influxdb-and-return-annotated-csv", + "query-influxdb-cloud-dedicated", + "query-influxdb-cloud-or-2x-remotely", + "query-influxdb-cloud-serverless", + "query-influxdb-clustered", + "query-influxdb-using-the-bucket-id", + "query-influxdb-using-the-bucket-name", + "query-influxdb-with-a-flux-file", + "query-influxdb-with-a-flux-string", + "query-influxdb-with-grafana", + "query-influxdb-with-superset", + "query-initial-memory-bytes", + "query-initial-memory-bytes--0", + "query-language-differences", + "query-languages", + "query-life-cycle", + "query-line-protocol-from-a-socket-connection", + 
"query-log-enabled", + "query-log-enabled--true", + "query-log-path", + "query-log-size", + "query-logs", + "query-management", + "query-management-settings", + "query-max-memory-bytes", + "query-max-memory-bytes--0", + "query-mem_used-and-mem_total-fields", + "query-memory-bytes", + "query-memory-usage-percentage", + "query-multiple-fields", + "query-notification-events-from-the-last-hour", + "query-number-of-bytes-in-requests-to-the-apiv2write-endpoint", + "query-number-of-bytes-returned-from-the-apiv2query-endpoint", + "query-one-day-of-data-data-from-a-week-ago", + "query-one-day-of-data-from-a-week-ago", + "query-only-the-data-you-need", + "query-package", + "query-parameters", + "query-parameters-1", + "query-parameters-vs-json-body", + "query-partition-information-from-system-tables", + "query-performance", + "query-plan", + "query-plan-diagram", + "query-plans", + "query-points-based-on-field-values", + "query-pressure-data-from-a-specific-sensor", + "query-queue-size", + "query-queue-size--0", + "query-raw-data-from-csv-file", + "query-raw-usage-data-for-your-influxdb-cloud-organization", + "query-remote-influxdb-data-sources", + "query-representation", + "query-request-bytes", + "query-request-count", + "query-response-bytes", + "query-results", + "query-results-1", + "query-routing", + "query-rows-based-on-aggregate-values", + "query-sample-data", + "query-series-cardinality-for-a-specific-tag", + "query-series-cardinality-in-a-bucket", + "query-series-cardinality-in-a-measurement", + "query-specific-columns-from-a-system-table", + "query-specific-columns-from-the-distinct_caches-system-table", + "query-specific-columns-from-the-last_caches-system-table", + "query-specific-fields-and-tags", + "query-specific-fields-from-influxdb", + "query-specific-fields-in-a-measurement-from-influxdb", + "query-specific-fields-in-a-measurement-from-logqlqryn", + "query-sql-data", + "query-stats-enabled", + "query-stats-enabled--false", + "query-string-authentication", 
+ "query-string-parameters", + "query-string-parameters-1", + "query-string-parameters-2", + "query-system-data", + "query-system-information", + "query-system-tables", + "query-tag-keys-from-an-influxdb-measurement", + "query-tag-keys-in-an-influxdb-bucket", + "query-telemetry-data", + "query-the-example-data", + "query-the-flux-version", + "query-the-latest-memory-usage-from-each-host", + "query-the-query-count-for-influxdb-cloud-query-endpoints", + "query-the-test-data-from-your-database", + "query-the-v1-query-endpoint", + "query-tier", + "query-tier-cpumem", + "query-time-range", + "query-timeout", + "query-timeout--0s", + "query-token-metadata", + "query-trace-logging", + "query-unique-tag-values-from-an-influxdb-bucket", + "query-unique-tag-values-from-an-influxdb-measurement", + "query-using-basic-authentication", + "query-using-grpc-or-http", + "query-using-influxdb-3-explorer-beta", + "query-using-influxql", + "query-using-influxql-and-the-http-api", + "query-using-sql", + "query-using-sql-and-the-http-api", + "query-using-stored-credentials", + "query-using-the-api", + "query-using-the-bucket-id", + "query-using-the-bucket-name", + "query-using-the-cli-for-influxql", + "query-using-the-python-client", + "query-variable-example", + "query-with-absolute-time-boundaries", + "query-with-relative-time-boundaries", + "query-with-the-influxdb-api", + "query-with-the-influxdb-javascript-client-library", + "query-would-process-too-many-files-or-partitions", + "querydurationns", + "queryexecutor", + "queryfail", + "queryflux", + "querying-data", + "querying-data-1", + "querying-data-that-occur-after-now-with-a-group-by-time-clause", + "querying-future-data-with-a-group-by-time-clause", + "querying-system-tables-may-impact-overall-cluster-performance", + "querying-the-wrong-retention-policy", + "querying-time-ranges-after-now", + "queryok", + "queryreq", + "queryreqdurationns", + "queryrespbytes", + "questions", + "queuebytes", + "queuebytes-1", + 
"queued-repairs-are-not-being-processed", + "queuedepth", + "queuedepth-1", + "queueing-active", + "queueing-duration-seconds", + "queuetotalsize", + "quick-install", + "quick-start", + "quickly-switch-between-configurations", + "quickstart", + "quickstart-1", + "quickstart-10", + "quickstart-11", + "quickstart-12", + "quickstart-13", + "quickstart-14", + "quickstart-15", + "quickstart-16", + "quickstart-17", + "quickstart-18", + "quickstart-19", + "quickstart-2", + "quickstart-20", + "quickstart-21", + "quickstart-22", + "quickstart-23", + "quickstart-24", + "quickstart-3", + "quickstart-4", + "quickstart-5", + "quickstart-6", + "quickstart-7", + "quickstart-8", + "quickstart-9", + "quiet", + "quit", + "quix-brand-icons", + "quix-downsample-pipeline", + "quota-and-limit-errors", + "quotes", + "quoting", + "quoting-examples", + "quoting-special-characters-and-additional-naming-guidelines", + "quoting-template-variables-in-influxql", + "qxip-package", + "r", + "rabbitmq", + "radians", + "radius", + "raindrops", + "random", + "random-numbers-sample-data", + "range", + "rank", + "ranking-functions", + "ras", + "rate", + "rate-limiting", + "rate-limits-with-influxdb-cloud", + "rate-of-change-between-subsequent-values", + "ravendb", + "raw", + "raw-data", + "react-tabs-0", + "react-tabs-1", + "react-tabs-10", + "react-tabs-100", + "react-tabs-1000", + "react-tabs-1001", + "react-tabs-1002", + "react-tabs-1003", + "react-tabs-1004", + "react-tabs-1005", + "react-tabs-1006", + "react-tabs-1007", + "react-tabs-1008", + "react-tabs-1009", + "react-tabs-101", + "react-tabs-1010", + "react-tabs-1011", + "react-tabs-1012", + "react-tabs-1013", + "react-tabs-1014", + "react-tabs-1015", + "react-tabs-1016", + "react-tabs-1017", + "react-tabs-1018", + "react-tabs-1019", + "react-tabs-102", + "react-tabs-1020", + "react-tabs-1021", + "react-tabs-1022", + "react-tabs-1023", + "react-tabs-1024", + "react-tabs-1025", + "react-tabs-1026", + "react-tabs-1027", + "react-tabs-1028", + 
"react-tabs-1029", + "react-tabs-103", + "react-tabs-1030", + "react-tabs-1031", + "react-tabs-1032", + "react-tabs-1033", + "react-tabs-1034", + "react-tabs-1035", + "react-tabs-1036", + "react-tabs-1037", + "react-tabs-1038", + "react-tabs-1039", + "react-tabs-104", + "react-tabs-1040", + "react-tabs-1041", + "react-tabs-1042", + "react-tabs-1043", + "react-tabs-1044", + "react-tabs-1045", + "react-tabs-1046", + "react-tabs-1047", + "react-tabs-1048", + "react-tabs-1049", + "react-tabs-105", + "react-tabs-1050", + "react-tabs-1051", + "react-tabs-1052", + "react-tabs-1053", + "react-tabs-1054", + "react-tabs-1055", + "react-tabs-1056", + "react-tabs-1057", + "react-tabs-1058", + "react-tabs-1059", + "react-tabs-106", + "react-tabs-1060", + "react-tabs-1061", + "react-tabs-1062", + "react-tabs-1063", + "react-tabs-1064", + "react-tabs-1065", + "react-tabs-1066", + "react-tabs-1067", + "react-tabs-1068", + "react-tabs-1069", + "react-tabs-107", + "react-tabs-1070", + "react-tabs-1071", + "react-tabs-1072", + "react-tabs-1073", + "react-tabs-1074", + "react-tabs-1075", + "react-tabs-1076", + "react-tabs-1077", + "react-tabs-1078", + "react-tabs-1079", + "react-tabs-108", + "react-tabs-1080", + "react-tabs-1081", + "react-tabs-1082", + "react-tabs-1083", + "react-tabs-1084", + "react-tabs-1085", + "react-tabs-1086", + "react-tabs-1087", + "react-tabs-1088", + "react-tabs-1089", + "react-tabs-109", + "react-tabs-1090", + "react-tabs-1091", + "react-tabs-1092", + "react-tabs-1093", + "react-tabs-1094", + "react-tabs-1095", + "react-tabs-1096", + "react-tabs-1097", + "react-tabs-1098", + "react-tabs-1099", + "react-tabs-11", + "react-tabs-110", + "react-tabs-1100", + "react-tabs-1101", + "react-tabs-1102", + "react-tabs-1103", + "react-tabs-1104", + "react-tabs-1105", + "react-tabs-1106", + "react-tabs-1107", + "react-tabs-1108", + "react-tabs-1109", + "react-tabs-111", + "react-tabs-1110", + "react-tabs-1111", + "react-tabs-1112", + "react-tabs-1113", + 
"react-tabs-1114", + "react-tabs-1115", + "react-tabs-1116", + "react-tabs-1117", + "react-tabs-1118", + "react-tabs-1119", + "react-tabs-112", + "react-tabs-1120", + "react-tabs-1121", + "react-tabs-1122", + "react-tabs-1123", + "react-tabs-1124", + "react-tabs-1125", + "react-tabs-1126", + "react-tabs-1127", + "react-tabs-1128", + "react-tabs-1129", + "react-tabs-113", + "react-tabs-1130", + "react-tabs-1131", + "react-tabs-1132", + "react-tabs-1133", + "react-tabs-1134", + "react-tabs-1135", + "react-tabs-1136", + "react-tabs-1137", + "react-tabs-1138", + "react-tabs-1139", + "react-tabs-114", + "react-tabs-1140", + "react-tabs-1141", + "react-tabs-1142", + "react-tabs-1143", + "react-tabs-1144", + "react-tabs-1145", + "react-tabs-1146", + "react-tabs-1147", + "react-tabs-1148", + "react-tabs-1149", + "react-tabs-115", + "react-tabs-1150", + "react-tabs-1151", + "react-tabs-1152", + "react-tabs-1153", + "react-tabs-1154", + "react-tabs-1155", + "react-tabs-1156", + "react-tabs-1157", + "react-tabs-1158", + "react-tabs-1159", + "react-tabs-116", + "react-tabs-1160", + "react-tabs-1161", + "react-tabs-1162", + "react-tabs-1163", + "react-tabs-1164", + "react-tabs-1165", + "react-tabs-1166", + "react-tabs-1167", + "react-tabs-1168", + "react-tabs-1169", + "react-tabs-117", + "react-tabs-1170", + "react-tabs-1171", + "react-tabs-1172", + "react-tabs-1173", + "react-tabs-1174", + "react-tabs-1175", + "react-tabs-1176", + "react-tabs-1177", + "react-tabs-1178", + "react-tabs-1179", + "react-tabs-118", + "react-tabs-1180", + "react-tabs-1181", + "react-tabs-1182", + "react-tabs-1183", + "react-tabs-1184", + "react-tabs-1185", + "react-tabs-1186", + "react-tabs-1187", + "react-tabs-1188", + "react-tabs-1189", + "react-tabs-119", + "react-tabs-1190", + "react-tabs-1191", + "react-tabs-1192", + "react-tabs-1193", + "react-tabs-1194", + "react-tabs-1195", + "react-tabs-1196", + "react-tabs-1197", + "react-tabs-1198", + "react-tabs-1199", + "react-tabs-12", + 
"react-tabs-120", + "react-tabs-1200", + "react-tabs-1201", + "react-tabs-1202", + "react-tabs-1203", + "react-tabs-1204", + "react-tabs-1205", + "react-tabs-1206", + "react-tabs-1207", + "react-tabs-1208", + "react-tabs-1209", + "react-tabs-121", + "react-tabs-1210", + "react-tabs-1211", + "react-tabs-1212", + "react-tabs-1213", + "react-tabs-1214", + "react-tabs-1215", + "react-tabs-1216", + "react-tabs-1217", + "react-tabs-1218", + "react-tabs-1219", + "react-tabs-122", + "react-tabs-1220", + "react-tabs-1221", + "react-tabs-1222", + "react-tabs-1223", + "react-tabs-1224", + "react-tabs-1225", + "react-tabs-1226", + "react-tabs-1227", + "react-tabs-1228", + "react-tabs-1229", + "react-tabs-123", + "react-tabs-1230", + "react-tabs-1231", + "react-tabs-1232", + "react-tabs-1233", + "react-tabs-1234", + "react-tabs-1235", + "react-tabs-1236", + "react-tabs-1237", + "react-tabs-1238", + "react-tabs-1239", + "react-tabs-124", + "react-tabs-1240", + "react-tabs-1241", + "react-tabs-1242", + "react-tabs-1243", + "react-tabs-1244", + "react-tabs-1245", + "react-tabs-1246", + "react-tabs-1247", + "react-tabs-1248", + "react-tabs-1249", + "react-tabs-125", + "react-tabs-1250", + "react-tabs-1251", + "react-tabs-1252", + "react-tabs-1253", + "react-tabs-1254", + "react-tabs-1255", + "react-tabs-1256", + "react-tabs-1257", + "react-tabs-1258", + "react-tabs-1259", + "react-tabs-126", + "react-tabs-1260", + "react-tabs-1261", + "react-tabs-1262", + "react-tabs-1263", + "react-tabs-1264", + "react-tabs-1265", + "react-tabs-1266", + "react-tabs-1267", + "react-tabs-1268", + "react-tabs-1269", + "react-tabs-127", + "react-tabs-1270", + "react-tabs-1271", + "react-tabs-1272", + "react-tabs-1273", + "react-tabs-1274", + "react-tabs-1275", + "react-tabs-1276", + "react-tabs-1277", + "react-tabs-1278", + "react-tabs-1279", + "react-tabs-128", + "react-tabs-1280", + "react-tabs-1281", + "react-tabs-1282", + "react-tabs-1283", + "react-tabs-1284", + "react-tabs-1285", + 
"react-tabs-1286", + "react-tabs-1287", + "react-tabs-1288", + "react-tabs-1289", + "react-tabs-129", + "react-tabs-1290", + "react-tabs-1291", + "react-tabs-1292", + "react-tabs-1293", + "react-tabs-1294", + "react-tabs-1295", + "react-tabs-1296", + "react-tabs-1297", + "react-tabs-1298", + "react-tabs-1299", + "react-tabs-13", + "react-tabs-130", + "react-tabs-1300", + "react-tabs-1301", + "react-tabs-1302", + "react-tabs-1303", + "react-tabs-1304", + "react-tabs-1305", + "react-tabs-1306", + "react-tabs-1307", + "react-tabs-1308", + "react-tabs-1309", + "react-tabs-131", + "react-tabs-1310", + "react-tabs-1311", + "react-tabs-1312", + "react-tabs-1313", + "react-tabs-1314", + "react-tabs-1315", + "react-tabs-1316", + "react-tabs-1317", + "react-tabs-1318", + "react-tabs-1319", + "react-tabs-132", + "react-tabs-1320", + "react-tabs-1321", + "react-tabs-1322", + "react-tabs-1323", + "react-tabs-1324", + "react-tabs-1325", + "react-tabs-1326", + "react-tabs-1327", + "react-tabs-1328", + "react-tabs-1329", + "react-tabs-133", + "react-tabs-1330", + "react-tabs-1331", + "react-tabs-1332", + "react-tabs-1333", + "react-tabs-1334", + "react-tabs-1335", + "react-tabs-1336", + "react-tabs-1337", + "react-tabs-1338", + "react-tabs-1339", + "react-tabs-134", + "react-tabs-1340", + "react-tabs-1341", + "react-tabs-1342", + "react-tabs-1343", + "react-tabs-1344", + "react-tabs-1345", + "react-tabs-1346", + "react-tabs-1347", + "react-tabs-1348", + "react-tabs-1349", + "react-tabs-135", + "react-tabs-1350", + "react-tabs-1351", + "react-tabs-1352", + "react-tabs-1353", + "react-tabs-1354", + "react-tabs-1355", + "react-tabs-1356", + "react-tabs-1357", + "react-tabs-1358", + "react-tabs-1359", + "react-tabs-136", + "react-tabs-1360", + "react-tabs-1361", + "react-tabs-1362", + "react-tabs-1363", + "react-tabs-1364", + "react-tabs-1365", + "react-tabs-1366", + "react-tabs-1367", + "react-tabs-1368", + "react-tabs-1369", + "react-tabs-137", + "react-tabs-1370", + 
"react-tabs-1371", + "react-tabs-1372", + "react-tabs-1373", + "react-tabs-1374", + "react-tabs-1375", + "react-tabs-1376", + "react-tabs-1377", + "react-tabs-1378", + "react-tabs-1379", + "react-tabs-138", + "react-tabs-1380", + "react-tabs-1381", + "react-tabs-1382", + "react-tabs-1383", + "react-tabs-1384", + "react-tabs-1385", + "react-tabs-1386", + "react-tabs-1387", + "react-tabs-1388", + "react-tabs-1389", + "react-tabs-139", + "react-tabs-1390", + "react-tabs-1391", + "react-tabs-1392", + "react-tabs-1393", + "react-tabs-1394", + "react-tabs-1395", + "react-tabs-1396", + "react-tabs-1397", + "react-tabs-1398", + "react-tabs-1399", + "react-tabs-14", + "react-tabs-140", + "react-tabs-1400", + "react-tabs-1401", + "react-tabs-1402", + "react-tabs-1403", + "react-tabs-1404", + "react-tabs-1405", + "react-tabs-1406", + "react-tabs-1407", + "react-tabs-1408", + "react-tabs-1409", + "react-tabs-141", + "react-tabs-1410", + "react-tabs-1411", + "react-tabs-1412", + "react-tabs-1413", + "react-tabs-1414", + "react-tabs-1415", + "react-tabs-1416", + "react-tabs-1417", + "react-tabs-1418", + "react-tabs-1419", + "react-tabs-142", + "react-tabs-1420", + "react-tabs-1421", + "react-tabs-1422", + "react-tabs-1423", + "react-tabs-1424", + "react-tabs-1425", + "react-tabs-1426", + "react-tabs-1427", + "react-tabs-1428", + "react-tabs-1429", + "react-tabs-143", + "react-tabs-1430", + "react-tabs-1431", + "react-tabs-1432", + "react-tabs-1433", + "react-tabs-1434", + "react-tabs-1435", + "react-tabs-1436", + "react-tabs-1437", + "react-tabs-1438", + "react-tabs-1439", + "react-tabs-144", + "react-tabs-1440", + "react-tabs-1441", + "react-tabs-1442", + "react-tabs-1443", + "react-tabs-1444", + "react-tabs-1445", + "react-tabs-1446", + "react-tabs-1447", + "react-tabs-1448", + "react-tabs-1449", + "react-tabs-145", + "react-tabs-1450", + "react-tabs-1451", + "react-tabs-1452", + "react-tabs-1453", + "react-tabs-1454", + "react-tabs-1455", + "react-tabs-1456", + 
"react-tabs-1457", + "react-tabs-1458", + "react-tabs-1459", + "react-tabs-146", + "react-tabs-1460", + "react-tabs-1461", + "react-tabs-1462", + "react-tabs-1463", + "react-tabs-1464", + "react-tabs-1465", + "react-tabs-1466", + "react-tabs-1467", + "react-tabs-1468", + "react-tabs-1469", + "react-tabs-147", + "react-tabs-1470", + "react-tabs-1471", + "react-tabs-1472", + "react-tabs-1473", + "react-tabs-1474", + "react-tabs-1475", + "react-tabs-1476", + "react-tabs-1477", + "react-tabs-1478", + "react-tabs-1479", + "react-tabs-148", + "react-tabs-1480", + "react-tabs-1481", + "react-tabs-1482", + "react-tabs-1483", + "react-tabs-1484", + "react-tabs-1485", + "react-tabs-1486", + "react-tabs-1487", + "react-tabs-1488", + "react-tabs-1489", + "react-tabs-149", + "react-tabs-1490", + "react-tabs-1491", + "react-tabs-1492", + "react-tabs-1493", + "react-tabs-1494", + "react-tabs-1495", + "react-tabs-1496", + "react-tabs-1497", + "react-tabs-1498", + "react-tabs-1499", + "react-tabs-15", + "react-tabs-150", + "react-tabs-1500", + "react-tabs-1501", + "react-tabs-1502", + "react-tabs-1503", + "react-tabs-1504", + "react-tabs-1505", + "react-tabs-1506", + "react-tabs-1507", + "react-tabs-1508", + "react-tabs-1509", + "react-tabs-151", + "react-tabs-1510", + "react-tabs-1511", + "react-tabs-1512", + "react-tabs-1513", + "react-tabs-1514", + "react-tabs-1515", + "react-tabs-1516", + "react-tabs-1517", + "react-tabs-1518", + "react-tabs-1519", + "react-tabs-152", + "react-tabs-1520", + "react-tabs-1521", + "react-tabs-1522", + "react-tabs-1523", + "react-tabs-1524", + "react-tabs-1525", + "react-tabs-1526", + "react-tabs-1527", + "react-tabs-1528", + "react-tabs-1529", + "react-tabs-153", + "react-tabs-1530", + "react-tabs-1531", + "react-tabs-1532", + "react-tabs-1533", + "react-tabs-1534", + "react-tabs-1535", + "react-tabs-1536", + "react-tabs-1537", + "react-tabs-1538", + "react-tabs-1539", + "react-tabs-154", + "react-tabs-1540", + "react-tabs-1541", + 
"react-tabs-1542", + "react-tabs-1543", + "react-tabs-1544", + "react-tabs-1545", + "react-tabs-1546", + "react-tabs-1547", + "react-tabs-1548", + "react-tabs-1549", + "react-tabs-155", + "react-tabs-1550", + "react-tabs-1551", + "react-tabs-1552", + "react-tabs-1553", + "react-tabs-1554", + "react-tabs-1555", + "react-tabs-1556", + "react-tabs-1557", + "react-tabs-1558", + "react-tabs-1559", + "react-tabs-156", + "react-tabs-1560", + "react-tabs-1561", + "react-tabs-1562", + "react-tabs-1563", + "react-tabs-1564", + "react-tabs-1565", + "react-tabs-1566", + "react-tabs-1567", + "react-tabs-1568", + "react-tabs-1569", + "react-tabs-157", + "react-tabs-1570", + "react-tabs-1571", + "react-tabs-1572", + "react-tabs-1573", + "react-tabs-1574", + "react-tabs-1575", + "react-tabs-1576", + "react-tabs-1577", + "react-tabs-1578", + "react-tabs-1579", + "react-tabs-158", + "react-tabs-1580", + "react-tabs-1581", + "react-tabs-1582", + "react-tabs-1583", + "react-tabs-1584", + "react-tabs-1585", + "react-tabs-1586", + "react-tabs-1587", + "react-tabs-1588", + "react-tabs-1589", + "react-tabs-159", + "react-tabs-1590", + "react-tabs-1591", + "react-tabs-1592", + "react-tabs-1593", + "react-tabs-1594", + "react-tabs-1595", + "react-tabs-1596", + "react-tabs-1597", + "react-tabs-1598", + "react-tabs-1599", + "react-tabs-16", + "react-tabs-160", + "react-tabs-1600", + "react-tabs-1601", + "react-tabs-1602", + "react-tabs-1603", + "react-tabs-1604", + "react-tabs-1605", + "react-tabs-1606", + "react-tabs-1607", + "react-tabs-1608", + "react-tabs-1609", + "react-tabs-161", + "react-tabs-1610", + "react-tabs-1611", + "react-tabs-1612", + "react-tabs-1613", + "react-tabs-1614", + "react-tabs-1615", + "react-tabs-1616", + "react-tabs-1617", + "react-tabs-1618", + "react-tabs-1619", + "react-tabs-162", + "react-tabs-1620", + "react-tabs-1621", + "react-tabs-1622", + "react-tabs-1623", + "react-tabs-1624", + "react-tabs-1625", + "react-tabs-1626", + "react-tabs-1627", + 
"react-tabs-1628", + "react-tabs-1629", + "react-tabs-163", + "react-tabs-1630", + "react-tabs-1631", + "react-tabs-1632", + "react-tabs-1633", + "react-tabs-1634", + "react-tabs-1635", + "react-tabs-1636", + "react-tabs-1637", + "react-tabs-1638", + "react-tabs-1639", + "react-tabs-164", + "react-tabs-1640", + "react-tabs-1641", + "react-tabs-1642", + "react-tabs-1643", + "react-tabs-1644", + "react-tabs-1645", + "react-tabs-1646", + "react-tabs-1647", + "react-tabs-1648", + "react-tabs-1649", + "react-tabs-165", + "react-tabs-1650", + "react-tabs-1651", + "react-tabs-1652", + "react-tabs-1653", + "react-tabs-1654", + "react-tabs-1655", + "react-tabs-1656", + "react-tabs-1657", + "react-tabs-1658", + "react-tabs-1659", + "react-tabs-166", + "react-tabs-1660", + "react-tabs-1661", + "react-tabs-1662", + "react-tabs-1663", + "react-tabs-1664", + "react-tabs-1665", + "react-tabs-1666", + "react-tabs-1667", + "react-tabs-1668", + "react-tabs-1669", + "react-tabs-167", + "react-tabs-1670", + "react-tabs-1671", + "react-tabs-1672", + "react-tabs-1673", + "react-tabs-1674", + "react-tabs-1675", + "react-tabs-1676", + "react-tabs-1677", + "react-tabs-1678", + "react-tabs-1679", + "react-tabs-168", + "react-tabs-1680", + "react-tabs-1681", + "react-tabs-1682", + "react-tabs-1683", + "react-tabs-1684", + "react-tabs-1685", + "react-tabs-1686", + "react-tabs-1687", + "react-tabs-1688", + "react-tabs-1689", + "react-tabs-169", + "react-tabs-1690", + "react-tabs-1691", + "react-tabs-1692", + "react-tabs-1693", + "react-tabs-1694", + "react-tabs-1695", + "react-tabs-1696", + "react-tabs-1697", + "react-tabs-1698", + "react-tabs-1699", + "react-tabs-17", + "react-tabs-170", + "react-tabs-1700", + "react-tabs-1701", + "react-tabs-1702", + "react-tabs-1703", + "react-tabs-1704", + "react-tabs-1705", + "react-tabs-1706", + "react-tabs-1707", + "react-tabs-1708", + "react-tabs-1709", + "react-tabs-171", + "react-tabs-1710", + "react-tabs-1711", + "react-tabs-1712", + 
"react-tabs-1713", + "react-tabs-1714", + "react-tabs-1715", + "react-tabs-1716", + "react-tabs-1717", + "react-tabs-1718", + "react-tabs-1719", + "react-tabs-172", + "react-tabs-1720", + "react-tabs-1721", + "react-tabs-1722", + "react-tabs-1723", + "react-tabs-1724", + "react-tabs-1725", + "react-tabs-1726", + "react-tabs-1727", + "react-tabs-1728", + "react-tabs-1729", + "react-tabs-173", + "react-tabs-1730", + "react-tabs-1731", + "react-tabs-1732", + "react-tabs-1733", + "react-tabs-1734", + "react-tabs-1735", + "react-tabs-1736", + "react-tabs-1737", + "react-tabs-1738", + "react-tabs-1739", + "react-tabs-174", + "react-tabs-1740", + "react-tabs-1741", + "react-tabs-1742", + "react-tabs-1743", + "react-tabs-1744", + "react-tabs-1745", + "react-tabs-1746", + "react-tabs-1747", + "react-tabs-1748", + "react-tabs-1749", + "react-tabs-175", + "react-tabs-1750", + "react-tabs-1751", + "react-tabs-1752", + "react-tabs-1753", + "react-tabs-1754", + "react-tabs-1755", + "react-tabs-1756", + "react-tabs-1757", + "react-tabs-1758", + "react-tabs-1759", + "react-tabs-176", + "react-tabs-1760", + "react-tabs-1761", + "react-tabs-1762", + "react-tabs-1763", + "react-tabs-1764", + "react-tabs-1765", + "react-tabs-1766", + "react-tabs-1767", + "react-tabs-1768", + "react-tabs-1769", + "react-tabs-177", + "react-tabs-1770", + "react-tabs-1771", + "react-tabs-1772", + "react-tabs-1773", + "react-tabs-1774", + "react-tabs-1775", + "react-tabs-1776", + "react-tabs-1777", + "react-tabs-1778", + "react-tabs-1779", + "react-tabs-178", + "react-tabs-1780", + "react-tabs-1781", + "react-tabs-1782", + "react-tabs-1783", + "react-tabs-1784", + "react-tabs-1785", + "react-tabs-1786", + "react-tabs-1787", + "react-tabs-1788", + "react-tabs-1789", + "react-tabs-179", + "react-tabs-1790", + "react-tabs-1791", + "react-tabs-1792", + "react-tabs-1793", + "react-tabs-1794", + "react-tabs-1795", + "react-tabs-1796", + "react-tabs-1797", + "react-tabs-1798", + "react-tabs-1799", + 
"react-tabs-18", + "react-tabs-180", + "react-tabs-1800", + "react-tabs-1801", + "react-tabs-1802", + "react-tabs-1803", + "react-tabs-1804", + "react-tabs-1805", + "react-tabs-1806", + "react-tabs-1807", + "react-tabs-1808", + "react-tabs-1809", + "react-tabs-181", + "react-tabs-1810", + "react-tabs-1811", + "react-tabs-1812", + "react-tabs-1813", + "react-tabs-1814", + "react-tabs-1815", + "react-tabs-1816", + "react-tabs-1817", + "react-tabs-1818", + "react-tabs-1819", + "react-tabs-182", + "react-tabs-1820", + "react-tabs-1821", + "react-tabs-1822", + "react-tabs-1823", + "react-tabs-1824", + "react-tabs-1825", + "react-tabs-1826", + "react-tabs-1827", + "react-tabs-1828", + "react-tabs-1829", + "react-tabs-183", + "react-tabs-1830", + "react-tabs-1831", + "react-tabs-1832", + "react-tabs-1833", + "react-tabs-1834", + "react-tabs-1835", + "react-tabs-1836", + "react-tabs-1837", + "react-tabs-1838", + "react-tabs-1839", + "react-tabs-184", + "react-tabs-1840", + "react-tabs-1841", + "react-tabs-1842", + "react-tabs-1843", + "react-tabs-1844", + "react-tabs-1845", + "react-tabs-1846", + "react-tabs-1847", + "react-tabs-1848", + "react-tabs-1849", + "react-tabs-185", + "react-tabs-1850", + "react-tabs-1851", + "react-tabs-1852", + "react-tabs-1853", + "react-tabs-1854", + "react-tabs-1855", + "react-tabs-1856", + "react-tabs-1857", + "react-tabs-1858", + "react-tabs-1859", + "react-tabs-186", + "react-tabs-1860", + "react-tabs-1861", + "react-tabs-1862", + "react-tabs-1863", + "react-tabs-1864", + "react-tabs-1865", + "react-tabs-1866", + "react-tabs-1867", + "react-tabs-1868", + "react-tabs-1869", + "react-tabs-187", + "react-tabs-1870", + "react-tabs-1871", + "react-tabs-1872", + "react-tabs-1873", + "react-tabs-1874", + "react-tabs-1875", + "react-tabs-1876", + "react-tabs-1877", + "react-tabs-1878", + "react-tabs-1879", + "react-tabs-188", + "react-tabs-1880", + "react-tabs-1881", + "react-tabs-1882", + "react-tabs-1883", + "react-tabs-1884", + 
"react-tabs-1885", + "react-tabs-1886", + "react-tabs-1887", + "react-tabs-1888", + "react-tabs-1889", + "react-tabs-189", + "react-tabs-1890", + "react-tabs-1891", + "react-tabs-1892", + "react-tabs-1893", + "react-tabs-1894", + "react-tabs-1895", + "react-tabs-1896", + "react-tabs-1897", + "react-tabs-1898", + "react-tabs-1899", + "react-tabs-19", + "react-tabs-190", + "react-tabs-1900", + "react-tabs-1901", + "react-tabs-1902", + "react-tabs-1903", + "react-tabs-1904", + "react-tabs-1905", + "react-tabs-1906", + "react-tabs-1907", + "react-tabs-1908", + "react-tabs-1909", + "react-tabs-191", + "react-tabs-1910", + "react-tabs-1911", + "react-tabs-1912", + "react-tabs-1913", + "react-tabs-1914", + "react-tabs-1915", + "react-tabs-1916", + "react-tabs-1917", + "react-tabs-1918", + "react-tabs-1919", + "react-tabs-192", + "react-tabs-1920", + "react-tabs-1921", + "react-tabs-1922", + "react-tabs-1923", + "react-tabs-1924", + "react-tabs-1925", + "react-tabs-1926", + "react-tabs-1927", + "react-tabs-1928", + "react-tabs-1929", + "react-tabs-193", + "react-tabs-1930", + "react-tabs-1931", + "react-tabs-1932", + "react-tabs-1933", + "react-tabs-1934", + "react-tabs-1935", + "react-tabs-1936", + "react-tabs-1937", + "react-tabs-1938", + "react-tabs-1939", + "react-tabs-194", + "react-tabs-1940", + "react-tabs-1941", + "react-tabs-1942", + "react-tabs-1943", + "react-tabs-1944", + "react-tabs-1945", + "react-tabs-1946", + "react-tabs-1947", + "react-tabs-1948", + "react-tabs-1949", + "react-tabs-195", + "react-tabs-1950", + "react-tabs-1951", + "react-tabs-1952", + "react-tabs-1953", + "react-tabs-1954", + "react-tabs-1955", + "react-tabs-1956", + "react-tabs-1957", + "react-tabs-1958", + "react-tabs-1959", + "react-tabs-196", + "react-tabs-1960", + "react-tabs-1961", + "react-tabs-1962", + "react-tabs-1963", + "react-tabs-1964", + "react-tabs-1965", + "react-tabs-1966", + "react-tabs-1967", + "react-tabs-1968", + "react-tabs-1969", + "react-tabs-197", + 
"react-tabs-1970", + "react-tabs-1971", + "react-tabs-1972", + "react-tabs-1973", + "react-tabs-1974", + "react-tabs-1975", + "react-tabs-1976", + "react-tabs-1977", + "react-tabs-1978", + "react-tabs-1979", + "react-tabs-198", + "react-tabs-1980", + "react-tabs-1981", + "react-tabs-1982", + "react-tabs-1983", + "react-tabs-1984", + "react-tabs-1985", + "react-tabs-1986", + "react-tabs-1987", + "react-tabs-1988", + "react-tabs-1989", + "react-tabs-199", + "react-tabs-1990", + "react-tabs-1991", + "react-tabs-1992", + "react-tabs-1993", + "react-tabs-1994", + "react-tabs-1995", + "react-tabs-1996", + "react-tabs-1997", + "react-tabs-1998", + "react-tabs-1999", + "react-tabs-2", + "react-tabs-20", + "react-tabs-200", + "react-tabs-2000", + "react-tabs-2001", + "react-tabs-2002", + "react-tabs-2003", + "react-tabs-2004", + "react-tabs-2005", + "react-tabs-2006", + "react-tabs-2007", + "react-tabs-2008", + "react-tabs-2009", + "react-tabs-201", + "react-tabs-202", + "react-tabs-203", + "react-tabs-204", + "react-tabs-205", + "react-tabs-206", + "react-tabs-207", + "react-tabs-208", + "react-tabs-209", + "react-tabs-21", + "react-tabs-210", + "react-tabs-211", + "react-tabs-212", + "react-tabs-213", + "react-tabs-214", + "react-tabs-215", + "react-tabs-216", + "react-tabs-217", + "react-tabs-218", + "react-tabs-219", + "react-tabs-22", + "react-tabs-220", + "react-tabs-221", + "react-tabs-222", + "react-tabs-223", + "react-tabs-224", + "react-tabs-225", + "react-tabs-226", + "react-tabs-227", + "react-tabs-228", + "react-tabs-229", + "react-tabs-23", + "react-tabs-230", + "react-tabs-231", + "react-tabs-232", + "react-tabs-233", + "react-tabs-234", + "react-tabs-235", + "react-tabs-236", + "react-tabs-237", + "react-tabs-238", + "react-tabs-239", + "react-tabs-24", + "react-tabs-240", + "react-tabs-241", + "react-tabs-242", + "react-tabs-243", + "react-tabs-244", + "react-tabs-245", + "react-tabs-246", + "react-tabs-247", + "react-tabs-248", + "react-tabs-249", + 
"react-tabs-25", + "react-tabs-250", + "react-tabs-251", + "react-tabs-252", + "react-tabs-253", + "react-tabs-254", + "react-tabs-255", + "react-tabs-256", + "react-tabs-257", + "react-tabs-258", + "react-tabs-259", + "react-tabs-26", + "react-tabs-260", + "react-tabs-261", + "react-tabs-262", + "react-tabs-263", + "react-tabs-264", + "react-tabs-265", + "react-tabs-266", + "react-tabs-267", + "react-tabs-268", + "react-tabs-269", + "react-tabs-27", + "react-tabs-270", + "react-tabs-271", + "react-tabs-272", + "react-tabs-273", + "react-tabs-274", + "react-tabs-275", + "react-tabs-276", + "react-tabs-277", + "react-tabs-278", + "react-tabs-279", + "react-tabs-28", + "react-tabs-280", + "react-tabs-281", + "react-tabs-282", + "react-tabs-283", + "react-tabs-284", + "react-tabs-285", + "react-tabs-286", + "react-tabs-287", + "react-tabs-288", + "react-tabs-289", + "react-tabs-29", + "react-tabs-290", + "react-tabs-291", + "react-tabs-292", + "react-tabs-293", + "react-tabs-294", + "react-tabs-295", + "react-tabs-296", + "react-tabs-297", + "react-tabs-298", + "react-tabs-299", + "react-tabs-3", + "react-tabs-30", + "react-tabs-300", + "react-tabs-301", + "react-tabs-302", + "react-tabs-303", + "react-tabs-304", + "react-tabs-305", + "react-tabs-306", + "react-tabs-307", + "react-tabs-308", + "react-tabs-309", + "react-tabs-31", + "react-tabs-310", + "react-tabs-311", + "react-tabs-312", + "react-tabs-313", + "react-tabs-314", + "react-tabs-315", + "react-tabs-316", + "react-tabs-317", + "react-tabs-318", + "react-tabs-319", + "react-tabs-32", + "react-tabs-320", + "react-tabs-321", + "react-tabs-322", + "react-tabs-323", + "react-tabs-324", + "react-tabs-325", + "react-tabs-326", + "react-tabs-327", + "react-tabs-328", + "react-tabs-329", + "react-tabs-33", + "react-tabs-330", + "react-tabs-331", + "react-tabs-332", + "react-tabs-333", + "react-tabs-334", + "react-tabs-335", + "react-tabs-336", + "react-tabs-337", + "react-tabs-338", + "react-tabs-339", + 
"react-tabs-34", + "react-tabs-340", + "react-tabs-341", + "react-tabs-342", + "react-tabs-343", + "react-tabs-344", + "react-tabs-345", + "react-tabs-346", + "react-tabs-347", + "react-tabs-348", + "react-tabs-349", + "react-tabs-35", + "react-tabs-350", + "react-tabs-351", + "react-tabs-352", + "react-tabs-353", + "react-tabs-354", + "react-tabs-355", + "react-tabs-356", + "react-tabs-357", + "react-tabs-358", + "react-tabs-359", + "react-tabs-36", + "react-tabs-360", + "react-tabs-361", + "react-tabs-362", + "react-tabs-363", + "react-tabs-364", + "react-tabs-365", + "react-tabs-366", + "react-tabs-367", + "react-tabs-368", + "react-tabs-369", + "react-tabs-37", + "react-tabs-370", + "react-tabs-371", + "react-tabs-372", + "react-tabs-373", + "react-tabs-374", + "react-tabs-375", + "react-tabs-376", + "react-tabs-377", + "react-tabs-378", + "react-tabs-379", + "react-tabs-38", + "react-tabs-380", + "react-tabs-381", + "react-tabs-382", + "react-tabs-383", + "react-tabs-384", + "react-tabs-385", + "react-tabs-386", + "react-tabs-387", + "react-tabs-388", + "react-tabs-389", + "react-tabs-39", + "react-tabs-390", + "react-tabs-391", + "react-tabs-392", + "react-tabs-393", + "react-tabs-394", + "react-tabs-395", + "react-tabs-396", + "react-tabs-397", + "react-tabs-398", + "react-tabs-399", + "react-tabs-4", + "react-tabs-40", + "react-tabs-400", + "react-tabs-401", + "react-tabs-402", + "react-tabs-403", + "react-tabs-404", + "react-tabs-405", + "react-tabs-406", + "react-tabs-407", + "react-tabs-408", + "react-tabs-409", + "react-tabs-41", + "react-tabs-410", + "react-tabs-411", + "react-tabs-412", + "react-tabs-413", + "react-tabs-414", + "react-tabs-415", + "react-tabs-416", + "react-tabs-417", + "react-tabs-418", + "react-tabs-419", + "react-tabs-42", + "react-tabs-420", + "react-tabs-421", + "react-tabs-422", + "react-tabs-423", + "react-tabs-424", + "react-tabs-425", + "react-tabs-426", + "react-tabs-427", + "react-tabs-428", + "react-tabs-429", + 
"react-tabs-43", + "react-tabs-430", + "react-tabs-431", + "react-tabs-432", + "react-tabs-433", + "react-tabs-434", + "react-tabs-435", + "react-tabs-436", + "react-tabs-437", + "react-tabs-438", + "react-tabs-439", + "react-tabs-44", + "react-tabs-440", + "react-tabs-441", + "react-tabs-442", + "react-tabs-443", + "react-tabs-444", + "react-tabs-445", + "react-tabs-446", + "react-tabs-447", + "react-tabs-448", + "react-tabs-449", + "react-tabs-45", + "react-tabs-450", + "react-tabs-451", + "react-tabs-452", + "react-tabs-453", + "react-tabs-454", + "react-tabs-455", + "react-tabs-456", + "react-tabs-457", + "react-tabs-458", + "react-tabs-459", + "react-tabs-46", + "react-tabs-460", + "react-tabs-461", + "react-tabs-462", + "react-tabs-463", + "react-tabs-464", + "react-tabs-465", + "react-tabs-466", + "react-tabs-467", + "react-tabs-468", + "react-tabs-469", + "react-tabs-47", + "react-tabs-470", + "react-tabs-471", + "react-tabs-472", + "react-tabs-473", + "react-tabs-474", + "react-tabs-475", + "react-tabs-476", + "react-tabs-477", + "react-tabs-478", + "react-tabs-479", + "react-tabs-48", + "react-tabs-480", + "react-tabs-481", + "react-tabs-482", + "react-tabs-483", + "react-tabs-484", + "react-tabs-485", + "react-tabs-486", + "react-tabs-487", + "react-tabs-488", + "react-tabs-489", + "react-tabs-49", + "react-tabs-490", + "react-tabs-491", + "react-tabs-492", + "react-tabs-493", + "react-tabs-494", + "react-tabs-495", + "react-tabs-496", + "react-tabs-497", + "react-tabs-498", + "react-tabs-499", + "react-tabs-5", + "react-tabs-50", + "react-tabs-500", + "react-tabs-501", + "react-tabs-502", + "react-tabs-503", + "react-tabs-504", + "react-tabs-505", + "react-tabs-506", + "react-tabs-507", + "react-tabs-508", + "react-tabs-509", + "react-tabs-51", + "react-tabs-510", + "react-tabs-511", + "react-tabs-512", + "react-tabs-513", + "react-tabs-514", + "react-tabs-515", + "react-tabs-516", + "react-tabs-517", + "react-tabs-518", + "react-tabs-519", + 
"react-tabs-52", + "react-tabs-520", + "react-tabs-521", + "react-tabs-522", + "react-tabs-523", + "react-tabs-524", + "react-tabs-525", + "react-tabs-526", + "react-tabs-527", + "react-tabs-528", + "react-tabs-529", + "react-tabs-53", + "react-tabs-530", + "react-tabs-531", + "react-tabs-532", + "react-tabs-533", + "react-tabs-534", + "react-tabs-535", + "react-tabs-536", + "react-tabs-537", + "react-tabs-538", + "react-tabs-539", + "react-tabs-54", + "react-tabs-540", + "react-tabs-541", + "react-tabs-542", + "react-tabs-543", + "react-tabs-544", + "react-tabs-545", + "react-tabs-546", + "react-tabs-547", + "react-tabs-548", + "react-tabs-549", + "react-tabs-55", + "react-tabs-550", + "react-tabs-551", + "react-tabs-552", + "react-tabs-553", + "react-tabs-554", + "react-tabs-555", + "react-tabs-556", + "react-tabs-557", + "react-tabs-558", + "react-tabs-559", + "react-tabs-56", + "react-tabs-560", + "react-tabs-561", + "react-tabs-562", + "react-tabs-563", + "react-tabs-564", + "react-tabs-565", + "react-tabs-566", + "react-tabs-567", + "react-tabs-568", + "react-tabs-569", + "react-tabs-57", + "react-tabs-570", + "react-tabs-571", + "react-tabs-572", + "react-tabs-573", + "react-tabs-574", + "react-tabs-575", + "react-tabs-576", + "react-tabs-577", + "react-tabs-578", + "react-tabs-579", + "react-tabs-58", + "react-tabs-580", + "react-tabs-581", + "react-tabs-582", + "react-tabs-583", + "react-tabs-584", + "react-tabs-585", + "react-tabs-586", + "react-tabs-587", + "react-tabs-588", + "react-tabs-589", + "react-tabs-59", + "react-tabs-590", + "react-tabs-591", + "react-tabs-592", + "react-tabs-593", + "react-tabs-594", + "react-tabs-595", + "react-tabs-596", + "react-tabs-597", + "react-tabs-598", + "react-tabs-599", + "react-tabs-6", + "react-tabs-60", + "react-tabs-600", + "react-tabs-601", + "react-tabs-602", + "react-tabs-603", + "react-tabs-604", + "react-tabs-605", + "react-tabs-606", + "react-tabs-607", + "react-tabs-608", + "react-tabs-609", + 
"react-tabs-61", + "react-tabs-610", + "react-tabs-611", + "react-tabs-612", + "react-tabs-613", + "react-tabs-614", + "react-tabs-615", + "react-tabs-616", + "react-tabs-617", + "react-tabs-618", + "react-tabs-619", + "react-tabs-62", + "react-tabs-620", + "react-tabs-621", + "react-tabs-622", + "react-tabs-623", + "react-tabs-624", + "react-tabs-625", + "react-tabs-626", + "react-tabs-627", + "react-tabs-628", + "react-tabs-629", + "react-tabs-63", + "react-tabs-630", + "react-tabs-631", + "react-tabs-632", + "react-tabs-633", + "react-tabs-634", + "react-tabs-635", + "react-tabs-636", + "react-tabs-637", + "react-tabs-638", + "react-tabs-639", + "react-tabs-64", + "react-tabs-640", + "react-tabs-641", + "react-tabs-642", + "react-tabs-643", + "react-tabs-644", + "react-tabs-645", + "react-tabs-646", + "react-tabs-647", + "react-tabs-648", + "react-tabs-649", + "react-tabs-65", + "react-tabs-650", + "react-tabs-651", + "react-tabs-652", + "react-tabs-653", + "react-tabs-654", + "react-tabs-655", + "react-tabs-656", + "react-tabs-657", + "react-tabs-658", + "react-tabs-659", + "react-tabs-66", + "react-tabs-660", + "react-tabs-661", + "react-tabs-662", + "react-tabs-663", + "react-tabs-664", + "react-tabs-665", + "react-tabs-666", + "react-tabs-667", + "react-tabs-668", + "react-tabs-669", + "react-tabs-67", + "react-tabs-670", + "react-tabs-671", + "react-tabs-672", + "react-tabs-673", + "react-tabs-674", + "react-tabs-675", + "react-tabs-676", + "react-tabs-677", + "react-tabs-678", + "react-tabs-679", + "react-tabs-68", + "react-tabs-680", + "react-tabs-681", + "react-tabs-682", + "react-tabs-683", + "react-tabs-684", + "react-tabs-685", + "react-tabs-686", + "react-tabs-687", + "react-tabs-688", + "react-tabs-689", + "react-tabs-69", + "react-tabs-690", + "react-tabs-691", + "react-tabs-692", + "react-tabs-693", + "react-tabs-694", + "react-tabs-695", + "react-tabs-696", + "react-tabs-697", + "react-tabs-698", + "react-tabs-699", + "react-tabs-7", + 
"react-tabs-70", + "react-tabs-700", + "react-tabs-701", + "react-tabs-702", + "react-tabs-703", + "react-tabs-704", + "react-tabs-705", + "react-tabs-706", + "react-tabs-707", + "react-tabs-708", + "react-tabs-709", + "react-tabs-71", + "react-tabs-710", + "react-tabs-711", + "react-tabs-712", + "react-tabs-713", + "react-tabs-714", + "react-tabs-715", + "react-tabs-716", + "react-tabs-717", + "react-tabs-718", + "react-tabs-719", + "react-tabs-72", + "react-tabs-720", + "react-tabs-721", + "react-tabs-722", + "react-tabs-723", + "react-tabs-724", + "react-tabs-725", + "react-tabs-726", + "react-tabs-727", + "react-tabs-728", + "react-tabs-729", + "react-tabs-73", + "react-tabs-730", + "react-tabs-731", + "react-tabs-732", + "react-tabs-733", + "react-tabs-734", + "react-tabs-735", + "react-tabs-736", + "react-tabs-737", + "react-tabs-738", + "react-tabs-739", + "react-tabs-74", + "react-tabs-740", + "react-tabs-741", + "react-tabs-742", + "react-tabs-743", + "react-tabs-744", + "react-tabs-745", + "react-tabs-746", + "react-tabs-747", + "react-tabs-748", + "react-tabs-749", + "react-tabs-75", + "react-tabs-750", + "react-tabs-751", + "react-tabs-752", + "react-tabs-753", + "react-tabs-754", + "react-tabs-755", + "react-tabs-756", + "react-tabs-757", + "react-tabs-758", + "react-tabs-759", + "react-tabs-76", + "react-tabs-760", + "react-tabs-761", + "react-tabs-762", + "react-tabs-763", + "react-tabs-764", + "react-tabs-765", + "react-tabs-766", + "react-tabs-767", + "react-tabs-768", + "react-tabs-769", + "react-tabs-77", + "react-tabs-770", + "react-tabs-771", + "react-tabs-772", + "react-tabs-773", + "react-tabs-774", + "react-tabs-775", + "react-tabs-776", + "react-tabs-777", + "react-tabs-778", + "react-tabs-779", + "react-tabs-78", + "react-tabs-780", + "react-tabs-781", + "react-tabs-782", + "react-tabs-783", + "react-tabs-784", + "react-tabs-785", + "react-tabs-786", + "react-tabs-787", + "react-tabs-788", + "react-tabs-789", + "react-tabs-79", + 
"react-tabs-790", + "react-tabs-791", + "react-tabs-792", + "react-tabs-793", + "react-tabs-794", + "react-tabs-795", + "react-tabs-796", + "react-tabs-797", + "react-tabs-798", + "react-tabs-799", + "react-tabs-8", + "react-tabs-80", + "react-tabs-800", + "react-tabs-801", + "react-tabs-802", + "react-tabs-803", + "react-tabs-804", + "react-tabs-805", + "react-tabs-806", + "react-tabs-807", + "react-tabs-808", + "react-tabs-809", + "react-tabs-81", + "react-tabs-810", + "react-tabs-811", + "react-tabs-812", + "react-tabs-813", + "react-tabs-814", + "react-tabs-815", + "react-tabs-816", + "react-tabs-817", + "react-tabs-818", + "react-tabs-819", + "react-tabs-82", + "react-tabs-820", + "react-tabs-821", + "react-tabs-822", + "react-tabs-823", + "react-tabs-824", + "react-tabs-825", + "react-tabs-826", + "react-tabs-827", + "react-tabs-828", + "react-tabs-829", + "react-tabs-83", + "react-tabs-830", + "react-tabs-831", + "react-tabs-832", + "react-tabs-833", + "react-tabs-834", + "react-tabs-835", + "react-tabs-836", + "react-tabs-837", + "react-tabs-838", + "react-tabs-839", + "react-tabs-84", + "react-tabs-840", + "react-tabs-841", + "react-tabs-842", + "react-tabs-843", + "react-tabs-844", + "react-tabs-845", + "react-tabs-846", + "react-tabs-847", + "react-tabs-848", + "react-tabs-849", + "react-tabs-85", + "react-tabs-850", + "react-tabs-851", + "react-tabs-852", + "react-tabs-853", + "react-tabs-854", + "react-tabs-855", + "react-tabs-856", + "react-tabs-857", + "react-tabs-858", + "react-tabs-859", + "react-tabs-86", + "react-tabs-860", + "react-tabs-861", + "react-tabs-862", + "react-tabs-863", + "react-tabs-864", + "react-tabs-865", + "react-tabs-866", + "react-tabs-867", + "react-tabs-868", + "react-tabs-869", + "react-tabs-87", + "react-tabs-870", + "react-tabs-871", + "react-tabs-872", + "react-tabs-873", + "react-tabs-874", + "react-tabs-875", + "react-tabs-876", + "react-tabs-877", + "react-tabs-878", + "react-tabs-879", + "react-tabs-88", + 
"react-tabs-880", + "react-tabs-881", + "react-tabs-882", + "react-tabs-883", + "react-tabs-884", + "react-tabs-885", + "react-tabs-886", + "react-tabs-887", + "react-tabs-888", + "react-tabs-889", + "react-tabs-89", + "react-tabs-890", + "react-tabs-891", + "react-tabs-892", + "react-tabs-893", + "react-tabs-894", + "react-tabs-895", + "react-tabs-896", + "react-tabs-897", + "react-tabs-898", + "react-tabs-899", + "react-tabs-9", + "react-tabs-90", + "react-tabs-900", + "react-tabs-901", + "react-tabs-902", + "react-tabs-903", + "react-tabs-904", + "react-tabs-905", + "react-tabs-906", + "react-tabs-907", + "react-tabs-908", + "react-tabs-909", + "react-tabs-91", + "react-tabs-910", + "react-tabs-911", + "react-tabs-912", + "react-tabs-913", + "react-tabs-914", + "react-tabs-915", + "react-tabs-916", + "react-tabs-917", + "react-tabs-918", + "react-tabs-919", + "react-tabs-92", + "react-tabs-920", + "react-tabs-921", + "react-tabs-922", + "react-tabs-923", + "react-tabs-924", + "react-tabs-925", + "react-tabs-926", + "react-tabs-927", + "react-tabs-928", + "react-tabs-929", + "react-tabs-93", + "react-tabs-930", + "react-tabs-931", + "react-tabs-932", + "react-tabs-933", + "react-tabs-934", + "react-tabs-935", + "react-tabs-936", + "react-tabs-937", + "react-tabs-938", + "react-tabs-939", + "react-tabs-94", + "react-tabs-940", + "react-tabs-941", + "react-tabs-942", + "react-tabs-943", + "react-tabs-944", + "react-tabs-945", + "react-tabs-946", + "react-tabs-947", + "react-tabs-948", + "react-tabs-949", + "react-tabs-95", + "react-tabs-950", + "react-tabs-951", + "react-tabs-952", + "react-tabs-953", + "react-tabs-954", + "react-tabs-955", + "react-tabs-956", + "react-tabs-957", + "react-tabs-958", + "react-tabs-959", + "react-tabs-96", + "react-tabs-960", + "react-tabs-961", + "react-tabs-962", + "react-tabs-963", + "react-tabs-964", + "react-tabs-965", + "react-tabs-966", + "react-tabs-967", + "react-tabs-968", + "react-tabs-969", + "react-tabs-97", + 
"react-tabs-970", + "react-tabs-971", + "react-tabs-972", + "react-tabs-973", + "react-tabs-974", + "react-tabs-975", + "react-tabs-976", + "react-tabs-977", + "react-tabs-978", + "react-tabs-979", + "react-tabs-98", + "react-tabs-980", + "react-tabs-981", + "react-tabs-982", + "react-tabs-983", + "react-tabs-984", + "react-tabs-985", + "react-tabs-986", + "react-tabs-987", + "react-tabs-988", + "react-tabs-989", + "react-tabs-99", + "react-tabs-990", + "react-tabs-991", + "react-tabs-992", + "react-tabs-993", + "react-tabs-994", + "react-tabs-995", + "react-tabs-996", + "react-tabs-997", + "react-tabs-998", + "react-tabs-999", + "react_dropdown_aria_0", + "react_dropdown_aria_0_list", + "react_dropdown_aria_0_list_0", + "react_dropdown_aria_0_list_1", + "react_dropdown_aria_0_list_2", + "react_dropdown_aria_0_list_3", + "react_dropdown_aria_1", + "react_dropdown_aria_10", + "react_dropdown_aria_10_list", + "react_dropdown_aria_10_list_0", + "react_dropdown_aria_10_list_1", + "react_dropdown_aria_10_list_2", + "react_dropdown_aria_10_list_3", + "react_dropdown_aria_10_list_4", + "react_dropdown_aria_11", + "react_dropdown_aria_11_list", + "react_dropdown_aria_11_list_0", + "react_dropdown_aria_11_list_1", + "react_dropdown_aria_11_list_2", + "react_dropdown_aria_11_list_3", + "react_dropdown_aria_11_list_4", + "react_dropdown_aria_12", + "react_dropdown_aria_12_list", + "react_dropdown_aria_12_list_0", + "react_dropdown_aria_12_list_1", + "react_dropdown_aria_12_list_2", + "react_dropdown_aria_12_list_3", + "react_dropdown_aria_12_list_4", + "react_dropdown_aria_13", + "react_dropdown_aria_13_list", + "react_dropdown_aria_13_list_0", + "react_dropdown_aria_13_list_1", + "react_dropdown_aria_13_list_2", + "react_dropdown_aria_13_list_3", + "react_dropdown_aria_13_list_4", + "react_dropdown_aria_14", + "react_dropdown_aria_14_list", + "react_dropdown_aria_14_list_0", + "react_dropdown_aria_14_list_1", + "react_dropdown_aria_14_list_2", + 
"react_dropdown_aria_14_list_3", + "react_dropdown_aria_14_list_4", + "react_dropdown_aria_15", + "react_dropdown_aria_15_list", + "react_dropdown_aria_15_list_0", + "react_dropdown_aria_15_list_1", + "react_dropdown_aria_15_list_2", + "react_dropdown_aria_15_list_3", + "react_dropdown_aria_15_list_4", + "react_dropdown_aria_16", + "react_dropdown_aria_16_list", + "react_dropdown_aria_16_list_0", + "react_dropdown_aria_16_list_1", + "react_dropdown_aria_16_list_2", + "react_dropdown_aria_16_list_3", + "react_dropdown_aria_17", + "react_dropdown_aria_17_list", + "react_dropdown_aria_17_list_0", + "react_dropdown_aria_17_list_1", + "react_dropdown_aria_17_list_2", + "react_dropdown_aria_17_list_3", + "react_dropdown_aria_17_list_4", + "react_dropdown_aria_17_list_5", + "react_dropdown_aria_18", + "react_dropdown_aria_18_list", + "react_dropdown_aria_18_list_0", + "react_dropdown_aria_18_list_1", + "react_dropdown_aria_18_list_2", + "react_dropdown_aria_18_list_3", + "react_dropdown_aria_18_list_4", + "react_dropdown_aria_18_list_5", + "react_dropdown_aria_19", + "react_dropdown_aria_19_list", + "react_dropdown_aria_19_list_0", + "react_dropdown_aria_19_list_1", + "react_dropdown_aria_19_list_2", + "react_dropdown_aria_19_list_3", + "react_dropdown_aria_19_list_4", + "react_dropdown_aria_19_list_5", + "react_dropdown_aria_1_list", + "react_dropdown_aria_1_list_0", + "react_dropdown_aria_1_list_1", + "react_dropdown_aria_2", + "react_dropdown_aria_20", + "react_dropdown_aria_20_list", + "react_dropdown_aria_20_list_0", + "react_dropdown_aria_20_list_1", + "react_dropdown_aria_20_list_2", + "react_dropdown_aria_20_list_3", + "react_dropdown_aria_20_list_4", + "react_dropdown_aria_20_list_5", + "react_dropdown_aria_21", + "react_dropdown_aria_21_list", + "react_dropdown_aria_21_list_0", + "react_dropdown_aria_21_list_1", + "react_dropdown_aria_21_list_2", + "react_dropdown_aria_21_list_3", + "react_dropdown_aria_21_list_4", + "react_dropdown_aria_21_list_5", + 
"react_dropdown_aria_22", + "react_dropdown_aria_22_list", + "react_dropdown_aria_22_list_0", + "react_dropdown_aria_22_list_1", + "react_dropdown_aria_22_list_2", + "react_dropdown_aria_22_list_3", + "react_dropdown_aria_22_list_4", + "react_dropdown_aria_22_list_5", + "react_dropdown_aria_23", + "react_dropdown_aria_23_list", + "react_dropdown_aria_23_list_0", + "react_dropdown_aria_23_list_1", + "react_dropdown_aria_23_list_2", + "react_dropdown_aria_23_list_3", + "react_dropdown_aria_23_list_4", + "react_dropdown_aria_23_list_5", + "react_dropdown_aria_24", + "react_dropdown_aria_24_list", + "react_dropdown_aria_24_list_0", + "react_dropdown_aria_24_list_1", + "react_dropdown_aria_24_list_2", + "react_dropdown_aria_24_list_3", + "react_dropdown_aria_24_list_4", + "react_dropdown_aria_24_list_5", + "react_dropdown_aria_25", + "react_dropdown_aria_25_list", + "react_dropdown_aria_25_list_0", + "react_dropdown_aria_25_list_1", + "react_dropdown_aria_25_list_2", + "react_dropdown_aria_25_list_3", + "react_dropdown_aria_25_list_4", + "react_dropdown_aria_25_list_5", + "react_dropdown_aria_25_list_6", + "react_dropdown_aria_26", + "react_dropdown_aria_26_list", + "react_dropdown_aria_26_list_0", + "react_dropdown_aria_26_list_1", + "react_dropdown_aria_26_list_2", + "react_dropdown_aria_26_list_3", + "react_dropdown_aria_26_list_4", + "react_dropdown_aria_26_list_5", + "react_dropdown_aria_26_list_6", + "react_dropdown_aria_27", + "react_dropdown_aria_27_list", + "react_dropdown_aria_27_list_0", + "react_dropdown_aria_27_list_1", + "react_dropdown_aria_27_list_2", + "react_dropdown_aria_27_list_3", + "react_dropdown_aria_27_list_4", + "react_dropdown_aria_27_list_5", + "react_dropdown_aria_27_list_6", + "react_dropdown_aria_28", + "react_dropdown_aria_28_list", + "react_dropdown_aria_28_list_0", + "react_dropdown_aria_28_list_1", + "react_dropdown_aria_28_list_2", + "react_dropdown_aria_28_list_3", + "react_dropdown_aria_28_list_4", + "react_dropdown_aria_28_list_5", 
+ "react_dropdown_aria_28_list_6", + "react_dropdown_aria_29", + "react_dropdown_aria_29_list", + "react_dropdown_aria_29_list_0", + "react_dropdown_aria_29_list_1", + "react_dropdown_aria_29_list_2", + "react_dropdown_aria_29_list_3", + "react_dropdown_aria_29_list_4", + "react_dropdown_aria_29_list_5", + "react_dropdown_aria_29_list_6", + "react_dropdown_aria_2_list", + "react_dropdown_aria_2_list_0", + "react_dropdown_aria_2_list_1", + "react_dropdown_aria_3", + "react_dropdown_aria_30", + "react_dropdown_aria_30_list", + "react_dropdown_aria_30_list_0", + "react_dropdown_aria_30_list_1", + "react_dropdown_aria_30_list_2", + "react_dropdown_aria_30_list_3", + "react_dropdown_aria_30_list_4", + "react_dropdown_aria_30_list_5", + "react_dropdown_aria_30_list_6", + "react_dropdown_aria_31", + "react_dropdown_aria_31_list", + "react_dropdown_aria_31_list_0", + "react_dropdown_aria_31_list_1", + "react_dropdown_aria_31_list_2", + "react_dropdown_aria_31_list_3", + "react_dropdown_aria_31_list_4", + "react_dropdown_aria_31_list_5", + "react_dropdown_aria_31_list_6", + "react_dropdown_aria_32", + "react_dropdown_aria_32_list", + "react_dropdown_aria_32_list_0", + "react_dropdown_aria_32_list_1", + "react_dropdown_aria_32_list_2", + "react_dropdown_aria_32_list_3", + "react_dropdown_aria_32_list_4", + "react_dropdown_aria_32_list_5", + "react_dropdown_aria_32_list_6", + "react_dropdown_aria_33", + "react_dropdown_aria_33_list", + "react_dropdown_aria_33_list_0", + "react_dropdown_aria_33_list_1", + "react_dropdown_aria_33_list_2", + "react_dropdown_aria_33_list_3", + "react_dropdown_aria_33_list_4", + "react_dropdown_aria_33_list_5", + "react_dropdown_aria_33_list_6", + "react_dropdown_aria_34", + "react_dropdown_aria_34_list", + "react_dropdown_aria_34_list_0", + "react_dropdown_aria_34_list_1", + "react_dropdown_aria_34_list_2", + "react_dropdown_aria_34_list_3", + "react_dropdown_aria_34_list_4", + "react_dropdown_aria_34_list_5", + "react_dropdown_aria_34_list_6", + 
"react_dropdown_aria_35", + "react_dropdown_aria_35_list", + "react_dropdown_aria_35_list_0", + "react_dropdown_aria_35_list_1", + "react_dropdown_aria_35_list_2", + "react_dropdown_aria_35_list_3", + "react_dropdown_aria_35_list_4", + "react_dropdown_aria_35_list_5", + "react_dropdown_aria_35_list_6", + "react_dropdown_aria_36", + "react_dropdown_aria_36_list", + "react_dropdown_aria_36_list_0", + "react_dropdown_aria_36_list_1", + "react_dropdown_aria_36_list_2", + "react_dropdown_aria_36_list_3", + "react_dropdown_aria_36_list_4", + "react_dropdown_aria_36_list_5", + "react_dropdown_aria_36_list_6", + "react_dropdown_aria_37", + "react_dropdown_aria_37_list", + "react_dropdown_aria_37_list_0", + "react_dropdown_aria_37_list_1", + "react_dropdown_aria_38", + "react_dropdown_aria_38_list", + "react_dropdown_aria_38_list_0", + "react_dropdown_aria_38_list_1", + "react_dropdown_aria_38_list_2", + "react_dropdown_aria_39", + "react_dropdown_aria_39_list", + "react_dropdown_aria_39_list_0", + "react_dropdown_aria_39_list_1", + "react_dropdown_aria_39_list_2", + "react_dropdown_aria_3_list", + "react_dropdown_aria_3_list_0", + "react_dropdown_aria_3_list_1", + "react_dropdown_aria_4", + "react_dropdown_aria_40", + "react_dropdown_aria_40_list", + "react_dropdown_aria_40_list_0", + "react_dropdown_aria_40_list_1", + "react_dropdown_aria_40_list_2", + "react_dropdown_aria_41", + "react_dropdown_aria_41_list", + "react_dropdown_aria_41_list_0", + "react_dropdown_aria_41_list_1", + "react_dropdown_aria_41_list_2", + "react_dropdown_aria_42", + "react_dropdown_aria_42_list", + "react_dropdown_aria_42_list_0", + "react_dropdown_aria_42_list_1", + "react_dropdown_aria_42_list_2", + "react_dropdown_aria_43", + "react_dropdown_aria_43_list", + "react_dropdown_aria_43_list_0", + "react_dropdown_aria_43_list_1", + "react_dropdown_aria_43_list_2", + "react_dropdown_aria_44", + "react_dropdown_aria_44_list", + "react_dropdown_aria_44_list_0", + "react_dropdown_aria_44_list_1", + 
"react_dropdown_aria_44_list_2", + "react_dropdown_aria_45", + "react_dropdown_aria_45_list", + "react_dropdown_aria_45_list_0", + "react_dropdown_aria_45_list_1", + "react_dropdown_aria_45_list_2", + "react_dropdown_aria_46", + "react_dropdown_aria_46_list", + "react_dropdown_aria_46_list_0", + "react_dropdown_aria_46_list_1", + "react_dropdown_aria_47", + "react_dropdown_aria_47_list", + "react_dropdown_aria_47_list_0", + "react_dropdown_aria_47_list_1", + "react_dropdown_aria_48", + "react_dropdown_aria_48_list", + "react_dropdown_aria_48_list_0", + "react_dropdown_aria_48_list_1", + "react_dropdown_aria_4_list", + "react_dropdown_aria_4_list_0", + "react_dropdown_aria_4_list_1", + "react_dropdown_aria_4_list_2", + "react_dropdown_aria_5", + "react_dropdown_aria_5_list", + "react_dropdown_aria_5_list_0", + "react_dropdown_aria_5_list_1", + "react_dropdown_aria_5_list_2", + "react_dropdown_aria_6", + "react_dropdown_aria_6_list", + "react_dropdown_aria_6_list_0", + "react_dropdown_aria_6_list_1", + "react_dropdown_aria_6_list_2", + "react_dropdown_aria_6_list_3", + "react_dropdown_aria_6_list_4", + "react_dropdown_aria_7", + "react_dropdown_aria_7_list", + "react_dropdown_aria_7_list_0", + "react_dropdown_aria_7_list_1", + "react_dropdown_aria_7_list_2", + "react_dropdown_aria_7_list_3", + "react_dropdown_aria_7_list_4", + "react_dropdown_aria_8", + "react_dropdown_aria_8_list", + "react_dropdown_aria_8_list_0", + "react_dropdown_aria_8_list_1", + "react_dropdown_aria_8_list_2", + "react_dropdown_aria_8_list_3", + "react_dropdown_aria_8_list_4", + "react_dropdown_aria_9", + "react_dropdown_aria_9_list", + "react_dropdown_aria_9_list_0", + "react_dropdown_aria_9_list_1", + "react_dropdown_aria_9_list_2", + "react_dropdown_aria_9_list_3", + "react_dropdown_aria_9_list_4", + "read-a-query-plan", + "read-an-explain-report", + "read-and-write-urls-with-authentication", + "read-buffer", + "read-buffer--0", + "read-buffer--0-1", + "read-buffer-1", + 
"read-from-node-ids", + "read-query-limit-errors", + "read-request-duration", + "read-the-physical-plan", + "readdata", + "readers-rolereader", + "reads", + "reads-total", + "readwrite-token", + "real-world-application-of-basic-syntax", + "real-world-example", + "realm", + "realm--influxdb", + "rebalance-influxdb-enterprise-clusters", + "rebalance-procedure-1-rebalance-a-cluster-to-create-space", + "rebalance-procedure-2-rebalance-a-cluster-to-increase-availability", + "rebalance-the-cluster", + "rebuild-the-tsi-index", + "rebuild-the-tsi-index-in-an-influxdb-enterprise-cluster", + "rebuild-tsi-indexes", + "rec", + "recalculate-the-_values-column", + "receiving-fewer-than-n-points", + "recently-executed-queries", + "recipients", + "recognize-and-address-bottlenecks", + "recognize-overlapping-and-duplicate-data", + "recommendations", + "recommended", + "recommended--set-appropriate-directory-permissions", + "recommended--verify-the-authenticity-of-the-data-service-download", + "recommended--verify-the-authenticity-of-the-meta-service-download", + "recommended-cluster-configurations", + "recommended-configuration-for-modern-compatibility", + "recommended-if", + "recommended-if-1", + "recommended-modern-compatibility-cipher-settings", + "recommended-scaling-strategies-per-component", + "recommended-server-configuration-for-modern-compatibility", + "recommended-verify-the-authenticity-of-the-release-download", + "reconcile-unmatched-sources", + "reconnect-a-data-node-with-a-failed-disk", + "record", + "record-a-batch-task-and-assign-a-custom-recording-id", + "record-a-batch-task-using-a-relative-time-range", + "record-a-batch-task-using-an-absolute-time-range", + "record-a-stream-task", + "record-a-stream-task-and-assign-a-custom-recording-id", + "record-an-influxql-query", + "record-an-influxql-query-and-assign-a-custom-recording-id", + "record-an-influxql-query-for-a-specific-influxdb-cluster", + "record-batch", + "record-constraint", + 
"record-is-missing-label-label", + "record-literals", + "record-package", + "record-polymorphism", + "record-query", + "record-query-and-backfill-with-stream", + "record-stream", + "record-syntax", + "record-types", + "recordbatch", + "recordbatchesexec", + "recordbatchesexec-attributes", + "records", + "recover-from-a-failed-restore", + "recover-from-a-license-misconfiguration", + "recover-from-bad-configurations", + "recover-user-credentials", + "recoveredpanics", + "recoveredpanics-1", + "recovery", + "recovery-point-objective-rpo", + "recovery-time-objective-rto", + "recovery_action", + "recovery_url", + "recreate-the-time-column", + "recursively-apply-templates-from-a-directory", + "redfish", + "redhat--centos-64-bit", + "redhat-and-centos-64-bit", + "redhat-and-centos-64-bit-1", + "redhat-and-centos-64-bit-2", + "redirect-http-access-logging", + "redirecting-http-access-logging", + "redis", + "redis_sentinel", + "redistimeseries", + "redoc", + "reduce-query-noise", + "refactor-the-alerting-service", + "reference-dictionary-values", + "reference-keys-statically", + "reference-nested-records", + "reference-tickscripts", + "reference-values-in-a-dynamic-array", + "reference-values-in-a-dynamic-record", + "reference-values-in-a-dynamic-type", + "reference-values-in-a-record", + "reference-values-in-an-array", + "refresh-your-admin-token", + "regenerate-an-admin-token", + "regenerate-an-operator-admin-token", + "regenerating-the-operator-token", + "regex", + "regexp-match", + "regexp-match-case-insensitive", + "regexp-nomatch", + "regexp-nomatch-case-insensitive", + "regexp-package", + "regexp_count", + "regexp_like", + "regexp_match", + "regexp_replace", + "region", + "region-definitions", + "regions-with-multiple-clusters", + "register-a-new-application-with-device-code-flow-enabled", + "regr_avgx", + "regr_avgy", + "regr_count", + "regr_intercept", + "regr_r2", + "regr_slope", + "regr_sxx", + "regr_sxy", + "regr_syy", + "regular-expression", + 
"regular-expression-examples", + "regular-expression-flags", + "regular-expression-functions", + "regular-expression-literals", + "regular-expression-operators", + "regular-expression-syntax", + "regular-expression-types", + "regular-expressions", + "regular-expressions-in-flux", + "rejected-points", + "rejected_points-schema", + "related", + "related-endpoints", + "related-functions", + "related-functions-1", + "related-functions-10", + "related-functions-11", + "related-functions-12", + "related-functions-13", + "related-functions-14", + "related-functions-15", + "related-functions-16", + "related-functions-17", + "related-functions-2", + "related-functions-3", + "related-functions-4", + "related-functions-5", + "related-functions-6", + "related-functions-7", + "related-functions-8", + "related-functions-9", + "related-guide", + "related-guides", + "related-kubernetes-documentation", + "relative-strength-index-rsi-rules", + "relative-time", + "relative_strength_index", + "release-list", + "release-notes", + "release-notes-1", + "release-notes-10", + "release-notes-11", + "release-notes-12", + "release-notes-13", + "release-notes-2", + "release-notes-3", + "release-notes-4", + "release-notes-5", + "release-notes-6", + "release-notes-7", + "release-notes-8", + "release-notes-9", + "release-notes-related-to-influxdb-cloud-dedicated", + "release-notes-related-to-influxdb-clustered", + "release-toc", + "relevant-sideload-properties", + "reliability-features", + "reload", + "reload-a-specific-task", + "reload-all-tasks-with-ids-that-match-a-pattern", + "reload-sideload", + "remap-or-assign-values-in-your-data", + "remote-collect-interval", + "remote-telegraf-configuration", + "remotefile", + "removals", + "remove-a-duplicate-key", + "remove-a-grant", + "remove-a-key-value-pair-from-a-dictionary", + "remove-a-member", + "remove-a-member-from-an-organization", + "remove-a-member-from-an-organization-in-the-influxdb-ui", + 
"remove-a-member-from-an-organization-using-the-influx-cli", + "remove-a-permission-from-a-role", + "remove-a-property-from-a-dictionary", + "remove-a-restriction", + "remove-a-stack", + "remove-a-stack-and-all-of-its-associated-resources", + "remove-a-suffix-from-all-values-in-a-column", + "remove-a-telegraf-configuration", + "remove-a-topic-handler", + "remove-a-user", + "remove-a-user-from-a-role", + "remove-a-user-from-your-organization", + "remove-multiple-telegraf-configurations", + "remove-nodes-from-a-cluster", + "remove-nodes-from-a-cluster-and-assume-yes-to-all-prompts", + "remove-non-printable-unicode-characters-from-all-tsm-files", + "remove-organizations", + "remove-permissions-from-a-user", + "remove-role-permissions", + "remove-subscriptions", + "remove-the-local-meta-node-running-on-8091", + "remove-the-meta-node-running-on-meta28091", + "remove-the-smtp-configuration-override", + "remove-unnecessary-columns-in-large-datasets", + "remove-unnecessary-data", + "remove-users", + "remove-website-demo-data", + "remove-yourself-from-an-organization", + "removed-in-influx-cli-v205", + "removed-in-influxdb-20-beta-16", + "removed-in-influxdb-oss-205", + "removed-prometheusoperator-feature-flag", + "removeshardreq", + "rename", + "rename-a-check", + "rename-a-database", + "rename-columns-using-a-function", + "rename-data-nodes", + "rename-hosts-in-influxdb-enterprise", + "rename-meta-nodes", + "rename-the-output-field-key", + "renew-a-license", + "renew-or-update-a-license-key-or-file", + "renew-your-license", + "repair", + "repair-order", + "repairing-entropy", + "repeat", + "repeat-a-string-based-on-existing-columns", + "repeat-for-each-dbrp-combination", + "repeated-values-in-dvc-results", + "repl", + "replace", + "replace-a-meta-node-in-a-functional-cluster", + "replace-a-node-in-a-cluster-with-security-enable", + "replace-a-specific-number-of-string-matches", + "replace-all-substrings-that-match-a-regular-expression", + 
"replace-an-unresponsive-meta-node", + "replace-column-header-with-annotation-shorthand", + "replace-data-nodes-in-an-influxdb-enterprise-cluster", + "replace-influxdb-enterprise-cluster-meta-nodes-and-data-nodes", + "replace-meta-nodes-in-an-influxdb-enterprise-cluster", + "replace-regular-expression-matches-in-a-string", + "replace-regular-expression-matches-in-string-column-values", + "replace-responsive-and-unresponsive-data-nodes-in-a-cluster", + "replace-string-matches", + "replacement", + "replacing-a-machine-that-is-running-a-data-node", + "replacing-an-unresponsive-data-node", + "replay", + "replay-a-recording", + "replay-data-without-recording", + "replay-live", + "replay-live-batch", + "replay-live-flags", + "replay-live-query", + "replay-the-results-of-a-query-against-an-alert-task", + "replicas", + "replicate-an-organization", + "replicate-data-from-influxdb-oss", + "replicate-downsampled-or-processed-data", + "replicate-writes-to-influxdb-oss-to-influxdb-cloud", + "replication", + "replication-factor", + "replication-factor-1", + "replication-factor-2", + "replication-factor-3", + "replication-factor-rf", + "replication-interval", + "replication-queue-total-points-queued", + "replication-service-call-duration", + "replication-service-call-total", + "replications-queue", + "replications-queue-1", + "replications-queue-2", + "replications-queue-current-bytes-queued", + "replications-queue-remote-write-bytes-sent", + "replications-queue-remote-write-errors", + "replications-queue-total-bytes-queued", + "replications-total", + "report", + "report-db", + "report-disk", + "report-issues", + "report-on-disk-size-by-measurement", + "report-on-disk-size-by-shard", + "report-query-performance-issues", + "report-security-vulnerabilities", + "report-the-cardinality-of-all-buckets", + "report-the-cardinality-of-all-retention-policies", + "report-the-cardinality-of-measurements-in-a-specific-bucket", + "report-the-cardinality-of-measurements-in-all-buckets", + 
"report-the-cardinality-of-tsi-files", + "report-the-total-cardinality-of-your-influxdb-instance", + "reporting", + "reporting-disabled", + "reporting-disabled--false", + "reporttsi", + "representation", + "req", + "req-1", + "reqactive", + "reqdurationns", + "request-a-proof-of-concept-from-the-ui", + "request-an-influxdb-cloud-dedicated-cluster", + "request-body", + "request-body-1", + "request-body-2", + "request-duration-flight-doget-ok--ok", + "request-headers", + "request-headers-1", + "request-headers-2", + "request-headers-3", + "request-headers-4", + "request-headers-5", + "request-help-to-troubleshoot-queries", + "request-multiple-queries", + "request-query-parameters", + "request-query-results-in-csv-format", + "request-query-results-in-csv-format-1", + "request-url", + "request-url-1", + "request-url-2", + "requests-package", + "requests-per-operation---error", + "requests-per-operation---success", + "requests-total", + "require-https-on-the-object-store", + "require-internal-shared-secret-if-meta-auth-enabled", + "require-tls-on-your-catalog-database", + "required", + "required-2x-credentials", + "required-annotations-and-columns", + "required-credentials", + "required-data", + "required-influxdb-cloud-credentials", + "required-input-schema", + "required-parameters", + "required-permissions", + "required-permissions-for-influxdb-cloud", + "required-permissions-for-influxdb-oss", + "required-user-roles", + "requirements", + "requirements-1", + "requirements-and-behavior", + "requirements-and-guidelines", + "requirements-for-influxdb-enterprise-clusters", + "requires-file-system-access", + "requires-global-flags", + "rerun-failed-tasks-with-the-kapacitor-api", + "rerun-update-migrations", + "rerunning-a-recording-of-a-batch-alert", + "rerunning-a-recording-of-a-stream-alert", + "resample-clause", + "resampling-previous-time-intervals", + "resend-an-invitation", + "reserved-characters", + "reserved-keywords", + "reserved-variable-names", + 
"reset-a-users-password", + "resolve-data-type-conflicts", + "resolve-explicit-schema-rejections", + "resolve-high-cardinality", + "resolve-high-series-cardinality", + "resource", + "resource-fields-that-support-environment-references", + "resource-limits", + "resource-management", + "resourcename", + "resourcenametag", + "resources", + "resourcetag", + "responders", + "response", + "response-1", + "response-10", + "response-11", + "response-12", + "response-13", + "response-14", + "response-15", + "response-16", + "response-17", + "response-18", + "response-19", + "response-2", + "response-20", + "response-3", + "response-4", + "response-5", + "response-6", + "response-7", + "response-8", + "response-9", + "response-codes", + "response-format", + "responses", + "responses-1", + "responses-2", + "responses-3", + "restart-kapacitor", + "restart-kapacitor-1", + "restart-kapacitor-2", + "restart-the-influxdb-meta-service", + "restart-the-influxdb-service", + "restart-traffic-to-data-nodes", + "restore-a-backup", + "restore-a-backup-created-prior-to-version-120", + "restore-a-chronograf-database", + "restore-a-database", + "restore-a-database-offline", + "restore-a-database-to-a-remote-influxdb-instance", + "restore-a-specific-database", + "restore-a-specific-retention-policy", + "restore-a-specific-retention-policy-legacy", + "restore-a-specific-retention-policy-offline", + "restore-a-specific-shard", + "restore-a-specific-shard-legacy", + "restore-a-specific-shard-offline", + "restore-all-data-to-a-remote-influxdb-instance", + "restore-all-databases", + "restore-all-time-series-data", + "restore-and-replace-all-data", + "restore-and-replace-all-influxdb-data", + "restore-backup-data", + "restore-backup-data-for-a-specific-bucket-into-a-new-bucket", + "restore-data", + "restore-data-from-a-specific-bucket", + "restore-data-to-a-database-that-already-exists", + "restore-data-to-a-database-that-already-exists-legacy", + 
"restore-data-to-a-retention-policy-that-already-exists", + "restore-data-to-a-retention-policy-that-already-exists-legacy", + "restore-data-to-an-existing-database", + "restore-data-with-the-influx-cli", + "restore-examples", + "restore-flags", + "restore-flags-1", + "restore-from-a--full-backup", + "restore-from-a-full-backup", + "restore-from-a-metadata-backup", + "restore-from-an-incremental-backup", + "restore-from-an-incremental-backup-for-a-database-and-merge-that-database-into-an-existing-database", + "restore-from-an-incremental-backup-for-a-single-database-and-give-the-database-a-new-name", + "restore-overwrite-metadata-from-a-full-or-incremental-backup-to-fix-damaged-metadata", + "restore-to-a-new-influxdb-server", + "restore-to-a-recovery-point", + "restore-utility", + "restore-writes-information-not-part-of-the-original-backup", + "restores-from-an-existing-cluster-to-a-new-cluster", + "restrict-by-database", + "restrict-by-measurement-in-a-database", + "restrict-by-series-in-a-database", + "restrictions", + "restructure-tables", + "result-set", + "resulting-group-key", + "resulting-line-protocol", + "resulting-line-protocol-1", + "results", + "results-1", + "results-2", + "results-structure", + "retain", + "retention", + "retention-autocreate", + "retention-autocreate--true", + "retention-check-duration", + "retention-enforcement-related-configuration-settings", + "retention-period", + "retention-period-syntax", + "retention-period-syntax-influxctl-cli", + "retention-period-syntax-management-api", + "retention-periods", + "retention-policy--", + "retention-policy---1", + "retention-policy---2", + "retention-policy---3", + "retention-policy-management", + "retention-policy-rp", + "retention-policy-settings", + "retention-policy-shard-deletions", + "retentionpolicy", + "retentionrules-example", + "rethinkdb", + "retries", + "retrieve-a-database-token-by-id", + "retrieve-a-key-from-the-influxdb-secret-store", + "retrieve-a-partition-id", + 
"retrieve-a-role-document", + "retrieve-a-script", + "retrieve-a-secret-using-a-non-default-configuration-location", + "retrieve-a-secret-using-the-default-configuration-location", + "retrieve-a-user-details-document", + "retrieve-and-filter-data", + "retrieve-and-list-devices", + "retrieve-and-process-arrow-data", + "retrieve-authentication-credentials-from-a-file", + "retrieve-authentication-credentials-from-environment-variables", + "retrieve-data-example", + "retrieve-data-syntax", + "retrieve-system-information-for-a-query", + "retrieve-the-current-configuration", + "retrieve-the-current-time", + "retrieve-the-health-of-an-influxdb-oss-instance", + "retrieve-your-clusters-admin-token", + "retry-a-kapacitor-flux-task-run", + "retry-a-kapacitor-flux-task-run-api", + "retry-a-limited-number-of-failed-runs-for-a-task", + "retry-a-task-run", + "retry-concurrency", + "retry-failed-flux-task-runs-for-a-specific-task", + "retry-failed-flux-task-runs-for-all-tasks", + "retry-failed-kapacitor-flux-tasks", + "retry-failed-runs-for-a-limited-number-of-tasks", + "retry-failed-task-runs", + "retry-failed-task-runs-1", + "retry-failed-task-runs-for-a-specific-task-id", + "retry-failed-task-runs-that-occurred-after-a-specific-time", + "retry-failed-task-runs-that-occurred-before-a-specific-time", + "retry-failed-task-runs-that-occurred-in-a-specific-time-range", + "retry-flux-task-runs-that-failed-in-a-specific-time-range", + "retry-interval", + "retry-max-interval", + "retry-rate-limit", + "return-a-cumulative-state-count", + "return-a-cumulative-state-count-and-duration", + "return-a-cumulative-state-duration-in-milliseconds", + "return-a-diff-between-a-stream-of-tables-an-the-expected-output", + "return-a-diff-between-a-stream-of-tables-and-the-expected-output", + "return-a-fixed-location-record", + "return-a-list-of-measurements-in-an-influxdb-bucket", + "return-a-nan-value", + "return-a-property-of-a-dictionary", + 
"return-a-stream-of-tables-with-the-current-system-time", + "return-a-timestamp-representing-today", + "return-a-timezone-based-location-record", + "return-a-value-representing-the-50th-percentile-of-each-input-table", + "return-all-distinct-group-key-columns-in-a-single-table", + "return-all-property-keys-in-a-record", + "return-an-alert-level-based-on-a-value", + "return-an-array-of-regular-expression-matches", + "return-an-infinity-float-value-from-a-positive-or-negative-sign-value", + "return-distinct-values-from-a-non-default-column", + "return-distinct-values-from-data-with-null-values", + "return-distinct-values-from-each-input-table", + "return-distinct-values-from-the-_value-column", + "return-group-key-columns-as-an-array", + "return-group-key-columns-for-each-input-table", + "return-information-about-a-cluster", + "return-negative-derivative-values", + "return-part-of-a-string-based-on-character-index", + "return-query-results-with-millisecond-unix-timestamps", + "return-records-representing-state-changes", + "return-results-as-json-or-csv", + "return-rows-with-an-aggregate-value-greater-than-a-specified-number", + "return-rows-with-the-three-highest-values-in-each-input-table", + "return-rows-with-the-two-lowest-values-in-each-input-table", + "return-rows-with-unique-values-in-each-input-table", + "return-start-and-stop-timestamps-for-the-current-month", + "return-start-and-stop-timestamps-of-last-friday", + "return-start-and-stop-timestamps-of-last-monday", + "return-start-and-stop-timestamps-of-last-saturday", + "return-start-and-stop-timestamps-of-last-sunday", + "return-start-and-stop-timestamps-of-last-thursday", + "return-start-and-stop-timestamps-of-last-tuesday", + "return-start-and-stop-timestamps-of-the-current-week-starting-on-monday", + "return-start-and-stop-timestamps-of-the-current-week-starting-on-sunday", + "return-start-and-stop-timestamps-of-yesterday", + "return-statements", + "return-the-absolute-value", + 
"return-the-acosine-of-a-value", + "return-the-arcsine-of-a-value", + "return-the-arctangent-of-a-value", + "return-the-average-of-values-in-each-input-table", + "return-the-average-result-greater-than-a-specified-number-from-a-specific-time-range", + "return-the-base-10-exponential-of-n", + "return-the-base-2-exponential-of-a-value", + "return-the-base-e-exponential-of-a-value", + "return-the-base-x-exponential-of-a-value", + "return-the-binary-exponent-of-a-value", + "return-the-binary-expression-of-a-value", + "return-the-binary-logarithm-of-a-value", + "return-the-center-coordinates-of-an-s2-cell", + "return-the-complementary-error-function-of-a-value", + "return-the-copysign-of-two-columns", + "return-the-covariance-between-two-streams-of-tables", + "return-the-cube-root-of-a-value", + "return-the-current-day-of-the-month", + "return-the-current-day-of-the-week", + "return-the-current-day-of-the-year", + "return-the-current-hour", + "return-the-current-microsecond-unit", + "return-the-current-millisecond-unit", + "return-the-current-minute", + "return-the-current-nanosecond-unit", + "return-the-current-numeric-month", + "return-the-current-quarter", + "return-the-current-second", + "return-the-current-week-of-the-year", + "return-the-current-year", + "return-the-day-of-the-month-for-a-relative-duration", + "return-the-day-of-the-month-for-a-time-value", + "return-the-day-of-the-week-for-a-relative-duration", + "return-the-day-of-the-week-for-a-time-value", + "return-the-day-of-the-year-for-a-relative-duration", + "return-the-day-of-the-year-for-a-time-value", + "return-the-decimal-lagarithm-of-a-value", + "return-the-difference-between-minimum-and-maximum-values", + "return-the-error-function-of-a-value", + "return-the-first-non-null-value-in-each-input-table", + "return-the-first-regular-expression-match-in-a-string", + "return-the-first-row-in-each-input-table", + "return-the-first-temperature-from-each-room", + 
"return-the-flux-version-in-a-stream-of-tables", + "return-the-gamma-function-of-a-value", + "return-the-highest-and-lowest-three-results-in-a-single-result-set", + "return-the-highest-current-value-from-a-stream-of-tables", + "return-the-highest-table-average-from-a-stream-of-tables", + "return-the-highest-two-values-from-a-stream-of-tables", + "return-the-hour-in-local-time", + "return-the-hour-of-a-relative-duration", + "return-the-hour-of-a-time-value", + "return-the-hyperbolic-sine-of-a-value", + "return-the-hyperbolic-tangent-of-a-value", + "return-the-hypotenuse-of-two-values", + "return-the-integer-and-float-that-sum-to-a-value", + "return-the-inverse-complimentary-error-function", + "return-the-inverse-error-function-of-a-value", + "return-the-inverse-hyperbolic-cosine-of-a-value", + "return-the-inverse-hyperbolic-sine-of-a-value", + "return-the-inverse-of-mathfrexp", + "return-the-larger-of-two-values", + "return-the-last-non-null-value-in-each-input-table", + "return-the-last-row-from-each-input-table", + "return-the-last-temperature-from-each-room", + "return-the-length-of-an-array", + "return-the-lesser-of-two-values", + "return-the-lowest-current-value-from-a-stream-of-tables", + "return-the-lowest-table-average-from-a-stream-of-tables", + "return-the-lowest-two-values-from-a-stream-of-tables", + "return-the-maximum-difference-between-two-values", + "return-the-maximum-temperature-from-each-room", + "return-the-microsecond-of-a-relative-duration", + "return-the-microsecond-of-a-time-value", + "return-the-millisecond-of-a-relative-duration", + "return-the-millisecond-of-the-time-value", + "return-the-minute-of-a-relative-duration", + "return-the-minute-of-a-time-value", + "return-the-mode-of-each-input-table", + "return-the-modulo-of-two-values", + "return-the-month-of-a-relative-duration", + "return-the-month-of-a-time-value", + "return-the-nanosecond-for-a-relative-duration", + "return-the-nanosecond-for-a-time-value", + 
"return-the-natural-logarithm-and-sign-of-a-gamma-function", + "return-the-natural-logarithm-of-a-value", + "return-the-natural-logarithm-of-values-near-zero", + "return-the-nearest-integer-less-than-a-value", + "return-the-newest-points-first", + "return-the-newest-points-first-and-include-a-group-by-time-clause", + "return-the-next-possible-float-value", + "return-the-normalize-fraction-and-integral-of-a-value", + "return-the-nth-row", + "return-the-number-of-times-that-values-in-a-series-change", + "return-the-order-n-bessel-function-of-a-value", + "return-the-order-one-bessel-function-of-a-value", + "return-the-order-zero-bessel-function-of-a-value", + "return-the-quarter-for-a-relative-duration", + "return-the-quarter-for-a-time-value", + "return-the-remainder-of-division-between-two-values", + "return-the-row-with-the-lowest-value-in-each-input-table", + "return-the-row-with-the-maximum-value-from-each-input-table", + "return-the-row-with-the-maximum-value-in-each-input-table", + "return-the-row-with-the-minimum-value", + "return-the-running-total-of-values-in-each-table", + "return-the-s2-cell-level-of-an-s2-cell-id-token", + "return-the-second-of-a-relative-duration", + "return-the-second-of-a-time-value", + "return-the-sine-and-cosine-of-a-value", + "return-the-sine-of-a-radian-value", + "return-the-skew-of-input-tables", + "return-the-skew-of-values", + "return-the-spread-of-values", + "return-the-square-root-of-a-value", + "return-the-standard-deviation-in-input-tables", + "return-the-standard-deviation-of-values-in-each-table", + "return-the-start-time-of-each-time-interval", + "return-the-string-representation-of-a-dictionary", + "return-the-string-representation-of-a-record", + "return-the-string-representation-of-an-array", + "return-the-sum-of-each-input-table", + "return-the-sum-of-values-in-each-table", + "return-the-tangent-of-a-radian-value", + "return-the-time-an-influxdb-task-last-successfully-ran", + 
"return-the-time-for-a-given-relative-duration", + "return-the-time-for-a-given-time", + "return-the-time-spent-in-a-specified-state", + "return-the-total-distance-travelled-in-miles", + "return-the-total-distance-travelled-per-input-table", + "return-the-utc-offset-for-chicagos-time-zone", + "return-the-week-of-the-year", + "return-the-week-of-the-year-using-a-relative-duration", + "return-the-year-for-a-relative-duration", + "return-the-year-for-a-time-value", + "return-unique-values-from-input-tables", + "return-values", + "return-values-in-the-50th-percentile-of-each-input-table", + "returns", + "returns-the-following", + "returns-the-following-stream-of-tables", + "reverse", + "reverse_dns", + "review-1x-user-privileges", + "review-and-resolve-plan-limit-overages", + "review-cluster-configuration-tables", + "review-contribution-guidelines", + "review-http-status-codes", + "review-open-source-license", + "review-production-installation-configurations", + "review-rejected-points", + "review-requirements", + "review-tags", + "revoke", + "revoke-a-database-token", + "revoke-a-management-token", + "revoke-access-from-a-management-token", + "revoke-access-from-a-token-and-skip-confirmation", + "revoke-access-from-multiple-management-tokens", + "revoke-administrative-privileges-from-an-admin-user", + "revoke-an-admin-token", + "revoke-multiple-database-tokens", + "revoke-read-write-or-all-database-privileges-from-an-existing-user", + "revoked-tokens-are-included-in-output", + "revoked-tokens-are-included-when-listing-management-tokens", + "revoked-tokens-are-included-when-listing-tokens", + "revoking-a-token-is-immediate-and-cannot-be-undone", + "rewritten-riemann-plugin", + "rfc3339-timestamp", + "rfc3339-to-unix-nanosecond", + "rfc3339_date_time_string", + "rfc3339_like_date_time_string", + "rfc3339nano-timestamp", + "rhajek-package", + "riak", + "riemann", + "riemann_listener", + "right", + "right-input", + "right-outer-join", + "right-outer-join-example", + 
"right-outer-join-result", + "right-outer-join-results", + "rohansreerama5-package", + "role-based-access-controls-rbac", + "roles", + "roll-back-to-a-previous-version", + "room", + "roomid", + "root-configuration-options", + "rotate-revoked-tokens", + "round", + "round-a-value-to-the-nearest-integer", + "round-a-value-to-the-nearest-whole-number", + "round-a-value-up-to-the-nearest-integer", + "round-field-values-associated-with-a-field-key", + "round-field-values-associated-with-a-field-key-and-include-several-clauses", + "round-field-values-associated-with-each-field-key-in-a-measurement", + "round-float-values", + "round-float-values-before-converting-to-integers", + "round-float-values-before-converting-to-uintegers", + "router", + "router-latency", + "router-scaling-strategies", + "routes", + "routing-key", + "routingkey", + "row", + "row-template", + "row-template-file", + "row-template-file-1", + "row-templates", + "row_number", + "rowkey", + "rows", + "rp", + "rpad", + "rpc-enterprise-only", + "rpccalls", + "rpcfailures", + "rpcreadbytes", + "rpcretries", + "rpcwritebytes", + "rpm-package-upgrade", + "rsa-key-size", + "rtrim", + "ruby", + "rules", + "run", + "run-a-query-from-a-file", + "run-a-server-in-specific-modes", + "run-a-server-specifically-for-compacting-data", + "run-a-show-databases-query", + "run-a-show-field-keys-query-with-the-from-clause", + "run-a-show-field-keys-query-with-the-on-clause", + "run-a-show-field-keys-query-without-the-on-clause", + "run-a-show-measurements-query-with-several-clauses-i", + "run-a-show-measurements-query-with-several-clauses-ii", + "run-a-show-measurements-query-with-the-on-clause", + "run-a-show-measurements-query-without-the-on-clause", + "run-a-show-retention-policies-query-with-the-on-clause", + "run-a-show-retention-policies-query-without-the-on-clause", + "run-a-show-series-query-limited-by-time", + "run-a-show-series-query-with-several-clauses", + "run-a-show-series-query-with-the-on-clause", + 
"run-a-show-series-query-without-the-on-clause", + "run-a-show-tag-keys-query-with-a-with-key-in-clause", + "run-a-show-tag-keys-query-with-several-clauses", + "run-a-show-tag-keys-query-with-the-on-clause", + "run-a-show-tag-keys-query-without-the-on-clause", + "run-a-show-tag-values-query-with-several-clauses", + "run-a-show-tag-values-query-with-the-on-clause", + "run-a-show-tag-values-query-without-the-on-clause", + "run-a-single-telegraf-configuration-and-output-metrics-to-stdout", + "run-a-task", + "run-a-task-from-the-influxdb-ui", + "run-a-task-with-the-influx-cli", + "run-a-task-with-the-influxdb-api", + "run-custom-preprocessing-code", + "run-in-admin-mode", + "run-in-query-mode", + "run-in-query-or-admin-mode", + "run-influxd-version-in-your-terminal", + "run-influxd-with-tls-flags", + "run-influxdb-3-core-with-debug-logging-using-log_filter", + "run-influxdb-3-enterprise-with-debug-logging-using-log_filter", + "run-influxdb-3-with-debug-logging-using-log_filter", + "run-influxdb-cli-commands-in-a-container", + "run-influxdb-directly", + "run-influxdb-on-macos-catalina", + "run-influxdb-on-macos-ventura", + "run-initial-setup-process", + "run-interval", + "run-interval--1s", + "run-kapacitor-with-custom-configuration-settings", + "run-kapacitor-with-default-settings", + "run-meta-nodes-on-separate-servers", + "run-queries-at-multiple-load-scales", + "run-show-field-keys-with-the-from-clause", + "run-show-field-keys-with-the-on-clause", + "run-show-measurements-with-several-clauses-i", + "run-show-measurements-with-several-clauses-ii", + "run-show-measurements-with-the-on-clause", + "run-show-series-with-several-clauses", + "run-show-series-with-the-on-clause", + "run-show-tag-keys-with-a-with-key-in-clause", + "run-show-tag-keys-with-several-clauses", + "run-show-tag-keys-with-the-on-clause", + "run-show-tag-values-with-the-on-clause", + "run-telegraf-as-a-background-service", + "run-telegraf-but-only-enable-specific-plugins", + 
"run-telegraf-in-your-terminal", + "run-telegraf-with-all-plugins-defined-in-configuration-file", + "run-telegraf-with-pprof", + "run-the-application", + "run-the-custom-builder-to-create-a-telegraf-binary", + "run-the-define-template-command", + "run-the-example", + "run-the-example-to-write-and-query-data", + "run-the-influxdb-3-core-server-with-extra-verbose-logging", + "run-the-influxdb-3-enterprise-server-with-extra-verbose-logging", + "run-the-influxdb-3-explorer-docker-container", + "run-the-influxdb-3-explorer-query-interface-beta", + "run-the-influxdb-3-server", + "run-the-influxdb-3-server-with-extra-verbose-logging", + "run-the-machine-data-generator", + "run-the-object-store-in-a-separate-namespace-or-outside-of-kubernetes", + "running-influxdb-directly", + "running-kapacitor-with-the-udf", + "runningstate", + "runtime", + "runtime-package", + "rust", + "s", + "s2-cell-ids", + "s2cellidlevel", + "s2geo", + "s3-object-store", + "s7comm", + "salesforce", + "saml-certificate-rotation", + "sample", + "sample-configuration", + "sample-configuration-1", + "sample-data", + "sample-data-1", + "sample-data-community-template-url", + "sample-data-series", + "sample-every-other-result", + "sample-iot-application-and-code-snippets-for-nodejs", + "sample-iot-application-and-code-snippets-for-python", + "sample-ldap-configuration", + "sample-package", + "sample-query", + "sample-sensor-data", + "sample-with-a-group-by-time-clause", + "sampledata-package", + "sap-hana-data-source-name", + "sap-hana-to-flux-data-type-conversion", + "sasl-access-token", + "sasl-extensions", + "sasl-gssapi-auth-type", + "sasl-gssapi-disable-pafxfast", + "sasl-gssapi-kerberos-config-path", + "sasl-gssapi-key-tab-path", + "sasl-gssapi-realm", + "sasl-gssapi-service-name", + "sasl-mechanism", + "sasl-oauth-client-id", + "sasl-oauth-client-secret", + "sasl-oauth-scopes", + "sasl-oauth-service", + "sasl-oauth-tenant-id", + "sasl-oauth-token-expiry-margin", + "sasl-oauth-token-url", + 
"sasl-password", + "sasl-username", + "sasl-version", + "save-as-csv", + "save-notebook-appears-before-first-save", + "save-telegraf-configurations", + "save-time-with-stacks", + "save-your-cell", + "save-your-query-as-a-dashboard-cell-or-task", + "save-your-work", + "saving-diamond-metrics-into-influxdb", + "scala", + "scalar-subqueries", + "scale", + "scale-available-memory", + "scale-components-in-your-cluster", + "scale-your-catalog-and-object-store", + "scale-your-cluster-as-a-whole", + "scale-your-influxdb-cluster", + "scale-your-machines-disk-capacity", + "scaling-strategies", + "scatter", + "scatter-behavior", + "scatter-controls", + "scatter-examples", + "scenario", + "scenarios", + "schedule-and-coverage", + "schedule-every", + "scheduled-events-example", + "scheduling-and-coverage", + "schema", + "schema-1", + "schema-2", + "schema-3", + "schema-4", + "schema-differences", + "schema-exploration", + "schema-information", + "schema-information-is-not-available", + "schema-on-write", + "schema-package", + "schema-pane", + "schema-recommendations", + "schema-restrictions", + "schemaless-design", + "scientific", + "scientific-notation", + "scrape", + "scrape-data-using-influxdb-scrapers", + "scrape-influxdb-oss-internal-metrics", + "scrape-prometheus-metrics", + "scrapers-total", + "scraping-tools-and-parsing-format", + "script-editor", + "script-pane", + "search-and-filter-logs", + "search-btn", + "season-1", + "season-2", + "season-3", + "season-4", + "season-5", + "season-6", + "season-7", + "seasonality", + "seasonality-1", + "second", + "second-whitespace", + "secret", + "secret-store", + "secret-stores", + "secret-stores-1", + "secret-stores-2", + "secrets-package", + "section/Authentication/BasicAuthentication", + "section/Authentication/QuerystringAuthentication", + "section/Authentication/TokenAuthentication", + "secure-by-default", + "secure-communication", + "secure-influxdb-and-kapacitor", + "secure-kapacitor-and-chronograf", + 
"secure-software-development-life-cycle-sdlc", + "secure-your-host", + "security", + "security-1", + "security-assessments", + "security-enhancements", + "security-features", + "security-level", + "security-level--none", + "security-updates", + "security-updates-1", + "security-updates-2", + "see-how-groups-frame-units-work", + "see-how-range-frame-units-work-with-interval-offsets", + "see-how-range-frame-units-work-with-numeric-offsets", + "see-how-rows-frame-units-work", + "seed", + "select", + "select-a-field-and-perform-basic-arithmetic", + "select-a-field-from-a-measurement-and-perform-basic-arithmetic", + "select-a-field-tag-and-timestamp-from-a-measurement", + "select-a-method-for-calculating-the-median", + "select-a-method-for-calculating-the-quantile", + "select-a-sample-of-the-field-values-associated-with-a-field-key", + "select-a-sample-of-the-field-values-associated-with-a-field-key-and-include-several-clauses", + "select-a-sample-of-the-field-values-associated-with-a-field-key-and-the-relevant-tags-and-fields", + "select-a-sample-of-the-field-values-associated-with-each-field-key-in-a-measurement", + "select-a-sample-of-the-field-values-associated-with-each-field-key-that-matches-a-regular-expression", + "select-a-specific-field-from-a-measurement-and-perform-basic-arithmetic", + "select-a-specific-field-within-relative-time-bounds", + "select-a-timezone", + "select-all-data-from-a-fully-qualified-measurement", + "select-all-data-from-a-fully-qualified-measurement-with-default-retention-policy", + "select-all-data-from-a-measurement-in-a-particular-database", + "select-all-data-from-more-than-one-measurement", + "select-all-data-in-a-measurement", + "select-all-data-in-a-measurement-within-time-bounds", + "select-all-fields-and-tags-from-a-measurement", + "select-all-fields-and-tags-from-a-single-measurement", + "select-all-fields-from-a-measurement", + "select-all-fields-from-a-single-measurement", + "select-clause", + "select-clause-behaviors", + 
"select-clause-subqueries", + "select-clause-with-correlated-subquery", + "select-data-based-on-tag-value", + "select-data-based-on-tag-value-within-time-bounds", + "select-data-based-on-the-relationship-between-columns", + "select-data-from-a-relative-time-range", + "select-data-from-a-specific-time-range", + "select-data-from-an-ad-hoc-table", + "select-data-from-specific-hours", + "select-data-that-have-a-specific-field-key-value-and-perform-basic-arithmetic", + "select-data-that-have-a-specific-string-field-key-value", + "select-data-that-have-a-specific-tag-key-value", + "select-data-that-have-specific-field-key-values", + "select-data-that-have-specific-field-key-values-and-tag-key-values", + "select-data-that-have-specific-timestamps", + "select-data-with-a-specific-field-key-value-and-perform-basic-arithmetic", + "select-data-with-a-specific-string-field-key-value", + "select-data-with-a-specific-tag-key-value", + "select-data-with-a-specific-tag-value", + "select-data-with-field-values-above-a-threshold-and-a-specific-tag-value", + "select-data-with-specific-field-key-values", + "select-data-with-specific-field-key-values-and-tag-key-valuest", + "select-field-values-above-a-threshold", + "select-field-values-based-on-arithmetic", + "select-fifth-percentile-field-value-associated-with-each-field-key-that-matches-a-regular-expression", + "select-local-time-or-utc-coordinated-universal-time", + "select-only-columns-you-need", + "select-query-includes-group-by-time", + "select-specific-field-values", + "select-specific-fields-and-tags-from-a-measurement", + "select-specific-tags-and-fields-from-a-measurement", + "select-specific-tags-and-fields-from-a-measurement-and-provide-their-identifier-type", + "select-specific-tags-and-fields-from-a-single-measurement", + "select-specific-tags-and-fields-from-a-single-measurement-and-provide-their-identifier-type", + "select-specific-times", + "select-statement", + "select-statement-and-from-clause", + 
"select-statement-examples", + "select-subquery-examples", + "select-subquery-syntax", + "select-the-50th-percentile-value-from-a-field", + "select-the-50th-percentile-value-from-a-field-within-time-windows-grouped-by-time", + "select-the-50th-percentile-value-from-each-field", + "select-the-50th-percentile-value-from-field-keys-that-match-a-regular-expression", + "select-the-bottom-field-value-associated-with-a-field-key-for-two-tags", + "select-the-bottom-field-value-for-two-unique-tag-values", + "select-the-bottom-field-values-for-unique-tag-values-and-within-time-windows-grouped-by-time", + "select-the-bottom-four-field-values-associated-with-a-field-key-and-the-relevant-tags-and-fields", + "select-the-bottom-three-field-values-and-the-tag-value-associated-with-each", + "select-the-bottom-three-field-values-associated-with-a-field-key", + "select-the-bottom-three-field-values-associated-with-a-field-key-and-include-several-clauses", + "select-the-bottom-three-values-of-a-field", + "select-the-fifth-percentile-field-value-associated-with-a-field-key", + "select-the-fifth-percentile-field-value-associated-with-each-field-key-in-a-measurement", + "select-the-fifth-percentile-field-values-associated-with-a-field-key-and-the-relevant-tags-and-fields", + "select-the-first-field-value-associated-with-a-field-key", + "select-the-first-field-value-associated-with-a-field-key-and-include-several-clauses", + "select-the-first-field-value-associated-with-each-field-key-in-a-measurement", + "select-the-first-field-value-associated-with-each-field-key-that-matches-a-regular-expression", + "select-the-first-value-associated-with-a-field-key-and-the-relevant-tags-and-fields", + "select-the-first-value-for-a-field", + "select-the-first-value-from-a-field-within-time-windows-grouped-by-time", + "select-the-first-value-from-each-field", + "select-the-first-value-from-field-keys-that-match-a-regular-expression", + 
"select-the-last-field-value-associated-with-a-field-key-and-include-several-clauses", + "select-the-last-field-value-associated-with-a-field-key-and-the-relevant-tags-and-fields", + "select-the-last-field-value-associated-with-each-field-key-that-matches-a-regular-expression", + "select-the-last-field-values-associated-with-a-field-key", + "select-the-last-field-values-associated-with-each-field-key-in-a-measurement", + "select-the-last-value-for-a-field", + "select-the-last-value-from-a-field-within-time-windows-grouped-by-time", + "select-the-last-value-from-each-field", + "select-the-last-value-from-field-keys-that-match-a-regular-expression", + "select-the-maximum-field-value-associated-with-a-field-key", + "select-the-maximum-field-value-associated-with-a-field-key-and-include-several-clauses", + "select-the-maximum-field-value-associated-with-a-field-key-and-the-relevant-tags-and-fields", + "select-the-maximum-field-value-associated-with-each-field-key-in-a-measurement", + "select-the-maximum-field-value-associated-with-each-field-key-that-matches-a-regular-expression", + "select-the-maximum-value-from-a-field", + "select-the-maximum-value-from-a-field-within-time-windows-grouped-by-time", + "select-the-maximum-value-from-each-field", + "select-the-maximum-value-from-field-keys-that-match-a-regular-expression", + "select-the-minimum-field-value-associated-with-a-field-key", + "select-the-minimum-field-value-associated-with-a-field-key-and-include-several-clauses", + "select-the-minimum-field-value-associated-with-a-field-key-and-the-relevant-tags-and-fields", + "select-the-minimum-field-value-associated-with-each-field-key-in-a-measurement", + "select-the-minimum-field-value-associated-with-each-field-key-that-matches-a-regular-expression", + "select-the-minimum-value-from-a-field", + "select-the-minimum-value-from-a-field-within-time-windows-grouped-by-time", + "select-the-minimum-value-from-each-field", + 
"select-the-minimum-value-from-field-keys-that-match-a-regular-expression", + "select-the-time-range", + "select-the-top-field-value-associated-with-a-field-key-for-two-tags", + "select-the-top-field-value-for-two-unique-tag-values", + "select-the-top-field-values-for-unique-tag-values-and-within-time-windows-grouped-by-time", + "select-the-top-four-field-values-associated-with-a-field-key-and-the-relevant-tags-and-fields", + "select-the-top-three-field-values-and-the-tag-value-associated-with-each", + "select-the-top-three-field-values-associated-with-a-field-key", + "select-the-top-three-field-values-associated-with-a-field-key-and-include-several-clauses", + "select-the-top-three-values-of-a-field", + "select-the-twentieth-percentile-field-value-associated-with-a-field-key-and-include-several-clauses", + "select-time-range", + "select-timezone", + "selecting-a-measurement-without-specifying-a-time-range", + "selecting-tag-keys-in-the-select-clause", + "selecting-tag-keys-in-the-select-statement", + "selection-list", + "selector", + "selector-examples", + "selector-functions", + "selector-functions-in-use", + "selector-functions-may-return-fewer-points-than-expected", + "selector-struct-schema", + "selector_first", + "selector_last", + "selector_max", + "selector_min", + "selectors", + "selectors-and-aggregates", + "self-signed-certificates", + "semantic-error---double-quote-a-boolean-field-value", + "semantic-error---double-quote-a-measurement-name", + "send-a-message-to-an-mqtt-endpoint", + "send-a-message-to-an-mqtt-endpoint-using-input-data", + "send-a-message-to-slack-using-a-slack-webhook", + "send-a-message-to-slack-using-chatpostmessage-api", + "send-a-notification-to-pagerduty-or-http", + "send-a-notification-to-slack", + "send-a-push-notification-note-to-pushbullet", + "send-a-push-notification-to-pushbullet", + "send-a-request-body-that-is-too-large", + "send-a-write-request", + "send-alert-data-to-a-tcp-endpoint-from-a-defined-handler", + 
"send-alert-data-to-a-tcp-endpoint-from-a-tickscript", + "send-alert-email", + "send-alert-to-teams-channel-in-configuration-file", + "send-alert-to-teams-channel-with-webhook-overrides-configuration-file", + "send-alerts", + "send-alerts-to-a-hipchat-room-from-a-tickscript", + "send-alerts-to-a-kafka-cluster-from-a-defined-handler", + "send-alerts-to-a-kafka-cluster-from-a-tickscript", + "send-alerts-to-a-telegram-bot-from-a-tickscript", + "send-alerts-to-an-alerta-room-from-a-defined-handler", + "send-alerts-to-an-alerta-room-from-a-tickscript", + "send-alerts-to-an-mqtt-broker-from-a-defined-handler", + "send-alerts-to-an-mqtt-broker-from-a-tickscript", + "send-alerts-to-an-victorops-room-from-a-defined-handler", + "send-alerts-to-an-victorops-room-from-a-tickscript", + "send-alerts-to-discord-from-a-defined-handler", + "send-alerts-to-discord-from-a-tickscript", + "send-alerts-to-opsgenie-from-a-defined-handler", + "send-alerts-to-opsgenie-from-a-tickscript", + "send-alerts-to-pagerduty-from-a-defined-handler", + "send-alerts-to-pagerduty-from-a-tickscript", + "send-alerts-to-pushover-from-a-defined-handler", + "send-alerts-to-pushover-from-a-tickscript", + "send-alerts-to-sensu-from-a-defined-handler", + "send-alerts-to-sensu-from-a-tickscript", + "send-alerts-to-slack-from-a-defined-handler", + "send-alerts-to-slack-from-a-tickscript", + "send-alerts-to-teams-from-a-defined-handler", + "send-alerts-to-teams-from-a-tickscript", + "send-alerts-to-the-hipchat-room-from-a-defined-handler", + "send-alerts-to-the-hipchat-room-set-in-the-configuration-file", + "send-alerts-to-the-hipchat-room-set-in-the-tickscript", + "send-alerts-to-the-telegram-bot-from-a-defined-handler", + "send-alerts-to-the-telegram-chat-id-set-in-the-configuration-file", + "send-alerts-to-the-telegram-chat-id-set-in-the-tickscript", + "send-alerts-to-zenoss-from-a-defined-handler", + "send-alerts-to-zenoss-from-a-tickscript", + "send-alerts-using-data-in-influxdb", + 
"send-an-event-to-pagerduty", + "send-an-http-post-request-for-each-row", + "send-authenticated-api-requests-with-postman", + "send-critical-alerts-to-alerta", + "send-critical-alerts-to-bigpanda", + "send-critical-events-to-servicenow", + "send-critical-events-to-victorops", + "send-critical-events-to-zenoss", + "send-critical-status-events-to-sensu", + "send-critical-status-notifications-to-slack", + "send-critical-statuses-to-a-discord-channel", + "send-critical-statuses-to-a-microsoft-teams-channel", + "send-critical-statuses-to-a-pagerduty-endpoint", + "send-critical-statuses-to-a-telegram-channel", + "send-critical-statuses-to-opsgenie", + "send-data-collected-by-telegraf-to-your-monitor", + "send-data-in-json-body-with-httppost", + "send-data-to-an-mqtt-broker", + "send-data-to-kafka", + "send-email-alerts-using-the-totemplate-option", + "send-influxdata-output-artifacts", + "send-multiple-queries", + "send-notifications-when-usage-exceeds-an-amount", + "send-push-notifications-to-pushbullet", + "send-status-alerts-to-a-slack-endpoint", + "send-the-last-reported-status-to-a-microsoft-teams-channel", + "send-the-last-reported-status-to-a-opsgenie", + "send-the-last-reported-status-to-a-url", + "send-the-last-reported-status-to-discord", + "send-the-last-reported-status-to-sensu", + "send-the-last-reported-status-to-telegram", + "send-the-last-reported-status-to-webex-teams", + "send-the-last-reported-value-and-incident-type-to-servicenow", + "send-the-last-reported-value-and-incident-type-to-victorops", + "send-the-last-reported-value-and-severity-to-zenoss", + "send-the-last-reported-value-and-status-to-alerta", + "send-the-last-reported-value-and-status-to-bigpanda", + "sending-snmp-traps-from-a-tickscript", + "sending-subscription-data-to-multiple-hosts", + "sensorID", + "sensors", + "sensu", + "sensu-api-key-authentication", + "sensu-package", + "separate-wal-and-data-directories", + "separation-of-environments-and-duties", + "separator", + "separator--", 
+ "september-2021", + "september-2022", + "serializer-plugin-updates", + "serializer-updates", + "serializer-updates-1", + "serializers", + "serializers-1", + "serializers-2", + "serializers-3", + "serializers-4", + "series", + "series-and-series-cardinality", + "series-and-series-cardinality-1", + "series-cardinality", + "series-diagram-wrapper flux", + "series-file", + "series-file-compaction", + "series-file-compaction-on-startup", + "series-file-compactions", + "series-id-set-cache-size", + "series-id-set-cache-size--100", + "series-key", + "series-per-db", + "seriescreate", + "server", + "server-configuration-options", + "server-management", + "server-side-template-summarization-and-validation", + "servererror", + "serverless-urls", + "servers", + "service", + "service-discovery-and-metric-scraping", + "service-input-plugin", + "service-key", + "service-tests", + "servicename", + "servicenametag", + "servicenow", + "servicenow-metrics", + "servicenow-package", + "services", + "session-length", + "session-renew-disabled", + "session-service-call-total", + "session-service-duration", + "session-service-error-total", + "set", + "set-a-basic-authentication-header-in-an-http-post-request", + "set-a-column-to-a-specific-string-value", + "set-a-column-value-to-a-static-value", + "set-a-dbrp-mapping-as-default", + "set-a-password-for-a-v1-authorization", + "set-a-secret-using-a-non-default-configuration-location", + "set-a-secret-using-the-default-configuration-location", + "set-certificate-file-permissions", + "set-configurations-for-your-oauth-provider", + "set-createempty-to-false", + "set-custom-default-chronograf-configuration-options", + "set-debug--true-in-your-settings", + "set-environment-variables", + "set-environment-variables-for-a-component", + "set-execution-mode", + "set-kapacitor-log-level-to-error", + "set-namespaceoverride-if-using-a-namespace-other-than-influxdb", + "set-the-location-option-using-a-fixed-location", + 
"set-the-location-option-using-a-timezone-based-location", + "set-timeouts-specific-to-your-workload", + "set-up-1x-monitoring-dashboards", + "set-up-a-disk-usage-alert", + "set-up-a-flux-task-database-in-influxdb", + "set-up-a-github-repository", + "set-up-a-kubernetes-ingress-controller", + "set-up-a-telegram-bot", + "set-up-a-trigger", + "set-up-an-influxdb-cloud-account-compatible-with-thingworx", + "set-up-and-use-single-sign-on-sso", + "set-up-authentication", + "set-up-authentication-and-authorization", + "set-up-configure-and-start-the-data-node-services", + "set-up-configure-and-start-the-data-services", + "set-up-configure-and-start-the-meta-service", + "set-up-configure-and-start-the-meta-services", + "set-up-data-nodes", + "set-up-database-and-retention-policy-dbrp-mapping", + "set-up-docker-configuration", + "set-up-docker-for-superset", + "set-up-docker-for-superset-and-flight-sql", + "set-up-event-handler", + "set-up-fine-grained-authorization", + "set-up-github-actions-or-circleci", + "set-up-guild", + "set-up-https-in-an-influxdb-enterprise-cluster", + "set-up-https-with-a-ca-certificate", + "set-up-https-with-a-self-signed-certificate", + "set-up-influx-cli-connection-configurations", + "set-up-influxdb", + "set-up-influxdb-enterprise-authorizations", + "set-up-influxdb-enterprise-for-monitoring", + "set-up-influxdb-interactively-with-prompts-for-required-information", + "set-up-influxdb-oss-for-monitoring", + "set-up-influxdb-with-all-required-information-and-skip-confirmation", + "set-up-ingress-tls", + "set-up-internal-kapacitor-authorizations", + "set-up-kapacitor-for-influxdb-cloud-or-2x", + "set-up-local-or-attached-storage", + "set-up-logging", + "set-up-meta-nodes", + "set-up-monitoring", + "set-up-prerequisites", + "set-up-ptc-thingworx", + "set-up-security-organizations-and-users", + "set-up-teams", + "set-up-telegraf", + "set-up-the-band-plot-visualization-in-the-script-editor", + "set-up-the-band-visualization", + 
"set-up-the-band-visualization-in-the-data-explorer", + "set-up-the-influx-cli", + "set-up-the-map-visualization", + "set-up-the-map-visualization-1", + "set-up-the-migration", + "set-up-the-processing-engine", + "set-up-using-docker-compose-secrets", + "set-up-victorops", + "set-up-your-identity-provider", + "set-up-your-initial-user", + "set-up-your-kubernetes-cluster", + "set-up-your-object-store", + "set-up-your-postgresql-compatible-database", + "set-up-your-project", + "set-values-for-multiple-columns", + "set-your-token-for-authentication", + "setting-access-log-status-filters-using-configuration-settings", + "setting-access-log-status-filters-using-environment-variables", + "setting-the-log-level-to-debug", + "setting-up-a-batch-cpu-alert", + "setting-up-a-live-stream-cpu-alert", + "settings-for-batching", + "settings-for-the-tsm-engine", + "setup", + "setup-configure-and-deploy-influxdb-clustered", + "setup-description", + "setup-tip", + "severity", + "severity-colors", + "severity-format", + "severity-map", + "sflow", + "sha224", + "sha256", + "sha384", + "sha512", + "shape-data", + "shape-data-to-work-with-the-geo-package", + "shape-geo-temporal-data", + "shard", + "shard-compaction", + "shard-compaction-related-configuration-settings", + "shard-data-size", + "shard-deletion", + "shard-diagram", + "shard-disk-size", + "shard-dropped-points", + "shard-duration", + "shard-fields-created", + "shard-files", + "shard-group", + "shard-group-diagram", + "shard-group-duration", + "shard-group-duration-configuration-options", + "shard-group-duration-for-backfilling", + "shard-group-duration-management", + "shard-group-duration-overview", + "shard-group-duration-recommendations", + "shard-group-duration-tradeoffs", + "shard-groups", + "shard-life-cycle", + "shard-movement", + "shard-precreation", + "shard-precreation-related-configuration-settings", + "shard-precreation-settings", + "shard-reader-timeout", + "shard-series", + "shard-status-http-endpoint", + 
"shard-write-request-timeouts", + "shard-writer-timeout", + "shard-writes", + "shard-writes-with-errors", + "shards", + "shardstatus", + "share-a-notebook", + "share-data-across-plugins", + "share-notebook", + "share-your-influxdb-templates", + "shared-secret", + "shared-secret--", + "shift", + "shift-a-timestamp-forward-or-backward", + "shift-bits-left-in-an-integer", + "shift-bits-left-in-an-unsigned-integer", + "shift-bits-left-in-integers-in-a-stream-of-tables", + "shift-bits-left-in-unsigned-integers-in-a-stream-of-tables", + "shift-bits-right-in-an-integer", + "shift-bits-right-in-an-unsigned-integer", + "shift-bits-right-in-integers-in-a-stream-of-tables", + "shift-bits-right-in-unsigned-integers-in-a-stream-of-tables", + "shift-timestamps-backward-in-time", + "shift-timestamps-forward-in-time", + "short-circuit-evaluation", + "short-shard-group-duration", + "shorthand-and-longhand-flags", + "should-you-migrate", + "show", + "show-a-users-database-privileges", + "show-all", + "show-all-existing-users-and-their-admin-status", + "show-all-run-logs-for-a-flux-task", + "show-all-run-logs-for-a-task", + "show-all-run-logs-for-a-task-api", + "show-cardinality", + "show-columns", + "show-continuous-queries", + "show-current-value-and-historical-values", + "show-databases", + "show-diagnostics", + "show-diagnostics-measurement-details", + "show-diagnostics-statement", + "show-field-key-cardinality", + "show-field-keys", + "show-field-keys-and-field-type-discrepancies", + "show-grants", + "show-human-readable-current-value", + "show-information-about-a-task", + "show-information-about-a-task-in-the-context-of-a-replay", + "show-information-about-distinct-value-caches", + "show-information-about-last-value-caches", + "show-logs-for-a-specific-flux-task-run", + "show-logs-for-a-specific-flux-task-run-api", + "show-measurement-cardinality", + "show-measurement-key-cardinality", + "show-measurements", + "show-or-hide-the-log-status-histogram", + "show-queries", + 
"show-retention-policies", + "show-secret-values-when-listing-secrets-and-secret-stores", + "show-series", + "show-series-cardinality", + "show-series-exact-cardinality", + "show-shard-groups", + "show-shards", + "show-stats", + "show-stats-1", + "show-stats-for-component", + "show-stats-for-indexes", + "show-stats-measurement-details", + "show-stats-statement", + "show-subscriptions", + "show-tables", + "show-tag-key-cardinality", + "show-tag-keys", + "show-tag-values", + "show-tag-values-cardinality", + "show-template", + "show-topic", + "show-topic-handler", + "show-users", + "shutting-down-the-stack", + "side-effects", + "sideload", + "sideload-source-files", + "sideloadnode-example", + "sigma", + "sign", + "sign-in", + "sign-influxdata-contributor-license-agreement-cla", + "sign-up", + "signalfx", + "signum", + "silent", + "similar-functions", + "simple", + "simple-expressions", + "simple-setup", + "sin", + "single-and-double-quotes", + "single-domain-certificates-signed-by-a-certificate-authority-ca", + "single-node-or-cluster", + "single-quotes", + "single-stat", + "single-stat-behavior", + "single-stat-controls", + "single-stat-examples", + "single-telegraf-configuration", + "single-telegraf-configuration-and-telegraf-configuration-directory", + "singleuse", + "singleuseopen", + "sinh", + "sink-consumer", + "skip-annotation-headers", + "skip-header-lines-in-a-file", + "skip-rows-with-errors", + "skipped-shards", + "slab", + "slack", + "slack-cpu-alerttick", + "slack-cpu-alerttick-1", + "slack-package", + "slack-settings-in-kapacitorconf", + "slack-setup", + "slack_500_errors_dailyyaml", + "slack_cpu_handleryaml", + "slightly-more-setup", + "slimit-clause", + "slurm", + "smart", + "smartctl", + "smoothingfactor", + "smtp", + "snappy-compression", + "snapshotcount", + "snapshotted-wal-files-to-keep", + "snapshotter", + "snmp", + "snmp-agent", + "snmp-trap-data-types", + "snmp_lookup", + "snmp_trap", + "snmptrap", + "snowflake-data-source-name", + 
"snowflake-to-flux-data-type-conversion", + "socket-package", + "socket_listener", + "socket_writer", + "socketstat", + "soffset-clause", + "soft-delete", + "solr", + "solution", + "solution-1", + "solution-2", + "solution-3", + "solution-4", + "solution-5", + "solution-advantages", + "solution-b1", + "solution-b2", + "solution-disadvantages", + "solution-e1", + "solution-e2", + "solution-e3", + "solution-e4", + "solution-explained", + "solution-overview", + "solutions", + "some-type-of-aggregation", + "sort", + "sort-and-limit", + "sort-and-limit-results", + "sort-by-tags", + "sort-data-by-selection-order", + "sort-data-by-tag-or-field-values", + "sort-data-by-time-with-the-most-recent-first", + "sort-data-with-the-newest-points-first", + "sort-data-with-the-oldest-points-first", + "sort-distinct_caches-system-table-output", + "sort-last_caches-system-table-output", + "sort-orders", + "sort-tags-by-key", + "sort-tags-by-query-priority", + "sort-values-in-ascending-order", + "sortexec", + "sorting-with-null-values", + "sorting-yet-to-be-persisted-data", + "sortpreservingmergeexec", + "source", + "source-1-tags-and-fields", + "source-2-tags-and-fields", + "source-examples", + "source-producer", + "space-values-at-even-time-intervals", + "special-characters", + "special-characters-and-keywords", + "specify-a-time-range-relative-to-a-timestamp", + "specify-a-time-range-relative-to-now", + "specify-a-time-range-with-absolute-time-and-relative-time", + "specify-a-time-range-with-epoch-timestamps", + "specify-a-time-range-with-nanosecond-epoch-timestamps", + "specify-a-time-range-with-relative-time", + "specify-a-time-range-with-rfc3339-date-time-strings", + "specify-a-time-range-with-rfc3339-like-date-time-strings", + "specify-a-time-range-with-second-precision-epoch-timestamps", + "specify-a-timeout", + "specify-authentication-credentials-in-the-connection-string", + "specify-authentication-credentials-in-the-dsn", + "specify-csv-character-encoding", + 
"specify-hosts-for-writes-and-queries", + "specify-multiple-functions-in-the-select-clause", + "specify-parameters-of-the-aggregate-function", + "specify-role-permissions", + "specify-that-the-key-is-a-field", + "specify-that-the-key-is-a-tag", + "specify-the-encoding-of-the-influxdb-template-to-summarize", + "specify-the-encoding-of-the-influxdb-template-to-validate", + "specify-the-format-of-the-server-responses-with--format", + "specify-the-output-field-key", + "specify-the-output-field-key-for-multiple-functions", + "specify-timestamp-precision-column", + "specify-timestamp-precision-csv", + "specify-timestamp-precision-json", + "specify-timestamp-precision-or-format", + "specify-your-backup-format", + "specify-your-influxdb-url", + "specifying-dbrp-implicitly", + "specifying-paths-to-the-_series-and-index-directories", + "specifying-paths-to-the-_series-directory-and-an-index-file", + "specifying-paths-to-the-_series-directory-and-multiple-index-files", + "split", + "split-a-string-into-an-array-of-substrings", + "split_part", + "splunk-metrics", + "spread", + "sql", + "sql-and-arrow-data-types", + "sql-data-source-names", + "sql-databases", + "sql-output", + "sql-package", + "sql-query-basics", + "sql-reference-documentation", + "sql-server-ado-authentication", + "sql-server-data-source-name", + "sql-server-to-flux-data-type-conversion", + "sql-wildcard-characters", + "sqlite-data-source-name", + "sqlite-path", + "sqlite-to-flux-data-type-conversion", + "sqlserver", + "sqrt", + "square-a-number", + "square-the-value-in-each-row", + "sranka-package", + "srcid", + "ssl-ca", + "ssl-cert", + "ssl-key", + "sso-auth-flow", + "sso-authorization-flow", + "stability-and-compatibility", + "stack_in_use_bytes", + "stack_sys_bytes", + "stackdriver", + "stacked-graph", + "stacked-graph-controls", + "stacked-graph-example", + "stacks", + "standard-and-air-gapped-deployments", + "standard-deployment-with-internet-access", + "standard-deviation-modes", + "standard-format", + 
"starlark", + "start", + "start-a-vault-server", + "start-an-influxql-shell", + "start-chronograf", + "start-for-free", + "start-in-the-influx-cli-in-flux-mode", + "start-influxdb", + "start-influxdb-and-collect-telegraf-data", + "start-kapacitor", + "start-queries-with-pushdowns", + "start-telegraf", + "start-telegraf-and-verify-data-appears", + "start-the-chronograf-service", + "start-the-data-service", + "start-the-influxql-shell", + "start-the-meta-service", + "start-the-superset-docker-containers", + "start-the-telegraf-service", + "start-the-udf", + "start_sunday", + "startcolumn", + "starting-the-kapacitor-service", + "starts_with", + "startup", + "state", + "state-changes-only", + "statechangesonly", + "statecount", + "stateduration", + "stateful-functions", + "stateless-functions", + "statement-to-permission", + "statements", + "statements-and-clauses", + "statemessage", + "static-datasets", + "static-hostname", + "static-legend", + "static-sample-data-and-bucket-retention-periods", + "statistical-aggregate-functions", + "stats", + "stats-general", + "stats-ingress", + "statsd", + "statsmodels-package", + "status", + "status-codes", + "status-codes-1", + "status-codes-2", + "status-codes-and-responses", + "status-codes-and-responses-1", + "status-codes-and-responses-2", + "statusreq", + "stay-within-the-schema-limits-of-influxdb-cloud-serverless", + "stddev", + "stddev_pop", + "stddev_samp", + "steam-logs-from-the-cpu_alert-tasks", + "steam-pressure-gauge", + "step", + "step-1-download-and-install-chronograf", + "step-1-download-and-install-influxdb", + "step-1-download-and-install-telegraf", + "step-1-download-influx-cli-for-linux", + "step-1-download-influx-cli-for-macos", + "step-1-download-influx-cli-for-windows", + "step-1-enable-authentication", + "step-1-generate-a-self-signed-certificate", + "step-1-install-the-certificate", + "step-1-match-the-trends-of-the-raw-data", + "step-1-truncate-hot-shards", + "step-1-update-the-retention-policy", + 
"step-2-configure-telegraf", + "step-2-determine-the-seasonal-pattern", + "step-2-enable-authentication", + "step-2-expand-the-downloaded-archive", + "step-2-identify-cold-shards", + "step-2-restart-the-influxdb-service", + "step-2-review-the-tls-configuration-settings", + "step-2-set-certificate-file-permissions", + "step-2-start-chronograf", + "step-2-truncate-hot-shards", + "step-2-unpackage-the-influx-binary", + "step-3-apply-the-holt_winters-function", + "step-3-connect-chronograf-to-the-influxdb-oss-instance", + "step-3-copy-cold-shards", + "step-3-create-an-admin-user", + "step-3-enable-https-in-the-configuration-file", + "step-3-grant-network-access", + "step-3-identify-cold-shards", + "step-3-optional-place-the-binary-in-your-path", + "step-3-restart-the-telegraf-service", + "step-3-review-the-tls-configuration-settings", + "step-3-start-influxdb", + "step-4-confirm-the-copied-shards", + "step-4-confirm-the-telegraf-setup", + "step-4-copy-cold-shards", + "step-4-create-an-admin-user", + "step-4-edit-the-influxdb-source-in-chronograf", + "step-4-enable-https-in-the-influxdb-configuration-file", + "step-4-explore-the-monitoring-data-in-chronograf", + "step-4-learn-influx-cli-commands", + "step-4-macos-catalina-and-newer-authorize-influxdb-binaries", + "step-4-macos-catalina-only-authorize-influxdb-binaries", + "step-4-restart-influxdb", + "step-4-set-up-a-configuration-profile", + "step-5-confirm-the-rebalance", + "step-5-learn-influx-cli-commands", + "step-5-remove-unnecessary-cold-shards", + "step-5-restart-the-influxdb-service", + "step-5-set-up-a-configuration-profile", + "step-5-verify-the-https-setup", + "step-6-confirm-the-rebalance", + "step-6-learn-influx-cli-commands", + "step-6-verify-the-https-setup", + "step-plot", + "step-plot-graph", + "step-plot-graph-controls", + "step-plot-graph-example", + "steps-to-create-a-plugin", + "stomp", + "stop", + "stop-currently-running-queries-with-kill-query", + "stop-reading-and-writing-data", + 
"stop-traffic-to-the-data-node", + "stop-writes-and-remove-oss", + "stopcolumn", + "stopping-an-influxdb-3-container", + "stopping-the-kapacitor-service", + "storage", + "storage-cache-max-memory-size", + "storage-cache-snapshot-memory-size", + "storage-cache-snapshot-write-cold-duration", + "storage-compact-full-write-cold-duration", + "storage-compact-throughput-burst", + "storage-engine", + "storage-engine-components", + "storage-engine-diagram", + "storage-engine-stability", + "storage-level", + "storage-level-errors", + "storage-level-limits", + "storage-max-concurrent-compactions", + "storage-max-index-log-file-size", + "storage-no-validate-field-size", + "storage-retention-check-interval", + "storage-series-file-max-concurrent-snapshot-compactions", + "storage-series-id-set-cache-size", + "storage-shard-precreator-advance-period", + "storage-shard-precreator-check-interval", + "storage-tsm-use-madv-willneed", + "storage-type-amount-and-configuration", + "storage-usage", + "storage-validate-keys", + "storage-volume-and-iops", + "storage-wal-flush-on-shutdown", + "storage-wal-fsync-delay", + "storage-wal-max-concurrent-writes", + "storage-wal-max-write-delay", + "storage-write-timeout", + "store", + "store-alert-statuses-for-error-counts", + "store-and-retrieve-cached-data", + "store-cached-data-with-expiration", + "store-database", + "store-database--_internal", + "store-enabled", + "store-enabled--true", + "store-internal-metrics-in-an-external-monitor", + "store-interval", + "store-interval--10s", + "store-secrets-in-vault", + "store-secure-tokens-in-a-secret-store", + "store-sensitive-credentials-as-secrets", + "store-the-length-of-string-values", + "store-your-authorization-token-as-an-environment-variable", + "store-your-database-credentials-as-secrets", + "stores", + "strategies-for-improving-query-performance", + "stream", + "stream-all-logs-from-a-task", + "stream-kapacitor-logs", + "stream-kapacitor-logs-filtered-by-tags", + 
"stream-logs-from-a-task-filtered-by-tags", + "stream-method", + "stream-of-tables", + "stream-or-batch", + "stream-types", + "streamname", + "strict", + "strict-and-non-strict-filtering", + "strict-mode", + "strict-update-and-delete-permissions", + "stricter-input-validation-for-influx-template-commands", + "strictly-filter-data-to-a-specified-region", + "strictly-filter-geotemporal-data-by-region", + "string", + "string-example", + "string-examples", + "string-functions", + "string-lists", + "string-literals", + "string-manipulation-and-data-shaping", + "string-operators", + "string-syntax", + "string-templates", + "string-type-handling", + "string-types", + "string_agg", + "stringable-constraint", + "stringarray", + "strings", + "strings-package", + "strong-passwords", + "strpos", + "structure-results-like-influxql", + "structured-logging", + "subcommands", + "submit-a-flux-query-via-parameter", + "submit-a-flux-query-via-via-stdin", + "submit-custom-date", + "submit-issues-for-unexplained-behaviors-or-errors", + "submit-queries-from-a-file", + "submit-queries-from-a-file-1", + "subqueries", + "subquery", + "subquery-categories", + "subquery-operators", + "subscribe-through-a-cloud-provider", + "subscribe-through-influxdata", + "subscriber", + "subscriber-settings", + "subscription", + "subscription-mode", + "subscription-protocol", + "subscription-protocols", + "subscription-settings", + "subscriptions", + "substituting-for-a-having-clause", + "substituting-for-nested-functions", + "substr", + "substr_index", + "substring", + "substring_index", + "subsystems-and-services", + "subtract-a-duration-from-a-time-value", + "subtract-a-duration-from-a-timestamp", + "subtract-six-hours-from-a-relative-duration", + "subtract-six-hours-from-a-timestamp", + "subtract-two-days-from-one-hour-ago", + "subtractable-constraint", + "subtraction", + "subtraction-rules-for-numeric-types", + "subwritedrop", + "subwriteok", + "successful-request-duration-flight-doget", + "suffix", 
+ "sum", + "summarize-all-influxdb-templates-in-a-directory", + "summarize-an-influxdb-template-from-a-local-file", + "summarize-an-influxdb-template-from-a-url", + "summarize-influxdb-templates-from-multiple-files", + "summarize-query-results-and-data-distribution", + "summarize-system-table-data", + "summarize-system-table-data-in-json-formatted-output", + "summary", + "summary-of-variable-use-between-syntax-sub-spaces", + "summary-table", + "summary-table-1", + "summarycutoff", + "summing-up", + "sumologic", + "supervisor", + "support-for-1x-storage-engine-and-influxdb-1x-compatibility-api", + "support-for-bypassing-identity-provider-configuration-for-databasetoken-management", + "support-for-google-cloud-storage-gcs", + "support-for-http-sources-in-sideloadnode", + "support-for-stream-tasks", + "supported-alert-levels", + "supported-array-types", + "supported-array-types-and-behaviors", + "supported-data-formats", + "supported-data-types", + "supported-database-engines", + "supported-influxql-queries", + "supported-operations", + "supported-operators", + "supported-operators-1", + "supported-operators-2", + "supported-operators-3", + "supported-operators-4", + "supported-parameter-data-types", + "supported-protocols", + "supported-releases", + "supported-timestamp-formats", + "supported-timestamp-values", + "supported-types", + "supported-types-and-behaviors", + "suppress-kapacitor-alerts-based-on-hierarchy", + "suppress-logo", + "suppress-logo--false", + "suppress-write-log", + "suppress-write-log--false", + "suricata", + "swap", + "swarm", + "swarmautoscale", + "switch-index-types", + "switch-influxdb-cloud-accounts", + "switch-influxdb-cloud-organizations", + "switch-organizations", + "switch-organizations-and-accounts-in-the-header", + "switch-organizations-in-the-influxdb-ui", + "switch-the-current-organization", + "switch-the-current-organization-1", + "symptoms-of-entropy", + "synchronize-hosts-with-ntp", + "synchronize-time-between-hosts", + 
"synchronous-writing", + "synproxy", + "syntax", + "syntax-1", + "syntax-10", + "syntax-11", + "syntax-12", + "syntax-13", + "syntax-14", + "syntax-15", + "syntax-16", + "syntax-17", + "syntax-18", + "syntax-19", + "syntax-2", + "syntax-20", + "syntax-3", + "syntax-4", + "syntax-5", + "syntax-6", + "syntax-7", + "syntax-8", + "syntax-9", + "syntax-description", + "syntax-documentation", + "syntax-to-restore-from-a-full-or-manifest-only-backup", + "syntax-to-restore-from-incremental-and-metadata-backups", + "sys", + "sys_bytes", + "syslog", + "sysstat", + "system", + "system-1", + "system-built-ins", + "system-metrics-dashboard-template", + "system-package", + "system-query-examples", + "system-requirements", + "system-tables", + "system-tables-are-subject-to-change", + "system_-sample-data", + "systemcompactor", + "systemd", + "systemd-permission-errors", + "systemd-systems", + "systemd_timings", + "systemd_units", + "systempartitions", + "systemqueries", + "systems", + "systemtables", + "sysvinit", + "sysvinit-systems", + "t", + "t1", + "t2", + "table", + "table-and-column-limits", + "table-behavior", + "table-columns", + "table-controls", + "table-examples", + "table-extraction", + "table-formatted-results", + "table-grouping-example", + "table-is-missing-column-column", + "table-is-missing-label-label", + "table-limit", + "table-package", + "table-schemas-should-be-homogenous", + "table-view-example", + "tableau-desktop", + "tables", + "tablescan", + "tacacs", + "tag", + "tag-and-field-key-with-the-same-name", + "tag-and-field-naming-requirements", + "tag-bucket-part-templates", + "tag-field-or-named-result", + "tag-key", + "tag-key-variable-use-cases", + "tag-keys", + "tag-order-does-not-matter", + "tag-part-templates", + "tag-set", + "tag-specification", + "tag-value", + "tag-value-variable-use-cases", + "tag-values", + "tag/Authentication", + "tag/Authorizations-(API-tokens)", + "tag/Backup", + "tag/Bucket-Schemas", + "tag/Buckets", + "tag/Cells", + 
"tag/Checks", + "tag/Config", + "tag/DBRPs", + "tag/Dashboards", + "tag/Data-IO-endpoints", + "tag/Debug", + "tag/Delete", + "tag/Headers", + "tag/Health", + "tag/Invokable-Scripts", + "tag/Labels", + "tag/Legacy-Authorizations", + "tag/Legacy-Query", + "tag/Legacy-Write", + "tag/Limits", + "tag/Metrics", + "tag/NotificationEndpoints", + "tag/NotificationRules", + "tag/Organizations", + "tag/Pagination", + "tag/Ping", + "tag/Query", + "tag/Quick-start", + "tag/Ready", + "tag/RemoteConnections", + "tag/Replications", + "tag/Resources", + "tag/Response-codes", + "tag/Restore", + "tag/Routes", + "tag/Rules", + "tag/Scraper-Targets", + "tag/Secrets", + "tag/Security-and-access-endpoints", + "tag/Setup", + "tag/Signin", + "tag/Signout", + "tag/Sources", + "tag/Supported-operations", + "tag/System-information-endpoints", + "tag/Tasks", + "tag/Telegraf-Plugins", + "tag/Telegrafs", + "tag/Templates", + "tag/Usage", + "tag/Users", + "tag/Variables", + "tag/Views", + "tag/Write", + "tag_keys", + "tag_limit", + "tag_name_expansion-optional", + "tag_selection-tag_name-tag_value-optional", + "tagcolumns", + "tags", + "tags-are-indexed", + "tags-sub-section", + "tags-versus-fields", + "tags-with-empty-values", + "tail", + "tailing-kapacitor-logs", + "talk", + "tan", + "tanh", + "task", + "task-configuration-options", + "task-details", + "task-executor-errors", + "task-executor-promise-queue-usage", + "task-executor-run-duration", + "task-executor-run-latency-seconds", + "task-executor-run-queue-delta", + "task-executor-total-runs-active", + "task-executor-total-runs-complete", + "task-executor-workers-busy", + "task-metadata", + "task-options-for-invokable-scripts", + "task-scheduler-current-execution", + "task-scheduler-execute-delta", + "task-scheduler-schedule-delay", + "task-scheduler-total-execute-failure", + "task-scheduler-total-execution-calls", + "task-scheduler-total-release-calls", + "task-scheduler-total-schedule-calls", + "task-scheduler-total-schedule-fails", + 
"task-templates", + "task-updates", + "task-updates-1", + "tasks", + "tasks-and-alerts-differences", + "tasks-and-task-templates", + "tasks-package", + "tasks-possible-with-flux", + "tasks-schemaref-task", + "taxonomy-of-node-types", + "tcp", + "teams", + "teams-package", + "teamspeak", + "technical-analysis", + "technical-analysis-functions", + "technical-and-predictive-analysis", + "technical-details", + "technical-papers", + "technical-preview", + "technical-preview-replicate-data-remotely", + "telegraf", + "telegraf-apt", + "telegraf-configuration-ui", + "telegraf-configurations", + "telegraf-doesnt-support-partial-configurations", + "telegraf-global-flags", + "telegraf-opentsdb-output-plugin", + "telegraf-plugins-in-ui", + "telegraf-setup", + "telegraf-template-patterns", + "telegrafs-total", + "telegram", + "telegram-api-access-token", + "telegram-bot", + "telegram-chat-id", + "telegram-package", + "telegram-setup", + "temp", + "template", + "template-manifests", + "template-part-size-limit", + "template-part-types", + "template-resources", + "template-variable-types", + "template-variables", + "template-variables-in-flux", + "templated-tasks", + "templates", + "templates-pattern", + "tenant-isolation", + "tengine", + "termination-query-log--false", + "terminology", + "terminology-differences", + "test-a-plugin-on-the-server", + "test-a-service", + "test-a-wal-plugin", + "test-a-wal-plugin-using-input-arguments", + "test-a-wal-plugin-with-a-file-containing-line-protocol", + "test-a-wal-plugin-with-a-line-protocol-string", + "test-and-run-your-plugin", + "test-authentication-credentials", + "test-create-and-trigger-plugin-code", + "test-die-function-errors", + "test-if-a-string-contains-a-regular-expression-match", + "test-if-a-value-is-an-infinity-value", + "test-if-a-value-is-negative", + "test-if-streams-of-tables-are-different", + "test-if-streams-of-tables-are-different-mid-script", + "test-if-two-values-are-equal", + "test-migrated-configuration-files", 
+ "test-queries", + "test-services", + "test-the-batch-alert-using-record", + "test-the-stream-alert-using-record", + "test-the-task", + "test-with-openssl", + "test-with-self-signed-certificates", + "test-your-authorization-flow", + "test-your-explicit-schema", + "test-your-schema", + "testcase-statements", + "testing-always-allow-setup", + "testing-package", + "tests", + "testutil-package", + "text", + "text-variable-use-cases", + "thank-you", + "the-agent", + "the-basic-select-statement", + "the-batch-and-point-methods", + "the-complete-udf-script", + "the-cq_query", + "the-cq_query-1", + "the-data-generator", + "the-example-above-returns", + "the-filter-function", + "the-full-tickscript", + "the-goal", + "the-graphite-input", + "the-group-by-clause", + "the-handler", + "the-handler-interface", + "the-influxdb-clustered-helm-chart-includes-the-kubit-operator", + "the-influxdb-storage-engine-and-the-time-structured-merge-tree-tsm", + "the-influxdb-ui-does-not-support-influxql", + "the-info-method", + "the-init-method", + "the-into-clause", + "the-limit-and-slimit-clauses", + "the-limit-clause", + "the-main-method", + "the-new-influxdb-storage-engine-from-lsm-tree-to-btree-and-back-again-to-create-the-time-structured-merge-tree", + "the-offset-and-soffset-clauses", + "the-offset-clause", + "the-restore-process", + "the-select-statement", + "the-server", + "the-shorthand-explained", + "the-show-field-keys-query", + "the-show-stats-for-component-option", + "the-show-stats-for-indexes-option", + "the-slack-handler", + "the-slimit-clause", + "the-soffset-clause", + "the-task", + "the-tickscript", + "the-time-field", + "the-time-zone-clause", + "the-udf-agent-go-api-has-changed", + "the-udp-input", + "the-watcher-of-watchers-approach", + "the-where-clause", + "things-to-consider", + "things-to-note-about-the-join-output", + "thingworx-api-requests", + "third-party-software", + "this-command-is-destructive", + "this-is-a-table", + "this-is-a-table-1", + 
"this-is-a-table-2", + "this-is-a-table-3", + "this-is-a-table-4", + "this-is-a-table-with-lots-of-stuff", + "this-is-a-table-with-lots-of-stuff-1", + "this-is-a-table-with-lots-of-stuff-2", + "this-is-a-table-with-lots-of-stuff-3", + "this-is-a-table-with-lots-of-stuff-4", + "threads", + "threshold", + "threshold-check", + "tick-overview", + "tickscript", + "tickscript-examples", + "tickscript-helper-function", + "tickscript-language-reference", + "tickscript-nodes-overview", + "tickscript-package", + "tickscript-syntax", + "tickscript-syntax-1", + "tickscript-variable", + "tid", + "time", + "time-and-date-functions", + "time-and-metric-names", + "time-and-timezone-queries", + "time-based-backups", + "time-bounds-on-the-inner-query", + "time-bounds-on-the-outer-query", + "time-bounds-on-the-outer-query-recommended", + "time-columns", + "time-data-type", + "time-examples", + "time-expressions", + "time-functions", + "time-ordered-data", + "time-part-templates", + "time-precision", + "time-range", + "time-range-1", + "time-range-2", + "time-range-3", + "time-range-4", + "time-ranges", + "time-series-data", + "time-series-index-tsi", + "time-series-index-tsi-details", + "time-series-index-tsi-overview", + "time-series-index-tsi-query-performance-and-throughputs-improvements", + "time-specification", + "time-structured-merge-tree-tsm", + "time-syntax", + "time-to-become-readable", + "time-types", + "time-zone", + "time-zone-clause", + "time-zone-example", + "time-zone-selector", + "time-zone-shifts", + "time-zone-shifts-1", + "timeable-constraint", + "timecolumn", + "timed-moving-average", + "timedmovingaverage", + "timedst", + "timeout", + "timeout-deadline-exceeded", + "timerange-label", + "timesrc", + "timestamp", + "timestamp-examples", + "timestamp-format", + "timestamp-functions", + "timestamp-precision", + "timestamp-precision-1", + "timestamp-syntax", + "timestamp-timestamp_format-timezone-optional", + "timestamp_format", + "timestamp_key", + "timestamps", + 
"timestamps-when-grouping-by-time", + "timestream", + "timetime_interval", + "timetime_intervaloffset_interval", + "timezone", + "timezone-annotation-example", + "timezone-examples", + "timezone-package", + "timing-is-everything", + "tips-for-creating-patterns", + "title", + "tls", + "tls-cert", + "tls-enabled--false", + "tls-key", + "tls-min-version", + "tls-settings", + "tls-strict-ciphers", + "tls-transport-layer-security-flags", + "tls-transport-layer-security-options", + "to", + "to-clone-a-cell", + "to-clone-a-dashboard", + "to-create-a-basic-static-threshold-alert-based-on-the-cpu-measurements-provided-by-telegraf", + "to-delete-a-task-through-chronograf", + "to-disable-a-task-through-chronograf", + "to-does-not-require-a-package-import", + "to-enable-a-task-through-chronograf", + "to-modify-a-third-party-alert-handler", + "to_char", + "to_date", + "to_hex", + "to_local_time", + "to_timestamp", + "to_timestamp_micros", + "to_timestamp_millis", + "to_timestamp_nanos", + "to_timestamp_seconds", + "to_unixtime", + "today", + "toggle-dark-mode-and-light-mode", + "token", + "token-and-security-updates", + "token-and-security-updates-1", + "token-authentication", + "token-management-and-authorization-differences", + "token-prefix", + "token-rotation", + "token-service-call-total", + "token-service-duration", + "token-service-error-total", + "token-services-total", + "token-support", + "tokens", + "tokens-in-production-applications", + "tolerance", + "tolevel", + "tomcat", + "tomhollingworth-package", + "toml", + "toml-escaping", + "too-many-open-files-errors", + "tooling", + "tools-for-monitoring-the-influxdata-1x-platform-tick-stack", + "tools-for-querying-the-v1-api", + "tools-for-working-with-flux", + "tools-for-writing-to-the-v1-api", + "tools-for-writing-to-the-v2-api", + "tools-to-execute-queries", + "tools-to-use", + "top", + "top-and-a-tag-key-with-fewer-than-n-tag-values", + "top-tags-and-the-into-clause", + "top-with-a-group-by-time-clause", + "topic", + 
"topic-events", + "topic-handlers", + "topic-state", + "topics", + "topics-and-topic-handlers", + "topk", + "total-buffer-bytes", + "total-buffer-bytes--0", + "total-max-memory-bytes", + "total-max-memory-bytes--0", + "total-processes-variable", + "total_alloc_bytes", + "totalalloc", + "totalconnections", + "totalstreams", + "totitle-vs-toupper", + "toupper-vs-totitle", + "trace-logging-enabled", + "trace-logging-enabled--false", + "traces", + "traces-exporter", + "traces-exporter-jaeger-agent-host", + "traces-exporter-jaeger-agent-port", + "traces-exporter-jaeger-service-name", + "traces-exporter-jaeger-trace-context-header-name", + "traces-jaeger-debug-name", + "traces-jaeger-max-msgs-per-second", + "traces-jaeger-tags", + "tracing", + "tracing-examples", + "tracing-identifier-key", + "tracing-type", + "track-requests-over-a-one-minute-interval", + "track-requests-over-a-ten-second-interval", + "track-state-changes-across-task-executions", + "transform-data-with-aggregator-and-processor-plugins", + "transform-data-with-math", + "transform-values-in-a-data-stream", + "transformation", + "transformation-that-adds-a-column-with-an-explicit-type", + "transformations", + "translate", + "transpile-influxql-queries-to-flux", + "transport-layer-security-tls", + "transport-layer-security-tls-settings", + "trendfactor", + "trickle", + "trig", + "trigger", + "trigger-alerts-by-comparing-two-measurements", + "trigger-alerts-from-batch-data", + "trigger-alerts-from-stream-data", + "trigger-specification-examples", + "trigger-system", + "trigger-types", + "trim", + "trim-a-prefix-from-all-values-in-a-column", + "trim-leading-and-trailing-periods-from-all-values-in-a-column", + "trim-leading-and-trailing-spaces-from-all-values-in-a-column", + "trim-leading-periods-from-all-values-in-a-column", + "trim-trailing-periods-from-all-values-in-a-column", + "triple-exponential-moving-average-rules", + "triple_exponential_derivative", + "triple_exponential_moving_average", + 
"troubleshoot", + "troubleshoot-and-optimize-queries", + "troubleshoot-arrow-flight-requests", + "troubleshoot-bucket-schema-errors", + "troubleshoot-deploying-influxdb-clustered", + "troubleshoot-errors", + "troubleshoot-failures", + "troubleshoot-influxd-ctl-authentication", + "troubleshoot-influxd-ctl-join", + "troubleshoot-influxql-errors", + "troubleshoot-issues-writing-data", + "troubleshoot-join-behaviors", + "troubleshoot-join-error-messages", + "troubleshoot-join-operations", + "troubleshoot-joins", + "troubleshoot-ldap-in-influxdb-enterprise", + "troubleshoot-migration-task-failures", + "troubleshoot-notebooks", + "troubleshoot-oauth-errors", + "troubleshoot-partial-writes", + "troubleshoot-queries", + "troubleshoot-rejected-points", + "troubleshoot-telegraf", + "troubleshoot-template-results-and-permissions", + "troubleshoot-tls", + "troubleshoot-with-new-metrics", + "troubleshoot-write-errors", + "troubleshooting", + "trunc", + "truncate", + "truncate-0", + "truncate-1", + "truncate-2", + "truncate-238", + "truncate-3", + "truncate-4", + "truncate-5", + "truncate-6", + "truncate-a-value-at-the-decimal", + "truncate-all-time-values-to-the-minute", + "truncate-or-wrap-log-messages", + "truncate-shards-3-minutes-after-command-execution", + "truncate-time-values", + "truncate-time-values-using-relative-durations", + "truncate-timestamps-to-a-specified-unit", + "truncate-to-weeks", + "try-it-out", + "tsdbstore", + "tsi", + "tsi-index", + "tsi-level-compaction", + "tsi-log-file-compaction", + "tsi-time-series-index", + "tsi-tsi1-index-settings", + "tsl", + "tsm-compaction-metrics", + "tsm-compaction-strategies", + "tsm-directories-and-files-layout", + "tsm-files", + "tsm-snapshotting-in-memory-cache-to-disk", + "tsm-time-structured-merge-tree", + "tsm-use-madv-willneed--false", + "tsm1_cache", + "tsm1_engine", + "tsm1_filestore", + "tsm1_wal", + "tsmfullcompactionduration", + "tsmfullcompactionerr", + "tsmfullcompactionqueue", + "tsmfullcompactions", + 
"tsmfullcompactionsactive", + "tsmlevel1compactionduration", + "tsmlevel1compactionerr", + "tsmlevel1compactionqueue", + "tsmlevel1compactions", + "tsmlevel1compactionsactive", + "tsmlevel2compactionduration", + "tsmlevel2compactionerr", + "tsmlevel2compactionqueue", + "tsmlevel2compactions", + "tsmlevel2compactionsactive", + "tsmlevel3compactionduration", + "tsmlevel3compactionerr", + "tsmlevel3compactionqueue", + "tsmlevel3compactions", + "tsmlevel3compactionsactive", + "tsmoptimizecompactionduration", + "tsmoptimizecompactionerr", + "tsmoptimizecompactionqueue", + "tsmoptimizecompactions", + "tsmoptimizecompactionsactive", + "tune-garbage-collection", + "twemproxy", + "two-graphite-listeners-udp--tcp-config", + "two-or-more-data-nodes", + "typ", + "type", + "type-casting-examples", + "type-constraints", + "type-conversion", + "type-conversion-functions", + "type-conversions", + "type-notation", + "type-variables", + "types", + "types-package", + "typesdb--usrlocalsharecollectd", + "tz", + "tz-examples", + "u", + "ubuntu--debian-64-bit", + "ubuntu-and-debian-64-bit", + "ubuntu-and-debian-64-bit-1", + "ubuntu-and-debian-64-bit-2", + "udfname", + "udp", + "udp-is-connectionless", + "udp-protocol-support-in-influxdb", + "udp-read-buffer", + "udp-read-buffer--0", + "udp-settings", + "ui-disabled", + "ui-error-messages", + "ui-improvements", + "ui-improvements-1", + "ui-improvements-10", + "ui-improvements-11", + "ui-improvements-12", + "ui-improvements-13", + "ui-improvements-14", + "ui-improvements-15", + "ui-improvements-16", + "ui-improvements-17", + "ui-improvements-18", + "ui-improvements-19", + "ui-improvements-2", + "ui-improvements-20", + "ui-improvements-21", + "ui-improvements-3", + "ui-improvements-4", + "ui-improvements-5", + "ui-improvements-6", + "ui-improvements-7", + "ui-improvements-8", + "ui-improvements-9", + "uinteger", + "uinteger-field-value-examples", + "uinteger-syntax", + "uintegers", + "unable-to-parse---bad-timestamp", + 
"unable-to-parse---time-outside-range", + "unauthenticated-unauthenticated", + "unauthorized-permission-denied", + "unbound", + "unbounded-following", + "unbounded-following-1", + "unbounded-preceding", + "understand-and-troubleshoot-flight-responses", + "understand-how-telegraf-writes-data-to-influxdb", + "understand-the-difference-between-tags-and-fields", + "understand-trigger-types", + "understanding-auxiliary-fields", + "understanding-cache-namespaces", + "understanding-cursors", + "understanding-iterators", + "understanding-kubits-role-in-air-gapped-environments", + "understanding-system-table-data-distribution", + "understanding-the-returned-timestamp", + "understanding-the-returned-timestamp-1", + "understanding-tsi", + "unexpected-timestamps-and-values-in-query-results", + "ungroup-data", + "union", + "union-clause", + "union-custom-rows-with-query-results", + "union-query-results-with-custom-data", + "union-results-from-different-measurements", + "union-two-streams-of-tables-with-empty-group-keys", + "union-two-streams-of-tables-with-unique-group-keys", + "union-types", + "union-vs-join", + "unionexec", + "unit", + "units", + "universe-block", + "universe-package", + "unix-epoch", + "unix-nanosecond-to-rfc3339", + "unix-socket-enabled", + "unix-socket-enabled--false", + "unix-timestamp", + "unix-timestamp-example", + "unmodified-examplecsv", + "unnamed-import-file", + "unoptimized-queries", + "unpivot", + "unpivot-data-into-_field-and-_value-columns", + "unshare-a-notebook", + "unsigned-integer", + "unsigned-integers", + "unsignedlong", + "unsupported-influxql-queries", + "unsupported-operations", + "unsupported-operators", + "unsupported-sql-types", + "unwindow-aggregate-tables", + "unwindowed-output-table", + "upcoming-changes-to-influx-cli-packaging", + "update-a-bucket", + "update-a-bucket-schema", + "update-a-bucket-schema-using-the-influx-cli", + "update-a-bucket-schema-using-the-influxdb-http-api", + "update-a-bucket-using-the-http-api", + 
"update-a-bucket-using-the-influx-cli", + "update-a-buckets-name-in-the-influxdb-ui", + "update-a-buckets-retention-period", + "update-a-buckets-retention-period-in-the-influxdb-ui", + "update-a-connection-configuration-and-do-not-set-it-to-active", + "update-a-connection-configuration-and-set-it-to-active", + "update-a-database", + "update-a-database-token", + "update-a-databases-column-limit", + "update-a-databases-retention-period", + "update-a-databases-table-limit", + "update-a-dbrp-mapping", + "update-a-flux-task", + "update-a-grant", + "update-a-password", + "update-a-remote", + "update-a-replication", + "update-a-restriction", + "update-a-schema-and-print-column-information", + "update-a-schema-using-the-influx-cli", + "update-a-schema-with-columns-format", + "update-a-scraper", + "update-a-scraper-in-the-influxdb-ui", + "update-a-secret-using-the-influx-cli", + "update-a-secret-using-the-influxdb-api", + "update-a-secret-using-the-influxdb-cloud-ui", + "update-a-stack", + "update-a-stack-with-a-name-and-description", + "update-a-stack-with-a-name-and-urls-to-associate-with-stack", + "update-a-stack-with-new-resources-and-export-the-stack-as-a-template", + "update-a-stack-with-new-resources-to-manage", + "update-a-task", + "update-a-task-description", + "update-a-task-flux-script", + "update-a-task-flux-script-1", + "update-a-task-from-a-flux-file", + "update-a-task-from-a-flux-string", + "update-a-task-from-a-script-id", + "update-a-task-in-the-influxdb-ui", + "update-a-task-to-query-multiple-dbrp-combinations", + "update-a-task-with-the-influx-cli", + "update-a-task-with-the-influxdb-api", + "update-a-task-without-reloading-the-task", + "update-a-telegraf-configuration", + "update-a-telegraf-configuration-via-stdin", + "update-a-template", + "update-a-templates-tickscript", + "update-a-templates-type", + "update-a-token", + "update-a-token-for-read-and-write-access-to-a-database", + "update-a-token-for-read-only-access-to-a-database", + 
"update-a-token-in-the-influxdb-ui", + "update-a-token-to-provide-mixed-permissions-to-multiple-databases", + "update-a-token-to-provide-read-only-access-to-multiple-databases", + "update-a-token-using-the-influxdb-api", + "update-a-token-with-mixed-permissions-to-multiple-databases", + "update-a-token-with-read-and-write-access-to-a-database", + "update-a-token-with-read-and-write-access-to-all-databases", + "update-a-token-with-read-only-access-to-a-database", + "update-a-token-with-read-only-access-to-multiple-databases", + "update-a-tokens-description", + "update-a-tokens-permissions", + "update-a-topic-handler", + "update-a-user", + "update-a-user-in-the-influxdb-ui", + "update-a-user-password", + "update-a-user-password-using-a-user-id", + "update-a-user-password-using-a-username", + "update-a-user-using-the-influx-cli", + "update-a-username", + "update-a-variable", + "update-alert-logic", + "update-an-existing-secret", + "update-an-invokable-script", + "update-an-organization", + "update-an-organization-in-the-influxdb-ui", + "update-an-organization-using-the-influx-cli", + "update-check-queries-and-logic", + "update-checks", + "update-environment-variables-instead-of-removing-them", + "update-flux-task-code", + "update-flux-task-code-api", + "update-flux-task-code-using-a-file", + "update-flux-task-code-via-stdin", + "update-hardcoded-influxdb-urls", + "update-kapacitor-flux-tasks", + "update-node-labels", + "update-notification-endpoints", + "update-notification-rules", + "update-openssl-and-influxdb", + "update-resource-ids", + "update-s2-cell-id-token-level", + "update-secrets", + "update-telegraf-configurations", + "update-the-data-node-configuration-file", + "update-the-default-retention-policy", + "update-the-description-of-an-organization", + "update-the-meta-node-configuration-file", + "update-the-name-of-a-bucket", + "update-the-name-of-a-organization", + "update-the-name-of-a-user", + "update-the-name-of-an-organization", + 
"update-the-name-or-description--of-a-configuration", + "update-the-name-or-description-for-notification-endpoint", + "update-the-name-or-description-for-notification-rules", + "update-the-retention-period-of-a-bucket", + "update-the-retention-policy-of-a-dbrp-mapping", + "update-the-shard-group-duration-of-a-bucket", + "update-the-source-code-of-an-invokable-script", + "update-the-status-of-a-task", + "update-the-status-of-a-task-1", + "update-the-tickscript-of-a-task", + "update-tokens-ui", + "update-upgraded-influxdb-connections", + "update-users", + "update-your-custom-binary", + "update-your-image-to-use-a-new-package-version", + "update-your-influxdb-enterprise-license-without-restarting-data-nodes", + "update-your-namespace-if-using-a-namespace-other-than-influxdb", + "updated-azure-ad-documentation", + "updates", + "updates-1", + "updates-2", + "updating-tickscript", + "upgrade-bug-fix", + "upgrade-chronograf", + "upgrade-data-nodes", + "upgrade-deb-packages", + "upgrade-from-a-non-licensed-release", + "upgrade-from-influxdb-1x-to-27", + "upgrade-from-influxdb-1x-to-influxdb-cloud", + "upgrade-from-influxdb-20-beta-to-influxdb-20", + "upgrade-from-influxdb-2x-to-influxdb-27", + "upgrade-from-influxdb-oss-2x-to-influxdb-cloud", + "upgrade-influxdb-clustered", + "upgrade-influxdb-enterprise-clusters", + "upgrade-initialization-mode", + "upgrade-meta-nodes", + "upgrade-notes", + "upgrade-notes-1", + "upgrade-notes-2", + "upgrade-notes-3", + "upgrade-notes-4", + "upgrade-notes-5", + "upgrade-notes-6", + "upgrade-notes-7", + "upgrade-notes-8", + "upgrade-requirements", + "upgrade-to-checkpoint-releases-first", + "upgrade-to-influxdb-111x", + "upgrade-to-influxdb-enterprise", + "upgrade-to-kapacitor-v1", + "upgrade-to-the-latest-influxdb-v2-version", + "upgrade-to-usage-based-plan", + "upgrade-with-a-custom-influxdb-1x-configuration-file", + "upgrade-with-custom-paths", + "upgrade-with-zip-or-targz", + "upgrade-your-influxdb-clustered-version", + 
"upgraded-from-influxdb-1x-to-2x", + "upgrading", + "upgrading--for-users-of-the-tsi-preview", + "upgrading--for-users-of-the-tsi-preview-1", + "upgrading--for-users-of-the-tsi-preview-2", + "upgrading-1", + "upgrading-from-influxdb-enterprise-v1113", + "upload-line-protocol-through-the-chronograf-ui", + "upper", + "upperboundcolumn", + "upperdashboardtime", + "upsd", + "uptime-seconds", + "url", + "url-1", + "url-encode-a-string", + "url-encode-strings-in-a-stream-of-tables", + "url-source", + "urls", + "urm-new-call-total", + "urm-new-duration", + "us-central-iowa", + "us-east-virginia", + "us-west-2-1", + "us-west-2-2", + "usage", + "usage-based-plan", + "usage-notes", + "usage-package", + "use", + "use-a-custom-dashboard-variable", + "use-a-flux-duration-to-define-a-sql-interval", + "use-a-handler-in-a-tickscript", + "use-a-heatmap-to-visualize-correlation", + "use-a-managed-identity-in-an-azure-vm", + "use-a-management-token", + "use-a-package-manager", + "use-a-regex-to-filter-by-field-key", + "use-a-regex-to-filter-by-tag-value", + "use-a-regular-expression-to-specify-a-field-value-in-the-where-clause", + "use-a-regular-expression-to-specify-a-tag-with-a-value-in-the-where-clause", + "use-a-regular-expression-to-specify-a-tag-with-no-value-in-the-where-clause", + "use-a-regular-expression-to-specify-field-keys-and-tag-keys-in-function-arguments", + "use-a-regular-expression-to-specify-field-keys-and-tag-keys-in-the-select-clause", + "use-a-regular-expression-to-specify-measurements-in-the-from-clause", + "use-a-regular-expression-to-specify-tag-keys-in-the-group-by-clause", + "use-a-regular-expression-to-specify-tag-values-in-the-where-clause", + "use-a-scatter-plot-to-visualize-correlation", + "use-a-selector-function-with-a-group-by-time-clause", + "use-a-selector-function-with-another-function-and-with-a-specified-time-range", + "use-a-selector-function-with-another-function-and-without-a-specified-time-range", + 
"use-a-single-selector-function-with-a-single-field-key-and-without-a-specified-time-range", + "use-a-single-selector-function-with-multiple-field-keys-and-without-a-specified-time-range", + "use-a-token-in-an-api-request", + "use-a-token-in-postman", + "use-alternate-boolean-format", + "use-alternate-numeric-formats", + "use-an-aggregate-function-with-a-specified-time-range", + "use-an-aggregate-function-with-a-specified-time-range-and-a-group-by-time-clause", + "use-an-aggregate-function-with-default-parameters", + "use-an-aggregate-function-without-a-specified-time-range", + "use-an-existing-trial-or-at-home-license", + "use-an-extracted-row-record", + "use-an-influxdb-scraper", + "use-and-configure-display-formats", + "use-and-manage-variables", + "use-annotations-in-chronograf-views", + "use-annotations-in-dashboards", + "use-annotations-in-the-chronograf-interface", + "use-anti-entropy-service-in-influxdb-enterprise", + "use-api-client-libraries", + "use-appropriate-retention-periods", + "use-bundled-distributions-with-browsers-and-module-loaders", + "use-case-examples", + "use-cases", + "use-cert-manager-and-lets-encrypt-to-manage-tls-certificates", + "use-chronograf", + "use-chronograf-with-influxdb-cloud", + "use-chronograf-with-influxdb-oss", + "use-cli-configurations", + "use-cli-environment-variables", + "use-client-libraries", + "use-client-libraries-to-downsample-data", + "use-command-line-options", + "use-compatibility-apis-and-client-libraries-to-write-data", + "use-configuration-files", + "use-cumulativesum-with-aggregatewindow", + "use-curl-to-write-data-from-a-file", + "use-custom-dashboard-variables", + "use-dashboard-template-variables", + "use-dashboard-variables", + "use-data-analysis-tools", + "use-data-source-cells", + "use-database-tokens-to-authorize-data-reads-and-writes", + "use-date_bin_gapfill-to-fill-gaps-in-data", + "use-date_bin_gapfill-to-insert-rows-when-no-rows-exists", + "use-date_bin_wallclock_gapfill-to-fill-gaps-in-data", + 
"use-date_bin_wallclock_gapfill-to-insert-rows-when-no-rows-exists", + "use-date_trunc-to-return-hourly-averages", + "use-date_trunc-to-return-weekly-averages", + "use-different-timestamp-formats", + "use-docker-cli", + "use-docker-compose", + "use-environment-variables", + "use-example-plugins", + "use-explain-keywords-to-view-a-query-plan", + "use-external-tools-to-manage-and-process-logs", + "use-extracted-column-values", + "use-fields-for-unique-and-numeric-data", + "use-files-to-inject-headers", + "use-first-or-last-with-aggregatewindow", + "use-flux-in-chronograf-dashboards", + "use-flux-to-query-data-from-iox", + "use-go", + "use-grafana", + "use-grafana-with-influxdb-cloud", + "use-grafana-with-influxdb-enterprise", + "use-grafana-with-influxdb-oss", + "use-gzip-compression", + "use-gzip-compression-with-the-influxdb-api", + "use-gzip-to-compress-the-query-response", + "use-heavy-functions-sparingly", + "use-helm-charts", + "use-helm-charts-to-deploy-influxdata-platform-components", + "use-holtwinters-to-predict-future-values", + "use-holtwinters-with-seasonality-to-predict-future-values", + "use-homebrew", + "use-homebrew-to-install-influxctl", + "use-homebrew-to-upgrade", + "use-influx---influxdb-command-line-interface", + "use-influx-cli-commands", + "use-influxdb-3-client-libraries", + "use-influxdb-3-clients", + "use-influxdb-3-clients-to-query", + "use-influxdb-client-libraries-to-write-line-protocol-data", + "use-influxdb-community-templates", + "use-influxdb-flight-rpc-clients", + "use-influxdb-inch", + "use-influxdb-telegraf-configurations", + "use-influxdb-templates", + "use-influxql-for-diagnostics", + "use-influxql-to-write-to-influxdb-2x-or-influxdb-cloud", + "use-join-for-multiple-data-sources", + "use-join-functions-to-join-your-data", + "use-joinfull-to-join-your-data", + "use-joininner-to-join-your-data", + "use-joinleft-to-join-your-data", + "use-joinright-to-join-your-data", + "use-jointime-to-join-your-data", + "use-k8s-operator", + 
"use-kapacitor-authorizations", + "use-kapacitor-batch-tasks", + "use-kapacitor-stream-tasks", + "use-kapacitor-with-influxdb-cloud", + "use-kapacitor-with-influxdb-oss", + "use-latitude-and-longitude-values-to-generate-s2-cell-id-tokens", + "use-limit-and-slimit-together", + "use-logrotate", + "use-mathabs-in-map", + "use-mathacos-in-map", + "use-mathacosh-in-map", + "use-mathasin-in-map", + "use-mathasinh-in-map", + "use-mathatan-in-map", + "use-mathatanh-in-map", + "use-mathcbrt-in-map", + "use-mathceil-in-map", + "use-mathcopysign-in-map", + "use-mathcos-in-map", + "use-mathcosh-in-map", + "use-mathdim-in-map", + "use-matherf-in-map", + "use-matherfc-in-map", + "use-matherfcinv-in-map", + "use-matherfinv-in-map", + "use-mathexp-in-map", + "use-mathexp2-in-map", + "use-mathexpm1-in-map", + "use-mathfloat64bits-in-map", + "use-mathfloat64frombits-in-map", + "use-mathfloor-in-map", + "use-mathfrexp-in-map", + "use-mathgamma-in-map", + "use-mathhypot-in-map", + "use-mathilogb-in-map", + "use-mathisinf-in-map", + "use-mathisnan-in-map", + "use-mathj0-in-map", + "use-mathj1-in-map", + "use-mathjn-in-map", + "use-mathldexp-in-map", + "use-mathlgamma-in-map", + "use-mathlog-in-map", + "use-mathlog10-in-map", + "use-mathlog1p-in-map", + "use-mathlog2-in-map", + "use-mathlogb-in-map", + "use-mathminf-in-map", + "use-mathmmax-in-map", + "use-mathmmin-in-map", + "use-mathmod-in-map", + "use-mathmodf-in-map", + "use-mathnextafter-in-map", + "use-mathpow-in-map", + "use-mathpow10-in-map", + "use-mathremainder-in-map", + "use-mathround-in-map", + "use-mathroundtoeven-in-map", + "use-mathsignbit-in-map", + "use-mathsin-in-map", + "use-mathsincos-in-map", + "use-mathsinh-in-map", + "use-mathsqrt-in-map", + "use-mathtan-in-map", + "use-mathtanh-in-map", + "use-mathtrunc-in-map", + "use-mathy0-in-map", + "use-mathy1-in-map", + "use-mathyn-in-map", + "use-median-as-a-selector-transformation", + "use-median-as-an-aggregate-transformation", + "use-median-with-aggregatewindow", + 
"use-multiple-fields-in-a-calculation", + "use-new-influxdb-tools", + "use-no_sync-for-immediate-write-responses", + "use-ntp-to-synchronize-time-between-hosts", + "use-offset-to-account-for-latent-data", + "use-organization-id", + "use-pandas-to-analyze-data", + "use-parameterized-flux-queries", + "use-parameterized-queries-with-influxql", + "use-parameterized-queries-with-sql", + "use-parameters-in-a-script", + "use-parameters-in-where-expressions", + "use-partition-templates", + "use-piped-forward-data-in-a-custom-function", + "use-postman-with-the-influxdb-api", + "use-powershell-for-windows", + "use-pre-created-dashboards", + "use-pre-created-dashboards-in-chronograf", + "use-prometheus-histograms-in-flux", + "use-prometheusscrape", + "use-pyarrow-to-analyze-data", + "use-pyarrow-to-convert-query-results-to-pandas", + "use-pyarrow-to-read-query-results", + "use-python", + "use-python-and-pandas-to-view-an-explain-report", + "use-quantile-with-aggregatewindow", + "use-quix-streams-to-downsample-data", + "use-recommended-naming-conventions", + "use-regular-expression-flags", + "use-regular-expressions-in-predicate-expressions", + "use-restricted-tokens-for-production-apps", + "use-scientific-notation", + "use-secrets", + "use-secrets-in-a-query", + "use-secrets-in-your-query", + "use-secrets-to-store-sql-database-credentials", + "use-selector-functions", + "use-set-instead-of-map-when-possible", + "use-solutions-for-kubernetes-services", + "use-sql-or-influxql-as-your-query-language", + "use-sql-results-to-populate-dashboard-variables", + "use-sql-to-query-data-from-iox", + "use-ssds", + "use-ssl", + "use-superset", + "use-superset-to-query-data", + "use-tableau", + "use-tableau-to-query-data-with-sql", + "use-tag-buckets-for-high-cardinality-tags", + "use-tags-and-fields", + "use-tags-to-improve-query-performance", + "use-task-options-in-your-flux-script", + "use-telegraf", + "use-telegraf-plugins", + "use-telegraf-to-dual-write-to-influxdb", + 
"use-telegraf-to-write-csv-data", + "use-telegraf-to-write-csv-data-to-influxdb", + "use-telegraf-to-write-data", + "use-telegraf-with-influxdb", + "use-template-variables", + "use-template-variables-in-cell-queries", + "use-template-variables-in-cell-titles", + "use-templates-to-migrate-influxdb-resources", + "use-the---once-option-to-single-shot-execute", + "use-the-api-for-batching-and-higher-volume-writes", + "use-the-apiv3write_lp-endpoint", + "use-the-best-data-type-for-your-data", + "use-the-cli", + "use-the-cli-or-http-api-to-regenerate-the-operator-token", + "use-the-client-library-in-a-nodejs-application", + "use-the-coarsest-time-precision-possible", + "use-the-codeexecdcode-shim", + "use-the-create-trigger-command", + "use-the-current-utc-time-as-a-query-boundary", + "use-the-default-organization", + "use-the-downsampling-jupyter-notebook", + "use-the-flux-lsp-with-vim", + "use-the-flux-to-function-in-a-query", + "use-the-following", + "use-the-following-1", + "use-the-holtwinters-fitted-model-to-predict-future-values", + "use-the-http-api-and-client-libraries-to-write-data", + "use-the-http-query-api", + "use-the-influx-cli", + "use-the-influx-query-command", + "use-the-influxctl-cli", + "use-the-influxctl-cli-to-write-line-protocol-data", + "use-the-influxdb-data-explorer-to-query-data", + "use-the-influxdb-operator", + "use-the-influxdb-ui", + "use-the-influxdb-ui-to-write-csv-data", + "use-the-influxdb-v1-http-api", + "use-the-influxdb3-cli", + "use-the-influxdb3-cli-to-write-data", + "use-the-influxql-into-clause-in-a-query", + "use-the-influxql-shell", + "use-the-interactive-flux-repl", + "use-the-interactive-influxql-shell", + "use-the-kube-influxdb-project", + "use-the-mad-algorithm-to-detect-anomalies", + "use-the-operator-token-to-create-a-named-admin-token", + "use-the-portable-format-for-influxdb-15-and-later", + "use-the-prometheus-remote-read-and-write-api", + "use-the-repl", + 
"use-the-same-influxdb-clustered-version-used-to-generate-the-snapshot", + "use-the-telegraf-agent", + "use-the-token-to-create-a-database", + "use-the-trigger-specific-namespace", + "use-the-url-query-parameter", + "use-the-v1-query-api-and-influxql", + "use-the-v1-write-api", + "use-the-v3-lightweight-client-libraries", + "use-the-v3-query-api", + "use-thingworx-with-influxdb-cloud", + "use-three-and-only-three-meta-nodes", + "use-timestamps-and-durations-together", + "use-token-authentication-with-curl", + "use-tokens", + "use-tokens-with-basic-authentication", + "use-ttl-appropriately", + "use-visualization-cells", + "use-visualization-tools", + "use-vs-code-to-edit-your-configuration-file", + "use-with-module-bundlers", + "use-your-own-tools", + "useful-performance-metrics-commands", + "usepointtimes", + "useprevious", + "user", + "user-account", + "user-cannot-log-in-after-updating-their-password-in-the-ldap-server", + "user-datagram-protocol-udp", + "user-defined-functions-udfs", + "user-groups", + "user-interface-differences", + "user-interface-improvements", + "user-key", + "user-management", + "user-management-commands", + "user-migration", + "user-new-call-total", + "user-new-duration", + "user-permissions", + "user-provisioning", + "user-sessions-with-authorizations", + "user-types", + "user-types-and-privileges", + "username", + "users", + "users-total", + "users-versus-database-tokens", + "usgs-earthquake-data", + "using-a-single-meta-node-for-non-production-environments", + "using-fips-readiness-checks", + "using-flux-and-influxql", + "using-flux-in-dashboard-cells", + "using-go-and-the-influxdb3-go-client", + "using-influx---influxdb-command-line-interface", + "using-logrotate", + "using-multiple-discord-configurations", + "using-multiple-slack-configurations", + "using-nightly-builds", + "using-or-to-select-time-multiple-time-intervals", + "using-python-and-pandas", + "using-reduce-to-construct-a-json", + "using-sasl-with-kapacitor", + 
"using-the--file-flag", + "using-the-aggregate-event-handler", + "using-the-alerta-event-handler", + "using-the-configuration-file", + "using-the-discord-event-handler", + "using-the-exec-event-handler", + "using-the-hipchat-event-handler", + "using-the-inhibit-method-to-suppress-alerts", + "using-the-kafka-event-handler", + "using-the-log-event-handler", + "using-the-mqtt-event-handler", + "using-the-opsgenie-event-handler", + "using-the-pagerduty-v1-event-handler", + "using-the-pagerduty-v2-event-handler", + "using-the-post-event-handler", + "using-the-publish-event-handler", + "using-the-pushover-event-handler", + "using-the-read-buffer-option-for-the-udp-listener", + "using-the-sensu-event-handler", + "using-the-show-stats-statement", + "using-the-slack-event-handler", + "using-the-smtpemail-event-handler", + "using-the-snmp-trap-event-handler", + "using-the-tcp-event-handler", + "using-the-telegram-event-handler", + "using-the-victorops-event-handler", + "using-variables", + "using-with-the-file-output", + "using-with-the-file-output-plugin", + "using-with-the-http-output", + "using-with-the-http-output-plugin", + "uuid", + "uwsgi", + "v", + "v0100", + "v01000", + "v01010", + "v01020", + "v01030", + "v01040", + "v01050", + "v01060", + "v01070", + "v01080", + "v01081", + "v01090", + "v01091", + "v0110", + "v01110", + "v01120", + "v01121", + "v01130", + "v01140", + "v01141", + "v01150", + "v01160", + "v01170", + "v01171", + "v01172", + "v01173", + "v01180", + "v01181", + "v01191", + "v0120", + "v01200", + "v01201", + "v01210", + "v01220", + "v01230", + "v01240", + "v01250", + "v01260", + "v01270", + "v01271", + "v01272", + "v01273", + "v01280", + "v01290", + "v0130", + "v01300", + "v01310", + "v01320", + "v01330", + "v01340", + "v01351", + "v01360", + "v01370", + "v01380", + "v01390", + "v0140", + "v01400", + "v01410", + "v01420", + "v01430", + "v01431", + "v01440", + "v01450", + "v01460", + "v01470", + "v01480", + "v01490", + "v0150", + "v01500", + "v01501", + 
"v01510", + "v01511", + "v01520", + "v01530", + "v01540", + "v01550", + "v01551", + "v01560", + "v01570", + "v01580", + "v01590", + "v0160", + "v01600", + "v0161", + "v01610", + "v01620", + "v01630", + "v01640", + "v01641", + "v01650", + "v01660", + "v01670", + "v01680", + "v01690", + "v0170", + "v01700", + "v01701", + "v01710", + "v01720", + "v01730", + "v01740", + "v01741", + "v01750", + "v01760", + "v01770", + "v01771", + "v01780", + "v01790", + "v0180", + "v01800", + "v01801", + "v01810", + "v01820", + "v01830", + "v01840", + "v01841", + "v01842", + "v01850", + "v01860", + "v01870", + "v01880", + "v01881", + "v01890", + "v0190", + "v01900", + "v01910", + "v01920", + "v01930", + "v01940", + "v01941", + "v01943", + "v01943-1", + "v01944", + "v01945", + "v01950", + "v01951", + "v0200", + "v0210", + "v0211", + "v0212", + "v0213", + "v0214", + "v0220", + "v0230", + "v0240", + "v0250", + "v0260", + "v0270", + "v0280", + "v0281", + "v0282", + "v0283", + "v0290", + "v0300", + "v0310", + "v0311", + "v0320", + "v0321", + "v0330", + "v0331", + "v0332", + "v0341", + "v0342", + "v0350", + "v0351", + "v0360", + "v0361", + "v0362", + "v0370", + "v0371", + "v0372", + "v0380", + "v0390", + "v0400", + "v0401", + "v0402", + "v0410", + "v0420", + "v0430", + "v0440", + "v0450", + "v0451", + "v0452", + "v0460", + "v0461", + "v0462", + "v0470", + "v0471", + "v0480", + "v0490", + "v0500", + "v0501", + "v0502", + "v0510", + "v0520", + "v0530", + "v0540", + "v0550", + "v0551", + "v0560", + "v0570", + "v0580", + "v0581", + "v0582", + "v0583", + "v0584", + "v0590", + "v0591", + "v0592", + "v0593", + "v0594", + "v0595", + "v0596", + "v0600", + "v0610", + "v0620", + "v0630", + "v0640", + "v0650", + "v0660", + "v0661", + "v0670", + "v0680", + "v0690", + "v0691", + "v0692", + "v0700", + "v0710", + "v0711", + "v0720", + "v0721", + "v073", + "v0730", + "v074", + "v0740", + "v0750", + "v0760", + "v0761", + "v0770", + "v0771", + "v0780", + "v0790", + "v080", + "v0800", + "v0810", + "v0820", + 
"v0821", + "v0822", + "v0830", + "v0831", + "v0840", + "v0850", + "v0860", + "v0870", + "v0871", + "v0880", + "v0890", + "v090", + "v0900", + "v0910", + "v0920", + "v0930", + "v0940", + "v0950", + "v0960", + "v0970", + "v0980", + "v0990", + "v1-api-query-parameters", + "v1-api-write-parameters", + "v1-cli-not-supported", + "v1-influx-cli-not-supported", + "v1-package", + "v1-write-endpoint", + "v10", + "v100", + "v101", + "v102", + "v103", + "v104", + "v110", + "v110-1", + "v1100", + "v1101", + "v1102", + "v1103", + "v1104", + "v1105", + "v1106", + "v1107", + "v111", + "v1110", + "v1111", + "v1112", + "v1113", + "v1114", + "v1115", + "v1116", + "v1117", + "v1118", + "v112", + "v112-1", + "v1121", + "v1122", + "v1123", + "v1124", + "v1125", + "v1126", + "v113", + "v1131", + "v1132", + "v1133", + "v1134", + "v114", + "v1141", + "v1142", + "v1143", + "v1144", + "v1145", + "v115", + "v1150", + "v1151", + "v1152", + "v1153", + "v1160", + "v1161", + "v1162", + "v1163", + "v1170", + "v1171", + "v1172", + "v1173", + "v118", + "v1181", + "v1182", + "v1183", + "v1190", + "v1191", + "v1192", + "v1193", + "v12", + "v120", + "v1201", + "v1202", + "v1203", + "v1204", + "v121", + "v121-1", + "v1211", + "v1212", + "v1213", + "v1214", + "v122", + "v1220", + "v1221", + "v1222", + "v1223", + "v1224", + "v123", + "v1230", + "v1231", + "v1232", + "v1233", + "v1234", + "v1234-1", + "v124", + "v1240", + "v1241", + "v1242", + "v1243", + "v1244", + "v125", + "v1250", + "v1251", + "v1252", + "v1253", + "v1260", + "v1261", + "v1262", + "v1263", + "v1270", + "v1271", + "v1272", + "v1273", + "v1274", + "v1280", + "v1281", + "v1282", + "v1283", + "v1284", + "v1285", + "v1290", + "v1291", + "v1292", + "v1293", + "v1294", + "v1295", + "v130", + "v1300", + "v1301", + "v1302", + "v1303", + "v131", + "v1310", + "v13100", + "v1311", + "v1312", + "v1313", + "v132", + "v1320", + "v1321", + "v1322", + "v1323", + "v133", + "v1330", + "v1331-2025-01-10", + "v1332-2025-02-10", + "v1333-2025-02-25", + 
"v134", + "v1340", + "v1340-2025-03-10", + "v1341-2025-03-24", + "v135", + "v1350", + "v136", + "v1360", + "v1361", + "v137", + "v1370", + "v138", + "v1380", + "v1381", + "v1382", + "v1383", + "v139", + "v1390", + "v140", + "v1400", + "v1401", + "v1403", + "v141", + "v1411", + "v1412", + "v1413", + "v1415", + "v142", + "v1421", + "v1423", + "v1425", + "v143", + "v1430", + "v1431", + "v1433", + "v144", + "v1440", + "v1441", + "v145", + "v15", + "v150", + "v1500", + "v1501", + "v151", + "v152", + "v153", + "v154", + "v155", + "v156", + "v157", + "v158", + "v159", + "v16", + "v160", + "v161", + "v162", + "v163", + "v164", + "v165", + "v166", + "v17", + "v170", + "v171", + "v1710", + "v1711", + "v1712", + "v1713", + "v1714", + "v1715", + "v1716", + "v1717", + "v172", + "v173", + "v174", + "v175", + "v176", + "v177", + "v178", + "v179", + "v18", + "v180", + "v181", + "v1810", + "v182", + "v183", + "v184", + "v185", + "v186", + "v187", + "v188", + "v189", + "v1891", + "v190", + "v191", + "v192", + "v193", + "v194", + "v195", + "v196", + "v197", + "v198", + "v200", + "v200-alpha1", + "v200-alpha10", + "v200-alpha11", + "v200-alpha12", + "v200-alpha13", + "v200-alpha14", + "v200-alpha15", + "v200-alpha16", + "v200-alpha17", + "v200-alpha18", + "v200-alpha19", + "v200-alpha2", + "v200-alpha20", + "v200-alpha21", + "v200-alpha3", + "v200-alpha4", + "v200-alpha5", + "v200-alpha6", + "v200-alpha7", + "v200-alpha8", + "v200-alpha9", + "v200-beta1", + "v200-beta10", + "v200-beta11", + "v200-beta12", + "v200-beta13", + "v200-beta14", + "v200-beta15", + "v200-beta16", + "v200-beta2", + "v200-beta3", + "v200-beta4", + "v200-beta5", + "v200-beta6", + "v200-beta7", + "v200-beta8", + "v200-beta9", + "v200-rc0", + "v200-rc1", + "v200-rc2", + "v200-rc3", + "v200-rc4", + "v201", + "v201-general-availability", + "v202", + "v202-general-availability", + "v203", + "v203-general-availability", + "v204", + "v204-general-availability", + "v205-general-availability", + 
"v206-general-availability", + "v207", + "v208", + "v209", + "v210", + "v2100", + "v211", + "v220", + "v221", + "v230", + "v231", + "v240", + "v241", + "v242", + "v243", + "v244", + "v250", + "v251", + "v260", + "v261", + "v270", + "v271", + "v2710", + "v2711", + "v2712", + "v273", + "v274", + "v275", + "v276", + "v277", + "v278", + "v279", + "v280", + "v290", + "v291", + "v292", + "v293", + "v294", + "v295", + "v296", + "v297", + "v298", + "v299", + "v2x-influx-cli-not-supported", + "v3-wayfinding-close", + "v3-wayfinding-modal", + "v3-wayfinding-opt-out", + "v3-wayfinding-opt-out-input", + "v3-wayfinding-stay", + "v3-wayfinding-switch", + "v300", + "v300-0beta1", + "v300-0beta2", + "v300-0beta3", + "v301", + "v302", + "v303", + "v310", + "valcolumn", + "valid-duration-units", + "valid-durations-units", + "valid-durations-units-include", + "validate-a-hex-color-code-string", + "validate-a-template", + "validate-all-influxdb-templates-in-a-directory", + "validate-an-influxdb-template-from-a-local-file", + "validate-an-influxdb-template-from-a-url", + "validate-influxdb-templates-from-multiple-files", + "validate-keys--false", + "validate-your-telegraf-configuration-with---test", + "value", + "value-count", + "valuecolumn", + "valuecolumns", + "valuecounter", + "valuedst", + "values-per-second", + "valueswrittenok", + "valueswrittenok-1", + "var", + "var_pop", + "var_population", + "var_samp", + "var_sample", + "variable", + "variable-assignment", + "variable-name-restrictions", + "variable-types", + "variables", + "variables-and-literals", + "varnish", + "vars", + "vault", + "vault-addr", + "vault-address", + "vault-cacert", + "vault-capath", + "vault-client-cert", + "vault-client-key", + "vault-client-timeout", + "vault-max-retries", + "vault-skip-verify", + "vault-tls-server-name", + "vault-token", + "vectorize", + "venv-install", + "verb-usage", + "verbose", + "verbose-option", + "verification", + "verify", + 
"verify-1x-users-were-migrated-to-hahahugoshortcode1641s33hbhb", + "verify-buckets-have-a-mapping", + "verify-certificate-and-key-files", + "verify-certificate-and-key-files-1", + "verify-database-components", + "verify-dns-resolution", + "verify-download-integrity-using-sha-256", + "verify-file-integrity-and-authenticity-using-gpg", + "verify-influxdb-resources-data-and-integrations", + "verify-ldap-authentication-using-a-local-configuration", + "verify-ldap-authentication-using-the-server-configuration", + "verify-role-deletion", + "verify-role-permissions", + "verify-roles", + "verify-seriesfile", + "verify-snapshots", + "verify-the-authenticity-of-downloaded-binary-optional", + "verify-the-install", + "verify-the-secret-exists", + "verify-tls-configuration", + "verify-tls-connection", + "verify-tombstone", + "verify-user-in-role", + "verify-user-permissions", + "verify-user-removal", + "verify-your-license", + "verify-your-setup", + "verifying-the-restart", + "verifying-the-stack", + "version", + "version-format", + "version-maintenance", + "vertica-data-source-name", + "vertica-to-flux-data-type-conversion", + "vertical-scaling", + "vertically-scale-a-component", + "victorops", + "victorops-cpu-alerttick", + "victorops-package", + "victorops-settings-in-kapacitorconf", + "victorops-setup", + "video-arrow", + "view-a-birds-migration-path", + "view-a-list-of-all-checks", + "view-a-list-of-all-notification-rules", + "view-a-single-token", + "view-a-tasks-run-history-in-the-influxdb-ui", + "view-a-tasks-run-history-with-the-influx-cli", + "view-a-tasks-run-history-with-the-influxdb-api", + "view-a-template-summary", + "view-abs-query-example", + "view-account-information", + "view-acos-query-example", + "view-acosh-query-example", + "view-alert-history", + "view-all-dbrp-mappings", + "view-all-partitions-for-a-table", + "view-all-stored-query-logs", + "view-and-create-influxdb-dbrp-mappings", + "view-and-create-influxdb-v1-authorizations", + 
"view-and-download-the-telegrafconf", + "view-approx_distinct-query-example", + "view-approx_median-query-example", + "view-approx_percentile_cont-query-example", + "view-approx_percentile_cont_with_weight-query-example", + "view-array_agg-query-example", + "view-arrow_cast-query-example", + "view-arrow_typeof-query-example", + "view-ascii-query-example", + "view-asin-query-example", + "view-asinh-query-example", + "view-atan-query-example", + "view-atan2-query-example", + "view-atanh-query-example", + "view-available-influxdb-client-libraries", + "view-avg-query-example", + "view-bit_and-query-example", + "view-bit_length-query-example", + "view-bit_or-query-example", + "view-bit_xor-query-example", + "view-bool_and-query-example", + "view-bool_or-query-example", + "view-btrim-query-example", + "view-bucket-retention-periods", + "view-bucket-retention-periods-and-shard-group-durations", + "view-bucket-schema-type-and-schemas", + "view-buckets", + "view-buckets-in-the-influxdb-ui", + "view-buckets-using-the-influx-cli", + "view-buckets-using-the-influxdb-http-api", + "view-cbrt-query-example", + "view-ceil-query-example", + "view-check-details", + "view-checkpoint-release-upgrade-example", + "view-checks", + "view-chr-query-example", + "view-chronograf-dashboards-in-presentation-mode", + "view-cluster-details", + "view-coalesce-query-example", + "view-collectd-configuration-properties", + "view-column-information-for-a-table", + "view-command-updates", + "view-compaction-totals-for-a-specific-table", + "view-compaction-totals-for-each-table", + "view-concat-query-example", + "view-concat_ws-query-example", + "view-configuration-option-parity", + "view-configuration-sections", + "view-corr-query-example", + "view-cos-query-example", + "view-cosh-query-example", + "view-cot-query-example", + "view-count-query-example", + "view-covar-query-example", + "view-covar_pop-query-example", + "view-covar_samp-query-example", + 
"view-critical-check-statuses-from-the-last-hour", + "view-cume_dist-query-example", + "view-current_date-query-example", + "view-current_time-query-example", + "view-data-information-and-statistics", + "view-data-usage", + "view-database-retention-periods", + "view-date_bin-query-example", + "view-date_bin_wallclock-query-example", + "view-date_part-query-examples", + "view-date_trunc-query-examples", + "view-degrees-query-example", + "view-dense_rank-query-example", + "view-digest-query-example", + "view-distinct_cache-query-example", + "view-earthquakes-reported-by-usgs", + "view-encode-query-example", + "view-ends_with-query-example", + "view-error-counts-by-severity-over-time", + "view-example", + "view-example-1x-databases-and-retention-policies-as-influxdb-cloud-buckets", + "view-example-1x-databases-and-retention-policies-as-influxdb-cloud-dedicated-databases", + "view-example-1x-databases-and-retention-policies-as-influxdb-clustered-databases", + "view-example-appinstance-with-resource-requests-and-limits", + "view-example-aws-s3-access-policy", + "view-example-buckets-output", + "view-example-csv-formatted-output", + "view-example-csv-formatted-results", + "view-example-for-fnfield_key-n", + "view-example-for-fnfield_key-tag_key-n", + "view-example-general-statistics-output", + "view-example-health-summary", + "view-example-influxd-ctl-show-output", + "view-example-ingester-storage-configuration", + "view-example-ingress-statistics-output", + "view-example-input", + "view-example-input-and-output", + "view-example-input-and-output-data", + "view-example-json-formatted-output", + "view-example-json-formatted-results", + "view-example-json-line-formatted-output", + "view-example-json-line-formatted-results", + "view-example-json-output", + "view-example-of-a-sparse-non-homogenous-schema", + "view-example-of-disabling-partial-writes-in-your-appinstance-resource", + "view-example-of-environment-variables-in-all-components", + "view-example-output", + 
"view-example-output-1", + "view-example-output-2", + "view-example-output-3", + "view-example-output-4", + "view-example-output-5", + "view-example-output-6", + "view-example-output-7", + "view-example-output-8", + "view-example-partition-templates-and-keys", + "view-example-pretty-formatted-output", + "view-example-pretty-formatted-results", + "view-example-response-body", + "view-example-results", + "view-example-results-1", + "view-example-results-2", + "view-example-results-3", + "view-example-results-with-unix-nanosecond-timestamps", + "view-example-schemafieldkeys-output", + "view-example-schemameasurementfieldkeys-output", + "view-example-schemameasurements-output", + "view-example-schemameasurementtagkeys-output", + "view-example-schemameasurementtagvalues-output", + "view-example-schematagkeys-output", + "view-example-schematagvalues-output", + "view-example-table-formatted-results", + "view-example-valuesyaml-with-resource-requests-and-limits", + "view-example-with-an-array-of-json-objects", + "view-example-with-an-array-of-scalar-values", + "view-examples-of-json-arrays-that-cannot-be-directly-parsed-into-flux-arrays", + "view-examples-of-using-exists-to-check-for-non-null-dynamic-types", + "view-existing-dbrp-mappings", + "view-existing-grants", + "view-existing-restrictions", + "view-existing-roles", + "view-existing-v1-authorizations", + "view-exp-query-example", + "view-explain-analyze-example-output", + "view-explain-example-output", + "view-extract-query-example", + "view-factorial-query-example", + "view-find_in_set-query-example", + "view-first_value-query-example", + "view-flightqueryjava", + "view-floor-query-example", + "view-flux-query-results", + "view-free-plan-information", + "view-from-clause-subquery-example", + "view-from_unixtime-query-example", + "view-full-example-telegraf-configuration-file", + "view-gcd-query-example", + "view-general-kapacitor-statistics", + "view-grouping-query-example", + 
"view-in-examples-using-a-list-literal", + "view-in-examples-using-a-query", + "view-in-presentation-mode", + "view-incoming-data", + "view-influxd-ctl-show-output-with-added-labels", + "view-influxd-ctl-show-output-with-deleted-label", + "view-influxd-ctl-show-output-with-updated-labels", + "view-influxdb-api-documentation-locally", + "view-influxdb-flight-and-grpc-status-codes", + "view-influxdb-oss-replication-service-metrics", + "view-influxql-query-results", + "view-information-about-failed-runs-that-would-be-executed", + "view-initcap-query-example", + "view-input-and-downsampled-output", + "view-input-and-pivoted-output", + "view-instr-query-example", + "view-interpolate-query-example", + "view-isnan-query-example", + "view-iszero-query-example", + "view-json-manifest", + "view-kapacitor-flux-task-logs", + "view-kapacitor-ingress-statistics", + "view-kapacitord-config-output", + "view-lag-query-example", + "view-last_cache-query-example", + "view-last_value-query-example", + "view-lcm-query-example", + "view-lead-query-example", + "view-left-query-example", + "view-length-query-example", + "view-levenshtein-query-example", + "view-license-controller-logs", + "view-ln-query-example", + "view-locf-query-example", + "view-log-query-example", + "view-log10-query-example", + "view-log2-query-example", + "view-logs-for-a-task-with-the-influxdb-api", + "view-logs-in-chronograf", + "view-lower-query-example", + "view-lpad-query-example", + "view-ltrim-query-example", + "view-make_date-query-example", + "view-mapped-environment-variables", + "view-max-query-example", + "view-md5-query-example", + "view-median-query-example", + "view-members", + "view-members-of-organization-in-the-influxdb-ui", + "view-members-of-organization-using-the-influx-cli", + "view-min-query-example", + "view-more-buckets-in-the-influx-cli", + "view-more-export-command-examples", + "view-myinfluxdbyml-appinstance-configuration", + "view-nanvl-query-example", + "view-node-labels", + 
"view-notification-endpoint-details", + "view-notification-endpoint-history", + "view-notification-rule-details", + "view-notification-rules", + "view-notifications-triggered-by-a-notification-rule", + "view-now-query-example", + "view-nth_value-query-example", + "view-ntile-query-example", + "view-nullif-query-example", + "view-nvl-query-example", + "view-nvl2-query-example", + "view-octet_length-query-example", + "view-opentsdb-configuration-properties", + "view-or-hide-annotations", + "view-or-update-notebooks", + "view-organization", + "view-organizations", + "view-organizations-in-the-influxdb-ui", + "view-organizations-using-the-influx-cli", + "view-output-table", + "view-overlay-query-example", + "view-partition-information", + "view-percent-encoded-dsn-example", + "view-percent_rank-query-example", + "view-pi-query-example", + "view-position-query-example", + "view-power-query-example", + "view-program-output", + "view-queries", + "view-query-logs-for-a-specific-query-within-a-time-interval", + "view-query-logs-for-queries-with-end-to-end-durations-above-a-threshold", + "view-query-results", + "view-radians-query-example", + "view-random-query-example", + "view-rank-query-example", + "view-raw-data", + "view-regexp_count-query-example", + "view-regexp_like-query-example", + "view-regexp_match-query-example", + "view-regexp_replace-query-example", + "view-regr_avgx-query-example", + "view-regr_avgy-query-example", + "view-regr_count-query-example", + "view-regr_intercept-query-example", + "view-regr_r2-query-example", + "view-regr_slope-query-example", + "view-regr_sxx-query-example", + "view-regr_sxy-query-example", + "view-regr_syy-query-example", + "view-repeat-query-example", + "view-replace-query-example", + "view-returned-markdown-table", + "view-reverse-query-example", + "view-right-query-example", + "view-round-query-example", + "view-row_number-query-example", + "view-rpad-query-example", + "view-rtrim-query-example", + "view-runtime-configuration", 
+ "view-sample-configtoml", + "view-sample-ldap-configuration", + "view-schema-type-and-schemas-in-the-influxdb-ui", + "view-schema-type-and-schemas-using-the-influx-cli", + "view-schema-type-and-schemas-using-the-influxdb-http-api", + "view-secret-keys", + "view-secret-keys-using-the-influx-cli", + "view-secret-keys-using-the-influxdb-api", + "view-secret-keys-using-the-influxdb-cloud-ui", + "view-selector_first-query-example", + "view-selector_last-query-example", + "view-selector_max-query-example", + "view-selector_min-query-example", + "view-setup-instructions", + "view-sha224-query-example", + "view-sha256-query-example", + "view-sha384-query-example", + "view-sha512-query-example", + "view-show-all-example-output", + "view-signum-query-example", + "view-sin-query-example", + "view-sinh--query-example", + "view-split_part-query-example", + "view-sql-query-results", + "view-sqrt-query-example", + "view-stacks", + "view-starts_with-query-example", + "view-statuses-generated-by-a-check", + "view-statuses-generated-by-a-notification-rule", + "view-stddev-query-example", + "view-stddev_pop-query-example", + "view-stddev_samp-query-example", + "view-string_agg-query-example", + "view-strpos-query-example", + "view-substr-query-example", + "view-substr_index-query-example", + "view-sum-query-example", + "view-systemcompactor-schema", + "view-systempartitions-schema", + "view-systemqueries-schema", + "view-systemtables-schema", + "view-table-illustration-of-a-full-outer-join", + "view-table-illustration-of-a-left-outer-join", + "view-table-illustration-of-a-right-outer-join", + "view-table-illustration-of-an-inner-join", + "view-tan-query-example", + "view-tanh-query-example", + "view-task-run-history-and-logs", + "view-task-run-logs", + "view-task-run-logs-with-the-influxdb-api", + "view-tasks", + "view-tasks-in-the-influxdb-ui", + "view-tasks-with-the-influx-cli", + "view-tasks-with-the-influxdb-api", + "view-telegraf-configurations", + 
"view-the-command-line-that-invoked-influxdb", + "view-the-dockerfile", + "view-the-incoming-data", + "view-the-kapacitor-server-or-cluster-id", + "view-the-linebuilder-python-implementation", + "view-the-maven-pomxml", + "view-the-monitoring-dashboard", + "view-the-number-of-partitions-for-a-specific-table", + "view-the-number-of-partitions-per-table", + "view-the-partition-template-of-a-specific-table", + "view-the-requirements-for-azure-blob-storage", + "view-the-requirements-for-google-cloud-storage", + "view-the-returned-json-object", + "view-the-size-in-megabytes-of-a-specific-table", + "view-the-size-in-megabytes-per-table", + "view-the-status-of-cache-warm-operations", + "view-the-string-representation-of-any-flux-type", + "view-the-total-size-in-bytes-of-compacted-partitions-for-a-specific-table", + "view-the-total-size-in-bytes-of-compacted-partitions-per-table", + "view-the-written-data", + "view-time-range-selector", + "view-time-zone-discontinuity-example", + "view-to_char-query-example", + "view-to_date-query-example", + "view-to_hex-query-example", + "view-to_local_time-query-example", + "view-to_local_time-query-example-with-a-time-zone-offset", + "view-to_local_time-query-example-with-date_bin", + "view-to_timestamp-query-example", + "view-to_timestamp_micros-example-with-string-format-parsing", + "view-to_timestamp_micros-query-example", + "view-to_timestamp_millis-example-with-string-format-parsing", + "view-to_timestamp_millis-query-example", + "view-to_timestamp_nanos-example-with-string-format-parsing", + "view-to_timestamp_nanos-query-example", + "view-to_timestamp_seconds-example-with-string-format-parsing", + "view-to_timestamp_seconds-query-example", + "view-to_unixtime-example-with-string-format-parsing", + "view-to_unixtime-query-example", + "view-tokens", + "view-tokens-in-the-influxdb-ui", + "view-tokens-using-the-influx-cli", + "view-tokens-using-the-influxdb-api", + "view-translate-query-example", + "view-trim-query-example", + 
"view-trunc-query-example", + "view-tz-and-timestamp-comparison", + "view-tz-query-example", + "view-tz-query-example-from-getting-started-data", + "view-udp-configuration-properties", + "view-upper-query-example", + "view-usage-based-plan-information", + "view-users", + "view-users-using-the-influx-cli", + "view-uuid-query-example", + "view-var-query-example", + "view-var_pop-query-example", + "view-var_samp-query-example", + "view-variables", + "view-variables-in-the-data-explorer", + "view-variables-in-the-organization", + "view-version-1-tables-when-queried-from-influxdb", + "view-version-2-tables-when-queried-from-influxdb", + "view-written-data", + "view-your-dashboard-id", + "view-your-influxdb-logs", + "view-your-organization-id", + "view-your-organization-name", + "view-your-runtime-server-configuration", + "view-your-server-configuration-with-the-api", + "view-your-server-configuration-with-the-cli", + "viewers-roleviewer", + "viewing-alert-tasks-in-chronograf", + "viewing-alerts-from-tasks-in-the-alert-history-of-chronograf", + "virtual-dbrp-mappings", + "virtual-env-location", + "virtual-training-schedule", + "virtual-training-videos", + "visibleto", + "visualization", + "visualization-fixes", + "visualization-options-for-pressure-gauge", + "visualization-types", + "visualization-types-in-chronograf", + "visualization-updates", + "visualize-data", + "visualize-errors-by-severity", + "visualize-influxdb-internal-metrics", + "visualize-kapacitor-metrics", + "visualize-prometheus-histograms-in-influxdb", + "visualize-summary-metric-quantile-values", + "visualize-that-data-in-a-chronograf-dashboard", + "visualize-your-query", + "vmware-vsphere-hosts", + "vmware-vsphere-overview", + "vmware-vsphere-vms", + "volume-reference", + "vsflux-and-flux-lsp-no-longer-maintained", + "vsphere", + "vsphere-dashboard-template", + "vtimerangestart", + "vtimerangestop", + "vwindowperiod", + "w", + "wait-before-writing-to-a-new-database-with-the-same-name", + 
"wait-before-writing-to-a-new-database-with-the-same-name-as-a-deleted-database", + "wait-for-a-recording", + "wait-for-replays", + "wal-dir", + "wal-dir--varlibinfluxdbwal", + "wal-directories-and-files-layout", + "wal-directory", + "wal-failed-write-attempts", + "wal-flush-interval", + "wal-fsync-delay", + "wal-fsync-delay--0s", + "wal-max-write-buffer-size", + "wal-size", + "wal-snapshot-size", + "wal-subsystem-metrics", + "wal-write-ahead-log", + "wal-write-attempts", + "walcompactiontimems", + "want", + "warm-the-cache", + "warm-the-ldap-cache", + "warmup_type", + "warn", + "warnreset", + "warns_triggered", + "warp10", + "watch", + "water_level_checsumflux", + "water_level_processflux", + "wavefront", + "web", + "web-console", + "webexteams-package", + "webhookid", + "webhooks", + "webhooktoken", + "websocket", + "week_offset", + "west-europe-amsterdam", + "what-about-buckets-and-measurements", + "what-alternatives-are-available-in-light-of-this-eol-announcement", + "what-alternatives-do-you-have-for-flux-tasks", + "what-are-the-configuration-recommendations-and-schema-guidelines-for-writing-sparse-historical-data", + "what-are-the-expected-next-steps", + "what-are-the-minimum-and-maximum-integers-that-influxdb-can-store", + "what-are-the-minimum-and-maximum-timestamps-that-influxdb-can-store", + "what-causes-unexpected-or-additional-values-with-same-timestamp", + "what-determines-the-time-intervals-returned-by-group-by-time-queries", + "what-different-types-of-api-tokens-exist", + "what-do-you-mean-by-flux-is-in-maintenance-mode", + "what-happened-to-buckets-and-measurements", + "what-happened-to-the-_time-column", + "what-is-a-user-defined-function-udf", + "what-is-in-this-section", + "what-is-running", + "what-is-series-cardinality", + "what-is-the-difference-between-a-socket-udf-and-a-process-udf", + "what-is-the-native-collector---mqtt-feature", + "what-is-the-processing-engine", + 
"what-is-the-relationship-between-shard-group-durations-and-retention-periods", + "what-is-the-relationship-between-shard-group-durations-and-retention-policies", + "what-is-time-series-data", + "what-newline-character-does-the-influxdb-api-require", + "what-newline-character-does-the-influxdb-write-api-require", + "what-words-and-characters-should-i-avoid-when-writing-data-to-influxdb", + "what-you-will-need", + "whats-in-this-guide", + "when-creating-a-bucket", + "when-do-i-need-more-ram", + "when-does-data-actually-get-deleted", + "when-querying-data", + "when-should-i-single-quote-and-when-should-i-double-quote-in-queries", + "when-should-i-single-quote-and-when-should-i-double-quote-when-writing-data", + "when-should-i-use-single-quote-versus-double-quotes-in-a-query", + "when-should-we-use-kapacitor-instead-of-cqs", + "when-should-we-use-stream-tasks-vs-batch-tasks-in-kapacitor", + "when-to-consider-custom-partitioning", + "when-to-use-helm", + "when-to-use-kubectl", + "when-to-use-kubit-cli", + "when-to-use-the-join-package", + "when-to-use-union-and-pivot-instead-of-join-functions", + "when-writing-data", + "where", + "where-are-my-certificates", + "where-can-i-find-influxdb-enterprise-logs", + "where-can-i-find-influxdb-logs", + "where-can-i-get-more-information-on-using-telegraf-as-a-replacement-for-native-collector---mqtt", + "where-can-i-see-the-current-status-of-influxdb-cloud", + "where-can-i-see-the-current-status-of-my-influxdb-instance", + "where-clause", + "where-clause-examples", + "where-clause-subqueries", + "where-clause-with-correlated-subquery", + "where-clause-with-non-scalar-subquery", + "where-clause-with-scalar-subquery", + "where-data-lives", + "where-subquery-examples", + "where-subquery-syntax", + "where-to-from-here", + "where-to-next", + "where-to-store-data-tag-or-field", + "which-influxql-functions-support-nesting", + "whitespace", + "whitespace-i", + "whitespace-ii", + "who-do-i-contact-for-billing-issues", + 
"why-am-i-getting-the-error-total-duration-of-queries-in-the-last-30s-exceeds-limit-of-25m0s", + "why-am-i-missing-data-after-creating-a-new-default-retention-policy", + "why-am-i-seeing-a-409-error-in-some-of-my-data-node-logs", + "why-am-i-seeing-a-503-service-unavailable-error-in-my-meta-node-logs", + "why-am-i-seeing-error-writing-count-stats--partial-write-errors-in-my-data-node-logs", + "why-am-i-seeing-hinted-handoff-queue-not-empty-errors-in-my-data-node-logs", + "why-am-i-seeing-queue-is-full-errors-in-my-data-node-logs", + "why-am-i-seeing-unable-to-determine-if-hostname-is-a-meta-node-when-i-try-to-add-a-meta-node-with-influxd-ctl-join", + "why-are-my-into-queries-missing-data", + "why-arent-data-dropped-after-ive-altered-a-retention-policy", + "why-cant-i-query-boolean-field-values", + "why-do-my-queries-return-no-data-or-partial-data", + "why-do-these-results-include-timestamps-outside-of-the-queried-time-range", + "why-does-fillprevious-return-empty-results", + "why-does-influxdb-fail-to-parse-microsecond-units-in-the-configuration-file", + "why-does-my-query-return-epoch-0-as-the-timestamp", + "why-does-series-cardinality-matter", + "why-doesnt-my-query-return-data", + "why-dont-my-group-by-time-queries-return-timestamps-that-occur-after-now", + "why-indexing-matters-the-schema-case-study", + "why-is-influxdb-reporting-an-out-of-memory-oom-exception-when-my-system-has-free-memory", + "why-is-my-query-slow", + "why-is-my-query-with-a-where-or-time-clause-returning-empty-results", + "why-is-this-feature-being-eold", + "why-is-this-manual-process-required", + "why-isnt-data-dropped-after-i-update-a-buckets-retention-period", + "why-shouldnt-i-just-use-a-relational-database", + "why-your-schema-matters", + "width", + "wildcard-certificates-signed-by-a-certificate-authority", + "wildcard-expressions", + "wildcard-matching", + "will-i-lose-any-data-already-ingested", + "win-system", + "win_eventlog", + "win_perf_counters", + "win_services", + "win_system", 
+ "win_wmi", + "window", + "window-aggregate-functions", + "window-amp-aggregate", + "window-by-calendar-month", + "window-by-calendar-months-and-years", + "window-by-week", + "window-clause", + "window-data-into-30-second-intervals", + "window-data-into-thirty-second-intervals", + "window-every-20-seconds-covering-40-second-periods", + "window-frames", + "window-functions", + "window-output-tables", + "window-your-data", + "windowing", + "windowing-data", + "windows", + "windows-default-paths", + "windows-file-system-overview", + "windows-newlines", + "windows-service-commands", + "windows-support", + "wire-format", + "wireguard", + "wireless", + "with-clause", + "withdraw-an-invitation", + "withfit", + "withminsse", + "work-the-shard-system", + "work-with-geo-temporal-data", + "work-with-multiple-data-sources", + "work-with-prometheus-metric-types", + "working-with-tags-fields-and-variables", + "working_cardinality", + "works-with-influxdb-cloud-bucket-schemas", + "works-with-influxdb-oss-2x", + "workspace", + "write", + "write-a-basic-query", + "write-a-field-value-10-as-a-float-to-influxdb", + "write-a-point-to-a-database-that-doesnt-exist", + "write-a-point-to-a-retention-policy-that-doesnt-exist", + "write-a-point-to-the-database-mydb-and-the-retention-policy-myrp", + "write-a-point-to-the-database-mydb-using-basic-authentication", + "write-a-point-to-the-database-mydb-using-http-authentication", + "write-a-point-to-the-database-mydb-with-a-nanosecond-timestamp", + "write-a-point-to-the-database-mydb-with-a-timestamp-in-seconds", + "write-a-point-to-the-database-mydb-with-the-local-servers-nanosecond-timestamp", + "write-a-point-with-a-nanosecond-timestamp-to-the-mydb-database", + "write-a-point-with-an-incorrect-timestamp", + "write-a-point-with-invalid-authentication-credentials", + "write-a-point-with-special-characters", + "write-a-point-with-the-local-servers-nanosecond-timestamp-to-the-mydb-database", + "write-a-single-line-of-line-protocol", + 
"write-aggregated-results-for-more-than-one-measurement-to-a-different-database-downsampling-with-backreferencing", + "write-aggregated-results-to-a-measurement-downsampling", + "write-ahead-log-wal", + "write-ahead-log-wal-persistence", + "write-an-external-plugin", + "write-an-integer-to-a-field-that-previously-accepted-a-float", + "write-and-query-limits-http-response-code", + "write-annotated-csv-data-from-a-compressed-file", + "write-annotated-csv-data-from-a-file", + "write-annotated-csv-data-from-a-url", + "write-annotated-csv-data-from-multiple-files", + "write-annotated-csv-data-from-multiple-sources", + "write-annotated-csv-data-from-multiple-urls", + "write-annotated-csv-data-using-rate-limiting", + "write-annotated-csv-data-via-stdin", + "write-annotated-csv-from-a-file", + "write-another-set-of-fields-to-new-measurement", + "write-api", + "write-api-behaviors", + "write-back-to-influxdb", + "write-buffer-size", + "write-buffer-size--1000", + "write-concurrency", + "write-concurrency--40", + "write-consistency", + "write-csv-data", + "write-csv-data-to-influxdb", + "write-csv-data-to-influxdb-cloud-dedicated", + "write-csv-data-to-influxdb-clustered", + "write-csv-data-with-the-influx-cli", + "write-data", + "write-data-from-a-file", + "write-data-in-line-protocol-syntax", + "write-data-to-a-non-default-retention-policy", + "write-data-to-influxdb", + "write-data-to-influxdb-with-go", + "write-data-to-influxdb-with-insert", + "write-data-to-influxdb-with-python", + "write-data-using-basic-authentication", + "write-data-using-influxdb-api-client-libraries", + "write-data-using-the-cli", + "write-data-using-the-http-api", + "write-data-using-the-influxdb-api", + "write-data-using-the-telegraf-influxdb-output-plugin", + "write-data-using-token-authentication", + "write-data-via-stdin", + "write-data-with-client-libraries", + "write-data-with-flux", + "write-data-with-insert", + "write-data-with-line-protocol", + 
"write-data-with-millisecond-unix-timestamps", + "write-data-with-no-code-third-party-technologies", + "write-data-with-output-plugins", + "write-data-with-the-client-library", + "write-data-with-the-influx-cli", + "write-data-with-the-influxdb-api", + "write-data-with-the-influxdb-javascript-client-library", + "write-data-with-third-party-technologies", + "write-examples", + "write-extended-annotated-csv-data-via-stdin", + "write-failed-for-shard---engine-cache-maximum-memory-size-exceeded", + "write-home-sensor-actions-data-to-influxdb", + "write-home-sensor-data-to-influxdb", + "write-http-endpoint", + "write-line-protocol", + "write-line-protocol-and-accept-partial-writes", + "write-line-protocol-data-to-influxdb-cloud-dedicated", + "write-line-protocol-data-to-influxdb-cloud-serverless", + "write-line-protocol-data-to-influxdb-clustered", + "write-line-protocol-from-a-compressed-file", + "write-line-protocol-from-a-file", + "write-line-protocol-from-a-url", + "write-line-protocol-from-multiple-files", + "write-line-protocol-from-multiple-sources", + "write-line-protocol-from-multiple-urls", + "write-line-protocol-to-influxdb", + "write-line-protocol-to-influxdb-3", + "write-line-protocol-to-influxdb-3-using-credentials-from-the-connection-profile", + "write-line-protocol-to-influxdb-3-with-a-custom-batch-size", + "write-line-protocol-to-influxdb-3-with-a-custom-client-timeout", + "write-line-protocol-to-influxdb-3-with-non-default-timestamp-precision", + "write-line-protocol-to-your-influxdb-3-server", + "write-line-protocol-using-the-influx-write-command", + "write-line-protocol-via-stdin", + "write-logs-10-examples", + "write-logs-to-a-file", + "write-min-max-and-mean-values-to-influxdb-cloud", + "write-multiple-data-points-in-one-request", + "write-multiple-lines-of-line-protocol", + "write-one-set-of-fields-to-a-new-measurement", + "write-optimizations", + "write-options", + "write-pivoted-data-to-influxdb", + "write-prometheus-metrics-to-influxdb", + 
"write-prometheus-metrics-to-influxdb-at-regular-intervals", + "write-raw-query-results-back-to-influxdb", + "write-requests-at-router", + "write-responses", + "write-sample-data-to-influxdb-with-line-protocol", + "write-several-points-to-the-database-by-separating-points-with-a-new-line", + "write-several-points-to-the-database-mydb-by-separating-points-with-a-new-line", + "write-several-points-to-the-database-mydb-from-the-file-datatxt", + "write-socket-based-user-defined-functions-udfs", + "write-status-metrics", + "write-telemetry-data", + "write-test-data-to-the-new-database", + "write-the-bitcoin-price-sample-data-to-influxdb", + "write-the-bitcoin-sample-data-to-influxdb", + "write-the-data-to-a-different-field", + "write-the-device-authorization-to-a-bucket", + "write-the-downsampled-data-back-to-influxdb", + "write-the-field-value--1234456e78-as-a-float-to-influxdb", + "write-the-field-value-1-as-a-float-to-influxdb", + "write-the-field-value-1-as-an-integer-to-influxdb", + "write-the-field-value-stringing-along-as-a-string-to-influxdb", + "write-the-field-value-true-as-a-boolean-to-influxdb", + "write-the-home-sensor-actions-data-to-influxdb", + "write-the-home-sensor-data-to-influxdb", + "write-the-line-protocol-to-influxdb", + "write-the-noaa-bay-area-weather-data-to-influxdb", + "write-the-random-number-sample-data-to-influxdb", + "write-the-results-of-a-query-to-a-fully-qualified-measurement", + "write-the-results-of-a-query-to-a-measurement", + "write-time-as-a-field-key-and-attempt-to-query-it", + "write-time-as-a-measurement-and-query-it", + "write-time-as-a-tag-key-and-attempt-to-query-it", + "write-timeout", + "write-timeout--10s", + "write-to-a-bucket-and-query-the-written-data", + "write-to-an-amazon-rds-postgresql-database", + "write-to-influxdb-v1x-and-influxdb-cloud", + "write-to-influxdb-v1x-and-influxdb-cloud-dedicated", + "write-to-influxdb-v1x-and-influxdb-cloud-serverless", + "write-to-influxdb-v1x-and-influxdb-clustered", + 
"write-to-influxdb-v1x-and-v2x", + "write-to-multiple-influxdb-buckets", + "write-to-the-v1-http-write-endpoint", + "write-tracing", + "write-tracing--false", + "write-valid-schemas", + "write-validation-and-memory-buffer", + "write_errors", + "writeblocked", + "writeblocked-1", + "writebytes", + "writeconsistency", + "writedata", + "writedrop", + "writedropped", + "writedropped-1", + "writedropped-2", + "writeerr", + "writeerr-1", + "writeerror", + "writefailures", + "writenodereq", + "writenodereq-1", + "writenodereqfail", + "writenodereqfail-1", + "writenodereqpoints", + "writenodereqpoints-1", + "writeok", + "writeok-1", + "writeok-2", + "writepartial-enterprise-only", + "writepointsdropped", + "writepointserr", + "writepointsok", + "writereq", + "writereq-1", + "writereqactive", + "writereqbytes", + "writereqdurationns", + "writereqerr", + "writereqok", + "writes", + "writes-in-a-cluster", + "writes-total", + "writeshardfail", + "writeshardpointsreq", + "writeshardreq", + "writeshardreq-1", + "writeshardreq-2", + "writeshardreq-3", + "writeshardreqpoints", + "writeshardreqpoints-1", + "writeshardreqpoints-2", + "writetimeout", + "writing-a-float-to-a-field-that-previously-accepted-booleans", + "writing-a-point-to-a-database-that-doesnt-exist", + "writing-a-task-to-be-editable-in-chronograf", + "writing-a-udf", + "writing-a-user-defined-function-udf", + "writing-and-exploring-data", + "writing-and-querying-for-multi-node-setups", + "writing-data", + "writing-data-1", + "writing-data-from-api-to-disk", + "writing-data-to-influxdb", + "writing-individual-fields-with-different-timestamps", + "writing-modes", + "writing-multiple-points", + "writing-points-from-a-file", + "x", + "x-axis", + "x509_cert", + "x509_crl", + "xml", + "xpath-json", + "xpath-messagepack", + "xpath-protocol-buffers", + "xpath_protobuf_file-mandatory", + "xpath_protobuf_import_paths-optional", + "xpath_protobuf_skip_bytes-optional", + "xpath_protobuf_type-mandatory", + "xtremio", + "y", + 
"y-axis", + "yandex_cloud_monitoring", + "yield-multiple-results-from-a-query", + "you-must-use-a-local-license-file", + "youtube", + "zabbix", + "zenoss", + "zenoss-cpu-alerttick", + "zenoss-package", + "zenoss-settings-in-kapacitorconf", + "zenoss_cpu_handleryaml", + "zfs", + "zipkin", + "zookeeper" + ] + } +} diff --git a/layouts/index.html b/layouts/index.html index b04b66e5c..81859367a 100644 --- a/layouts/index.html +++ b/layouts/index.html @@ -19,7 +19,7 @@