diff --git a/.circleci/config.yml b/.circleci/config.yml index e0ee7ca74..1db280c45 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -31,10 +31,10 @@ jobs: command: cd api-docs && bash generate-api-docs.sh - run: name: Inject Flux stdlib frontmatter - command: node ./flux-build-scripts/inject-flux-stdlib-frontmatter.js + command: node ./flux-build-scripts/inject-flux-stdlib-frontmatter.cjs - run: name: Update Flux/InfluxDB versions - command: node ./flux-build-scripts/update-flux-versions.js + command: node ./flux-build-scripts/update-flux-versions.cjs - save_cache: key: install-{{ .Environment.CACHE_VERSION }}-{{ checksum ".circleci/config.yml" }} paths: diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index 1132ae5cf..505688122 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -103,11 +103,13 @@ GitHub Copilot should help document InfluxData products by creating clear, accur product_version: weight: # Page order (1-99, 101-199, etc.) 
``` -- Follow the shortcode documentation in `CONTRIBUTING.md` +- Follow the shortcode examples in `content/example.md` and the documentation + for docs-v2 contributors in `CONTRIBUTING.md` - Use provided shortcodes correctly: - Notes/warnings: `{{% note %}}`, `{{% warn %}}` - Product-specific: `{{% enterprise %}}`, `{{% cloud %}}` - Tabbed content: `{{< tabs-wrapper >}}`, `{{% tabs %}}`, `{{% tab-content %}}` + - Tabbed content for code examples (without additional text): `{{< code-tabs-wrapper >}}`, `{{% code-tabs %}}`, `{{% code-tab-content %}}` - Version links: `{{< latest >}}`, `{{< latest-patch >}}` - API endpoints: `{{< api-endpoint >}}` - Required elements: `{{< req >}}` diff --git a/.github/instructions/contributing.instructions.md b/.github/instructions/contributing.instructions.md new file mode 100644 index 000000000..4fb3b1efe --- /dev/null +++ b/.github/instructions/contributing.instructions.md @@ -0,0 +1,1705 @@ +--- +applyTo: "content/**/*.md, layouts/**/*.html" +--- + +# GitHub Copilot Instructions for InfluxData Documentation + +## Purpose and scope + +GitHub Copilot should help document InfluxData products +by creating clear, accurate technical content with proper +code examples, frontmatter, shortcodes, and formatting. + +# Contributing to InfluxData Documentation + +### Sign the InfluxData CLA + +The InfluxData Contributor License Agreement (CLA) is part of the legal framework +for the open source ecosystem that protects both you and InfluxData. +To make substantial contributions to InfluxData documentation, first sign the InfluxData CLA. +What constitutes a "substantial" change is at the discretion of InfluxData documentation maintainers. 
+ +[Sign the InfluxData CLA](https://www.influxdata.com/legal/cla/) + +_**Note:** Typo and broken link fixes are greatly appreciated and do not require signing the CLA._ + +_If you're new to contributing or you're looking for an easy update, see [`docs-v2` good-first-issues](https://github.com/influxdata/docs-v2/issues?q=is%3Aissue+is%3Aopen+label%3Agood-first-issue)._ + +## Make suggested updates + +### Fork and clone InfluxData Documentation Repository + +[Fork this repository](https://help.github.com/articles/fork-a-repo/) and +[clone it](https://help.github.com/articles/cloning-a-repository/) to your local machine. + +## Install project dependencies + +docs-v2 automatically runs format (Markdown, JS, and CSS) linting and code block tests for staged files that you try to commit. + +For the linting and tests to run, you need to install Docker and Node.js +dependencies. + +\_**Note:** +The git pre-commit and pre-push hooks are configured to run linting and tests automatically +when you commit or push changes. +We strongly recommend letting them run, but you can skip them +(and avoid installing related dependencies) +by including the `--no-verify` flag with your commit--for example, enter the following command in your terminal: + +```sh +git commit -m "" --no-verify +``` + +### Install Node.js dependencies + +To install dependencies listed in package.json: + +1. Install [Node.js](https://nodejs.org/en) for your system. +2. Install [Yarn](https://yarnpkg.com/getting-started/install) for your system. +3. Run `yarn` to install dependencies (including Hugo). +4. Install the Yarn package manager and run `yarn` to install project dependencies. + +`package.json` contains dependencies used in `/assets/js` JavaScript code and +dev dependencies used in pre-commit hooks for linting, syntax-checking, and testing. 
+ +Dev dependencies include: + +- [Lefthook](https://github.com/evilmartians/lefthook): configures and +manages git pre-commit and pre-push hooks for linting and testing Markdown content. +- [prettier](https://prettier.io/docs/en/): formats code, including Markdown, according to style rules for consistency +- [Cypress]: e2e testing for UI elements and URLs in content + +### Install Docker + +docs-v2 includes Docker configurations (`compose.yaml` and Dockerfiles) for running the Vale style linter and tests for code blocks (Shell, Bash, and Python) in Markdown files. + +Install [Docker](https://docs.docker.com/get-docker/) for your system. + +#### Build the test dependency image + +After you have installed Docker, run the following command to build the test +dependency image, `influxdata:docs-pytest`. +The tests defined in `compose.yaml` use the dependencies and execution +environment from this image. + +```bash +docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest . +``` + +### Run the documentation locally (optional) + +To run the documentation locally, follow the instructions provided in the README. + +### Install Visual Studio Code extensions + +If you use Microsoft Visual Studio (VS) Code, you can install extensions +to help you navigate, check, and edit files. + +docs-v2 contains a `./.vscode/settings.json` that configures the following extensions: + +- Comment Anchors: recognizes tags (for example, `//SOURCE`) and makes links and filepaths clickable in comments. +- Vale: shows linter errors and suggestions in the editor. +- YAML Schemas: validates frontmatter attributes. + +### Make your changes + +Make your suggested changes being sure to follow the [style and formatting guidelines](#style--formatting) outline below. + +## Lint and test your changes + +`package.json` contains scripts for running tests and linting. + +### Automatic pre-commit checks + +docs-v2 uses Lefthook to manage Git hooks that run during pre-commit and pre-push. 
The hooks run the scripts defined in `package.json` to lint Markdown and test code blocks. +When you try to commit changes (`git commit`), Git runs +the commands configured in `lefthook.yml` which pass your **staged** files to Vale, +Prettier, Cypress (for UI tests and link-checking), and Pytest (for testing Python and shell code in code blocks). + +### Skip pre-commit hooks + +**We strongly recommend running linting and tests**, but you can skip them +(and avoid installing dependencies) +by including the `LEFTHOOK=0` environment variable or the `--no-verify` flag with +your commit--for example: + +```sh +git commit -m "" --no-verify +``` + +```sh +LEFTHOOK=0 git commit +``` + +### Set up test scripts and credentials + +Tests for code blocks require your InfluxDB credentials and other typical +InfluxDB configuration. + +To set up your docs-v2 instance to run tests locally, do the following: + +1. **Set executable permissions on test scripts** in `./test/src`: + + ```sh + chmod +x ./test/src/*.sh + ``` + +2. **Create credentials for tests**: + + - Create databases, buckets, and tokens for the product(s) you're testing. + - If you don't have access to a Clustered instance, you can use your +Cloud Dedicated instance for testing in most cases. To avoid conflicts when + running tests, create separate Cloud Dedicated and Clustered databases. + +1. **Create .env.test**: Copy the `./test/env.test.example` file into each + product directory to test and rename the file as `.env.test`--for example: + + ```sh + ./content/influxdb/cloud-dedicated/.env.test + ``` + +2. Inside each product's `.env.test` file, assign your InfluxDB credentials to + environment variables: + + - Include the usual `INFLUX_` environment variables + - In + `cloud-dedicated/.env.test` and `clustered/.env.test` files, also define the + following variables: + + - `ACCOUNT_ID`, `CLUSTER_ID`: You can find these values in your `influxctl` + `config.toml` configuration file. 
+ - `MANAGEMENT_TOKEN`: Use the `influxctl management create` command to generate + a long-lived management token to authenticate Management API requests + + See the substitution + patterns in `./test/src/prepare-content.sh` for the full list of variables you may need to define in your `.env.test` files. + +3. For influxctl commands to run in tests, move or copy your `config.toml` file + to the `./test` directory. + +> [!Warning] +> +> - The database you configure in `.env.test` and any written data may +be deleted during test runs. +> - Don't add your `.env.test` files to Git. To prevent accidentally adding credentials to the docs-v2 repo, +Git is configured to ignore `.env*` files. Consider backing them up on your local machine in case of accidental deletion. + +#### Test shell and python code blocks + +[pytest-codeblocks](https://github.com/nschloe/pytest-codeblocks/tree/main) extracts code from python and shell Markdown code blocks and executes assertions for the code. +If you don't assert a value (using a Python `assert` statement), `--codeblocks` considers a non-zero exit code to be a failure. + +**Note**: `pytest --codeblocks` uses Python's `subprocess.run()` to execute shell code. + +You can use this to test CLI and interpreter commands, regardless of programming +language, as long as they return standard exit codes. + +To make the documented output of a code block testable, precede it with the +`` tag and **omit the code block language +descriptor**--for example, in your Markdown file: + +##### Example markdown + +```python +print("Hello, world!") +``` + + + +The next code block is treated as an assertion. +If successful, the output is the following: + +``` +Hello, world! +``` + +For commands, such as `influxctl` CLI commands, that require launching an +OAuth URL in a browser, wrap the command in a subshell and redirect the output +to `/shared/urls.txt` in the container--for example: + +```sh +# Test the preceding command outside of the code block. 
+# influxctl authentication requires TTY interaction-- +# output the auth URL to a file that the host can open. +script -c "influxctl user list " \ + /dev/null > /shared/urls.txt +``` + +You probably don't want to display this syntax in the docs, which unfortunately +means you'd need to include the test block separately from the displayed code +block. +To hide it from users, wrap the code block inside an HTML comment. +pytest-codeblocks will still collect and run the code block. + +##### Mark tests to skip + +pytest-codeblocks has features for skipping tests and marking blocks as failed. +To learn more, see the pytest-codeblocks README and tests. + +#### Troubleshoot tests + +### Pytest collected 0 items + +Potential reasons: + +- See the test discovery options in `pytest.ini`. +- For Python code blocks, use the following delimiter: + + ```python + # Codeblocks runs this block. + ``` + + `pytest --codeblocks` ignores code blocks that use the following: + + ```py + # Codeblocks ignores this block. + ``` + +### Vale style linting + +docs-v2 includes Vale writing style linter configurations to enforce documentation writing style rules, guidelines, branding, and vocabulary terms. + +To run Vale, use the Vale extension for your editor or the included Docker configuration. +For example, the following command runs Vale in a container and lints `*.md` (Markdown) files in the path `./content/influxdb/cloud-dedicated/write-data/` using the specified configuration for `cloud-dedicated`: + +```sh +docker compose run -T vale --config=content/influxdb/cloud-dedicated/.vale.ini --minAlertLevel=error content/influxdb/cloud-dedicated/write-data/**/*.md +``` + +The output contains error-level style alerts for the Markdown content. + +**Note**: We strongly recommend running Vale, but it's not included in the +docs-v2 pre-commit hooks](#automatic-pre-commit-checks) for now. +You can include it in your own Git hooks. 
+ +If a file contains style, spelling, or punctuation problems, +the Vale linter can raise one of the following alert levels: + +- **Error**: + - Problems that can cause content to render incorrectly + - Violations of branding guidelines or trademark guidelines + - Rejected vocabulary terms +- **Warning**: General style guide rules and best practices +- **Suggestion**: Style preferences that may require refactoring or updates to an exceptions list + +### Integrate Vale with your editor + +To integrate Vale with VSCode: + +1. Install the [Vale VSCode](https://marketplace.visualstudio.com/items?itemName=ChrisChinchilla.vale-vscode) extension. +2. In the extension settings, set the `Vale:Vale CLI:Path` value to the path of your Vale binary (`${workspaceFolder}/node_modules/.bin/vale` for Yarn-installed Vale). + +To use with an editor other than VSCode, see the [Vale integration guide](https://vale.sh/docs/integrations/guide/). + +### Configure style rules + +`/.ci/vale/styles/` contains configuration files for the custom `InfluxDataDocs` style. + +The easiest way to add accepted or rejected spellings is to enter your terms (or regular expression patterns) into the Vocabulary files at `.ci/vale/styles/config/vocabularies`. + +To add accepted/rejected terms for specific products, configure a style for the product and include a `Branding.yml` configuration. As an example, see `content/influxdb/cloud-dedicated/.vale.ini` and `.ci/vale/styles/Cloud-Dedicated/Branding.yml`. + +To learn more about configuration and rules, see [Vale configuration](https://vale.sh/docs/topics/config). + +### Submit a pull request + +Push your changes up to your forked repository, then [create a new pull request](https://help.github.com/articles/creating-a-pull-request/). + +## Style & Formatting + +### Markdown + +Most docs-v2 documentation content uses [Markdown](https://en.wikipedia.org/wiki/Markdown). 
+ +_Some parts of the documentation, such as `./api-docs`, contain Markdown within YAML and rely on additional tooling._ + +### Semantic line feeds + +Use [semantic line feeds](http://rhodesmill.org/brandon/2012/one-sentence-per-line/). +Separating each sentence with a new line makes it easy to parse diffs with the human eye. + +**Diff without semantic line feeds:** + +```diff +-Data is taking off. This data is time series. You need a database that specializes in time series. You should check out InfluxDB. ++Data is taking off. This data is time series. You need a database that specializes in time series. You need InfluxDB. +``` + +**Diff with semantic line feeds:** + +```diff +Data is taking off. +This data is time series. +You need a database that specializes in time series. +-You should check out InfluxDB. ++You need InfluxDB. +``` + +### Article headings + +Use only h2-h6 headings in markdown content. +h1 headings act as the page title and are populated automatically from the `title` frontmatter. +h2-h6 headings act as section headings. + +### Image naming conventions + +Save images using the following naming format: `project/version-context-description.png`. +For example, `influxdb/2-0-visualizations-line-graph.png` or `influxdb/2-0-tasks-add-new.png`. +Specify a version other than 2.0 only if the image is specific to that version. + +## Page frontmatter + +Every documentation page includes frontmatter which specifies information about the page. +Frontmatter populates variables in page templates and the site's navigation menu. 
+ +```yaml +title: # Title of the page used in the page's h1 +seotitle: # Page title used in the html title and used in search engine results +list_title: # Title used in article lists generated using the {{< children >}} shortcode +description: # Page description displayed in search engine results +menu: + influxdb_2_0: + name: # Article name that only appears in the left nav + parent: # Specifies a parent group and nests navigation items +weight: # Determines sort order in both the nav tree and in article lists +draft: # If true, will not render page on build +product/v2.x/tags: # Tags specific to each version (replace product and .x" with the appropriate product and minor version ) +related: # Creates links to specific internal and external content at the bottom of the page + - /path/to/related/article + - https://external-link.com, This is an external link +external_url: # Used in children shortcode type="list" for page links that are external +list_image: # Image included with article descriptions in children type="articles" shortcode +list_note: # Used in children shortcode type="list" to add a small note next to listed links +list_code_example: # Code example included with article descriptions in children type="articles" shortcode +list_query_example:# Code examples included with article descriptions in children type="articles" shortcode, + # References to examples in data/query_examples +canonical: # Path to canonical page, overrides auto-gen'd canonical URL +v2: # Path to v2 equivalent page +prepend: # Prepend markdown content to an article (especially powerful with cascade) + block: # (Optional) Wrap content in a block style (note, warn, cloud) + content: # Content to prepend to article +append: # Append markdown content to an article (especially powerful with cascade) + block: # (Optional) Wrap content in a block style (note, warn, cloud) + content: # Content to append to article +metadata: [] # List of metadata messages to include under the page h1 
+updated_in: # Product and version the referenced feature was updated in (displayed as a unique metadata) +source: # Specify a file to pull page content from (typically in /content/shared/) +``` + +### Title usage + +##### `title` + +The `title` frontmatter populates each page's HTML `h1` heading tag. +It shouldn't be overly long, but should set the context for users coming from outside sources. + +##### `seotitle` + +The `seotitle` frontmatter populates each page's HTML `title` attribute. +Search engines use this in search results (not the page's h1) and therefore it should be keyword optimized. + +##### `list_title` + +The `list_title` frontmatter determines an article title when in a list generated +by the [`{{< children >}}` shortcode](#generate-a-list-of-children-articles). + +##### `menu > name` + +The `name` attribute under the `menu` frontmatter determines the text used in each page's link in the site navigation. +It should be short and assume the context of its parent if it has one. + +#### Page Weights + +To ensure pages are sorted both by weight and their depth in the directory +structure, pages should be weighted in "levels." +All top level pages are weighted 1-99. +The next level is 101-199. +Then 201-299 and so on. + +_**Note:** `_index.md` files should be weighted one level up from the other `.md` files in the same directory._ + +### Related content + +Use the `related` frontmatter to include links to specific articles at the bottom of an article. + +- If the page exists inside of this documentation, just include the path to the page. + It will automatically detect the title of the page. +- If the page exists inside of this documentation, but you want to customize the link text, + include the path to the page followed by a comma, and then the custom link text. + The path and custom text must be in that order and separated by a comma and a space. +- If the page exists outside of this documentation, include the full URL and a title for the link. 
+ The link and title must be in that order and separated by a comma and a space. + +```yaml +related: + - /v2.0/write-data/quick-start + - /v2.0/write-data/quick-start, This is custom text for an internal link + - https://influxdata.com, This is an external link +``` + +### Canonical URLs + +Search engines use canonical URLs to accurately rank pages with similar or identical content. +The `canonical` HTML meta tag identifies which page should be used as the source of truth. + +By default, canonical URLs are automatically generated for each page in the InfluxData +documentation using the latest version of the current product and the current path. + +Use the `canonical` frontmatter to override the auto-generated canonical URL. + +_**Note:** The `canonical` frontmatter supports the [`{{< latest >}}` shortcode](#latest-links)._ + +```yaml +canonical: /path/to/canonical/doc/ + +# OR + +canonical: /{{< latest "influxdb" "v2" >}}/path/to/canonical/doc/ +``` + +### v2 equivalent documentation + +To display a notice on a 1.x page that links to an equivalent 2.0 page, +add the following frontmatter to the 1.x page: + +```yaml +v2: /influxdb/v2.0/get-started/ +``` + +### Prepend and append content to a page + +Use the `prepend` and `append` frontmatter to add content to the top or bottom of a page. +Each has the following fields: + +```yaml +append: | + > [!Note] + > #### This is example markdown content + > This is just an example note block that gets appended to the article. +``` + +Use this frontmatter with [cascade](#cascade) to add the same content to +all children pages as well. + +```yaml +cascade: + append: | + > [!Note] + > #### This is example markdown content + > This is just an example note block that gets appended to the article. +``` + +### Cascade + +To automatically apply frontmatter to a page and all of its children, use the +[`cascade` frontmatter](https://gohugo.io/content-management/front-matter/#front-matter-cascade) +built in into Hugo. 
+ +```yaml +title: Example page +description: Example description +cascade: + layout: custom-layout +``` + +`cascade` applies the frontmatter to all children unless the child already includes +those frontmatter keys. Frontmatter defined on the page overrides frontmatter +"cascaded" from a parent. + +## Use shared content in a page + +Use the `source` frontmatter to specify a shared file to use to populate the +page content. Shared files are typically stored in the `/content/shared` directory. + +When building shared content, use the `show-in` and `hide-in` shortcodes to show +or hide blocks of content based on the current InfluxDB product/version. +For more information, see [show-in](#show-in) and [hide-in](#hide-in). + +## Shortcodes + +### Notes and warnings + +Shortcodes are available for formatting notes and warnings in each article: + +```md +{{% note %}} +Insert note markdown content here. +{{% /note %}} + +{{% warn %}} +Insert warning markdown content here. +{{% /warn %}} +``` + +### Product data + +Display the full product name and version name for the current page--for example: + +- InfluxDB 3 Core +- InfluxDB 3 Cloud Dedicated + +```md +{{% product-name %}} +``` + +Display the short version name (part of the key used in `products.yml`) from the current page URL--for example: + +- `/influxdb3/core` returns `core` + +```md +{{% product-key %}} +``` + +#### Enterprise name + +The name used to refer to InfluxData's enterprise offering is subject to change. +To facilitate easy updates in the future, use the `enterprise-name` shortcode +when referencing the enterprise product. +This shortcode accepts a `"short"` parameter which uses the "short-name". + +``` +This is content that references {{< enterprise-name >}}. +This is content that references {{< enterprise-name "short" >}}. +``` + +Product names are stored in `data/products.yml`. 
+ +#### Enterprise link + +References to InfluxDB Enterprise are often accompanied with a link to a page where +visitors can get more information about the Enterprise offering. +This link is subject to change. +Use the `enterprise-link` shortcode when including links to more information about +InfluxDB Enterprise. + +``` +Find more info [here][{{< enterprise-link >}}] +``` + +### Latest patch version + +Use the `{{< latest-patch >}}` shortcode to add the latest patch version of a product. +By default, this shortcode parses the product and minor version from the URL. +To specify a specific product and minor version, use the `product` and `version` arguments. +Easier to maintain being you update the version number in the `data/products.yml` file instead of updating individual links and code examples. + +```md +{{< latest-patch >}} + +{{< latest-patch product="telegraf" >}} + +{{< latest-patch product="chronograf" version="1.7" >}} +``` + +### Latest influx CLI version + +Use the `{{< latest-patch cli=true >}}` shortcode to add the latest version of the `influx` +CLI supported by the minor version of InfluxDB. +By default, this shortcode parses the minor version from the URL. +To specify a specific minor version, use the `version` argument. +Maintain CLI version numbers in the `data/products.yml` file instead of updating individual links and code examples. + +```md +{{< latest-patch cli=true >}} + +{{< latest-cli version="2.1" >}} +``` + +### API endpoint + +Use the `{{< api-endpoint >}}` shortcode to generate a code block that contains +a colored request method, a specified API endpoint, and an optional link to +the API reference documentation. 
+Provide the following arguments: + +- **method**: HTTP request method (get, post, patch, put, or delete) +- **endpoint**: API endpoint +- **api-ref**: Link the endpoint to a specific place in the API documentation +- **influxdb_host**: Specify which InfluxDB product host to use + _if the `endpoint` contains the `influxdb/host` shortcode_. + Uses the current InfluxDB product as default. + Supports the following product values: + + - oss + - cloud + - serverless + - dedicated + - clustered + +```md +{{< api-endpoint method="get" endpoint="/api/v2/tasks" api-ref="/influxdb/cloud/api/#operation/GetTasks">}} +``` + +```md +{{< api-endpoint method="get" endpoint="{{< influxdb/host >}}/api/v2/tasks" influxdb_host="cloud">}} +``` + +### Tabbed Content + +To create "tabbed" content (content that is changed by a users' selection), use the following three shortcodes in combination: + +`{{< tabs-wrapper >}}` +This shortcode creates a wrapper or container for the tabbed content. +All UI interactions are limited to the scope of each container. +If you have more than one "group" of tabbed content in a page, each needs its own `tabs-wrapper`. +This shortcode must be closed with `{{< /tabs-wrapper >}}`. + +**Note**: The `<` and `>` characters used in this shortcode indicate that the contents should be processed as HTML. + +`{{% tabs %}}` +This shortcode creates a container for buttons that control the display of tabbed content. +It should contain simple markdown links with anonymous anchors (`#`). +The link text is used as the button text. +This shortcode must be closed with `{{% /tabs %}}`. + +**Note**: The `%` characters used in this shortcode indicate that the contents should be processed as Markdown. + +The `{{% tabs %}}` shortcode has an optional `style` argument that lets you +assign CSS classes to the tags HTML container. The following classes are available: + +- **small**: Tab buttons are smaller and don't scale to fit the width. 
+- **even-wrap**: Prevents uneven tab widths when tabs are forced to wrap. + +`{{% tab-content %}}` +This shortcode creates a container for a content block. +Each content block in the tab group needs to be wrapped in this shortcode. +**The number of `tab-content` blocks must match the number of links provided in the `tabs` shortcode** +This shortcode must be closed with `{{% /tab-content %}}`. + +**Note**: The `%` characters used in this shortcode indicate that the contents should be processed as Markdown. + +#### Example tabbed content group + +```md +{{< tabs-wrapper >}} + +{{% tabs %}} +[Button text for tab 1](#) +[Button text for tab 2](#) +{{% /tabs %}} + +{{% tab-content %}} +Markdown content for tab 1. +{{% /tab-content %}} + +{{% tab-content %}} +Markdown content for tab 2. +{{% /tab-content %}} + +{{< /tabs-wrapper >}} +``` + +#### Tabbed code blocks + +Shortcodes are also available for tabbed code blocks primarily used to give users +the option to choose between different languages and syntax. +The shortcode structure is the same as above, but the shortcode names are different: + +`{{< code-tabs-wrapper >}}` +`{{% code-tabs %}}` +`{{% code-tab-content %}}` + +````md +{{< code-tabs-wrapper >}} + +{{% code-tabs %}} +[Flux](#) +[InfluxQL](#) +{{% /code-tabs %}} + +{{% code-tab-content %}} + +```js +data = from(bucket: "example-bucket") + |> range(start: -15m) + |> filter(fn: (r) => + r._measurement == "mem" and + r._field == "used_percent" + ) +``` + +{{% /code-tab-content %}} + +{{% code-tab-content %}} + +```sql +SELECT "used_percent" +FROM "telegraf"."autogen"."mem" +WHERE time > now() - 15m +``` + +{{% /code-tab-content %}} + +{{< /code-tabs-wrapper >}} +```` + +#### Link to tabbed content + +To link to tabbed content, click on the tab and use the URL parameter shown. +It will have the form `?t=`, plus a string. 
+For example: + +``` +[Windows installation](/influxdb/v2.0/install/?t=Windows) +``` + +### Required elements + +Use the `{{< req >}}` shortcode to identify required elements in documentation with +orange text and/or asterisks. By default, the shortcode outputs the text, "Required," but +you can customize the text by passing a string argument with the shortcode. + +```md +{{< req >}} +``` + +**Output:** Required + +```md +{{< req "This is Required" >}} +``` + +**Output:** This is required + +If using other named arguments like `key` or `color`, use the `text` argument to +customize the text of the required message. + +```md +{{< req text="Required if ..." color="blue" type="key" >}} +``` + +#### Required elements in a list + +When identifying required elements in a list, use `{{< req type="key" >}}` to generate +a "\* Required" key before the list. For required elements in the list, include +{{< req "\*" >}} before the text of the list item. For example: + +```md +{{< req type="key" >}} + +- {{< req "\*" >}} **This element is required** +- {{< req "\*" >}} **This element is also required** +- **This element is NOT required** +``` + +#### Change color of required text + +Use the `color` argument to change the color of required text. +The following colors are available: + +- blue +- green +- magenta + +```md +{{< req color="magenta" text="This is required" >}} +``` + +### Page navigation buttons + +Use the `{{< page-nav >}}` shortcode to add page navigation buttons to a page. +These are useful for guiding users through a set of docs that should be read in sequential order. 
+The shortcode has the following parameters: + +- **prev:** path of the previous document _(optional)_ +- **next:** path of the next document _(optional)_ +- **prevText:** override the button text linking to the previous document _(optional)_ +- **nextText:** override the button text linking to the next document _(optional)_ +- **keepTab:** include the currently selected tab in the button link _(optional)_ + +The shortcode generates buttons that link to both the previous and next documents. +By default, the shortcode uses either the `list_title` or the `title` of the linked +document, but you can use `prevText` and `nextText` to override button text. + +```md + + +{{ page-nav prev="/path/to/prev/" next="/path/to/next" >}} + + + +{{ page-nav prev="/path/to/prev/" prevText="Previous" next="/path/to/next" nextText="Next" >}} + + + +{{ page-nav prev="/path/to/prev/" next="/path/to/next" keepTab=true>}} +``` + +### Keybinds + +Use the `{{< keybind >}}` shortcode to include OS-specific keybindings/hotkeys. +The following parameters are available: + +- mac +- linux +- win +- all +- other + +```md + + +{{< keybind mac="⇧⌘P" other="Ctrl+Shift+P" >}} + + + +{{< keybind all="Ctrl+Shift+P" >}} + + + +{{< keybind mac="⇧⌘P" linux="Ctrl+Shift+P" win="Ctrl+Shift+Alt+P" >}} +``` + +### Diagrams + +Use the `{{< diagram >}}` shortcode to dynamically build diagrams. +The shortcode uses [mermaid.js](https://github.com/mermaid-js/mermaid) to convert +simple text into SVG diagrams. +For information about the syntax, see the [mermaid.js documentation](https://mermaid-js.github.io/mermaid/#/). + +```md +{{< diagram >}} +flowchart TB +This --> That +That --> There +{{< /diagram >}} +``` + +### File system diagrams + +Use the `{{< filesystem-diagram >}}` shortcode to create a styled file system +diagram using a Markdown unordered list. 
+
+##### Example filesystem diagram shortcode
+
+```md
+{{< filesystem-diagram >}}
+
+- Dir1/
+- Dir2/
+  - ChildDir/
+    - Child
+  - Child
+- Dir3/
+  {{< /filesystem-diagram >}}
+```
+
+### High-resolution images
+
+In many cases, screenshots included in the docs are taken from high-resolution (retina) screens.
+Because of this, the actual pixel dimension is 2x larger than it needs to be and is rendered 2x bigger than it should be.
+The following shortcode automatically sets a fixed width on the image using half of its actual pixel dimension.
+This preserves the detail of the image and renders it at a size where there should be little to no "blur"
+caused by browser image resizing.
+
+```html
+{{< img-hd src="/path/to/image" alt="Alternate title" />}}
+```
+
+###### Notes
+
+- This should only be used on screenshots taken from high-resolution screens.
+- The `src` should be relative to the `static` directory.
+- Image widths are limited to the width of the article content container and will scale accordingly,
+  even with the `width` explicitly set.
+
+### Truncated content blocks
+
+In some cases, it may be appropriate to shorten or truncate blocks of content.
+Use cases include long examples of output data or tall images.
+The following shortcode truncates blocks of content and allows users to opt into
+seeing the full content block.
+
+```md
+{{% truncate %}}
+Truncated markdown content here.
+{{% /truncate %}}
+```
+
+### Expandable accordion content blocks
+
+Use the `{{% expand "Item label" %}}` shortcode to create expandable, accordion-style content blocks.
+Each expandable block needs a label that users can click to expand or collapse the content block.
+Pass the label as a string to the shortcode.
+
+```md
+{{% expand "Label 1" %}}
+Markdown content associated with label 1.
+{{% /expand %}}
+
+{{% expand "Label 2" %}}
+Markdown content associated with label 2.
+{{% /expand %}}
+
+{{% expand "Label 3" %}}
+Markdown content associated with label 3.
+{{% /expand %}} +``` + +Use the optional `{{< expand-wrapper >}}` shortcode around a group of `{{% expand %}}` +shortcodes to ensure proper spacing around the expandable elements: + +```md +{{< expand-wrapper >}} +{{% expand "Label 1" %}} +Markdown content associated with label 1. +{{% /expand %}} + +{{% expand "Label 2" %}} +Markdown content associated with label 2. +{{% /expand %}} +{{< /expand-wrapper >}} +``` + +### Captions + +Use the `{{% caption %}}` shortcode to add captions to images and code blocks. +Captions are styled with a smaller font size, italic text, slight transparency, +and appear directly under the previous image or code block. + +```md +{{% caption %}} +Markdown content for the caption. +{{% /caption %}} +``` + +### Generate a list of children articles + +Section landing pages often contain just a list of articles with links and descriptions for each. +This can be cumbersome to maintain as content is added. +To automate the listing of articles in a section, use the `{{< children >}}` shortcode. + +```md +{{< children >}} +``` + +The children shortcode can also be used to list only "section" articles (those with their own children), +or only "page" articles (those with no children) using the `show` argument: + +```md +{{< children show="sections" >}} + + + +{{< children show="pages" >}} +``` + +_By default, it displays both sections and pages._ + +Use the `type` argument to specify the format of the children list. + +```md +{{< children type="functions" >}} +``` + +The following list types are available: + +- **articles:** lists article titles as headers with the description or summary + of the article as a paragraph. Article headers link to the articles. +- **list:** lists children article links in an unordered list. +- **anchored-list:** lists anchored children article links in an unordered list + meant to act as a page navigation and link to children header. +- **functions:** a special use-case designed for listing Flux functions. 
+ +#### Include a "Read more" link + +To include a "Read more" link with each child summary, set `readmore=true`. +_Only the `articles` list type supports "Read more" links._ + +```md +{{< children readmore=true >}} +``` + +#### Include a horizontal rule + +To include a horizontal rule after each child summary, set `hr=true`. +_Only the `articles` list type supports horizontal rules._ + +```md +{{< children hr=true >}} +``` + +#### Include a code example with a child summary + +Use the `list_code_example` frontmatter to provide a code example with an article +in an articles list. + +````yaml +list_code_example: | + ```sh + This is a code example + ``` +```` + +#### Organize and include native code examples + +To include text from a file in `/shared/text/`, use the +`{{< get-shared-text >}}` shortcode and provide the relative path and filename. + +This is useful for maintaining and referencing sample code variants in their +native file formats. + +1. Store code examples in their native formats at `/shared/text/`. + +```md +/shared/text/example1/example.js +/shared/text/example1/example.py +``` + +2. Include the files--for example, in code tabs: + + ````md + {{% code-tabs-wrapper %}} + {{% code-tabs %}} + [Javascript](#js) + [Python](#py) + {{% /code-tabs %}} + {{% code-tab-content %}} + + ```js + {{< get-shared-text "example1/example.js" >}} + ``` + + {{% /code-tab-content %}} + {{% code-tab-content %}} + + ```py + {{< get-shared-text "example1/example.py" >}} + ``` + + {{% /code-tab-content %}} + {{% /code-tabs-wrapper %}} + ```` + +#### Include specific files from the same directory + +To include the text from one file in another file in the same +directory, use the `{{< get-leaf-text >}}` shortcode. +The directory that contains both files must be a +Hugo [_Leaf Bundle_](https://gohugo.io/content-management/page-bundles/#leaf-bundles), +a directory that doesn't have any child directories. + +In the following example, `api` is a leaf bundle. `content` isn't. 
+ +```md +content +| +|--- api +| query.pdmc +| query.sh +| \_index.md +``` + +##### query.pdmc + +```md +# Query examples +``` + +##### query.sh + +```md +curl https://localhost:8086/query +``` + +To include `query.sh` and `query.pdmc` in `api/_index.md`, use the following code: + +````md +{{< get-leaf-text "query.pdmc" >}} + +# Curl example + +```sh +{{< get-leaf-text "query.sh" >}} +``` +```` + +Avoid using the following file extensions when naming included text files since Hugo interprets these as markup languages: +`.ad`, `.adoc`, `.asciidoc`, `.htm`, `.html`, `.markdown`, `.md`, `.mdown`, `.mmark`, `.pandoc`, `.pdc`, `.org`, or `.rst`. + +#### Reference a query example in children + +To include a query example with the children in your list, update `data/query_examples.yml` +with the example code, input, and output, and use the `list_query_example` +frontmatter to reference the corresponding example. + +```yaml +list_query_example: cumulative_sum +``` + +#### Children frontmatter + +Each children list `type` uses [frontmatter properties](#page-frontmatter) when generating the list of articles. +The following table shows which children types use which frontmatter properties: + +| Frontmatter | articles | list | functions | +| :------------------- | :------: | :--: | :-------: | +| `list_title` | ✓ | ✓ | ✓ | +| `description` | ✓ | | | +| `external_url` | ✓ | ✓ | | +| `list_image` | ✓ | | | +| `list_note` | | ✓ | | +| `list_code_example` | ✓ | | | +| `list_query_example` | ✓ | | | + +### Authentication token link + +Use the `{{% token-link "" "%}}` shortcode to +automatically generate links to token management documentation. The shortcode +accepts two _optional_ arguments: + +- **descriptor**: An optional token descriptor +- **link_append**: An optional path to append to the token management link path, + `///admin/tokens/`. 
+ +```md +{{% token-link "database" "resource/" }} + + +[database token](/influxdb3/enterprise/admin/tokens/resource/) +``` + +InfluxDB 3 Enterprise and InfluxDB 3 Core support different kinds of tokens. +The shortcode has a blacklist of token descriptors for each that will prevent +unsupported descriptors from appearing in the rendered output based on the +current product. + +### Inline icons + +The `icon` shortcode allows you to inject icons in paragraph text. +It's meant to clarify references to specific elements in the InfluxDB user interface. +This shortcode supports Clockface (the UI) v2 and v3. +Specify the version to use as the second argument. The default version is `v3`. + +``` +{{< icon "icon-name" "v2" >}} +``` + +Below is a list of available icons (some are aliases): + +- add-cell +- add-label +- alert +- calendar +- chat +- checkmark +- clone +- cloud +- cog +- config +- copy +- dashboard +- dashboards +- data-explorer +- delete +- download +- duplicate +- edit +- expand +- export +- eye +- eye-closed +- eye-open +- feedback +- fullscreen +- gear +- graph +- hide +- influx +- influx-icon +- nav-admin +- nav-config +- nav-configuration +- nav-dashboards +- nav-data-explorer +- nav-organizations +- nav-orgs +- nav-tasks +- note +- notebook +- notebooks +- org +- orgs +- pause +- pencil +- play +- plus +- refresh +- remove +- replay +- save-as +- search +- settings +- tasks +- toggle +- trash +- trashcan +- triangle +- view +- wrench +- x + +### InfluxDB UI left navigation icons + +In many cases, documentation references an item in the left nav of the InfluxDB UI. +Provide a visual example of the navigation item using the `nav-icon` shortcode. +This shortcode supports Clockface (the UI) v2 and v3. +Specify the version to use as the second argument. The default version is `v3`. 
+ +``` +{{< nav-icon "tasks" "v2" >}} +``` + +The following case insensitive values are supported: + +- admin, influx +- data-explorer, data explorer +- notebooks, books +- dashboards +- tasks +- monitor, alerts, bell +- cloud, usage +- data, load data, load-data +- settings +- feedback + +### Flexbox-formatted content blocks + +CSS Flexbox formatting lets you create columns in article content that adjust and +flow based on the viewable width. +In article content, this helps if you have narrow tables that could be displayed +side-by-side, rather than stacked vertically. +Use the `{{< flex >}}` shortcode to create the Flexbox wrapper. +Use the `{{% flex-content %}}` shortcode to identify each column content block. + +```md +{{< flex >}} +{{% flex-content %}} +Column 1 +{{% /flex-content %}} +{{% flex-content %}} +Column 2 +{{% /flex-content %}} +{{< /flex >}} +``` + +`{{% flex-content %}}` has an optional width argument that determines the maximum +width of the column. + +```md +{{% flex-content "half" %}} +``` + +The following options are available: + +- half _(Default)_ +- third +- quarter + +### Tooltips + +Use the `{{< tooltip >}}` shortcode to add tooltips to text. +The **first** argument is the text shown in the tooltip. +The **second** argument is the highlighted text that triggers the tooltip. + +```md +I like {{< tooltip "Butterflies are awesome!" "butterflies" >}}. +``` + +The rendered output is "I like butterflies" with "butterflies" highlighted. +When you hover over "butterflies," a tooltip appears with the text: "Butterflies are awesome!" + +### Flux sample data tables + +The Flux `sample` package provides basic sample datasets that can be used to +illustrate how Flux functions work. To quickly display one of the raw sample +datasets, use the `{{% flux/sample %}}` shortcode. + +The `flux/sample` shortcode has the following arguments that can be specified +by name or positionally. + +#### set + +Sample dataset to output. 
Use either `set` argument name or provide the set +as the first argument. The following sets are available: + +- float +- int +- uint +- string +- bool +- numericBool + +#### includeNull + +Specify whether or not to include _null_ values in the dataset. +Use either `includeNull` argument name or provide the boolean value as the second argument. + +#### includeRange + +Specify whether or not to include time range columns (`_start` and `_stop`) in the dataset. +This is only recommended when showing how functions that require a time range +(such as `window()`) operate on input data. +Use either `includeRange` argument name or provide the boolean value as the third argument. + +##### Example Flux sample data shortcodes + +```md + + +{{% flux/sample %}} + + + +{{% flux/sample set="string" includeNull=false %}} + + + +{{% flux/sample "int" true %}} + + + + +{{% flux/sample set="int" includeNull=true includeRange=true %}} +{{% flux/sample "int" true true %}} +``` + +### Duplicate OSS content in Cloud + +Docs for InfluxDB OSS and InfluxDB Cloud share a majority of content. +To prevent duplication of content between versions, use the following shortcodes: + +- `{{< duplicate-oss >}}` +- `{{% oss-only %}}` +- `{{% cloud-only %}}` + +#### duplicate-oss + +The `{{< duplicate-oss >}}` shortcode copies the page content of the file located +at the identical file path in the most recent InfluxDB OSS version. +The Cloud version of this markdown file should contain the frontmatter required +for all pages, but the body content should just be the `{{< duplicate-oss >}}` shortcode. + +#### oss-only + +Wrap content that should only appear in the OSS version of the doc with the `{{% oss-only %}}` shortcode. 
+Use the shortcode on both inline and content blocks: + +```md +{{% oss-only %}}This is inline content that only renders in the InfluxDB OSS docs{{% /oss-only %}} + +{{% oss-only %}} + +This is a multi-paragraph content block that spans multiple paragraphs and will +only render in the InfluxDB OSS documentation. + +**Note:** Notice the blank newline after the opening short-code tag. +This is necessary to get the first sentence/paragraph to render correctly. + +{{% /oss-only %}} + +- {{% oss-only %}}This is a list item that will only render in InfluxDB OSS docs.{{% /oss-only %}} +- {{% oss-only %}} + + This is a multi-paragraph list item that will only render in the InfluxDB OSS docs. + + **Note:** Notice shortcode is _inside_ of the line item. + There also must be blank newline after the opening short-code tag. + This is necessary to get the first sentence/paragraph to render correctly. + + {{% /oss-only %}} + +1. Step 1 +2. {{% oss-only %}}This is a list item that will only render in InfluxDB OSS docs.{{% /oss-only %}} +3. {{% oss-only %}} + + This is a list item that contains multiple paragraphs or nested list items and will only render in the InfluxDB OSS docs. + + **Note:** Notice shortcode is _inside_ of the line item. + There also must be blank newline after the opening short-code tag. + This is necessary to get the first sentence/paragraph to render correctly. + + {{% /oss-only %}} +``` + +#### cloud-only + +Wrap content that should only appear in the Cloud version of the doc with the `{{% cloud-only %}}` shortcode. +Use the shortcode on both inline and content blocks: + +```md +{{% cloud-only %}}This is inline content that only renders in the InfluxDB Cloud docs{{% /cloud-only %}} + +{{% cloud-only %}} + +This is a multi-paragraph content block that spans multiple paragraphs and will +only render in the InfluxDB Cloud documentation. + +**Note:** Notice the blank newline after the opening short-code tag. 
+This is necessary to get the first sentence/paragraph to render correctly. + +{{% /cloud-only %}} + +- {{% cloud-only %}}This is a list item that will only render in InfluxDB Cloud docs.{{% /cloud-only %}} +- {{% cloud-only %}} + + This is a list item that contains multiple paragraphs or nested list items and will only render in the InfluxDB Cloud docs. + + **Note:** Notice shortcode is _inside_ of the line item. + There also must be blank newline after the opening short-code tag. + This is necessary to get the first sentence/paragraph to render correctly. + + {{% /cloud-only %}} + +1. Step 1 +2. {{% cloud-only %}}This is a list item that will only render in InfluxDB Cloud docs.{{% /cloud-only %}} +3. {{% cloud-only %}} + + This is a multi-paragraph list item that will only render in the InfluxDB Cloud docs. + + **Note:** Notice shortcode is _inside_ of the line item. + There also must be blank newline after the opening short-code tag. + This is necessary to get the first sentence/paragraph to render correctly. + + {{% /cloud-only %}} +``` + +### Show or hide content blocks in shared content + +The `source` frontmatter lets you source page content from another file and is +used to share content across InfluxDB products. Within the shared content, you +can use the `show-in` and `hide-in` shortcodes to conditionally show or hide +content blocks based on the InfluxDB "version." Valid "versions" include: + +- v2 +- cloud +- cloud-serverless +- cloud-dedicated +- clustered +- core +- enterprise + +#### show-in + +The `show-in` shortcode accepts a comma-delimited string of InfluxDB "versions" +to show the content block in. The version is the second level of the page +path--for example: `/influxdb//...`. + +```md +{{% show-in "core,enterprise" %}} + +This content will appear in pages in the InfluxDB 3 Core and InfluxDB 3 Enterprise +documentation, but not any other InfluxDB documentation this content is shared in. 
+ +{{% /show-in %}} +``` + +#### hide-in + +The `hide-in` shortcode accepts a comma-delimited string of InfluxDB "versions" +to hide the content block in. The version is the second level of the page +path--for example: `/influxdb//...`. + +```md +{{% hide-in "core,enterprise" %}} + +This content will not appear in pages in the InfluxDB 3 Core and InfluxDB 3 +Enterprise documentation, but will in all other InfluxDB documentation this +content is shared in. + +{{% /hide-in %}} +``` + +### All-Caps + +Clockface v3 introduces many buttons with text formatted as all-caps. +Use the `{{< caps >}}` shortcode to format text to match those buttons. + +```md +Click {{< caps >}}Add Data{{< /caps >}} +``` + +### Code callouts + +Use the `{{< code-callout >}}` shortcode to highlight and emphasize a specific +piece of code (for example, a variable, placeholder, or value) in a code block. +Provide the string to highlight in the code block. +Include a syntax for the codeblock to properly style the called out code. + +````md +{{< code-callout "03a2bbf46249a000" >}} + +```sh +http://localhost:8086/orgs/03a2bbf46249a000/... +``` + +{{< /code-callout >}} +```` + +### InfluxDB University banners + +Use the `{{< influxdbu >}}` shortcode to add an InfluxDB University banner that +points to the InfluxDB University site or a specific course. +Use the default banner template, a predefined course template, or fully customize +the content of the banner. + +```html + +{{< influxdbu >}} + + +{{< influxdbu "influxdb-101" >}} + + +{{< influxdbu title="Course title" summary="Short course summary." 
action="Take +the course" link="https://university.influxdata.com/" >}} +``` + +#### Course templates + +Use one of the following course templates: + +- influxdb-101 +- telegraf-102 +- flux-103 + +#### Custom banner content + +Use the following shortcode parameters to customize the content of the InfluxDB +University banner: + +- **title**: Course or banner title +- **summary**: Short description shown under the title +- **action**: Text of the button +- **link**: URL the button links to + +### Reference content + +The InfluxDB documentation is "task-based," meaning content primarily focuses on +what a user is **doing**, not what they are **using**. +However, there is a need to document tools and other things that don't necessarily +fit in the task-based style. +This is referred to as "reference content." + +Reference content is styled just as the rest of the InfluxDB documentation. +The only difference is the `menu` reference in the page's frontmatter. +When defining the menu for reference content, use the following pattern: + +```yaml +# Pattern +menu: + ___ref: + # ... + +# Example +menu: + influxdb_2_0_ref: + # ... +``` + +## InfluxDB URLs + +When a user selects an InfluxDB product and region, example URLs in code blocks +throughout the documentation are updated to match their product and region. +InfluxDB URLs are configured in `/data/influxdb_urls.yml`. + +By default, the InfluxDB URL replaced inside of code blocks is `http://localhost:8086`. +Use this URL in all code examples that should be updated with a selected provider and region. + +For example: + +```` +```sh +# This URL will get updated +http://localhost:8086 + +# This URL will NOT get updated +http://example.com +``` +```` + +If the user selects the **US West (Oregon)** region, all occurrences of `http://localhost:8086` +in code blocks will get updated to `https://us-west-2-1.aws.cloud2.influxdata.com`. 
+ +### Exempt URLs from getting updated + +To exempt a code block from being updated, include the `{{< keep-url >}}` shortcode +just before the code block. + +```` +{{< keep-url >}} +``` +// This URL won't get updated +http://localhost:8086 +``` +```` + +### Code examples only supported in InfluxDB Cloud + +Some functionality is only supported in InfluxDB Cloud and code examples should +only use InfluxDB Cloud URLs. In these cases, use `https://cloud2.influxdata.com` +as the placeholder in the code block. It will get updated on page load and when +users select a Cloud region in the URL select modal. + +```` +```sh +# This URL will get updated +https://cloud2.influxdata.com +``` +```` + +### Automatically populate InfluxDB host placeholder + +The InfluxDB host placeholder that gets replaced by custom domains differs +between each InfluxDB product/version. +Use the `influxdb/host` shortcode to automatically render the correct +host placeholder value for the current product. You can also pass a single +argument to specify a specific InfluxDB product to use. +Supported argument values: + +- oss +- cloud +- cloud-serverless +- cloud-dedicated +- clustered +- core +- enterprise + +``` +{{< influxdb/host >}} + +{{< influxdb/host "serverless" >}} +``` + +### User-populated placeholders + +Use the `code-placeholders` shortcode to format placeholders +as text fields that users can populate with their own values. +The shortcode takes a regular expression for matching placeholder names. 
+Use the `code-placeholder-key` shortcode to format the placeholder names in
+text that describes the placeholder--for example:
+
+````
+{{% code-placeholders "DATABASE_NAME|USERNAME|PASSWORD_OR_TOKEN|API_TOKEN|exampleuser@influxdata.com" %}}
+```sh
+curl --request POST http://localhost:8086/write?db=DATABASE_NAME \
+  --header "Authorization: Token API_TOKEN" \
+  --data-binary @path/to/line-protocol.txt
+```
+{{% /code-placeholders %}}
+
+Replace the following:
+
+- {{% code-placeholder-key %}}`DATABASE_NAME` and `RETENTION_POLICY`{{% /code-placeholder-key %}}: the [database and retention policy mapping (DBRP)](/influxdb/v2/reference/api/influxdb-1x/dbrp/) for the InfluxDB v2 bucket that you want to write to
+- {{% code-placeholder-key %}}`USERNAME`{{% /code-placeholder-key %}}: your [InfluxDB 1.x username](/influxdb/v2/reference/api/influxdb-1x/#manage-credentials)
+- {{% code-placeholder-key %}}`PASSWORD_OR_TOKEN`{{% /code-placeholder-key %}}: your [InfluxDB 1.x password or InfluxDB API token](/influxdb/v2/reference/api/influxdb-1x/#manage-credentials)
+- {{% code-placeholder-key %}}`API_TOKEN`{{% /code-placeholder-key %}}: your [InfluxDB API token](/influxdb/v2/admin/tokens/)
+````
+
+## InfluxDB API documentation
+
+InfluxData uses [Redoc](https://github.com/Redocly/redoc) to generate the full
+InfluxDB API documentation when documentation is deployed.
+Redoc generates HTML documentation using the InfluxDB `swagger.yml`.
+For more information about generating InfluxDB API documentation, see the
+[API Documentation README](https://github.com/influxdata/docs-v2/tree/master/api-docs#readme).
diff --git a/.github/instructions/influxdb3-code-placeholders.instructions.md b/.github/instructions/influxdb3-code-placeholders.instructions.md
new file mode 100644
index 000000000..583ef705a
--- /dev/null
+++ b/.github/instructions/influxdb3-code-placeholders.instructions.md
@@ -0,0 +1,89 @@
+---
+mode: 'edit'
+applyTo: "content/{influxdb3/core,influxdb3/enterprise,shared/influxdb3*}/**"
+---
+## Best Practices
+
+- Use UPPERCASE for placeholders to make them easily identifiable
+- Don't use pronouns in placeholders (e.g., "your", "this")
+- List placeholders in the same order they appear in the code
+- Provide clear descriptions including:
+  - Expected data type or format
+  - Purpose of the value
+  - Any constraints or requirements
+- Mark optional placeholders as "Optional:" in their descriptions
+- Placeholder key descriptions should fit the context of the code snippet
+- Include examples for complex formats
+
+## Writing Placeholder Descriptions
+
+Descriptions should follow consistent patterns:
+
+1. **Admin Authentication tokens**:
+   - Recommended: "a {{% token-link "admin" %}} for your {{< product-name >}} instance"
+   - Avoid: "your token", "the token", "an authorization token"
+2. **Database resource tokens**:
+   - Recommended: "your {{% token-link "database" %}}"{{% show-in "enterprise" %}} with permissions on the specified database{{% /show-in %}}
+   - Avoid: "your token", "the token", "an authorization token"
+3. **Database names**:
+   - Recommended: "the name of the database to [action]"
+   - Avoid: "your database", "the database name"
+4. 
**Conditional content**: + - Use `{{% show-in "enterprise" %}}` for content specific to enterprise versions + - Example: "your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}}" + +## Common placeholders for InfluxDB 3 + +- `AUTH_TOKEN`: your {{% token-link %}} +- `DATABASE_NAME`: the database to use +- `TABLE_NAME`: Name of the table/measurement to query or write to +- `NODE_ID`: Node ID for a specific node in a cluster +- `CLUSTER_ID`: Cluster ID for a specific cluster +- `HOST`: InfluxDB server hostname or URL +- `PORT`: InfluxDB server port (typically 8181) +- `QUERY`: SQL or InfluxQL query string +- `LINE_PROTOCOL`: Line protocol data for writes +- `PLUGIN_FILENAME`: Name of plugin file to use +- `CACHE_NAME`: Name for a new or existing cache + +## Hugo shortcodes in Markdown + +- `{{% code-placeholders "PLACEHOLDER1|PLACEHOLDER2" %}}`: Use this shortcode to define placeholders in code snippets. +- `{{% /code-placeholders %}}`: End the shortcode. +- `{{% code-placeholder-key %}}`: Use this shortcode to define a specific placeholder key. +- `{{% /code-placeholder-key %}}`: End the specific placeholder key shortcode. 
+
+## Language-Specific Placeholder Formatting
+
+- **Bash/Shell**: Use uppercase variables with no quotes or prefix
+  ```bash
+  --database DATABASE_NAME
+  ```
+- **Python**: Use string literals with quotes
+  ```python
+  database_name='DATABASE_NAME'
+  ```
+- **JSON**: Use key-value pairs with quotes
+  ```json
+  {
+    "database": "DATABASE_NAME"
+  }
+  ```
+
+## Real-World Examples from Documentation
+
+### InfluxDB CLI Commands
+This pattern appears frequently in CLI documentation:
+
+{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
+```bash
+influxdb3 write \
+  --database DATABASE_NAME \
+  --token AUTH_TOKEN \
+  --precision ns
+```
+{{% /code-placeholders %}}
+
+Replace the following placeholders with your values:
+
+{{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to write to
+{{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with write permissions on the specified database{{% /show-in %}}
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index a701b05a8..650f31962 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,7 +15,10 @@ node_modules
 !telegraf-build/templates
 !telegraf-build/scripts
 !telegraf-build/README.md
+/cypress/downloads
 /cypress/screenshots/*
+/cypress/videos/*
+test-results.xml
 /influxdb3cli-build-scripts/content
 .vscode/*
 .idea
diff --git a/.husky/_/serve b/.husky/_/serve
new file mode 100755
index 000000000..df25a7d09
--- /dev/null
+++ b/.husky/_/serve
@@ -0,0 +1,57 @@
+#!/bin/sh
+
+if [ "$LEFTHOOK_VERBOSE" = "1" -o "$LEFTHOOK_VERBOSE" = "true" ]; then
+  set -x
+fi
+
+if [ "$LEFTHOOK" = "0" ]; then
+  exit 0
+fi
+
+call_lefthook()
+{
+  if test -n "$LEFTHOOK_BIN"
+  then
+    "$LEFTHOOK_BIN" "$@"
+  elif lefthook -h >/dev/null 2>&1
+  then
+    lefthook "$@"
+  else
+    dir="$(git rev-parse --show-toplevel)"
+    osArch=$(uname | tr '[:upper:]' '[:lower:]')
+    cpuArch=$(uname -m | sed 's/aarch64/arm64/;s/x86_64/x64/')
+    if test -f 
"$dir/node_modules/lefthook-${osArch}-${cpuArch}/bin/lefthook" + then + "$dir/node_modules/lefthook-${osArch}-${cpuArch}/bin/lefthook" "$@" + elif test -f "$dir/node_modules/@evilmartians/lefthook/bin/lefthook-${osArch}-${cpuArch}/lefthook" + then + "$dir/node_modules/@evilmartians/lefthook/bin/lefthook-${osArch}-${cpuArch}/lefthook" "$@" + elif test -f "$dir/node_modules/@evilmartians/lefthook-installer/bin/lefthook" + then + "$dir/node_modules/@evilmartians/lefthook-installer/bin/lefthook" "$@" + elif test -f "$dir/node_modules/lefthook/bin/index.js" + then + "$dir/node_modules/lefthook/bin/index.js" "$@" + + elif bundle exec lefthook -h >/dev/null 2>&1 + then + bundle exec lefthook "$@" + elif yarn lefthook -h >/dev/null 2>&1 + then + yarn lefthook "$@" + elif pnpm lefthook -h >/dev/null 2>&1 + then + pnpm lefthook "$@" + elif swift package plugin lefthook >/dev/null 2>&1 + then + swift package --disable-sandbox plugin lefthook "$@" + elif command -v mint >/dev/null 2>&1 + then + mint run csjones/lefthook-plugin "$@" + else + echo "Can't find lefthook in PATH" + fi + fi +} + +call_lefthook run "serve" "$@" diff --git a/.nvmrc b/.nvmrc new file mode 100644 index 000000000..945c17819 --- /dev/null +++ b/.nvmrc @@ -0,0 +1 @@ +v23.10.0 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index dad714a79..479578424 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,6 @@ # Contributing to InfluxData Documentation -## Sign the InfluxData CLA +### Sign the InfluxData CLA The InfluxData Contributor License Agreement (CLA) is part of the legal framework for the open source ecosystem that protects both you and InfluxData. @@ -28,8 +28,10 @@ For the linting and tests to run, you need to install Docker and Node.js dependencies. 
\_**Note:** -We strongly recommend running linting and tests, but you can skip them -(and avoid installing dependencies) +The git pre-commit and pre-push hooks are configured to run linting and tests automatically +when you commit or push changes. +We strongly recommend letting them run, but you can skip them +(and avoid installing related dependencies) by including the `--no-verify` flag with your commit--for example, enter the following command in your terminal: ```sh @@ -51,7 +53,7 @@ dev dependencies used in pre-commit hooks for linting, syntax-checking, and test Dev dependencies include: - [Lefthook](https://github.com/evilmartians/lefthook): configures and -manages pre-commit hooks for linting and testing Markdown content. +manages git pre-commit and pre-push hooks for linting and testing Markdown content. - [prettier](https://prettier.io/docs/en/): formats code, including Markdown, according to style rules for consistency - [Cypress]: e2e testing for UI elements and URLs in content @@ -93,9 +95,11 @@ Make your suggested changes being sure to follow the [style and formatting guide ## Lint and test your changes +`package.json` contains scripts for running tests and linting. + ### Automatic pre-commit checks -docs-v2 uses Lefthook to manage Git hooks, such as pre-commit hooks that lint Markdown and test code blocks. +docs-v2 uses Lefthook to manage Git hooks that run during pre-commit and pre-push. The hooks run the scripts defined in `package.json` to lint Markdown and test code blocks. When you try to commit changes (`git commit`), Git runs the commands configured in `lefthook.yml` which pass your **staged** files to Vale, Prettier, Cypress (for UI tests and link-checking), and Pytest (for testing Python and shell code in code blocks). 
diff --git a/api-docs/.config.yml b/api-docs/.config.yml index e337b7689..f075dddec 100644 --- a/api-docs/.config.yml +++ b/api-docs/.config.yml @@ -1,5 +1,5 @@ plugins: - - './../openapi/plugins/docs-plugin.js' + - './../openapi/plugins/docs-plugin.cjs' extends: - recommended - docs/all diff --git a/api-docs/influxdb/cloud/.config.yml b/api-docs/influxdb/cloud/.config.yml index e2c6e000f..25a391d42 100644 --- a/api-docs/influxdb/cloud/.config.yml +++ b/api-docs/influxdb/cloud/.config.yml @@ -1,5 +1,5 @@ plugins: - - '../../openapi/plugins/docs-plugin.js' + - '../../openapi/plugins/docs-plugin.cjs' extends: - recommended - docs/all diff --git a/api-docs/influxdb/v2/.config.yml b/api-docs/influxdb/v2/.config.yml index d17efcdfc..c99715a57 100644 --- a/api-docs/influxdb/v2/.config.yml +++ b/api-docs/influxdb/v2/.config.yml @@ -1,5 +1,5 @@ plugins: - - '../../openapi/plugins/docs-plugin.js' + - '../../openapi/plugins/docs-plugin.cjs' extends: - recommended - docs/all diff --git a/api-docs/influxdb/v2/v2/ref.yml b/api-docs/influxdb/v2/v2/ref.yml index c2f5e9308..547f37265 100644 --- a/api-docs/influxdb/v2/v2/ref.yml +++ b/api-docs/influxdb/v2/v2/ref.yml @@ -218,11 +218,11 @@ tags: |:-----------:|:------------------------ |:--------------------- | | `200` | Success | | | `204` | Success. No content | InfluxDB doesn't return data for the request. | - | `400` | Bad request | May indicate one of the following:
  • Line protocol is malformed. The response body contains the first malformed line in the data and indicates what was expected. For partial writes, the number of points written and the number of points rejected are also included. For more information, check the `rejected_points` measurement in your `_monitoring` bucket.
  • `Authorization` header is missing or malformed or the API token doesn't have permission for the operation.
| + | `400` | Bad request | May indicate one of the following:
  • the request body is malformed
  • `Authorization` header is missing or malformed
  • the API token doesn't have permission for the operation<br/>
| | `401` | Unauthorized | May indicate one of the following:
  • `Authorization: Token` header is missing or malformed
  • API token value is missing from the header
  • API token doesn't have permission. For more information about token types and permissions, see [Manage API tokens](/influxdb/v2/security/tokens/)
| | `404` | Not found | Requested resource was not found. `message` in the response body provides details about the requested resource. | | `413` | Request entity too large | Request payload exceeds the size limit. | - | `422` | Unprocessable entity | Request data is invalid. `code` and `message` in the response body provide details about the problem. | + | `422` | Unprocessable entity | Request data is invalid. The request was well-formed, but couldn't complete due to semantic errors--for example, some or all points in a write request were rejected due to a schema or retention policy violation. The response body provides details about the problem. For more information about rejected points, see how to [Troubleshoot issues writing data](/influxdb/v2/write-data/troubleshoot/)| | `429` | Too many requests | API token is temporarily over the request quota. The `Retry-After` header describes when to try the request again. | | `500` | Internal server error | | | `503` | Service unavailable | Server is temporarily unavailable to process the request. The `Retry-After` header describes when to try the request again. | @@ -12752,6 +12752,12 @@ paths: - Returns this error only if the [Go (golang) `ioutil.ReadAll()`](https://pkg.go.dev/io/ioutil#ReadAll) function raises an error. - Returns `Content-Type: application/json` for this error. + '422': + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + description: The request was well-formed, but some or all the points were rejected due to semantic errors--for example, schema conflicts or retention policy violations. Error message contains details for one or more rejected points. '429': description: | Too many requests. @@ -13190,6 +13196,14 @@ paths: - Legacy Query /write: post: + description: |- + Writes line protocol to the specified bucket. 
+ + This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools. + + Use this endpoint to send data in [line protocol](https://docs.influxdata.com/influxdb/v2/reference/syntax/line-protocol/) format to InfluxDB. + Use query parameters to specify options for writing data. + operationId: PostLegacyWrite parameters: - $ref: '#/components/parameters/TraceSpan' @@ -13263,6 +13277,12 @@ paths: schema: $ref: '#/components/schemas/LineProtocolLengthError' description: Write has been rejected because the payload is too large. Error message returns max size supported. All data in body was rejected and not written. + '422': + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + description: The request was well-formed, but some or all the points were rejected due to semantic errors--for example, schema conflicts or retention policy violations. Error message contains details for one or more rejected points. '429': description: Token is temporarily over quota. The Retry-After header describes when to try the write again. 
headers: diff --git a/api-docs/influxdb3/cloud-dedicated/.config.yml b/api-docs/influxdb3/cloud-dedicated/.config.yml index 9f8eebb6e..11808b821 100644 --- a/api-docs/influxdb3/cloud-dedicated/.config.yml +++ b/api-docs/influxdb3/cloud-dedicated/.config.yml @@ -1,5 +1,5 @@ plugins: - - '../../openapi/plugins/docs-plugin.js' + - '../../openapi/plugins/docs-plugin.cjs' extends: - recommended - docs/all diff --git a/api-docs/influxdb3/cloud-dedicated/management/openapi.yml b/api-docs/influxdb3/cloud-dedicated/management/openapi.yml index 74868b46b..374a9ab72 100644 --- a/api-docs/influxdb3/cloud-dedicated/management/openapi.yml +++ b/api-docs/influxdb3/cloud-dedicated/management/openapi.yml @@ -40,6 +40,7 @@ tags: See how to [create a management token](/influxdb3/cloud-dedicated/admin/tokens/management/). By default, management tokens in InfluxDB 3 are short-lived tokens issued by an OAuth2 identity provider that grant a specific user administrative access to your InfluxDB cluster. However, for automation purposes, you can manually create management tokens that authenticate directly with your InfluxDB cluster and do not require human interaction with your identity provider. + - name: Database tokens description: Manage database read/write tokens for a cluster - name: Databases @@ -397,6 +398,26 @@ paths: post: operationId: CreateClusterDatabase summary: Create a database + description: | + Create a database for a cluster. + + The database name must be unique within the cluster. + + **Default maximum number of columns**: 250 + **Default maximum number of tables**: 500 + + The retention period is specified in nanoseconds. For example, to set a retention period of 1 hour, use `3600000000000`. + + InfluxDB Cloud Dedicated lets you define a [custom partitioning](/influxdb3/cloud-dedicated/admin/custom-partitions/) strategy for each database and table. + A _partition_ is a logical grouping of data stored in [Apache Parquet](https://parquet.apache.org/). 
+ By default, data is partitioned by day, + but, depending on your schema and workload, customizing the partitioning + strategy can improve query performance. + + To use custom partitioning, you define a [partition template](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/). + If a table doesn't have a custom partition template, it inherits the database's template. + The partition template is set at the time of database creation and cannot be changed later. + For more information, see [Custom partitions](/influxdb3/cloud-dedicated/admin/custom-partitions/). tags: - Databases parameters: @@ -779,6 +800,18 @@ paths: post: operationId: CreateClusterDatabaseTable summary: Create a database table + description: | + Create a table. The database must already exist. With InfluxDB Cloud Dedicated, tables and measurements are synonymous. + + Typically, tables are created automatically on write using the measurement name + specified in line protocol written to InfluxDB. + However, to apply a [custom partition template](/influxdb3/cloud-dedicated/admin/custom-partitions/) + to a table, you must manually [create the table with custom partitioning](/influxdb3/cloud-dedicated/admin/tables/#create-a-table-with-custom-partitioning) before you write any data to it. + + Partitioning defaults to `%Y-%m-%d` (daily). + When a partition template is applied to a database, it becomes the default template + for all tables in that database, but can be overridden when creating a + table. tags: - Tables parameters: @@ -1185,6 +1218,14 @@ paths: get: operationId: GetDatabaseToken summary: Get a database token + description: | + Retrieve metadata details for a [database token](/influxdb3/cloud-dedicated/admin/tokens/database/). + + #### Store secure tokens in a secret store + + We recommend storing database tokens in a **secure secret store**. + + Token strings are viewable _only_ on token creation and aren't stored by InfluxDB; you can't recover a lost token. 
tags: - Database tokens parameters: @@ -1299,6 +1340,8 @@ paths: patch: operationId: UpdateDatabaseToken summary: Update a database token + description: | + Update the description and permissions of a [database token](/influxdb3/cloud-dedicated/admin/tokens/database/). tags: - Database tokens parameters: @@ -1317,7 +1360,6 @@ paths: - name: tokenId in: path description: The ID of the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) to update - required: true schema: $ref: '#/components/schemas/UuidV4' requestBody: @@ -1625,9 +1667,9 @@ components: description: | A template for [partitioning](/influxdb3/cloud-dedicated/admin/custom-partitions/) a cluster database. - Each template part is evaluated in sequence, concatinating the final - partition key from the output of each part, delimited by the partition - key delimiter `|`. + Each partition template part is evaluated in sequence. + The outputs from each part are concatenated with the + `|` delimiter to form the final partition key. 
For example, using the partition template below: diff --git a/api-docs/influxdb3/cloud-serverless/.config.yml b/api-docs/influxdb3/cloud-serverless/.config.yml index 684da1b8c..e845948d0 100644 --- a/api-docs/influxdb3/cloud-serverless/.config.yml +++ b/api-docs/influxdb3/cloud-serverless/.config.yml @@ -1,5 +1,5 @@ plugins: - - '../../openapi/plugins/docs-plugin.js' + - '../../openapi/plugins/docs-plugin.cjs' extends: - recommended - docs/all diff --git a/api-docs/influxdb3/clustered/.config.yml b/api-docs/influxdb3/clustered/.config.yml index ca9356a9e..454f39d94 100644 --- a/api-docs/influxdb3/clustered/.config.yml +++ b/api-docs/influxdb3/clustered/.config.yml @@ -1,5 +1,5 @@ plugins: - - '../../openapi/plugins/docs-plugin.js' + - '../../openapi/plugins/docs-plugin.cjs' extends: - recommended - docs/all diff --git a/api-docs/influxdb3/core/.config.yml b/api-docs/influxdb3/core/.config.yml index 1b83bee88..14792e219 100644 --- a/api-docs/influxdb3/core/.config.yml +++ b/api-docs/influxdb3/core/.config.yml @@ -1,5 +1,5 @@ plugins: - - '../../openapi/plugins/docs-plugin.js' + - '../../openapi/plugins/docs-plugin.cjs' extends: - recommended - docs/all diff --git a/api-docs/influxdb3/core/v3/ref.yml b/api-docs/influxdb3/core/v3/ref.yml index 7d1ba6a38..0c320aada 100644 --- a/api-docs/influxdb3/core/v3/ref.yml +++ b/api-docs/influxdb3/core/v3/ref.yml @@ -52,7 +52,7 @@ tags: #### Related guides - [Manage tokens](/influxdb3/core/admin/tokens/) - - [Authentication and authorization](/influxdb3/core/reference/authentication/) + - [Authentication and authorization](/influxdb3/core/reference/internals/authentication/) x-traitTag: true - name: Cache data description: | diff --git a/api-docs/influxdb3/enterprise/.config.yml b/api-docs/influxdb3/enterprise/.config.yml index 5ad79f54f..4b8210b97 100644 --- a/api-docs/influxdb3/enterprise/.config.yml +++ b/api-docs/influxdb3/enterprise/.config.yml @@ -1,5 +1,5 @@ plugins: - - '../../openapi/plugins/docs-plugin.js' + - 
'../../openapi/plugins/docs-plugin.cjs' extends: - recommended - docs/all diff --git a/api-docs/influxdb3/enterprise/v3/ref.yml b/api-docs/influxdb3/enterprise/v3/ref.yml index 28babb0c0..a02ae5865 100644 --- a/api-docs/influxdb3/enterprise/v3/ref.yml +++ b/api-docs/influxdb3/enterprise/v3/ref.yml @@ -52,7 +52,7 @@ tags: #### Related guides - [Manage tokens](/influxdb3/enterprise/admin/tokens/) - - [Authentication and authorization](/influxdb3/enterprise/reference/authentication/) + - [Authentication and authorization](/influxdb3/enterprise/reference/internals/authentication/) x-traitTag: true - name: Cache data description: | @@ -157,7 +157,7 @@ tags: 1. [Create an admin token](#section/Authentication) for the InfluxDB 3 Enterprise API. ```bash - curl -X POST "http://localhost:8181/api/v3/enterprise/configure/token/admin" + curl -X POST "http://localhost:8181/api/v3/configure/token/admin" ``` 2. [Check the status](#section/Server-information) of the InfluxDB server. @@ -1351,15 +1351,13 @@ paths: tags: - Authentication - Token - /api/v3/configure/enterprise/token/admin: + /api/v3/configure/token/admin: post: operationId: PostCreateAdminToken summary: Create admin token description: | Creates an admin token. An admin token is a special type of token that has full access to all resources in the system. - - This endpoint is only available in InfluxDB 3 Enterprise. responses: '201': description: | @@ -1374,14 +1372,12 @@ paths: tags: - Authentication - Token - /api/v3/configure/enterprise/token/admin/regenerate: + /api/v3/configure/token/admin/regenerate: post: operationId: PostRegenerateAdminToken summary: Regenerate admin token description: | Regenerates an admin token and revokes the previous token with the same name. - - This endpoint is only available in InfluxDB 3 Enterprise. 
parameters: [] responses: '201': @@ -1940,8 +1936,6 @@ components: scheme: bearer bearerFormat: JWT description: | - _During Alpha release, an API token is not required._ - A Bearer token for authentication. Provide the scheme and the API token in the `Authorization` header--for example: diff --git a/api-docs/openapi/plugins/decorators/paths/remove-private-paths.js b/api-docs/openapi/plugins/decorators/paths/remove-private-paths.cjs similarity index 100% rename from api-docs/openapi/plugins/decorators/paths/remove-private-paths.js rename to api-docs/openapi/plugins/decorators/paths/remove-private-paths.cjs diff --git a/api-docs/openapi/plugins/decorators/paths/strip-trailing-slash.js b/api-docs/openapi/plugins/decorators/paths/strip-trailing-slash.cjs similarity index 100% rename from api-docs/openapi/plugins/decorators/paths/strip-trailing-slash.js rename to api-docs/openapi/plugins/decorators/paths/strip-trailing-slash.cjs diff --git a/api-docs/openapi/plugins/decorators/paths/strip-version-prefix.js b/api-docs/openapi/plugins/decorators/paths/strip-version-prefix.cjs similarity index 100% rename from api-docs/openapi/plugins/decorators/paths/strip-version-prefix.js rename to api-docs/openapi/plugins/decorators/paths/strip-version-prefix.cjs diff --git a/api-docs/openapi/plugins/decorators/replace-shortcodes.js b/api-docs/openapi/plugins/decorators/replace-shortcodes.cjs similarity index 100% rename from api-docs/openapi/plugins/decorators/replace-shortcodes.js rename to api-docs/openapi/plugins/decorators/replace-shortcodes.cjs diff --git a/api-docs/openapi/plugins/decorators/replace-urls.js b/api-docs/openapi/plugins/decorators/replace-urls.cjs similarity index 100% rename from api-docs/openapi/plugins/decorators/replace-urls.js rename to api-docs/openapi/plugins/decorators/replace-urls.cjs diff --git a/api-docs/openapi/plugins/decorators/servers/delete-servers.js b/api-docs/openapi/plugins/decorators/servers/delete-servers.cjs similarity index 100% rename from 
api-docs/openapi/plugins/decorators/servers/delete-servers.js rename to api-docs/openapi/plugins/decorators/servers/delete-servers.cjs diff --git a/api-docs/openapi/plugins/decorators/servers/set-servers.js b/api-docs/openapi/plugins/decorators/servers/set-servers.cjs similarity index 100% rename from api-docs/openapi/plugins/decorators/servers/set-servers.js rename to api-docs/openapi/plugins/decorators/servers/set-servers.cjs diff --git a/api-docs/openapi/plugins/decorators/set-info.js b/api-docs/openapi/plugins/decorators/set-info.cjs similarity index 100% rename from api-docs/openapi/plugins/decorators/set-info.js rename to api-docs/openapi/plugins/decorators/set-info.cjs diff --git a/api-docs/openapi/plugins/decorators/tags/set-tag-groups.js b/api-docs/openapi/plugins/decorators/tags/set-tag-groups.cjs similarity index 99% rename from api-docs/openapi/plugins/decorators/tags/set-tag-groups.js rename to api-docs/openapi/plugins/decorators/tags/set-tag-groups.cjs index 40dbb6d4b..38a752859 100644 --- a/api-docs/openapi/plugins/decorators/tags/set-tag-groups.js +++ b/api-docs/openapi/plugins/decorators/tags/set-tag-groups.cjs @@ -1,6 +1,6 @@ module.exports = SetTagGroups; -const { collect, getName, sortName, isPresent } = require('../../helpers/content-helper.js') +const { collect, getName, sortName, isPresent } = require('../../helpers/content-helper.cjs') /** * Returns an object that defines handler functions for: * - Operation nodes diff --git a/api-docs/openapi/plugins/decorators/tags/set-tags.js b/api-docs/openapi/plugins/decorators/tags/set-tags.js deleted file mode 100644 index 7369eeea6..000000000 --- a/api-docs/openapi/plugins/decorators/tags/set-tags.js +++ /dev/null @@ -1,25 +0,0 @@ -module.exports = SetTags; - -const { tags } = require('../../../content/content') -/** - * Returns an object that defines handler functions for: - * - DefinitionRoot (the root openapi) node - * The DefinitionRoot handler, executed when - * the parser is leaving the root 
node, - * sets the root `tags` list to the provided `data`. - */ -/** @type {import('@redocly/openapi-cli').OasDecorator} */ -function SetTags() { - const data = tags(); - - return { - DefinitionRoot: { - /** Set tags from custom tags when visitor enters root. */ - enter(root) { - if(data) { - root.tags = data; - } - } - } - } -}; diff --git a/api-docs/openapi/plugins/docs-content.js b/api-docs/openapi/plugins/docs-content.cjs similarity index 92% rename from api-docs/openapi/plugins/docs-content.js rename to api-docs/openapi/plugins/docs-content.cjs index 975b2ad6b..289ee6215 100644 --- a/api-docs/openapi/plugins/docs-content.js +++ b/api-docs/openapi/plugins/docs-content.cjs @@ -1,5 +1,5 @@ const path = require('path'); -const { toJSON } = require('./helpers/content-helper'); +const { toJSON } = require('./helpers/content-helper.cjs'); function getVersioned(filename) { const apiDocsRoot=path.resolve(process.env.API_DOCS_ROOT_PATH || process.cwd()); diff --git a/api-docs/openapi/plugins/docs-plugin.js b/api-docs/openapi/plugins/docs-plugin.cjs similarity index 80% rename from api-docs/openapi/plugins/docs-plugin.js rename to api-docs/openapi/plugins/docs-plugin.cjs index ebeacd7ac..1fba52bfc 100644 --- a/api-docs/openapi/plugins/docs-plugin.js +++ b/api-docs/openapi/plugins/docs-plugin.cjs @@ -1,14 +1,14 @@ -const {info, servers, tagGroups} = require('./docs-content'); -const ReportTags = require('./rules/report-tags'); -const ValidateServersUrl = require('./rules/validate-servers-url'); -const RemovePrivatePaths = require('./decorators/paths/remove-private-paths'); -const ReplaceShortcodes = require('./decorators/replace-shortcodes'); -const SetInfo = require('./decorators/set-info'); -const DeleteServers = require('./decorators/servers/delete-servers'); -const SetServers = require('./decorators/servers/set-servers'); -const SetTagGroups = require('./decorators/tags/set-tag-groups'); -const StripVersionPrefix = require('./decorators/paths/strip-version-prefix'); 
-const StripTrailingSlash = require('./decorators/paths/strip-trailing-slash'); +const {info, servers, tagGroups} = require('./docs-content.cjs'); +const ReportTags = require('./rules/report-tags.cjs'); +const ValidateServersUrl = require('./rules/validate-servers-url.cjs'); +const RemovePrivatePaths = require('./decorators/paths/remove-private-paths.cjs'); +const ReplaceShortcodes = require('./decorators/replace-shortcodes.cjs'); +const SetInfo = require('./decorators/set-info.cjs'); +const DeleteServers = require('./decorators/servers/delete-servers.cjs'); +const SetServers = require('./decorators/servers/set-servers.cjs'); +const SetTagGroups = require('./decorators/tags/set-tag-groups.cjs'); +const StripVersionPrefix = require('./decorators/paths/strip-version-prefix.cjs'); +const StripTrailingSlash = require('./decorators/paths/strip-trailing-slash.cjs'); const id = 'docs'; diff --git a/api-docs/openapi/plugins/helpers/content-helper.js b/api-docs/openapi/plugins/helpers/content-helper.cjs similarity index 100% rename from api-docs/openapi/plugins/helpers/content-helper.js rename to api-docs/openapi/plugins/helpers/content-helper.cjs diff --git a/api-docs/openapi/plugins/rules/report-tags.js b/api-docs/openapi/plugins/rules/report-tags.cjs similarity index 100% rename from api-docs/openapi/plugins/rules/report-tags.js rename to api-docs/openapi/plugins/rules/report-tags.cjs diff --git a/api-docs/openapi/plugins/rules/validate-servers-url.js b/api-docs/openapi/plugins/rules/validate-servers-url.cjs similarity index 100% rename from api-docs/openapi/plugins/rules/validate-servers-url.js rename to api-docs/openapi/plugins/rules/validate-servers-url.cjs diff --git a/assets/js/index.js b/assets/js/index.js new file mode 100644 index 000000000..f63ad8b5d --- /dev/null +++ b/assets/js/index.js @@ -0,0 +1 @@ +export * from './main.js'; diff --git a/assets/js/main.js b/assets/js/main.js index 57b92a837..5c2289720 100644 --- a/assets/js/main.js +++ b/assets/js/main.js 
@@ -6,9 +6,6 @@ /** Import modules that are not components. * TODO: Refactor these into single-purpose component modules. */ -// import * as codeblocksPreferences from './api-libs.js'; -// import * as datetime from './datetime.js'; -// import * as featureCallouts from './feature-callouts.js'; import * as apiLibs from './api-libs.js'; import * as codeControls from './code-controls.js'; import * as contentInteractions from './content-interactions.js'; @@ -21,15 +18,6 @@ import * as pageContext from './page-context.js'; import * as pageFeedback from './page-feedback.js'; import * as tabbedContent from './tabbed-content.js'; import * as v3Wayfinding from './v3-wayfinding.js'; -// import * as homeInteractions from './home-interactions.js'; -// import { getUrls, getReferrerHost, InfluxDBUrl } from './influxdb-url.js'; -// import * as keybindings from './keybindings.js'; -// import * as listFilters from './list-filters.js'; -// import { Modal } from './modal.js'; -// import { showNotifications } from './notifications.js'; -// import ReleaseTOC from './release-toc.js'; -// import * as scroll from './scroll.js'; -// import { TabbedContent } from './tabbed-content.js'; /** Import component modules * The component pattern organizes JavaScript, CSS, and HTML for a specific UI element or interaction: @@ -41,40 +29,95 @@ import * as v3Wayfinding from './v3-wayfinding.js'; import AskAITrigger from './ask-ai-trigger.js'; import CodePlaceholder from './code-placeholders.js'; import { CustomTimeTrigger } from './custom-timestamps.js'; +import FluxInfluxDBVersionsTrigger from './flux-influxdb-versions.js'; import { SearchButton } from './search-button.js'; import { SidebarToggle } from './sidebar-toggle.js'; import Theme from './theme.js'; import ThemeSwitch from './theme-switch.js'; -// import CodeControls from './code-controls.js'; -// import ContentInteractions from './content-interactions.js'; -// import CustomTimestamps from './custom-timestamps.js'; -// import Diagram from 
'./Diagram.js'; -// import FluxGroupKeysExample from './FluxGroupKeysExample.js'; -import FluxInfluxDBVersionsTrigger from './flux-influxdb-versions.js'; -// import PageFeedback from './page-feedback.js'; -// import SearchInput from './SearchInput.js'; -// import Sidebar from './Sidebar.js'; -// import V3Wayfinding from './v3-wayfinding.js'; -// import VersionSelector from './VersionSelector.js'; -// Expose libraries and components within a namespaced object (for backwards compatibility or testing) -// Expose libraries and components within a namespaced object (for backwards compatibility or testing) +/** + * Component Registry + * A central registry that maps component names to their constructor functions. + * Add new components to this registry as they are created or migrated from non-component modules. + * This allows for: + * 1. Automatic component initialization based on data-component attributes + * 2. Centralized component management + * 3. Easy addition/removal of components + * 4. Simplified testing and debugging + */ +const componentRegistry = { + 'ask-ai-trigger': AskAITrigger, + 'code-placeholder': CodePlaceholder, + 'custom-time-trigger': CustomTimeTrigger, + 'flux-influxdb-versions-trigger': FluxInfluxDBVersionsTrigger, + 'search-button': SearchButton, + 'sidebar-toggle': SidebarToggle, + 'theme': Theme, + 'theme-switch': ThemeSwitch +}; - - -document.addEventListener('DOMContentLoaded', function () { +/** + * Initialize global namespace for documentation JavaScript + * Exposes core modules for debugging, testing, and backwards compatibility + */ +function initGlobals() { if (typeof window.influxdatadocs === 'undefined') { window.influxdatadocs = {}; } - // Expose modules to the global object for debugging, testing, and backwards compatibility for non-ES6 modules. 
+ // Expose modules to the global object for debugging, testing, and backwards compatibility window.influxdatadocs.delay = delay; window.influxdatadocs.localStorage = window.LocalStorageAPI = localStorage; window.influxdatadocs.pageContext = pageContext; window.influxdatadocs.toggleModal = modals.toggleModal; + window.influxdatadocs.componentRegistry = componentRegistry; + + return window.influxdatadocs; +} - // On content loaded, initialize (not-component-ready) UI interaction modules - // To differentiate these from component-ready modules, these modules typically export an initialize function that wraps UI interactions and event listeners. +/** + * Initialize components based on data-component attributes + * @param {Object} globals - The global influxdatadocs namespace + */ +function initComponents(globals) { + const components = document.querySelectorAll('[data-component]'); + + components.forEach((component) => { + const componentName = component.getAttribute('data-component'); + const ComponentConstructor = componentRegistry[componentName]; + + if (ComponentConstructor) { + // Initialize the component and store its instance in the global namespace + try { + const instance = ComponentConstructor({ component }); + globals[componentName] = ComponentConstructor; + + // Optionally store component instances for future reference + if (!globals.instances) { + globals.instances = {}; + } + + if (!globals.instances[componentName]) { + globals.instances[componentName] = []; + } + + globals.instances[componentName].push({ + element: component, + instance + }); + } catch (error) { + console.error(`Error initializing component "${componentName}":`, error); + } + } else { + console.warn(`Unknown component: "${componentName}"`); + } + }); +} + +/** + * Initialize all non-component modules + */ +function initModules() { modals.initialize(); apiLibs.initialize(); codeControls.initialize(); @@ -84,67 +127,24 @@ document.addEventListener('DOMContentLoaded', function () { 
pageFeedback.initialize(); tabbedContent.initialize(); v3Wayfinding.initialize(); +} - /** Initialize components - Component Structure: Each component is structured as a jQuery anonymous function that listens for the document ready state. - Initialization in main.js: Each component is called in main.js inside a jQuery document ready function to ensure they are initialized when the document is ready. - Note: These components should *not* be called directly in the HTML. - */ - const components = document.querySelectorAll('[data-component]'); - components.forEach((component) => { - const componentName = component.getAttribute('data-component'); - switch (componentName) { - case 'ask-ai-trigger': - AskAITrigger({ component }); - window.influxdatadocs[componentName] = AskAITrigger; - break; - case 'code-placeholder': - CodePlaceholder({ component }); - window.influxdatadocs[componentName] = CodePlaceholder; - break; - case 'custom-time-trigger': - CustomTimeTrigger({ component }); - window.influxdatadocs[componentName] = CustomTimeTrigger; - break; - case 'flux-influxdb-versions-trigger': - FluxInfluxDBVersionsTrigger({ component }); - window.influxdatadocs[componentName] = FluxInfluxDBVersionsTrigger; - break; - case 'search-button': - SearchButton({ component }); - window.influxdatadocs[componentName] = SearchButton; - break; - case 'sidebar-toggle': - SidebarToggle({ component }); - window.influxdatadocs[componentName] = SidebarToggle; - break; - case 'theme': - Theme({ component }); - window.influxdatadocs[componentName] = Theme; - break; - // CodeControls(); - // ContentInteractions(); - // CustomTimestamps(); - // Diagram(); - // FluxGroupKeysExample(); - // FluxInfluxDBVersionsModal(); - // InfluxDBUrl(); - // Modal(); - // PageFeedback(); - // ReleaseTOC(); - // SearchInput(); - // showNotifications(); - // Sidebar(); - // TabbedContent(); - // ThemeSwitch({}); - // V3Wayfinding(); - // VersionSelector(); - case 'theme-switch': - ThemeSwitch({ component }); - 
window.influxdatadocs[componentName] = ThemeSwitch; - break; - default: - console.warn(`Unknown component: ${componentName}`); - } - }); -}); +/** + * Main initialization function + */ +function init() { + // Initialize global namespace and expose core modules + const globals = initGlobals(); + + // Initialize non-component UI modules + initModules(); + + // Initialize components from registry + initComponents(globals); +} + +// Initialize everything when the DOM is ready +document.addEventListener('DOMContentLoaded', init); + +// Export public API +export { initGlobals, componentRegistry }; \ No newline at end of file diff --git a/assets/js/release-toc.js b/assets/js/release-toc.js index 42858fccc..c27a9deaf 100644 --- a/assets/js/release-toc.js +++ b/assets/js/release-toc.js @@ -5,13 +5,13 @@ * release notes pages. */ -// Use jQuery filter to get an array of all the *release* h2 elements -const releases = $('h2').filter( - (_i, el) => !el.id.match(/checkpoint-releases/) +// Get all h2 elements that are not checkpoint-releases +const releases = Array.from(document.querySelectorAll('h2')).filter( + el => !el.id.match(/checkpoint-releases/) ); // Extract data about each release from the array of releases -releaseData = releases.map((_i, el) => ({ +const releaseData = releases.map(el => ({ name: el.textContent, id: el.id, class: el.getAttribute('class'), @@ -19,8 +19,8 @@ releaseData = releases.map((_i, el) => ({ })); // Use release data to generate a list item for each release -getReleaseItem = (releaseData) => { - var li = document.createElement("li"); +function getReleaseItem(releaseData) { + const li = document.createElement("li"); if (releaseData.class !== null) { li.className = releaseData.class; } @@ -29,9 +29,10 @@ getReleaseItem = (releaseData) => { return li; } -// Use jQuery each to build the release table of contents -releaseData.each((_i, release) => { - $('#release-toc ul')[0].appendChild(getReleaseItem(release)); +// Build the release table of contents 
+const releaseTocUl = document.querySelector('#release-toc ul'); +releaseData.forEach(release => { + releaseTocUl.appendChild(getReleaseItem(release)); }); /* @@ -39,20 +40,30 @@ releaseData.each((_i, release) => { * number specified in the `show` attribute of `ul.release-list`. * Once all the release items are visible, the "Show More" button is hidden. */ -$('#release-toc .show-more').click(function () { - const itemHeight = 1.885; // Item height in rem - const releaseNum = releaseData.length; - const maxHeight = releaseNum * itemHeight; - const releaseIncrement = Number($('#release-list')[0].getAttribute('show')); - const currentHeight = Number( - $('#release-list')[0].style.height.match(/\d+\.?\d+/)[0] - ); - const potentialHeight = currentHeight + releaseIncrement * itemHeight; - const newHeight = potentialHeight > maxHeight ? maxHeight : potentialHeight; +const showMoreBtn = document.querySelector('#release-toc .show-more'); +if (showMoreBtn) { + showMoreBtn.addEventListener('click', function () { + const itemHeight = 1.885; // Item height in rem + const releaseNum = releaseData.length; + const maxHeight = releaseNum * itemHeight; + const releaseList = document.getElementById('release-list'); + const releaseIncrement = Number(releaseList.getAttribute('show')); + const currentHeightMatch = releaseList.style.height.match(/\d+\.?\d+/); + const currentHeight = currentHeightMatch + ? Number(currentHeightMatch[0]) + : 0; + const potentialHeight = currentHeight + releaseIncrement * itemHeight; + const newHeight = potentialHeight > maxHeight ? 
maxHeight : potentialHeight; - $('#release-list')[0].style.height = `${newHeight}rem`; + releaseList.style.height = `${newHeight}rem`; - if (newHeight >= maxHeight) { - $('#release-toc .show-more').fadeOut(100); - } -}); + if (newHeight >= maxHeight) { + // Simple fade out + showMoreBtn.style.transition = 'opacity 0.1s'; + showMoreBtn.style.opacity = 0; + setTimeout(() => { + showMoreBtn.style.display = 'none'; + }, 100); + } + }); +} diff --git a/assets/jsconfig.json b/assets/jsconfig.json index 377218ccb..4ad710c10 100644 --- a/assets/jsconfig.json +++ b/assets/jsconfig.json @@ -3,7 +3,8 @@ "baseUrl": ".", "paths": { "*": [ - "*" + "*", + "../node_modules/*" ] } } diff --git a/assets/styles/layouts/_homepage.scss b/assets/styles/layouts/_homepage.scss index a0583b4c9..ca92588e9 100644 --- a/assets/styles/layouts/_homepage.scss +++ b/assets/styles/layouts/_homepage.scss @@ -105,7 +105,7 @@ .product { padding: 0 1rem; display: flex; - flex: 1 1 50%; + flex: 1 1 33%; flex-direction: column; justify-content: space-between; max-width: 33%; @@ -118,11 +118,10 @@ line-height: 1.5rem; color: rgba($article-text, .7); } - } - &.new { - .product-info h3::after { - content: "New"; + h3[state] { + &::after { + content: attr(state); margin-left: .5rem; font-size: 1rem; padding: .25em .5em .25em .4em; @@ -132,6 +131,8 @@ font-style: italic; vertical-align: middle; } + + } } ul.product-links { @@ -227,6 +228,30 @@ background: $article-bg; } + .categories { + display: flex; + flex-direction: row; + flex-wrap: wrap; + // margin: 0 -1rem; + width: calc(100% + 2rem); + + .category { + &.full-width { + width: 100%; + } + &.two-thirds { + width: 66.66%; + .product { max-width: 50%; } + } + &.one-third { + width: 33.33%; + .product { + max-width: 100%; + } + } + } + } + .category-head{ margin: 1rem 0 2rem; &::after { @@ -234,6 +259,7 @@ display: block; border-top: 1px solid $article-hr; margin-top: -1.15rem; + width: calc(100% - 2rem); } } } @@ -441,6 +467,16 @@ ul {margin-bottom: 0;} 
} } + .categories .category { + &.two-thirds { + width: 100%; + .product { max-width: 100%; } + } + &.one-third { + width: 100%; + .product { max-width: 100%; } + } + } } #telegraf { flex-direction: column; diff --git a/assets/styles/layouts/article/_blocks.scss b/assets/styles/layouts/article/_blocks.scss index 090ee9560..62b205491 100644 --- a/assets/styles/layouts/article/_blocks.scss +++ b/assets/styles/layouts/article/_blocks.scss @@ -96,4 +96,5 @@ blockquote { "blocks/tip", "blocks/important", "blocks/warning", - "blocks/caution"; + "blocks/caution", + "blocks/beta"; diff --git a/assets/styles/layouts/article/blocks/_beta.scss b/assets/styles/layouts/article/blocks/_beta.scss new file mode 100644 index 000000000..b3ab3a70c --- /dev/null +++ b/assets/styles/layouts/article/blocks/_beta.scss @@ -0,0 +1,105 @@ +.block.beta { + @include gradient($grad-burningDusk); + padding: 4px; + border: none; + border-radius: 25px !important; + + .beta-content { + background: $article-bg; + border-radius: 21px; + padding: calc(1.65rem - 4px) calc(2rem - 4px) calc(.1rem + 4px) calc(2rem - 4px); + + h4 { + color: $article-heading; + } + + p {margin-bottom: 1rem;} + + .expand-wrapper { + border: none; + margin: .5rem 0 1.5rem; + } + .expand { + border: none; + padding: 0; + + .expand-content p { + margin-left: 2rem; + } + + ul { + + margin-top: -1rem; + + &.feedback-channels { + + padding: 0; + margin: -1rem 0 1.5rem 2rem; + list-style: none; + + a { + color: $article-heading; + font-weight: $medium; + position: relative; + + &.discord:before { + content: url('/svgs/discord.svg'); + display: inline-block; + height: 1.1rem; + width: 1.25rem; + vertical-align: top; + margin: 2px .65rem 0 0; + } + + &.community:before { + content: "\e900"; + color: $article-heading; + margin: 0 .65rem 0 0; + font-size: 1.2rem; + font-family: 'icomoon-v2'; + vertical-align: middle; + } + + &.slack:before { + content: url('/svgs/slack.svg'); + display: inline-block; + height: 1.1rem; + width: 1.1rem; 
+ vertical-align: text-top; + margin-right: .65rem; + } + + &.reddit:before { + content: url('/svgs/reddit.svg'); + display: inline-block; + height: 1.1rem; + width: 1.2rem; + vertical-align: top; + margin: 2px .65rem 0 0; + } + + &::after { + content: "\e90a"; + font-family: 'icomoon-v4'; + font-weight: bold; + font-size: 1.3rem; + display: inline-block; + position: absolute; + @include gradient($grad-burningDusk); + background-clip: text; + -webkit-text-fill-color: transparent; + right: 0; + transform: translateX(.25rem); + opacity: 0; + transition: transform .2s, opacity .2s; + } + + &:hover { + &::after {transform: translateX(1.5rem); opacity: 1;} + } + } + } + } + } + } +} \ No newline at end of file diff --git a/build-scripts/build-copilot-instructions.js b/build-scripts/build-copilot-instructions.js new file mode 100644 index 000000000..0d089e2c1 --- /dev/null +++ b/build-scripts/build-copilot-instructions.js @@ -0,0 +1,76 @@ +#!/usr/bin/env node + +/** + * Script to generate GitHub Copilot instructions + * for InfluxData documentation. 
+ */ +import fs from 'fs'; +import path from 'path'; +import process from 'process'; +import { execSync } from 'child_process'; + +// Get the current file path and directory +export { buildContributingInstructions }; + +(async () => { + try { + await buildContributingInstructions(); + } catch (error) { + console.error('Error generating Copilot instructions:', error); + } +})(); + +/** Build instructions from CONTRIBUTING.md + * This script reads CONTRIBUTING.md, formats it appropriately, + * and saves it to .github/instructions/contributing.instructions.md + */ +function buildContributingInstructions() { + // Paths + const contributingPath = path.join(process.cwd(), 'CONTRIBUTING.md'); + const instructionsDir = path.join(process.cwd(), '.github', 'instructions'); + const instructionsPath = path.join( + instructionsDir, + 'contributing.instructions.md' + ); + + // Ensure the instructions directory exists + if (!fs.existsSync(instructionsDir)) { + fs.mkdirSync(instructionsDir, { recursive: true }); + } + + // Read the CONTRIBUTING.md file + let content = fs.readFileSync(contributingPath, 'utf8'); + + // Format the content for Copilot instructions with applyTo attribute + content = `--- +applyTo: "content/**/*.md, layouts/**/*.html" +--- + +# GitHub Copilot Instructions for InfluxData Documentation + +## Purpose and scope + +GitHub Copilot should help document InfluxData products +by creating clear, accurate technical content with proper +code examples, frontmatter, shortcodes, and formatting. 
+ +${content}`; + + // Write the formatted content to the instructions file + fs.writeFileSync(instructionsPath, content); + + console.log(`✅ Generated Copilot instructions at ${instructionsPath}`); + + // Add the file to git if it has changed + try { + const gitStatus = execSync( + `git status --porcelain "${instructionsPath}"` + ).toString(); + if (gitStatus.trim()) { + execSync(`git add "${instructionsPath}"`); + console.log('✅ Added instructions file to git staging'); + } + } catch (error) { + console.warn('⚠️ Could not add instructions file to git:', error.message); + } +} diff --git a/compose.yaml b/compose.yaml index a2338c3b1..fe0293615 100644 --- a/compose.yaml +++ b/compose.yaml @@ -449,6 +449,9 @@ services: - type: bind source: ./content target: /app/content + - type: bind + source: ./CONTRIBUTING.md + target: /app/CONTRIBUTING.md volumes: test-content: cloud-tmp: diff --git a/config/_default/config.yml b/config/_default/config.yml new file mode 100644 index 000000000..917d78e2d --- /dev/null +++ b/config/_default/config.yml @@ -0,0 +1,2 @@ +import: + - hugo.yml \ No newline at end of file diff --git a/config/testing/config.yml b/config/testing/config.yml new file mode 100644 index 000000000..f403c8347 --- /dev/null +++ b/config/testing/config.yml @@ -0,0 +1,20 @@ +baseURL: 'http://localhost:1315/' + +server: + port: 1315 + +# Override settings for testing +buildFuture: true + +# Configure what content is built in testing env +params: + environment: testing + buildTestContent: true + +# Keep your shared content exclusions +ignoreFiles: + - "content/shared/.*" + +# Ignore specific warning logs +ignoreLogs: + - warning-goldmark-raw-html \ No newline at end of file diff --git a/content/enterprise_influxdb/v1/concepts/schema_and_data_layout.md b/content/enterprise_influxdb/v1/concepts/schema_and_data_layout.md index 43c781460..c60e8ccef 100644 --- a/content/enterprise_influxdb/v1/concepts/schema_and_data_layout.md +++ 
b/content/enterprise_influxdb/v1/concepts/schema_and_data_layout.md @@ -22,7 +22,7 @@ We recommend the following design guidelines for most use cases: Your queries should guide what data you store in [tags](/enterprise_influxdb/v1/concepts/glossary/#tag) and what you store in [fields](/enterprise_influxdb/v1/concepts/glossary/#field) : -- Store commonly queried and grouping ([`group()`](/flux/v0.x/stdlib/universe/group) or [`GROUP BY`](/enterprise_influxdb/v1/query_language/explore-data/#group-by-tags)) metadata in tags. +- Store commonly queried and grouping ([`group()`](/flux/v0/stdlib/universe/group) or [`GROUP BY`](/enterprise_influxdb/v1/query_language/explore-data/#group-by-tags)) metadata in tags. - Store data in fields if each data point contains a different value. - Store numeric values as fields ([tag values](/enterprise_influxdb/v1/concepts/glossary/#tag-value) only support string values). diff --git a/content/example.md b/content/example.md index d484b26f8..5bcd782aa 100644 --- a/content/example.md +++ b/content/example.md @@ -6,14 +6,14 @@ related: - /influxdb/v2/write-data/ - /influxdb/v2/write-data/quick-start - https://influxdata.com, This is an external link -draft: true +test_only: true # Custom parameter to indicate test-only content --- This is a paragraph. Lorem ipsum dolor ({{< icon "trash" "v2" >}}) sit amet, consectetur adipiscing elit. Nunc rutrum, metus id scelerisque euismod, erat ante suscipit nibh, ac congue enim risus id est. Etiam tristique nisi et tristique auctor. Morbi eu bibendum erat. Sed ullamcorper, dui id lobortis efficitur, mauris odio pharetra neque, vel tempor odio dolor blandit justo. [Ref link][foo] -[foo]: https://docs.influxadata.com +[foo]: https://docs.influxdata.com This is **bold** text. This is _italic_ text. This is _**bold and italic**_. 
diff --git a/content/flux/v0/release-notes.md b/content/flux/v0/release-notes.md index 3d731055e..597a651f1 100644 --- a/content/flux/v0/release-notes.md +++ b/content/flux/v0/release-notes.md @@ -433,7 +433,7 @@ representative of the Flux SPEC. details. - Add tagging support to Flux tests. - Add new function [`experimental.catch()`](/flux/v0/stdlib/experimental/catch/). -- Add new function [`testing.shouldError()`](/flux/v0.x/stdlib/testing/shoulderror/). +- Add new function [`testing.shouldError()`](/flux/v0/stdlib/testing/shoulderror/). ### Bug fixes diff --git a/content/influxdb/cloud/account-management/data-usage.md b/content/influxdb/cloud/account-management/data-usage.md index 40969ea56..9efb3e144 100644 --- a/content/influxdb/cloud/account-management/data-usage.md +++ b/content/influxdb/cloud/account-management/data-usage.md @@ -12,8 +12,8 @@ menu: parent: Account management name: View data usage related: - - /flux/v0.x/stdlib/experimental/usage/from/ - - /flux/v0.x/stdlib/experimental/usage/limits/ + - /flux/v0/stdlib/experimental/usage/from/ + - /flux/v0/stdlib/experimental/usage/limits/ alt_links: cloud-serverless: /influxdb3/cloud-serverless/admin/billing/data-usage/ --- diff --git a/content/influxdb/cloud/account-management/limits.md b/content/influxdb/cloud/account-management/limits.md index b1ea7636d..b4670cf88 100644 --- a/content/influxdb/cloud/account-management/limits.md +++ b/content/influxdb/cloud/account-management/limits.md @@ -9,8 +9,8 @@ menu: parent: Account management name: Adjustable quotas and limits related: - - /flux/v0.x/stdlib/experimental/usage/from/ - - /flux/v0.x/stdlib/experimental/usage/limits/ + - /flux/v0/stdlib/experimental/usage/from/ + - /flux/v0/stdlib/experimental/usage/limits/ - /influxdb/cloud/write-data/best-practices/resolve-high-cardinality/ alt_links: cloud-serverless: /influxdb3/cloud-serverless/admin/billing/limits/ @@ -97,7 +97,7 @@ Combine delete predicate expressions (if possible) into a single request. 
Influx The {{< product-name >}} UI displays a notification message when service quotas or limits are exceeded. The error messages correspond with the relevant [API error responses](#api-error-responses). -Errors can also be viewed in the [Usage page](/influxdb/cloud/account-management/data-usage/) under **Limit Events**, e.g. `event_type_limited_query`, `event_type_limited_write`,`event_type_limited_cardinality`, or `event_type_limited_delete_rate`. +Errors can also be viewed in the [Usage page](/influxdb/cloud/account-management/data-usage/) under **Limit Events**, for example: `event_type_limited_query`, `event_type_limited_write`,`event_type_limited_cardinality`, or `event_type_limited_delete_rate`. ## API error responses diff --git a/content/influxdb/cloud/admin/organizations/migrate-org.md b/content/influxdb/cloud/admin/organizations/migrate-org.md index 9d7809aa9..a03aaa43d 100644 --- a/content/influxdb/cloud/admin/organizations/migrate-org.md +++ b/content/influxdb/cloud/admin/organizations/migrate-org.md @@ -40,7 +40,7 @@ To replicate the state of an organization: ### Write data with Flux Perform a query to return all specified data. Write results directly to a bucket in the new organization with the Flux -[`to()` function](/flux/v0.x/stdlib/influxdata/influxdb/to/). +[`to()` function](/flux/v0/stdlib/influxdata/influxdb/to/). 
{{% note %}} If writes are prevented by rate limiting, diff --git a/content/influxdb/cloud/query-data/execute-queries/query-demo-data.md b/content/influxdb/cloud/query-data/execute-queries/query-demo-data.md index c932e2f0b..d0f94cbf6 100644 --- a/content/influxdb/cloud/query-data/execute-queries/query-demo-data.md +++ b/content/influxdb/cloud/query-data/execute-queries/query-demo-data.md @@ -25,7 +25,7 @@ types of demo data that let you explore and familiarize yourself with InfluxDB C {{% note %}} #### Free to use and read-only - InfluxDB Cloud demo data buckets are **free to use** and are **_not_ subject to - [Free Plan rate limits](influxdb/cloud/account-management/limits/#free-plan-rate-limits) rate limits**. + [Free Plan rate limits](/influxdb/cloud/account-management/limits/#free-plan-rate-limits) rate limits**. - Demo data buckets are **read-only**. You cannot write data into demo data buckets. {{% /note %}} diff --git a/content/influxdb/cloud/reference/cli/influx/transpile/_index.md b/content/influxdb/cloud/reference/cli/influx/transpile/_index.md index 72b7485fb..1f56fa658 100644 --- a/content/influxdb/cloud/reference/cli/influx/transpile/_index.md +++ b/content/influxdb/cloud/reference/cli/influx/transpile/_index.md @@ -13,7 +13,7 @@ prepend: | > [Use InfluxQL to query InfluxDB](/influxdb/cloud/query-data/influxql/). 
> For information about manually converting InfluxQL queries to Flux, see: > - > - [Get started with Flux](/flux/v0.x/get-started/) + > - [Get started with Flux](/flux/v0/get-started/) > - [Query data with Flux](/influxdb/cloud/query-data/flux/) > - [Migrate continuous queries to Flux tasks](/influxdb/cloud/upgrade/v1-to-cloud/migrate-cqs/) source: /shared/influxdb-v2/reference/cli/influx/transpile/_index.md diff --git a/content/influxdb/cloud/reference/release-notes/cloud-updates.md b/content/influxdb/cloud/reference/release-notes/cloud-updates.md index 70a7ab533..d6cea7c84 100644 --- a/content/influxdb/cloud/reference/release-notes/cloud-updates.md +++ b/content/influxdb/cloud/reference/release-notes/cloud-updates.md @@ -188,7 +188,7 @@ Now, you can add the following buckets with sample data to your notebooks: ### Add ability to share notebooks -Add ability to [share a notebook](/influxdb/cloud/tools/notebooks/manage-notebooks/#share-a-notebook) in the the InfluxDB Cloud notebook UI. +Add ability to [share a notebook](/influxdb/cloud/tools/notebooks/manage-notebooks/#share-a-notebook) in the InfluxDB Cloud notebook UI. ## October 2021 @@ -209,7 +209,7 @@ Refresh the look and feel of InfluxDB Cloud UI. The updated icons, fonts, and la ### Flux update -Upgrade to [Flux v0.139](/flux/v0.x/release-notes/). +Upgrade to [Flux v0.139](/flux/v0/release-notes/). ### Telegraf configuration UI @@ -347,7 +347,7 @@ Install and customize any [InfluxDB community template](https://github.com/influ ## Features - **InfluxDB OSS 2.0 alpha-17** – - _See the [alpha-17 release notes](/influxdb/v2%2E0/reference/release-notes/influxdb/#v200-alpha17) for details._ + _See the [alpha-17 release notes](/influxdb/v2/reference/release-notes/influxdb/#v200-alpha17) for details._ - Alerts and Notifications to Slack (Free Plan), PagerDuty and HTTP (Usage-based Plan). - Rate limiting on cardinality for Free Plan. - Billing notifications. 
@@ -359,7 +359,7 @@ Install and customize any [InfluxDB community template](https://github.com/influ ### Features - **InfluxDB OSS 2.0 alpha-15** – - _See the [alpha-9 release notes](/influxdb/v2%2E0/reference/release-notes/influxdb/#v200-alpha15) for details._ + _See the [alpha-9 release notes](/influxdb/v2/reference/release-notes/influxdb/#v200-alpha15) for details._ - Usage-based Plan. - Adjusted Free Plan rate limits. - Timezone selection in the user interface. @@ -386,7 +386,7 @@ Install and customize any [InfluxDB community template](https://github.com/influ ### Features - **InfluxDB OSS 2.0 alpha-9** – - _See the [alpha-9 release notes](/influxdb/v2%2E0/reference/release-notes/influxdb/#v200-alpha9) for details._ + _See the [alpha-9 release notes](/influxdb/v2/reference/release-notes/influxdb/#v200-alpha9) for details._ ### Bug fixes @@ -403,7 +403,7 @@ Install and customize any [InfluxDB community template](https://github.com/influ ### Features - **InfluxDB OSS 2.0 alpha-7** – - _See the [alpha-7 release notes](/influxdb/v2%2E0/reference/release-notes/influxdb/#v200-alpha7) for details._ + _See the [alpha-7 release notes](/influxdb/v2/reference/release-notes/influxdb/#v200-alpha7) for details._ ### Bug fixes diff --git a/content/influxdb/cloud/write-data/troubleshoot.md b/content/influxdb/cloud/write-data/troubleshoot.md index aaa93a1fc..ec065866f 100644 --- a/content/influxdb/cloud/write-data/troubleshoot.md +++ b/content/influxdb/cloud/write-data/troubleshoot.md @@ -19,4 +19,5 @@ source: /shared/influxdb-v2/write-data/troubleshoot.md --- \ No newline at end of file +// SOURCE content/shared/influxdb-v2/write-data/troubleshoot.md +--> \ No newline at end of file diff --git a/content/influxdb/v1/concepts/schema_and_data_layout.md b/content/influxdb/v1/concepts/schema_and_data_layout.md index 5824c4477..f1006cc22 100644 --- a/content/influxdb/v1/concepts/schema_and_data_layout.md +++ b/content/influxdb/v1/concepts/schema_and_data_layout.md @@ -22,7 +22,7 @@ 
We recommend the following design guidelines for most use cases: Your queries should guide what data you store in [tags](/influxdb/v1/concepts/glossary/#tag) and what you store in [fields](/influxdb/v1/concepts/glossary/#field) : -- Store commonly queried and grouping ([`group()`](/flux/v0.x/stdlib/universe/group) or [`GROUP BY`](/influxdb/v1/query_language/explore-data/#group-by-tags)) metadata in tags. +- Store commonly queried and grouping ([`group()`](/flux/v0/stdlib/universe/group) or [`GROUP BY`](/influxdb/v1/query_language/explore-data/#group-by-tags)) metadata in tags. - Store data in fields if each data point contains a different value. - Store numeric values as fields ([tag values](/influxdb/v1/concepts/glossary/#tag-value) only support string values). diff --git a/content/influxdb/v1/flux/guides/exists.md b/content/influxdb/v1/flux/guides/exists.md index 036e04905..c4c3ad77e 100644 --- a/content/influxdb/v1/flux/guides/exists.md +++ b/content/influxdb/v1/flux/guides/exists.md @@ -83,7 +83,7 @@ customSumProduct = (tables=<-) => tables #### Check if a statically defined record contains a key -When you use the [record literal syntax](/flux/v0.x/data-types/composite/record/#record-syntax) +When you use the [record literal syntax](/flux/v0/data-types/composite/record/#record-syntax) to statically define a record, Flux knows the record type and what keys to expect. - If the key exists in the static record, `exists` returns `true`. 
diff --git a/content/influxdb/v2/install/_index.md b/content/influxdb/v2/install/_index.md index c27d355f4..f7af4d919 100644 --- a/content/influxdb/v2/install/_index.md +++ b/content/influxdb/v2/install/_index.md @@ -164,13 +164,13 @@ gpg: key 7C3D57159FC2F927: public key "InfluxData Package Signing Key }}_darwin_amd64.tar.gz" \ + "https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_darwin_amd64.tar.gz" \ ``` --> ```sh curl --silent --location \ - https://download.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}_darwin_amd64.tar.gz.asc \ + https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_darwin_amd64.tar.gz.asc \ | gpg --verify - ~/Downloads/influxdb2-{{< latest-patch >}}_darwin_amd64.tar.gz \ 2>&1 | grep 'InfluxData Package Signing Key ' ``` @@ -239,12 +239,12 @@ brew install influxdb 1. In your browser or your terminal, download the InfluxDB package. - InfluxDB v2 (macOS) + InfluxDB v2 (macOS) ```sh # Download using cURL curl --location -O \ - "https://download.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}_darwin_amd64.tar.gz" + "https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_darwin_amd64.tar.gz" ``` 2. {{< req text="Recommended:" color="magenta" >}}: Verify the integrity of the download--for example, enter the @@ -443,18 +443,18 @@ _If necessary, adjust the example file paths and utilities for your system._ 1. In your browser or your terminal, download the InfluxDB binary for your system architecture (AMD64 or ARM). - InfluxDB v2 (amd64) - InfluxDB v2 (arm) + InfluxDB v2 (amd64) + InfluxDB v2 (arm) @@ -463,7 +463,7 @@ _If necessary, adjust the example file paths and utilities for your system._ ```sh # Use curl to download the amd64 binary. 
curl --location -O \ - https://download.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}_linux_amd64.tar.gz + https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_linux_amd64.tar.gz ``` @@ -471,7 +471,7 @@ _If necessary, adjust the example file paths and utilities for your system._ ```sh # Use curl to download the arm64 binary. curl --location -O \ - https://download.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}_linux_arm64.tar.gz + https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_linux_arm64.tar.gz ``` 2. [Choose the InfluxData key-pair for your OS version](#choose-the-influxdata-key-pair-for-your-system). @@ -505,7 +505,7 @@ _If necessary, adjust the example file paths and utilities for your system._ | grep 'InfluxData Package Signing Key ' \ && # Download and verify the binary's signature file - curl --silent --location "https://download.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}_linux_amd64.tar.gz.asc" \ + curl --silent --location "https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_linux_amd64.tar.gz.asc" \ | gpg --verify - influxdb2-{{< latest-patch >}}_linux_amd64.tar.gz \ 2>&1 | grep 'InfluxData Package Signing Key ' ``` @@ -519,7 +519,7 @@ _If necessary, adjust the example file paths and utilities for your system._ | grep 'InfluxData Package Signing Key ' \ && # Download and verify the binary's signature file - curl --silent --location "https://download.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}_linux_arm64.tar.gz.asc" \ + curl --silent --location "https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_linux_arm64.tar.gz.asc" \ | gpg --verify - influxdb2-{{< latest-patch >}}_linux_arm64.tar.gz \ 2>&1 | grep 'InfluxData Package Signing Key ' ``` @@ -618,7 +618,7 @@ chmod 0750 
~/.influxdbv2 > > _You'll install the `influx CLI` in a [later step](#download-install-and-configure-the-influx-cli)._ -InfluxDB v2 (Windows) +InfluxDB v2 (Windows) Expand the downloaded archive into `C:\Program Files\InfluxData\` and rename the files if desired. diff --git a/content/influxdb/v2/reference/config-options.md b/content/influxdb/v2/reference/config-options.md index 18f0eadcb..4f639b6ab 100644 --- a/content/influxdb/v2/reference/config-options.md +++ b/content/influxdb/v2/reference/config-options.md @@ -2744,6 +2744,61 @@ storage-validate-keys = true --- +### storage-wal-flush-on-shutdown +Flush the WAL on shutdown. + +**Default:** `false` + +| influxd flag | Environment variable | Configuration key | +| :------------------------------ | :------------------------------------ | :---------------------------- | +| `--storage-wal-flush-on-shutdown` | `INFLUXD_STORAGE_WAL_FLUSH_ON_SHUTDOWN` | `storage-wal-flush-on-shutdown` | + +If set, `influxd` flushes or snapshots all WALs prior to completing shutdown--`influxd` performs cache snapshots on shutdown, which +results in the WAL files being written to TSM files and then deleted. + +This is useful in upgrade and downgrade scenarios to prevent WAL format +compatibility issues. + +###### influxd flag + + +```sh +influxd --storage-wal-flush-on-shutdown +``` + +###### Environment variable +```sh +export INFLUXD_STORAGE_WAL_FLUSH_ON_SHUTDOWN=true +``` + +###### Configuration file +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[YAML](#) +[TOML](#) +[JSON](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```yml +storage-wal-flush-on-shutdown: true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```toml +storage-wal-flush-on-shutdown = true +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```json +{ + "storage-wal-flush-on-shutdown": true +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + ### storage-wal-fsync-delay Duration a write will wait before fsyncing. 
A duration greater than `0` batches multiple fsync calls. diff --git a/content/influxdb/v2/reference/release-notes/influxdb.md b/content/influxdb/v2/reference/release-notes/influxdb.md index 18319bddb..9633439d6 100644 --- a/content/influxdb/v2/reference/release-notes/influxdb.md +++ b/content/influxdb/v2/reference/release-notes/influxdb.md @@ -8,6 +8,27 @@ menu: weight: 101 --- +## v2.7.12 {date="2025-05-20"} + +### Features + +- Add a `--pid-file` option to write a PID file to the specified location on startup. InfluxDB removes the PID file on shutdown. +- Add a `--storage-wal-flush-on-shutdown` option to flush the WAL on database shutdown to ensure all data is written to disk. +- Improve response error messages for dropped points, adding details including database, retention policy, and which bound was violated for partial writes. + +### Bug Fixes + +- Fix a locking issue in `TagValueIterator` that could cause reads and writes in buckets to block. [PR #26414](https://github.com/influxdata/influxdb/pull/26414) + +### Maintenance + +- Improved startup logging with an "are we there yet" counter for the number and percentage of shards opened. +- Update Go to 1.23.9. +- Update Flux to v0.196.1. +- Refresh dependencies to address security vulnerabilities and improve stability. + +--- + ## v2.7.11 {date="2024-12-02"} ### Features @@ -606,7 +627,7 @@ to migrate InfluxDB key-value metadata schemas to earlier 2.x versions when nece #### Flux - Update to [Flux v0.139.0](/flux/v0/release-notes/#v01390). -- Enable writing to remote hosts using the Flux [`to()`](/flux/v0/stdlib/influxdata/influxdb/to/) and [`experimental.to()`](/flux/v0/v0.x/stdlib/experimental/to/) functions. +- Enable writing to remote hosts using the Flux [`to()`](/flux/v0/stdlib/influxdata/influxdb/to/) and [`experimental.to()`](/flux/v0/stdlib/experimental/to/) functions. - Flux now supports locations that dynamically modify time offsets based on your specified timezone. 
You can also specify fixed time offsets relative to UTC. - Perform [bitwise operations](/flux/v0/stdlib/experimental/bitwise/) on integers and unsigned integers. @@ -673,24 +694,24 @@ New features include: - Add a new route `/api/v2/resources` that returns a list of known resources to the platform, including the following resource types. Makes it easier to update All Access tokens with current resources: - - `AuthorizationsResourceType` - - `BucketsResourceType` - - `ChecksResourceType` - - `DashboardsResourceType` - - `DBRPResourceType` - - `DocumentsResourceType` - - `LabelsResourceType` - - `NotificationEndpointResourceType` - - `NotificationRuleResourceType` - - `OrgsResourceType` - - `ScraperResourceType` - - `SecretsResourceType` - - `SourcesResourceType` - - `TasksResourceType` - - `TelegrafsResourceType` - - `UsersResourceType` - - `VariablesResourceType` - - `ViewsResourceType` + - `AuthorizationsResourceType` + - `BucketsResourceType` + - `ChecksResourceType` + - `DashboardsResourceType` + - `DBRPResourceType` + - `DocumentsResourceType` + - `LabelsResourceType` + - `NotificationEndpointResourceType` + - `NotificationRuleResourceType` + - `OrgsResourceType` + - `ScraperResourceType` + - `SecretsResourceType` + - `SourcesResourceType` + - `TasksResourceType` + - `TelegrafsResourceType` + - `UsersResourceType` + - `VariablesResourceType` + - `ViewsResourceType` #### Flux updates @@ -992,10 +1013,10 @@ The startup process automatically generates replacement `tsi1` indexes for shard - Fix timeout setup for `influxd` graceful shutdown. - Require user to set password during initial user onboarding. - Error message improvements: - - Remove duplication from task error messages. - - Improve error message shown when influx CLI can't find an `org` by name. - - Improve error message when opening BoltDB with unsupported file system options. - - Improve messages in DBRP API validation errors. + - Remove duplication from task error messages. 
+ - Improve error message shown when influx CLI can't find an `org` by name. + - Improve error message when opening BoltDB with unsupported file system options. + - Improve messages in DBRP API validation errors. - `influxd upgrade` improvements: - Add confirmation step with file sizes before copying data files. - Prevent panic in `influxd upgrade` when v1 users exist but v1 config is missing. @@ -1072,8 +1093,8 @@ Previously, the database retention policy (DBRP) mapping API did not match the s ### Features - Improvements to upgrade from 1.x to 2.x: - - Warning appears if auth is not enabled in 1.x (`auth-enabled = false`), which is not an option in 2.x. For details, see [Upgrade from InfluxDB 1.x to InfluxDB 2.0](/influxdb/v2/upgrade/v1-to-v2/). - - `upgrade` command now checks to see if continuous queries are running and automatically exports them to a local file. + - Warning appears if auth is not enabled in 1.x (`auth-enabled = false`), which is not an option in 2.x. For details, see [Upgrade from InfluxDB 1.x to InfluxDB 2.0](/influxdb/v2/upgrade/v1-to-v2/). + - `upgrade` command now checks to see if continuous queries are running and automatically exports them to a local file. - Upgrade to [Flux v0.95.0](/flux/v0/release-notes/#v0-95-0). - Upgrade `flux-lsp-browser` to v.0.5.23. - Manage database retention policy (DBRP) mappings via CLI. See [`influx v1 dbrp`](/influxdb/v2/reference/cli/influx/v1/dbrp/). @@ -1117,8 +1138,8 @@ When there are multiple [DBRP mappings](/influxdb/v2/reference/api/influxdb-1x/d Highlights include: - Support for **upgrading to InfluxDB 2.0**: - - To upgrade **from InfluxDB 1.x**, see [Upgrade from InfluxDB 1.x to InfluxDB 2.0](/influxdb/v2/upgrade/v1-to-v2). - - To upgrade **from InfluxDB 2.0 beta 16 or earlier**, see [Upgrade from InfluxDB 2.0 beta to InfluxDB 2.0](/influxdb/v2/upgrade/v2-beta-to-v2). + - To upgrade **from InfluxDB 1.x**, see [Upgrade from InfluxDB 1.x to InfluxDB 2.0](/influxdb/v2/upgrade/v1-to-v2). 
+ - To upgrade **from InfluxDB 2.0 beta 16 or earlier**, see [Upgrade from InfluxDB 2.0 beta to InfluxDB 2.0](/influxdb/v2/install/upgrade/v2-beta-to-v2/). - **Flux**, our powerful new functional data scripting language designed for querying, analyzing, and acting on data. This release includes [Flux v0.94.0](/flux/v0/release-notes/#v0940). If you're new to Flux, [check out how to get started with Flux](/influxdb/v2/query-data/get-started/). Next, delve deeper into the [Flux standard library](/flux/v0/stdlib//) reference docs and see how to [query with Flux](/influxdb/v2/query-data/flux/). - Support for [InfluxDB 1.x API compatibility](/influxdb/v2/reference/api/influxdb-1x/). - **Templates** and **stacks**. Discover how to [use community templates](/influxdb/v2/tools/influxdb-templates/use/) and how to [manage templates with stacks](/influxdb/v2/tools/influxdb-templates/stacks/). @@ -1241,14 +1262,14 @@ If you're new to InfluxDB 2.0, we recommend checking out [how to get started](/i {{% warn %}} #### Manual upgrade required -To simplify the migration for existing users of InfluxDB 1.x, this release includes significant breaking changes that require a manual upgrade from all alpha and beta versions. For more information, see [Upgrade to InfluxDB OSS 2.0rc](/influxdb/v2/upgrade/v2-beta-to-v2/), +To simplify the migration for existing users of InfluxDB 1.x, this release includes significant breaking changes that require a manual upgrade from all alpha and beta versions. For more information, see [Upgrade to InfluxDB OSS 2.0rc](/influxdb/v2/install/upgrade/v2-beta-to-v2/), {{% /warn %}} ### Breaking changes #### Manual upgrade -- To continue using data from InfluxDB 2.0 beta 16 or earlier, you must move all existing data out of the `~/.influxdbv2` (or equivalent) path, including `influxd.bolt`. All existing dashboards, tasks, integrations, alerts, users, and tokens must be recreated. 
For information on how to migrate your data, see [Upgrade to InfluxDB OSS 2.0rc](/influxdb/v2/upgrade/v2-beta-to-v2/). +- To continue using data from InfluxDB 2.0 beta 16 or earlier, you must move all existing data out of the `~/.influxdbv2` (or equivalent) path, including `influxd.bolt`. All existing dashboards, tasks, integrations, alerts, users, and tokens must be recreated. For information on how to migrate your data, see [Upgrade to InfluxDB OSS 2.0rc](/influxdb/v2/install/upgrade/v2-beta-to-v2/). #### Port update to 8086 @@ -2045,7 +2066,7 @@ _**This will remove all data from your InfluxDB v2.0 instance including time ser ###### Linux and macOS ```sh -rm ~/.influxdbv2/influxd.bolt +rm -f ~/.influxdbv2/influxd.bolt ``` Once completed, `v2.0.0-alpha.6` can be started. @@ -2079,7 +2100,7 @@ run the following command. ###### Linux and macOS ```sh -rm -r ~/.influxdbv2/engine +rm -rf ~/.influxdbv2/engine ``` Once completed, InfluxDB v2.0.0-alpha.5 can be started. diff --git a/content/influxdb/v2/write-data/troubleshoot.md b/content/influxdb/v2/write-data/troubleshoot.md index ddf703f5a..81cbc2e58 100644 --- a/content/influxdb/v2/write-data/troubleshoot.md +++ b/content/influxdb/v2/write-data/troubleshoot.md @@ -13,312 +13,9 @@ related: - /influxdb/v2/api/#tag/Write, InfluxDB API /write endpoint - /influxdb/v2/reference/internals - /influxdb/v2/reference/cli/influx/write +source: /shared/influxdb-v2/write-data/troubleshoot.md --- -Learn how to avoid unexpected results and recover from errors when writing to InfluxDB. 
-{{% show-in "v2" %}} - -- [Handle `write` and `delete` responses](#handle-write-and-delete-responses) -- [Troubleshoot failures](#troubleshoot-failures) - -{{% /show-in %}} - -{{% show-in "cloud,cloud-serverless" %}} - -- [Handle `write` and `delete` responses](#handle-write-and-delete-responses) -- [Troubleshoot failures](#troubleshoot-failures) -- [Troubleshoot rejected points](#troubleshoot-rejected-points) - -{{% /show-in %}} - -## Handle `write` and `delete` responses - -{{% show-in "cloud,cloud-serverless" %}} - -In InfluxDB Cloud, writes and deletes are asynchronous and eventually consistent. -Once InfluxDB validates your request and [queues](/influxdb/cloud/reference/internals/durability/#backup-on-write) the write or delete, it sends a _success_ response (HTTP `204` status code) as an acknowledgement. -To ensure that InfluxDB handles writes and deletes in the order you request them, wait for the acknowledgement before you send the next request. -Because writes are asynchronous, keep the following in mind: - -- Data might not yet be queryable when you receive _success_ (HTTP `204` status code). -- InfluxDB may still reject points after you receive _success_ (HTTP `204` status code). - -{{% /show-in %}} - -{{% show-in "v2" %}} - -If InfluxDB OSS successfully writes all the request data to the bucket, InfluxDB returns _success_ (HTTP `204` status code). -The first rejected point in a batch causes InfluxDB to reject the entire batch and respond with an [HTTP error status](#review-http-status-codes). - -{{% /show-in %}} - -### Review HTTP status codes - -InfluxDB uses conventional HTTP status codes to indicate the success or failure of a request. 
-Write requests return the following status codes: - -{{% show-in "cloud,cloud-serverless" %}} - -| HTTP response code | Message | Description | -| :-------------------------------| :--------------------------------------------------------------- | :------------- | -| `204 "Success"` | | If InfluxDB validated the request data format and queued the data for writing to the bucket | -| `400 "Bad request"` | `message` contains the first malformed line | If data is malformed | -| `401 "Unauthorized"` | | If the [`Authorization: Token` header](/influxdb/cloud/api-guide/api_intro/#authentication) is missing or malformed or if the [API token](/influxdb/cloud/api-guide/api_intro/#authentication) doesn't have [permission](/influxdb/cloud/admin/tokens/) to write to the bucket | -| `404 "Not found"` | requested **resource type**, e.g. "organization", and **resource name** | If a requested resource (e.g. organization or bucket) wasn't found | -| `413 “Request too large”` | cannot read data: points in batch is too large | If a **write** request exceeds the maximum [global limit](/influxdb/cloud/account-management/limits/#global-limits) | -| `429 “Too many requests”` | `Retry-After` header: xxx (seconds to wait before retrying the request) | If a **read** or **write** request exceeds your plan's [adjustable service quotas](/influxdb/cloud/account-management/limits/#adjustable-service-quotas) or if a **delete** request exceeds the maximum [global limit](/influxdb/cloud/account-management/limits/#global-limits) | -| `500 "Internal server error"` | | Default status for an error | -| `503 “Service unavailable“` | Series cardinality exceeds your plan's service quota | If **series cardinality** exceeds your plan's [adjustable service quotas](/influxdb/cloud/account-management/limits/#adjustable-service-quotas) | - -{{% /show-in %}} - -{{% show-in "v2" %}} - -- `204` **Success**: All request data was written to the bucket. 
-- `400` **Bad request**: The [line protocol](/influxdb/v2/reference/syntax/line-protocol/) data in the request was malformed. - The response body contains the first malformed line in the data. All request data was rejected and not written. -- `401` **Unauthorized**: May indicate one of the following: - - [`Authorization: Token` header](/influxdb/v2/api-guide/api_intro/#authentication) is missing or malformed. - - [API token](/influxdb/v2/api-guide/api_intro/#authentication) value is missing from the header. - - API token does not have sufficient permissions to write to the organization and the bucket. For more information about token types and permissions, see [Manage API tokens](/influxdb/v2/admin/tokens/). -- `404` **Not found**: A requested resource (e.g. an organization or bucket) was not found. The response body contains the requested resource type, e.g. "organization", and resource name. -- `413` **Request entity too large**: All request data was rejected and not written. InfluxDB OSS only returns this error if the [Go (golang) `ioutil.ReadAll()`](https://pkg.go.dev/io/ioutil#ReadAll) function raises an error. -- `500` **Internal server error**: Default HTTP status for an error. -- `503` **Service unavailable**: Server is temporarily unavailable to accept writes. The `Retry-After` header describes when to try the write again. - -{{% /show-in %}} - -The `message` property of the response body may contain additional details about the error. -If some of your data did not write to the bucket, see how to [troubleshoot rejected points](#troubleshoot-rejected-points). - -{{% show-in "cloud,cloud-serverless" %}} - -### Troubleshoot partial writes - -Because writes are asynchronous, they may fail partially or completely even though InfluxDB returns an HTTP `2xx` status code for a valid request. -For example, a partial write may occur when InfluxDB writes all points that conform to the bucket schema, but rejects points that have the wrong data type in a field. 
-To check for writes that fail asynchronously, create a [task](/influxdb/cloud/process-data/manage-tasks/) to [check the _monitoring bucket for rejected points](#review-rejected-points). -To resolve partial writes and rejected points, see [troubleshoot failures](#troubleshoot-failures). - -{{% /show-in %}} - -## Troubleshoot failures - -{{% show-in "v2" %}} - -If you notice data is missing in your bucket, do the following: - -- Check the `message` property in the response body for details about the error. -- If the `message` describes a field error, [troubleshoot rejected points](#troubleshoot-rejected-points). -- Verify all lines contain valid syntax ([line protocol](/influxdb/v2/reference/syntax/line-protocol/) or [CSV](/influxdb/v2/reference/syntax/annotated-csv/)). -- Verify the timestamps match the [precision parameter](/influxdb/v2/write-data/#timestamp-precision). -- Minimize payload size and network errors by [optimizing writes](/influxdb/v2/write-data/best-practices/optimize-writes/). - -{{% /show-in %}} - -{{% show-in "cloud,cloud-serverless" %}} -If you notice data is missing in your bucket, do the following: - -- Check the `message` property in the response body for details about the error--for example, `partial write error` indicates [rejected points](#troubleshoot-rejected-points). -- Check for [rejected points](#troubleshoot-rejected-points) in your organization's `_monitoring` bucket. -- Verify all lines contain valid syntax ([line protocol](/influxdb/cloud/reference/syntax/line-protocol/) or [CSV](/influxdb/cloud/reference/syntax/annotated-csv/)). See how to [find parsing errors](#find-parsing-errors). -- Verify the data types match the [series](/influxdb/cloud/reference/key-concepts/data-elements/#series) or [bucket schema](/influxdb/cloud/admin/buckets/bucket-schema/). See how to resolve [explicit schema rejections](#resolve-explicit-schema-rejections). 
-- Verify the timestamps match the [precision parameter](/influxdb/cloud/write-data/#timestamp-precision). -- Minimize payload size and network errors by [optimizing writes](/influxdb/cloud/write-data/best-practices/optimize-writes/). -{{% /show-in %}} - -## Troubleshoot rejected points - -{{% show-in "v2" %}} - -InfluxDB rejects points for the following reasons: - -- The **batch** contains another point with the same series, but one of the fields has a different value type. -- The **bucket** contains another point with the same series, but one of the fields has a different value type. - -Check for [field type](/influxdb/v2/reference/key-concepts/data-elements/#field-value) differences between the missing data point and other points that have the same [series](/influxdb/v2/reference/key-concepts/data-elements/#series)--for example, did you attempt to write `string` data to an `int` field? - -{{% /show-in %}} - -{{% show-in "cloud,cloud-serverless" %}} - -InfluxDB may have rejected points even if the HTTP request returned "Success". -InfluxDB logs rejected data points and associated errors to your organization's `_monitoring` bucket. - -- [Review rejected points](#review-rejected-points) - - [Find parsing errors](#find-parsing-errors) - - [Find data type conflicts and schema rejections](#find-data-type-conflicts-and-schema-rejections) -- [Resolve data type conflicts](#resolve-data-type-conflicts) -- [Resolve explicit schema rejections](#resolve-explicit-schema-rejections) - -### Review rejected points - -To get a log of rejected points, query the [`rejected_points` measurement](/influxdb/cloud/reference/internals/system-buckets/#_monitoring-bucket-schema) in your organization's `_monitoring` bucket. 
-To more quickly locate `rejected_points`, keep the following in mind: - -- If your line protocol batch contains single lines with multiple [fields](/influxdb/cloud/reference/syntax/line-protocol/#field-set), InfluxDB logs an entry for each point (each unique field) that is rejected. -- Each entry contains a `reason` tag that describes why the point was rejected. -- Entries for [data type conflicts and schema rejections](#find-data-type-conflicts-and-schema-rejections) have a `count` field value of `1`. -- Entries for [parsing errors](#find-parsing-errors) contain an `error` field (and don't contain a `count` field). - -#### rejected_points schema - -| Name | Value | -|:------ |:----- | -| `_measurement`| `rejected_points` | -| `_field` | [`count`](#find-data-type-conflicts-and-schema-rejections) or [`error`](#find-parsing-errors) | -| `_value` | [`1`](#find-data-type-conflicts-and-schema-rejections) or [error details](#find-parsing-errors) | -| `bucket` | ID of the bucket that rejected the point | -| `measurement` | Measurement name of the point | -| `field` | Name of the field that caused the rejection | -| `reason` | Brief description of the problem. See specific reasons in [data type conflicts and schema rejections](#find-data-type-conflicts-and-schema-rejections) | -| `gotType` | Received [field](/influxdb/cloud/reference/key-concepts/data-elements/#field-value) type: `Boolean`, `Float`, `Integer`, `String`, or `UnsignedInteger` | -| `wantType` | Expected [field](/influxdb/cloud/reference/key-concepts/data-elements/#field-value) type: `Boolean`, `Float`, `Integer`, `String`, or `UnsignedInteger` | -| `` | Time the rejected point was logged | - -#### Find parsing errors - -If InfluxDB can't parse a line (e.g. due to syntax problems), the response `message` might not provide details. -To find parsing error details, query `rejected_points` entries that contain the `error` field. 
- -```js -from(bucket: "_monitoring") - |> range(start: -1h) - |> filter(fn: (r) => r._measurement == "rejected_points") - |> filter(fn: (r) => r._field == "error") -``` - -#### Find data type conflicts and schema rejections - -To find `rejected_points` caused by [data type conflicts](#resolve-data-type-conflicts) or [schema rejections](#resolve-explicit-schema-rejections), -query for the `count` field. - -```js -from(bucket: "_monitoring") - |> range(start: -1h) - |> filter(fn: (r) => r._measurement == "rejected_points") - |> filter(fn: (r) => r._field == "count") -``` - -### Resolve data type conflicts - -When you write to a bucket that has the `implicit` schema type, InfluxDB compares new points to points that have the same [series](/influxdb/cloud/reference/key-concepts/data-elements/#series). -If a point has a field with a different data type than the series, InfluxDB rejects the point and logs a `rejected_points` entry. -The `rejected_points` entry contains one of the following reasons: - -| Reason | Meaning | -|:------ |:------- | -| `type conflict in batch write` | The **batch** contains another point with the same series, but one of the fields has a different value type. | -| `type conflict with existing data` | The **bucket** contains another point with the same series, but one of the fields has a different value type. | - -### Resolve explicit schema rejections - -If you write to a bucket with an -[explicit schema](/influxdb/cloud/admin/buckets/bucket-schema/), -the data must conform to the schema. Otherwise, InfluxDB rejects the data. 
- -Do the following to interpret explicit schema rejections: - -- [Detect a measurement mismatch](#detect-a-measurement-mismatch) -- [Detect a field type mismatch](#detect-a-field-type-mismatch) - -#### Detect a measurement mismatch - -InfluxDB rejects a point if the [measurement](/influxdb/cloud/reference/key-concepts/data-elements/#measurement) doesn't match the **name** of a [bucket schema](/influxdb/cloud/admin/buckets/bucket-schema/). -The `rejected_points` entry contains the following `reason` tag value: - -| Reason | Meaning | -|:------ |:------- -| `measurement not allowed by schema` | The **bucket** is configured to use explicit schemas and none of the schemas matches the **measurement** of the point. | - -Consider the following [line protocol](/influxdb/cloud/reference/syntax/line-protocol) data. - -``` -airSensors,sensorId=TLM0201 temperature=73.97,humidity=35.23,co=0.48 1637014074 -``` - -The line has an `airSensors` measurement and three fields (`temperature`, `humidity`, and `co`). -If you try to write this data to a bucket that has the [`explicit` schema type](/influxdb/cloud/admin/buckets/bucket-schema/) and doesn't have an `airSensors` schema, the `/api/v2/write` InfluxDB API returns an error and the following data: - -```json -{ - "code": "invalid", - "message": "3 out of 3 points rejected (check rejected_points in your _monitoring bucket for further information)" -} -``` - -InfluxDB logs three `rejected_points` entries, one for each field. 
- -| _measurement | _field | _value | field | measurement | reason | -|:----------------|:-------|:-------|:------------|:------------|:----------------------------------| -| rejected_points | count | 1 | humidity | airSensors | measurement not allowed by schema | -| rejected_points | count | 1 | co | airSensors | measurement not allowed by schema | -| rejected_points | count | 1 | temperature | airSensors | measurement not allowed by schema | - -#### Detect a field type mismatch - -InfluxDB rejects a point if the [measurement](/influxdb/cloud/reference/key-concepts/data-elements/#measurement) matches the **name** of a bucket schema and the field data types don't match. -The `rejected_points` entry contains the following reason: - -| Reason | Meaning | -|:------------------------------------|:-----------------------------------------------------------------------------------------------------| -| `field type mismatch with schema` | The point has the same measurement as a configured schema and they have different field value types. | - -Consider a bucket that has the following `airSensors` [`explicit bucket schema`](/influxdb/cloud/admin/buckets/bucket-schema/): - -```json -{ - "name": "airSensors", - "columns": [ - { - "name": "time", - "type": "timestamp" - }, - { - "name": "sensorId", - "type": "tag" - }, - { - "name": "temperature", - "type": "field", - "dataType": "float" - }, - { - "name": "humidity", - "type": "field", - "dataType": "float" - }, - { - "name": "co", - "type": "field", - "dataType": "float" - } - ] -} -``` - -The following [line protocol](/influxdb/cloud/reference/syntax/line-protocol/) data has an `airSensors` measurement, a `sensorId` tag, and three fields (`temperature`, `humidity`, and `co`). 
- -``` -airSensors,sensorId=L1 temperature=90.5,humidity=70.0,co=0.2 1637014074 -airSensors,sensorId=L1 temperature="90.5",humidity=70.0,co=0.2 1637014074 -``` - -In the example data above, the second point has a `temperature` field value with the _string_ data type. -Because the `airSensors` schema requires `temperature` to have the _float_ data type, -InfluxDB returns a `400` error and a message that describes the result: - -```json -{ - "code": "invalid", - "message": "partial write error (5 accepted): 1 out of 6 points rejected (check rejected_points in your _monitoring bucket for further information)" -} -``` - -InfluxDB logs the following `rejected_points` entry to the `_monitoring` bucket: - -| _measurement | _field | _value | bucket | field | gotType | measurement | reason | wantType | -|:------------------|:-------|:-------|:-------------------|:--------------|:---------|:------------|:----------------------------------|:---------| -| rejected_points | count | 1 | a7d5558b880a93da | temperature | String | airSensors | field type mismatch with schema | Float | - -{{% /show-in %}} \ No newline at end of file + \ No newline at end of file diff --git a/content/influxdb3/cloud-dedicated/_index.md b/content/influxdb3/cloud-dedicated/_index.md index c2708a8c2..e8bfb23f6 100644 --- a/content/influxdb3/cloud-dedicated/_index.md +++ b/content/influxdb3/cloud-dedicated/_index.md @@ -18,7 +18,7 @@ The InfluxDB time series platform is designed to handle high write and query loa Learn how to use and leverage InfluxDB Cloud Dedicated for your specific time series use case. 
-Request an InfluxDB Cloud Dedicated cluster +Run an {{% product-name %}} proof of concept (PoC) Get started with InfluxDB Cloud Dedicated ## InfluxDB 3 diff --git a/content/influxdb3/cloud-dedicated/admin/account/_index.md b/content/influxdb3/cloud-dedicated/admin/account/_index.md new file mode 100644 index 000000000..852e78438 --- /dev/null +++ b/content/influxdb3/cloud-dedicated/admin/account/_index.md @@ -0,0 +1,43 @@ +--- +title: View account information +seotitle: View {{% product-name %}} account information +description: > + Use the Admin UI for {{% product-name %}} to view information for your {{% product-name omit="InfluxDB " %}} account. + Your {{% product-name %}} account is a collection of {{% product-name omit="Clustered "%}} clusters and associated resources. +menu: + influxdb3_cloud_dedicated: + parent: Administer InfluxDB Cloud +weight: 99 +influxdb3/cloud-dedicated/tags: [clusters] +--- + +Use the Admin UI for {{% product-name %}} to view information for your {{% product-name omit="InfluxDB " %}} account. +Your {{% product-name %}} account is a collection of {{% product-name omit="Clustered "%}} clusters and associated resources. + +- [Access the Admin UI](#access-the-admin-ui) +- [View account information](#view-account-information) +- [View cluster information](#view-cluster-information) + - [Access operational dashboards](#access-operational-dashboards) +- [Administer management tokens](#administer-management-tokens) + +## Access the Admin UI + +1. To access the {{< product-name >}} Admin UI, visit the following URL in your browser: + +
+   https://console.influxdata.com
+   
+ +2. Use the credentials provided by InfluxData to log into the Admin UI. + If you don't have login credentials, [contact InfluxData support](https://support.influxdata.com). + +## View account information + +After you log in to the Admin UI, the Account Management portal displays the following information about your account: + +- Account ID +- Contract status +- Contract start date +- The [list of clusters](/influxdb3/cloud-dedicated/admin/clusters/list/?t=admin-ui) associated with the account + +{{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-account-info.png" alt="InfluxDB Cloud Dedicated Admin UI account information" />}} diff --git a/content/influxdb3/cloud-dedicated/admin/clusters/get.md b/content/influxdb3/cloud-dedicated/admin/clusters/get.md index f75415775..1e8fcec38 100644 --- a/content/influxdb3/cloud-dedicated/admin/clusters/get.md +++ b/content/influxdb3/cloud-dedicated/admin/clusters/get.md @@ -1,7 +1,7 @@ --- title: Get cluster information description: > - Use the + Use the Admin UI or the [`influxctl cluster get ` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/cluster/get/) to view information about your InfluxDB Cloud Dedicated cluster. menu: influxdb3_cloud_dedicated: @@ -13,8 +13,55 @@ list_code_example: | ``` --- -Use the [`influxctl cluster get` CLI command](/influxdb3/cloud-dedicated/reference/cli/influxctl/get/) -to view information about your {{< product-name omit=" Clustered" >}} cluster. +Use the Admin UI or the [`influxctl cluster get` CLI command](/influxdb3/cloud-dedicated/reference/cli/influxctl/get/) to view information about your +{{< product-name omit=" Clustered" >}} cluster, including: + +- Cluster ID +- Cluster name +- Cluster URL +- Cluster status +- Cluster size (standard or custom) + +{{< tabs-wrapper >}} +{{% tabs %}} +[Admin UI](#) +[influxctl](#) +{{% /tabs %}} +{{% tab-content %}} +## Access the Cloud Dedicated Admin UI + +1. 
To access the {{< product-name >}} Admin UI, visit the following URL in your browser: + +
+   https://console.influxdata.com
+   
+ +2. Use the credentials provided by InfluxData to log into the Admin UI. + If you don't have login credentials, [contact InfluxData support](https://support.influxdata.com). + + After you log in, the Account Management portal displays [account information](/influxdb3/cloud-dedicated/admin/account/) + and lists all clusters associated with your account. +3. **Search** for the cluster or use the sort button and column headers to sort the cluster list and find the cluster. + +### View cluster details + +The cluster list displays the following cluster details: + +- Cluster ID and name +- Status (ready, provisioning, etc.) +- Size (standard or custom) +- URL endpoint + +### Cluster management tools + +The options button (3 vertical dots) to the right of any cluster provides additional tools for cluster management: + +- Copy Cluster ID +- Copy Cluster URL +- Observe in Grafana _(only if your cluster has enabled operational dashboards. For more information, see how to [monitor your cluster](/influxdb3/cloud-dedicated/admin/monitor-your-cluster/).)_ + +{{% /tab-content %}} +{{% tab-content %}} 1. If you haven't already, [download and install the `influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/#download-and-install-influxctl), and then [configure a connection profile](/influxdb3/cloud-dedicated/reference/cli/influxctl/#configure-connection-profiles) for your cluster. 2. 
Run `influxctl cluster get` with the following: @@ -74,3 +121,5 @@ The output is the cluster as a JSON object that includes additional fields such "category": 1 } ``` +{{% /tab-content %}} +{{< /tabs-wrapper >}} diff --git a/content/influxdb3/cloud-dedicated/admin/clusters/list.md b/content/influxdb3/cloud-dedicated/admin/clusters/list.md index ccf9c2e7f..42f560d99 100644 --- a/content/influxdb3/cloud-dedicated/admin/clusters/list.md +++ b/content/influxdb3/cloud-dedicated/admin/clusters/list.md @@ -1,7 +1,7 @@ --- title: List clusters description: > - Use the [`influxctl cluster list` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/cluster/list/) + Use the Admin UI or the [`influxctl cluster list` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/cluster/list/) to view information about InfluxDB Cloud Dedicated clusters associated with your account ID. menu: influxdb3_cloud_dedicated: @@ -15,10 +15,45 @@ aliases: - /influxdb3/cloud-dedicated/admin/clusters/list/ --- -Use the [`influxctl cluster list` CLI command](/influxdb3/cloud-dedicated/reference/cli/influxctl/list/) -view information about all {{< product-name omit=" Clustered" >}} clusters associated with your account ID. +Use the Admin UI or the [`influxctl cluster list` CLI command](/influxdb3/cloud-dedicated/reference/cli/influxctl/list/) +to view information about all {{< product-name omit=" Clustered" >}} clusters associated with your account ID. + +{{< tabs-wrapper >}} +{{% tabs %}} +[Admin UI](#) +[influxctl](#) +{{% /tabs %}} +{{% tab-content %}} +## Access the Cloud Dedicated Admin UI + +1. To access the {{< product-name >}} Admin UI, visit the following URL in your browser: + +
+   https://console.influxdata.com
+   
+ +2. Use the credentials provided by InfluxData to log into the Admin UI. + If you don't have login credentials, [contact InfluxData support](https://support.influxdata.com). + + After you log in, the Account Management portal displays [account information](/influxdb3/cloud-dedicated/admin/account/) + and lists all clusters associated with your account. +3. You can **Search** clusters by name or ID to filter the list and use the sort button and column headers to sort the list. + +### View cluster details + +The cluster list displays the following cluster details: + +- Cluster ID and name +- Status (ready, provisioning, etc.) +- Size (standard or custom) +- URL endpoint +{{% /tab-content %}} +{{% tab-content %}} + +## Use the CLI 1. If you haven't already, [download and install the `influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/#download-and-install-influxctl), and then [configure a connection profile](/influxdb3/cloud-dedicated/reference/cli/influxctl/#configure-connection-profiles) for your cluster. + 2. Run `influxctl cluster list` with the following: - _Optional_: [Output format](#output-formats) @@ -69,3 +104,6 @@ The output is a JSON array of cluster objects that include additional fields suc } ] ``` +{{% /tab-content %}} +{{< /tabs-wrapper >}} + diff --git a/content/influxdb3/cloud-dedicated/admin/custom-partitions/define-custom-partitions.md b/content/influxdb3/cloud-dedicated/admin/custom-partitions/define-custom-partitions.md index 6a8db4cae..9edb5b012 100644 --- a/content/influxdb3/cloud-dedicated/admin/custom-partitions/define-custom-partitions.md +++ b/content/influxdb3/cloud-dedicated/admin/custom-partitions/define-custom-partitions.md @@ -14,5 +14,6 @@ source: /shared/v3-distributed-admin-custom-partitions/define-custom-partitions. 
--- diff --git a/content/influxdb3/cloud-dedicated/admin/databases/_index.md b/content/influxdb3/cloud-dedicated/admin/databases/_index.md index 5f0c04b84..07a934c57 100644 --- a/content/influxdb3/cloud-dedicated/admin/databases/_index.md +++ b/content/influxdb3/cloud-dedicated/admin/databases/_index.md @@ -52,7 +52,7 @@ never be removed by the retention enforcement service. You can customize [table (measurement) limits](#table-limit) and [table column limits](#column-limit) when you [create](#create-a-database) or -[update a database](#update-a-database) in {{< product-name >}}. +[update a database](#update-a-database) in {{% product-name %}}. ### Table limit diff --git a/content/influxdb3/cloud-dedicated/admin/databases/create.md b/content/influxdb3/cloud-dedicated/admin/databases/create.md index b6a3df965..4d3e26f7f 100644 --- a/content/influxdb3/cloud-dedicated/admin/databases/create.md +++ b/content/influxdb3/cloud-dedicated/admin/databases/create.md @@ -1,10 +1,10 @@ --- title: Create a database description: > - Use the [`influxctl database create` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/create/) + Use the Admin UI, the [`influxctl database create` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/create/), or the [Management HTTP API](/influxdb3/cloud-dedicated/api/management/) to create a new InfluxDB database in your InfluxDB Cloud Dedicated cluster. - Provide a database name and an optional retention period. + You can create a database with an optional retention period and custom partitioning. 
menu: influxdb3_cloud_dedicated: parent: Manage databases @@ -12,57 +12,19 @@ weight: 201 list_code_example: | ##### CLI - ```sh + ```bash influxctl database create \ --retention-period 30d \ - --max-tables 500 \ - --max-columns 250 \ DATABASE_NAME ``` ##### API - ```sh + ```bash curl \ --location "https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases" \ - --request POST \ - --header "Accept: application/json" \ - --header 'Content-Type: application/json' \ --header "Authorization: Bearer MANAGEMENT_TOKEN" \ - --data '{ - "name": "'DATABASE_NAME'", - "maxTables": 500, - "maxColumnsPerTable": 250, - "retentionPeriod": 2592000000000, - "partitionTemplate": [ - { - "type": "tag", - "value": "TAG_KEY_1" - }, - { - "type": "tag", - "value": "TAG_KEY_2" - }, - { - "type": "bucket", - "value": { - "tagName": "TAG_KEY_3", - "numberOfBuckets": 100 - } - }, - { - "type": "bucket", - "value": { - "tagName": "TAG_KEY_4", - "numberOfBuckets": 300 - } - }, - { - "type": "time", - "value": "%Y-%m-%d" - } - ] - }' + --json '{ "name": "DATABASE_NAME" }' ``` related: - /influxdb3/cloud-dedicated/reference/cli/influxctl/database/create/ @@ -70,8 +32,116 @@ related: - /influxdb3/cloud-dedicated/reference/api/ --- -Use the [`influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/) +Use the Admin UI, the [`influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/), or the [Management HTTP API](/influxdb3/cloud-dedicated/api/management/) to create a database in your {{< product-name omit=" Clustered" >}} cluster. +You can create a database with an optional retention period and custom partitioning. 
+ +- [Create a database](#create-a-database) +- [Create a database with custom partitioning](#create-a-database-with-custom-partitioning) +- [Partition template requirements and guidelines](#partition-template-requirements-and-guidelines) +- [Database attributes](#database-attributes) + - [Retention period syntax](#retention-period-syntax) + - [Database naming restrictions](#database-naming-restrictions) + - [InfluxQL DBRP naming convention](#influxql-dbrp-naming-convention) + - [Table and column limits](#table-and-column-limits) + +## Create a database + +{{< tabs-wrapper >}} +{{% tabs %}} +[Admin UI](#) +[influxctl](#) +[Management API](#) +{{% /tabs %}} + +{{% tab-content %}} + +1. Open the {{< product-name >}} Admin UI at +
+   https://console.influxdata.com
+   
+ +2. Use the credentials provided by InfluxData to log into the Admin UI. + If you don't have login credentials, [contact InfluxData support](https://support.influxdata.com). + +3. In the cluster list, find and click the cluster you want to create a database in. You + can sort on column headers or use the **Search** field to find a specific cluster. + +4. Click the **New Database** button above the database list. + The **Create Database** dialog displays. + + Create database dialog + +5. In the **Create Database** dialog, provide the following information: + - **Database name**: The name of the database to create. See [Database naming restrictions](#database-naming-restrictions). + - **Retention period**: The retention period for the database. See [Retention period syntax](#retention-period-syntax). + - **Max tables**: The maximum number of tables (measurements) allowed in the database. Default is 500. + - **Max columns per table**: The maximum number of columns allowed in each table (measurement). Default is 250. + +6. Click the **Create Database** button to create the database. + The new database displays in the list of databases for the cluster. +{{% /tab-content %}} + +{{% tab-content %}} + +1. If you haven't already, [download and install the `influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/#download-and-install-influxctl), and then [configure an `influxctl` connection profile](/influxdb3/cloud-dedicated/reference/cli/influxctl/#configure-connection-profiles) for your cluster. + +2. Run the `influxctl database create` command: + +{{% code-placeholders "DATABASE_NAME|30d" %}} +```bash +influxctl database create \ + --retention-period 30d \ + DATABASE_NAME +``` +{{% /code-placeholders %}} + +Replace {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}} with your desired database name. 
+{{% /tab-content %}} + +{{% tab-content %}} + +_This example uses [cURL](https://curl.se/) to send a Management HTTP API request, but you can use any HTTP client._ + +1. If you haven't already, follow the instructions to [install cURL](https://everything.curl.dev/install/index.html) for your system. +2. In your terminal, use cURL to send a request to the following {{% product-name %}} endpoint: + +{{% api-endpoint method="POST" +endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases" +api-ref="/influxdb3/cloud-dedicated/api/management/#operation/CreateClusterDatabase" %}} + +{{% code-placeholders "ACCOUNT_ID|CLUSTER_ID|MANAGEMENT_TOKEN|DATABASE_NAME" %}} +```bash +curl \ + --location "https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases" \ + --header "Authorization: Bearer MANAGEMENT_TOKEN" \ + --json '{ + "name": "DATABASE_NAME" + }' +``` +{{% /code-placeholders %}} + +Replace the following: + +- {{% code-placeholder-key %}}`ACCOUNT_ID`{{% /code-placeholder-key %}}: the [account](/influxdb3/cloud-dedicated/admin/account/) ID for the cluster _(list details via the [Admin UI](/influxdb3/cloud-dedicated/admin/clusters/list/) or [CLI](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json))_ +- {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: the [cluster](/influxdb3/cloud-dedicated/admin/clusters/) ID _(list details via the [Admin UI](/influxdb3/cloud-dedicated/admin/clusters/list/) or [CLI](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json))_. 
+- {{% code-placeholder-key %}}`MANAGEMENT_TOKEN`{{% /code-placeholder-key %}}: a valid [management token](/influxdb3/cloud-dedicated/admin/tokens/management/) for your {{% product-name %}} cluster
+- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: name for the new database
+{{% /tab-content %}}
+{{< /tabs-wrapper >}}
+
+Partitioning defaults to `%Y-%m-%d` (daily).
+
+## Create a database with custom partitioning
+
+{{< product-name >}} lets you define a [custom partitioning](/influxdb3/cloud-dedicated/admin/custom-partitions/) strategy for each database and table.
+A _partition_ is a logical grouping of data stored in [Apache Parquet](https://parquet.apache.org/) format in the InfluxDB 3 storage engine.
+By default, data is partitioned by day,
+but, depending on your schema and workload, customizing the partitioning
+strategy can improve query performance.
+
+To use custom partitioning, you define a [partition template](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/).
+If a table doesn't have a custom partition template, it inherits the database's template.

{{< tabs-wrapper >}}
{{% tabs %}}
@@ -79,41 +149,30 @@ or the [Management HTTP API](/influxdb3/cloud-dedicated/api/management/) to crea
[Management API](#)
{{% /tabs %}}
{{% tab-content %}}
+
+1. If you haven't already, [download and install the `influxctl` CLI](/influxdb3/cloud-dedicated/get-started/setup/#download-install-and-configure-the-influxctl-cli).
+2. Use the following `influxctl database create` command flags to specify the
+[partition template parts](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#tag-part-templates):
- 
-Use the [`influxctl database create` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/create/)
-to create a database in your {{< product-name omit=" Clustered" >}} cluster. 
+
+   - `--template-timeformat`: A [Rust strftime date and time](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#time-part-templates)
+     string that specifies the time part in the partition template and determines
+     the time interval to partition by.
+     Use one of the following:
+
+     - `%Y-%m-%d` (daily)
+     - `%Y-%m` (monthly)
+     - `%Y` (annually)
+   - `--template-tag`: An [InfluxDB tag](/influxdb3/cloud-dedicated/reference/glossary/#tag)
+     to use in the partition template.
+   - `--template-tag-bucket`: An [InfluxDB tag](/influxdb3/cloud-dedicated/reference/glossary/#tag)
+     and number of "buckets" to group tag values into.
+     Provide the tag key and the number of buckets to bucket tag values into
+     separated by a comma: `tagKey,N`.
-1. If you haven't already, [download and install the `influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/#download-and-install-influxctl), and then [configure an `influxctl` connection profile](/influxdb3/cloud-dedicated/reference/cli/influxctl/#configure-connection-profiles) for your cluster.
-
-2. In your terminal, run the `influxctl database create` command and provide the following:
-
-   - _Optional_: Database [retention period](/influxdb3/cloud-dedicated/admin/databases/#retention-periods) Default is `infinite` (`0`).
-   - _Optional_: Database table (measurement) limit. Default is `500`.
-   - _Optional_: Database column limit. Default is `250`.
-   - _Optional_: [InfluxDB tags](/influxdb3/cloud-dedicated/reference/glossary/#tag)
-     to use in the partition template. Limit is 7 total tags or tag buckets.
-   - _Optional_: [InfluxDB tag buckets](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#tag-bucket-part-templates)
-     to use in the partition template. Limit is 7 total tags or tag buckets. 
- - _Optional_: A [Rust strftime date and time string](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#time-part-templates) - that specifies the time format in the partition template and determines - the time interval to partition by. Default is `%Y-%m-%d`. - - Database name _(see [Database naming restrictions](#database-naming-restrictions))_ - - > [!Note] - > _{{< product-name >}} supports up to 7 total tags or tag buckets in the partition template._ - - - - -{{% code-placeholders "DATABASE_NAME|30d|500|100|300|(TAG_KEY(_\d)?)" %}} - -```sh +{{% code-placeholders "DATABASE_NAME|30d|(TAG_KEY(_\d)?)|100|300" %}} +```bash influxctl database create \ --retention-period 30d \ - --max-tables 500 \ - --max-columns 250 \ --template-tag TAG_KEY_1 \ --template-tag TAG_KEY_2 \ --template-tag-bucket TAG_KEY_3,100 \ @@ -121,233 +180,128 @@ influxctl database create \ --template-timeformat '%Y-%m-%d' \ DATABASE_NAME ``` - {{% /code-placeholders %}} -Replace the following in your command: +Replace the following: -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: your {{% product-name %}} [database](/influxdb3/cloud-dedicated/admin/databases/) -- {{% code-placeholder-key %}}`TAG_KEY_1`, `TAG_KEY_2`, `TAG_KEY_3`, and `TAG_KEY_4`{{% /code-placeholder-key %}}: [tag](/influxdb3/cloud-dedicated/reference/glossary/#tag) keys from your data - -## Database attributes - -- [Database attributes](#database-attributes) - - [Retention period syntax (influxctl CLI)](#retention-period-syntax-influxctl-cli) - - [Custom partitioning (influxctl CLI)](#custom-partitioning-influxctl-cli) -- [Database attributes](#database-attributes-1) - - [Retention period syntax (Management API)](#retention-period-syntax-management-api) - - [Custom partitioning (Management API)](#custom-partitioning-management-api) - - [Database naming restrictions](#database-naming-restrictions) - - [InfluxQL DBRP naming convention](#influxql-dbrp-naming-convention) - - [Table 
and column limits](#table-and-column-limits) - -### Retention period syntax (influxctl CLI) - -Use the `--retention-period` flag to define the -[retention period](/influxdb3/cloud-dedicated/admin/databases/#retention-periods) -for the database. -The retention period value is a time duration value made up of a numeric value -plus a duration unit. -For example, `30d` means 30 days. -A zero duration (`0d`) retention period is infinite and data won't expire. -The retention period value cannot be negative or contain whitespace. - -{{< flex >}} -{{% flex-content "half" %}} - -#### Valid durations units include - -- **m**: minute -- **h**: hour -- **d**: day -- **w**: week -- **mo**: month -- **y**: year - -{{% /flex-content %}} -{{% flex-content "half" %}} - -#### Example retention period values - -- `0d`: infinite/none -- `3d`: 3 days -- `6w`: 6 weeks -- `1mo`: 1 month (30 days) -- `1y`: 1 year -- `30d30d`: 60 days -- `2.5d`: 60 hours - -{{% /flex-content %}} -{{< /flex >}} - -### Custom partitioning (influxctl CLI) - -{{< product-name >}} lets you define a custom partitioning strategy for each database. -A _partition_ is a logical grouping of data stored in [Apache Parquet](https://parquet.apache.org/) -format in the InfluxDB 3 storage engine. By default, data is partitioned by day, -but, depending on your schema and workload, customizing the partitioning -strategy can improve query performance. - -Use the `--template-tag`, `--template-tag-bucket`, and `--template-timeformat` -flags to define partition template parts used to generate partition keys for the database. - -For more information, see [Manage data partitioning](/influxdb3/cloud-dedicated/admin/custom-partitions/). 
- - +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to create +- {{% code-placeholder-key %}}`TAG_KEY_1`, `TAG_KEY_2`{{% /code-placeholder-key %}}: [tag](/influxdb3/cloud-dedicated/reference/glossary/#tag) keys to partition by +- {{% code-placeholder-key %}}`TAG_KEY_3`, `TAG_KEY_4`{{% /code-placeholder-key %}}: [tag](/influxdb3/cloud-dedicated/reference/glossary/#tag) keys for bucketed partitioning +- {{% code-placeholder-key %}}`100`, `300`{{% /code-placeholder-key %}}: number of buckets to group tag values into +- {{% code-placeholder-key %}}`'%Y-%m-%d'`{{% /code-placeholder-key %}}: [Rust strftime date and time](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#time-part-templates) string that specifies the time part in the partition template {{% /tab-content %}} {{% tab-content %}} - + _This example uses [cURL](https://curl.se/) to send a Management HTTP API request, but you can use any HTTP client._ 1. If you haven't already, follow the instructions to [install cURL](https://everything.curl.dev/install/index.html) for your system. 2. 
In your terminal, use cURL to send a request to the following {{% product-name %}} endpoint: - {{% api-endpoint endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases" method="post" api-ref="/influxdb3/cloud-dedicated/api/management/#operation/CreateClusterDatabase" %}} +{{% api-endpoint method="POST" +endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases" +api-ref="/influxdb3/cloud-dedicated/api/management/#operation/CreateClusterDatabase" %}} - In the URL, provide the following credentials: +In the request body, include the `partitionTemplate` property and specify the [partition template parts](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#tag-part-templates) as an array of objects--for example: - - `ACCOUNT_ID`: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the cluster belongs to _(see how to [list cluster details](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json))_. - - `CLUSTER_ID`: The ID of the [cluster](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that you want to manage _(see how to [list cluster details](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json))_. - - Provide the following request headers: - - - `Accept: application/json` to ensure the response body is JSON content - - `Content-Type: application/json` to indicate the request body is JSON content - - `Authorization: Bearer` and a [Management API token](/influxdb3/cloud-dedicated/admin/tokens/management/) for your cluster _(see how to [create a management token](/influxdb3/cloud-dedicated/admin/tokens/management/) for Management API requests)_. - - In the request body, provide the following parameters: - - - _Optional:_ Database [retention period](/influxdb3/cloud-dedicated/admin/databases/#retention-periods) in nanoseconds. 
- Default is `0` (infinite). - - _Optional_: Database table (measurement) limit. Default is `500`. - - _Optional_: Database column limit. Default is `250`. - - _Optional_: [InfluxDB tags](/influxdb3/cloud-dedicated/reference/glossary/#tag) - to use in the partition template. Limit is 7 total tags or tag buckets. - - _Optional_: [InfluxDB tag buckets](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#tag-bucket-part-templates) - to use in the partition template. Limit is 7 total tags or tag buckets. - - _Optional_: A supported [Rust strftime date and time string](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#time-part-templates) - that specifies the time format in the partition template and determines - the time interval to partition by. Default is `%Y-%m-%d`. - - Database name _(see [Database naming restrictions](#database-naming-restrictions))_. - - > [!Note] - > _{{< product-name >}} supports up to 7 total tags or tag buckets in the partition template._ - -The following example shows how to use the Management API to create a database with custom partitioning: - - - - -{{% code-placeholders "DATABASE_NAME|2592000000000|500|100|300|250|ACCOUNT_ID|CLUSTER_ID|MANAGEMENT_TOKEN|(TAG_KEY(_\d)?)" %}} - -```sh +{{% code-placeholders "ACCOUNT_ID|CLUSTER_ID|DATABASE_NAME|MANAGEMENT_TOKEN|(TAG_KEY(_\d)?)|100|300|%Y-%m-%d" %}} +```bash curl \ - --location "https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases" \ - --request POST \ - --header "Accept: application/json" \ - --header 'Content-Type: application/json' \ - --header "Authorization: Bearer MANAGEMENT_TOKEN" \ - --data '{ - "name": "'DATABASE_NAME'", - "maxTables": 500, - "maxColumnsPerTable": 250, - "retentionPeriod": 2592000000000, - "partitionTemplate": [ - { - "type": "tag", - "value": "TAG_KEY_1" - }, - { - "type": "tag", - "value": "TAG_KEY_2" - }, - { - "type": "bucket", - "value": { - "tagName": "TAG_KEY_3", - "numberOfBuckets": 
100 - } - }, - { - "type": "bucket", - "value": { - "tagName": "TAG_KEY_4", - "numberOfBuckets": 300 - } - }, - { - "type": "time", - "value": "%Y-%m-%d" - } - ] - }' + --location "https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases" \ + --header "Authorization: Bearer MANAGEMENT_TOKEN" \ + --json '{ + "name": "DATABASE_NAME", + "maxTables": 500, + "maxColumnsPerTable": 250, + "retentionPeriod": 2592000000000, + "partitionTemplate": [ + { "type": "tag", "value": "TAG_KEY_1" }, + { "type": "tag", "value": "TAG_KEY_2" }, + { "type": "bucket", "value": { "tagName": "TAG_KEY_3", "numberOfBuckets": 100 } }, + { "type": "bucket", "value": { "tagName": "TAG_KEY_4", "numberOfBuckets": 300 } }, + { "type": "time", "value": "%Y-%m-%d" } + ] + }' ``` - {{% /code-placeholders %}} -Replace the following in your request: +Replace the following: -- {{% code-placeholder-key %}}`ACCOUNT_ID`{{% /code-placeholder-key %}}: the ID of the {{% product-name %}} [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) to create the database for -- {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: the ID of the {{% product-name %}} [cluster](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) to create the database for -- {{% code-placeholder-key %}}`MANAGEMENT TOKEN`{{% /code-placeholder-key %}}: a [management token](/influxdb3/cloud-dedicated/admin/tokens/management/) for your {{% product-name %}} cluster -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: your {{% product-name %}} [database](/influxdb3/cloud-dedicated/admin/databases/) -- {{% code-placeholder-key %}}`TAG_KEY_1`, `TAG_KEY_2`, `TAG_KEY_3`, and `TAG_KEY_4`{{% /code-placeholder-key %}}: [tag](/influxdb3/cloud-dedicated/reference/glossary/#tag) keys from your data +- {{% code-placeholder-key %}}`ACCOUNT_ID`{{% /code-placeholder-key %}}: the 
[account](/influxdb3/cloud-dedicated/admin/account/) ID for the cluster _(list details via the [Admin UI](/influxdb3/cloud-dedicated/admin/clusters/list/) or [CLI](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json))_ +- {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: the [cluster](/influxdb3/cloud-dedicated/admin/clusters/) ID _(list details via the [Admin UI](/influxdb3/cloud-dedicated/admin/clusters/list/) or [CLI](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json))_. +- {{% code-placeholder-key %}}`MANAGEMENT_TOKEN`{{% /code-placeholder-key %}}: a valid [management token](/influxdb3/cloud-dedicated/admin/tokens/management/) for your {{% product-name %}} cluster +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: name for the new database +- {{% code-placeholder-key %}}`TAG_KEY_1`, `TAG_KEY_2`{{% /code-placeholder-key %}}: [tag](/influxdb3/cloud-dedicated/reference/glossary/#tag) keys to partition by +- {{% code-placeholder-key %}}`TAG_KEY_3`, `TAG_KEY_4`{{% /code-placeholder-key %}}: [tag](/influxdb3/cloud-dedicated/reference/glossary/#tag) keys for bucketed partitioning +- {{% code-placeholder-key %}}`100`, `300`{{% /code-placeholder-key %}}: number of buckets to group tag values into +- {{% code-placeholder-key %}}`'%Y-%m-%d'`{{% /code-placeholder-key %}}: [Rust strftime date and time](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#time-part-templates) string that specifies the time part in the partition template + +{{% /tab-content %}} +{{< /tabs-wrapper >}} + +## Partition template requirements and guidelines + +Always specify 1 time part in your template. +A template has a maximum of 8 parts: 1 time part and up to 7 total tag and tag bucket parts. + +For more information about partition template requirements and restrictions, see [Partition templates](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/). 
+ +> [!Warning] +> #### Partition templates can only be applied on create +> +> You can only apply a partition template when creating a database. +> You can't update a partition template on an existing database. ## Database attributes -- [Database attributes](#database-attributes) - - [Retention period syntax (influxctl CLI)](#retention-period-syntax-influxctl-cli) - - [Custom partitioning (influxctl CLI)](#custom-partitioning-influxctl-cli) -- [Database attributes](#database-attributes-1) - - [Retention period syntax (Management API)](#retention-period-syntax-management-api) - - [Custom partitioning (Management API)](#custom-partitioning-management-api) - - [Database naming restrictions](#database-naming-restrictions) - - [InfluxQL DBRP naming convention](#influxql-dbrp-naming-convention) - - [Table and column limits](#table-and-column-limits) +### Retention period syntax -### Retention period syntax (Management API) +Specify how long InfluxDB retains data before automatically removing it. -Use the `retentionPeriod` property to specify the -[retention period](/influxdb3/cloud-dedicated/admin/databases/#retention-periods) -for the database. -The retention period value is an integer (``) that represents the number of nanoseconds. -For example, `2592000000000` means 30 days. -A zero (`0`) retention period is infinite and data won't expire. -The retention period value cannot be negative or contain whitespace. +{{< tabs-wrapper >}} +{{% tabs %}} +[influxctl CLI](#) +[Management API](#) +{{% /tabs %}} -#### Example retention period values +{{% tab-content %}} +Use the `--retention-period` flag to define the retention period as a duration. +For example, `30d` means 30 days. A zero duration (`0d`) keeps data indefinitely. 
+{{< flex >}} +{{% flex-content "half" %}} +#### Valid duration units +- **m**: minute +- **h**: hour +- **d**: day +- **w**: week +- **mo**: month +- **y**: year +{{% /flex-content %}} + +{{% flex-content "half" %}} +#### Example values +- `0d`: infinite/none +- `3d`: 3 days +- `6w`: 6 weeks +- `1mo`: 1 month (30 days) +- `1y`: 1 year +{{% /flex-content %}} +{{< /flex >}} +{{% /tab-content %}} + +{{% tab-content %}} +Use the `retentionPeriod` property to specify the retention period as nanoseconds. +For example, `2592000000000` means 30 days. A value of `0` keeps data indefinitely. + +#### Example values - `0`: infinite/none - `259200000000000`: 3 days - `2592000000000000`: 30 days - `31536000000000000`: 1 standard year (365 days) - -### Custom partitioning (Management API) - -{{< product-name >}} lets you define a custom partitioning strategy for each database. -A _partition_ is a logical grouping of data stored in [Apache Parquet](https://parquet.apache.org/) -format in the InfluxDB 3 storage engine. By default, data is partitioned by day, -but, depending on your schema and workload, customizing the partitioning -strategy can improve query performance. - -Use the [`partitionTemplate`](/influxdb3/cloud-dedicated/api/management/#operation/CreateClusterDatabase) -property to define an array of partition template parts used to generate -partition keys for the database. - -For more information, see [Manage data partitioning](/influxdb3/cloud-dedicated/admin/custom-partitions/). - - {{% /tab-content %}} {{< /tabs-wrapper >}} -> [!Warning] -> -> #### Partition templates can only be applied on create -> -> You can only apply a partition template when creating a database. -> You can't update a partition template on an existing database. - ### Database naming restrictions Database names must adhere to the following naming restrictions: @@ -365,11 +319,9 @@ and [retention policies](/influxdb/v1/concepts/glossary/#retention-policy-rp). 
In {{% product-name %}}, databases and retention policies have been merged into _databases_, where databases have a retention period, but retention policies are no longer part of the data model. -Because InfluxQL uses the 1.x data model, a database must be mapped to a v1 -database and retention policy (DBRP) to be queryable with InfluxQL. **When naming a database that you want to query with InfluxQL**, use the following -naming convention to automatically map v1 DBRP combinations to an {{% product-name %}} database: +naming convention: ```text database_name/retention_policy_name @@ -407,15 +359,13 @@ cluster in the following ways: {{< expand-wrapper >}} {{% expand "**May improve query performance** View more info" %}} - Schemas with many measurements that contain [focused sets of tags and fields](/influxdb3/cloud-dedicated/write-data/best-practices/schema-design/#design-for-performance) can make it easier for the query engine to identify what partitions contain the queried data, resulting in better query performance. - {{% /expand %}} -{{% expand "**More PUTs into object storage** View more info" %}} +{{% expand "**More PUTs into object storage** View more info" %}} By default, {{< product-name >}} partitions data by measurement and time range and stores each partition as a Parquet file in your cluster's object store. By increasing the number of measurements @@ -423,16 +373,14 @@ file in your cluster's object store. By increasing the number of measurements more `PUT` requests into your object store as InfluxDB creates more partitions. Each `PUT` request incurs a monetary cost and will increase the operating cost of your cluster. - {{% /expand %}} -{{% expand "**More work for the compactor** View more info" %}} +{{% expand "**More work for the compactor** View more info" %}} To optimize storage over time, your {{< product-name omit=" Clustered" >}} cluster contains a compactor that routinely compacts Parquet files in object storage. 
With more tables and partitions to compact, the compactor may need to be scaled (either vertically or horizontally) to keep up with demand, adding to the operating cost of your cluster. - {{% /expand %}} {{< /expand-wrapper >}} @@ -446,7 +394,6 @@ cluster in the following ways: {{< expand-wrapper >}} {{% expand "May adversely affect query performance" %}} - At query time, the InfluxDB query engine identifies what table contains the queried data and then evaluates each row in the table to match the conditions of the query. The more columns that are in each row, the longer it takes to evaluate each row. @@ -454,6 +401,5 @@ The more columns that are in each row, the longer it takes to evaluate each row. Through performance testing, InfluxData has identified 250 columns as the threshold beyond which query performance may be affected (depending on the shape of and data types in your schema). - {{% /expand %}} -{{< /expand-wrapper >}} +{{< /expand-wrapper >}} \ No newline at end of file diff --git a/content/influxdb3/cloud-dedicated/admin/databases/delete.md b/content/influxdb3/cloud-dedicated/admin/databases/delete.md index 0903085f2..fa59968db 100644 --- a/content/influxdb3/cloud-dedicated/admin/databases/delete.md +++ b/content/influxdb3/cloud-dedicated/admin/databases/delete.md @@ -1,7 +1,7 @@ --- title: Delete a database description: > - Use the [`influxctl database delete` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/delete/) + Use the Admin UI, the [`influxctl database delete` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/delete/), or the [Management HTTP API](/influxdb3/cloud-dedicated/api/management/) to delete a database from your InfluxDB Cloud Dedicated cluster. Provide the name of the database you want to delete. 
@@ -28,7 +28,7 @@ related: - /influxdb3/cloud-dedicated/reference/api/ --- -Use the [`influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/) +Use the Admin UI, the [`influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/), or the [Management HTTP API](/influxdb3/cloud-dedicated/api/management/) to delete a database from your {{< product-name omit=" Clustered" >}} cluster. @@ -47,10 +47,33 @@ to delete a database from your {{< product-name omit=" Clustered" >}} cluster. {{< tabs-wrapper >}} {{% tabs %}} +[Admin UI](#) [influxctl](#) [Management API](#) {{% /tabs %}} {{% tab-content %}} + +The InfluxDB Cloud Dedicated administrative UI includes a portal for +managing databases. + +1. To access the {{< product-name >}} Admin UI, visit the following URL in your browser: + +
+   https://console.influxdata.com
+   
+2. Use the credentials provided by InfluxData to log into the Admin UI. + If you don't have login credentials, [contact InfluxData support](https://support.influxdata.com). +3. In the cluster list, find the cluster that contains the database you want to delete. You + can sort on column headers or use the **Search** field to find a specific cluster. +4. Click the options button (three vertical dots) to the right of the database you want to delete. + The options menu displays. +5. In the options menu, click **Delete Database**. The **Delete Database** dialog displays. +6. In the **Delete Database** dialog, check the box to confirm that you "understand the risk of this action". +7. Click the **Delete Database** button to delete the database. + +{{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-delete-database.png" alt="Delete Database dialog" />}} +{{% /tab-content %}} +{{% tab-content %}} diff --git a/content/influxdb3/cloud-dedicated/admin/databases/list.md b/content/influxdb3/cloud-dedicated/admin/databases/list.md index ad3b34b5e..b696c7a0c 100644 --- a/content/influxdb3/cloud-dedicated/admin/databases/list.md +++ b/content/influxdb3/cloud-dedicated/admin/databases/list.md @@ -1,7 +1,7 @@ --- title: List databases description: > - Use the [`influxctl database list` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/list/) + Use the Admin UI, the [`influxctl database list` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/list/), or the [Management HTTP API](/influxdb3/cloud-dedicated/api/management/) to list databases in your InfluxDB Cloud Dedicated cluster. menu: influxdb3_cloud_dedicated: @@ -25,15 +25,57 @@ related: - /influxdb3/cloud-dedicated/reference/api/ --- -Use the [`influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/create/) -or the [Management HTTP API](/influxdb3/cloud-dedicated/api/management/) to create a database in your {{< product-name omit=" Clustered" >}} cluster.
+Use the Admin UI, the [`influxctl database list` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/list/), +or the [Management HTTP API](/influxdb3/cloud-dedicated/api/management/) to list databases in your {{< product-name omit=" Clustered" >}} cluster. {{< tabs-wrapper >}} {{% tabs %}} +[Admin UI](#) [influxctl](#) [Management API](#) {{% /tabs %}} {{% tab-content %}} +## Access the Cloud Dedicated Admin UI + +1. To access the {{< product-name >}} Admin UI, visit the following URL in your browser: + +
+   https://console.influxdata.com
+   
+ +2. Use the credentials provided by InfluxData to log into the Admin UI. + If you don't have login credentials, [contact InfluxData support](https://support.influxdata.com). + + After you log in, the Account Management portal displays [account information](/influxdb3/cloud-dedicated/admin/account/) + and lists all clusters associated with your account. +3. You can **Search** for clusters by name or ID to filter the list and use the sort button and column headers to sort the list. +4. Click the cluster row to view the list of databases associated with the cluster. + +The database list displays the following database details: + +- Name +- Database ID +- Max tables +- Max columns per table +- Retention period + +You can **Search** for databases by name or ID to filter the list and use the sort button and column headers to sort the list. + +{{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-list-databases.png" alt="List databases" />}} + +### Database management tools + +The options button (3 vertical dots) to the right of any database provides additional tools: + +- **Copy Database ID**: Copy the database ID to your clipboard +- **Set Retention Period**: Set the retention period for the database +- **Delete Database**: Delete the database + +### Manage database tables + +To view database details and manage database tables, click the database row in the list. 
+{{% /tab-content %}} +{{% tab-content %}} diff --git a/content/influxdb3/cloud-dedicated/admin/databases/update.md b/content/influxdb3/cloud-dedicated/admin/databases/update.md index f8fc40fff..de250dc3e 100644 --- a/content/influxdb3/cloud-dedicated/admin/databases/update.md +++ b/content/influxdb3/cloud-dedicated/admin/databases/update.md @@ -1,7 +1,7 @@ --- title: Update a database description: > - Use the [`influxctl database update` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/update/) + Use the Admin UI, the [`influxctl database update` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/update/), or the [Management HTTP API](/influxdb3/cloud-dedicated/api/management/) to update attributes for a database in your InfluxDB Cloud Dedicated cluster. Provide the database name and the attributes to update. @@ -38,15 +38,37 @@ related: - /influxdb3/cloud-dedicated/reference/api/ --- -Use the [`influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/create/) +Use the Admin UI, the [`influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/create/), or the [Management HTTP API](/influxdb3/cloud-dedicated/api/management/) to update attributes such as retention period, column limits, and table limits for a database in your {{< product-name omit=" Clustered" >}} cluster. {{< tabs-wrapper >}} {{% tabs %}} +[Admin UI](#) [influxctl](#) [Management API](#) {{% /tabs %}} {{% tab-content %}} + +The InfluxDB Cloud Dedicated administrative UI includes a portal for +managing databases. + +1. To access the {{< product-name >}} Admin UI, visit the following URL in your browser: + +
+   https://console.influxdata.com
+   
+2. Use the credentials provided by InfluxData to log into the Admin UI. + If you don't have login credentials, [contact InfluxData support](https://support.influxdata.com). + + After you log in, the Account Management portal displays [account information](/influxdb3/cloud-dedicated/admin/account/) + and lists all clusters associated with your account. +3. Click a cluster row to view the list of databases associated with the cluster. You can **Search** for clusters by name or ID to filter the list and use the sort button and column headers to sort the list. +4. Find the database you want to update. + You can **Search** for databases by name or ID to filter the list and use the sort button and column headers to sort the list. +5. To set the retention period, click the options button (3 vertical dots) to the right of the database. +6. In the options menu, click **Set Retention Period**. +{{% /tab-content %}} +{{% tab-content %}} Use the [`influxctl database update` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/update/) diff --git a/content/influxdb3/cloud-dedicated/admin/tables/create.md b/content/influxdb3/cloud-dedicated/admin/tables/create.md index 4a5ec8784..7040ee235 100644 --- a/content/influxdb3/cloud-dedicated/admin/tables/create.md +++ b/content/influxdb3/cloud-dedicated/admin/tables/create.md @@ -1,23 +1,34 @@ --- title: Create a table description: > - Use the [`influxctl table create` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/table/create/) - to create a new table in a specified database your InfluxDB cluster. - Provide the database name and a table name. + Use the Admin UI, the [`influxctl table create` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/table/create/), or the [Management HTTP API](/influxdb3/cloud-dedicated/api/management/) + for {{< product-name >}} to create a new table in a specified database in your InfluxDB cluster.
+ Create a table with the same partitioning as the database or with a custom partition template. menu: influxdb3_cloud_dedicated: parent: Manage tables weight: 201 list_code_example: | - ```sh + + ##### CLI + ```bash influxctl table create ``` + + + ##### API + ```bash + curl \ + --location "https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases/DATABASE_NAME/tables/" \ + --header "Authorization: Bearer MANAGEMENT_TOKEN" \ + --json '{ "name": "TABLE_NAME" }' + ``` related: - /influxdb3/cloud-dedicated/reference/cli/influxctl/table/create/ - /influxdb3/cloud-dedicated/admin/custom-partitions/ --- -Use the [`influxctl table create` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/table/create/) +Use the Admin UI or the [`influxctl table create` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/table/create/) to create a table in a specified database in your {{< product-name omit=" Clustered" >}} cluster. @@ -25,26 +36,181 @@ With {{< product-name >}}, tables and measurements are synonymous. Typically, tables are created automatically on write using the measurement name specified in line protocol written to InfluxDB. However, to apply a [custom partition template](/influxdb3/cloud-dedicated/admin/custom-partitions/) -to a table, you must manually create the table before you write any data to it. +to a table, you must manually [create the table with custom partitioning](#create-a-table-with-custom-partitioning) before you write any data to it. -1. If you haven't already, [download and install the `influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/#download-and-install-influxctl). -2. Run the `influxctl table create` command and provide the following: +Partitioning defaults to `%Y-%m-%d` (daily). +When a partition template is applied to a database, it becomes the default template +for all tables in that database, but can be overridden when creating a +table. 
- - _Optional_: [InfluxDB tags](/influxdb3/cloud-dedicated/reference/glossary/#tag) - to use in the partition template - - _Optional_: [InfluxDB tag buckets](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#tag-bucket-part-templates) - to use in the partition template - - _Optional_: A supported [Rust strftime date and time string](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#time-part-templates) - that specifies the time format in the partition template and determines - the time interval to partition by _(default is `%Y-%m-%d`)_ - - The name of the database to create the table in - - The name of the table to create +- [Create a table](#create-a-table) +- [Create a table with custom partitioning](#create-a-table-with-custom-partitioning) +- [Partition template requirements and guidelines](#partition-template-requirements-and-guidelines) - > [!Note] - > _{{< product-name >}} supports up to 7 total tags or tag buckets in the partition template._ +## Create a table + +{{< tabs-wrapper >}} +{{% tabs %}} +[Admin UI](#) +[influxctl](#) +[Management API](#) +{{% /tabs %}} +{{% tab-content %}} + +The InfluxDB Cloud Dedicated administrative UI includes a portal for creating +and managing tables. + +1. To access the {{< product-name >}} Admin UI, visit the following URL in your browser: + +
+   https://console.influxdata.com
+   
+2. Use the credentials provided by InfluxData to log into the Admin UI. + If you don't have login credentials, [contact InfluxData support](https://support.influxdata.com). +3. In the cluster list, find and click the cluster you want to create a database in. You + can sort on column headers or use the **Search** field to find a specific cluster. +4. In the database list, find and click the database you want to create a table in. You + can sort on column headers or use the **Search** field to find a specific database. +5. Click the **New Table** button above the table list. + The **Create table** dialog displays. + + {{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-create-table-default.png" alt="Create table dialog" />}} + +6. In the **Create table** dialog, provide a **Table name**. +7. Leave **Use custom partitioning** set to **Off**. + By default, the table inherits the database's partition template. + If no custom partition template is applied to the database, the table inherits the default partitioning of `%Y-%m-%d` (daily). +8. Click the **Create Table** button. + +{{% /tab-content %}} +{{% tab-content %}} + +1. If you haven't already, [download and install the `influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/#download-and-install-influxctl). +2. Run the `influxctl table create` command: {{% code-placeholders "(DATABASE|TABLE)_NAME" %}} -```sh +```bash +# Create a table with the same partitioning as the database +influxctl table create \ + DATABASE_NAME \ + TABLE_NAME +``` +{{% /code-placeholders %}} + +Replace: +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the database to create the table in +- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name for your new table + +{{% /tab-content %}} +{{% tab-content %}} + +_This example uses [cURL](https://curl.se/) to send a Management HTTP API request, but you can use any HTTP client._ + +1. 
If you haven't already, follow the instructions to [install cURL](https://everything.curl.dev/install/index.html) for your system. +2. In your terminal, use cURL to send a request to the following {{% product-name %}} endpoint: + +{{% api-endpoint method="POST" +endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases/DATABASE_NAME/tables" +api-ref="/influxdb3/cloud-dedicated/api/management/#operation/CreateClusterDatabaseTable" %}} + +{{% code-placeholders "ACCOUNT_ID|CLUSTER_ID|DATABASE_NAME|TABLE_NAME|MANAGEMENT_TOKEN" %}} +```bash +# Create a table with the same partitioning as the database +curl \ + --location "https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases/DATABASE_NAME/tables" \ + --request POST \ + --header "Authorization: Bearer MANAGEMENT_TOKEN" \ + --json '{ + "name": "TABLE_NAME" + }' +``` +{{% /code-placeholders %}} + +Replace the following: + +- {{% code-placeholder-key %}}`ACCOUNT_ID`{{% /code-placeholder-key %}}: the [account](/influxdb3/cloud-dedicated/admin/account/) ID for the cluster _(list details via the [Admin UI](/influxdb3/cloud-dedicated/admin/clusters/list/) or [CLI](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json))_ +- {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: the [cluster](/influxdb3/cloud-dedicated/admin/clusters/) ID _(list details via the [Admin UI](/influxdb3/cloud-dedicated/admin/clusters/list/) or [CLI](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json))_. 
+- {{% code-placeholder-key %}}`MANAGEMENT_TOKEN`{{% /code-placeholder-key %}}: a valid [management token](/influxdb3/cloud-dedicated/admin/tokens/management/) for your {{% product-name %}} cluster +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the [database](/influxdb3/cloud-dedicated/admin/databases/) to create the table in +- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name for your new table + +{{% /tab-content %}} +{{< /tabs-wrapper >}} + +## Create a table with custom partitioning + +{{< product-name >}} lets you define a [custom partitioning](/influxdb3/cloud-dedicated/admin/custom-partitions/) strategy for each database and table. +A _partition_ is a logical grouping of data stored in [Apache Parquet](https://parquet.apache.org/) format. +By default, data is partitioned by day, +but, depending on your schema and workload, customizing the partitioning +strategy can improve query performance. + +To use custom partitioning, you define a [partition template](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/). +If a table doesn't have a custom partition template, it inherits the database's template. + +{{< tabs-wrapper >}} +{{% tabs %}} +[Admin UI](#) +[influxctl](#) +[Management API](#) +{{% /tabs %}} +{{% tab-content %}} + +1. To access the {{< product-name >}} Admin UI, visit the following URL in your browser: + +
+   https://console.influxdata.com
+   
+2. Use the credentials provided by InfluxData to log into the Admin UI. + If you don't have login credentials, [contact InfluxData support](https://support.influxdata.com). +3. In the cluster list, find and click the cluster you want to create a database in. You + can sort on column headers or use the **Search** field to find a specific cluster. +4. In the database list, find and click the database you want to create a table in. You + can sort on column headers or use the **Search** field to find a specific database. +5. Click the **New Table** button above the table list. + The **Create table** dialog displays. + + {{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-create-table-default.png" alt="Create table dialog" />}} + +6. In the **Create table** dialog, provide a **Table name**. +7. Toggle **Use custom partitioning** to **On**. + The **Custom partition template** section displays. + + {{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-create-table-custom-partitioning.png" alt="Create table dialog with custom partitioning" />}} + +8. Provide the following: + + - **Custom partition template time format**: The time part for partitioning data (yearly, monthly, or daily). + - _Optional_: **Custom partition template tag parts**: The [tag parts](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#tag-part-templates) for partitioning data. + - _Optional_: **Custom partition template tag bucket parts**: The [tag bucket parts](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#tag-bucket-part-templates) for partitioning data. +9. _Optional_: To add more parts to the partition template, click the **Add Tag** button. For more information, see [Partition template requirements and guidelines](#partition-template-requirements-and-guidelines). +10. Click the **Create Table** button to create the table. + The new table displays in the list of tables for the cluster. +{{% /tab-content %}} +{{% tab-content %}} + +1. 
 If you haven't already, [download and install the `influxctl` CLI](/influxdb3/cloud-dedicated/get-started/setup/#download-install-and-configure-the-influxctl-cli). +2. Use the following `influxctl table create` command flags to specify the +[partition template parts](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#tag-part-templates): + + - `--template-timeformat`: A [Rust strftime date and time](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#time-part-templates) + string that specifies the time part in the partition template and determines + the time interval to partition by. + Use one of the following: + + - `%Y-%m-%d` (daily) + - `%Y-%m` (monthly) + - `%Y` (annually) + - `--template-tag`: An [InfluxDB tag](/influxdb3/cloud-dedicated/reference/glossary/#tag) + to use in the partition template. + - `--template-tag-bucket`: An [InfluxDB tag](/influxdb3/cloud-dedicated/reference/glossary/#tag) + and number of "buckets" to group tag values into. + Provide the tag key and the number of buckets to bucket tag values into + separated by a comma: `tagKey,N`. + +{{% code-placeholders "DATABASE_NAME|30d|(TAG_KEY(_\d)?)|100|300" %}} +```bash +# Create a table with custom partitioning influxctl table create \ --template-tag tag1 \ --template-tag tag2 \ @@ -56,22 +222,71 @@ influxctl table create \ ``` {{% /code-placeholders %}} -### Custom partitioning +Replace the following: -{{< product-name >}} lets you define a custom partitioning strategy for each table. -A _partition_ is a logical grouping of data stored in [Apache Parquet](https://parquet.apache.org/) -format in the InfluxDB 3 storage engine. By default, data is partitioned by day, -but, depending on your schema and workload, customizing the partitioning -strategy can improve query performance.
+- {{% code-placeholder-key %}}`TAG_KEY_1`, `TAG_KEY_2`{{% /code-placeholder-key %}}: [tag](/influxdb3/cloud-dedicated/reference/glossary/#tag) keys to partition by +- {{% code-placeholder-key %}}`TAG_KEY_3`, `TAG_KEY_4`{{% /code-placeholder-key %}}: [tag](/influxdb3/cloud-dedicated/reference/glossary/#tag) keys for bucketed partitioning +- {{% code-placeholder-key %}}`100`, `300`{{% /code-placeholder-key %}}: number of buckets to group tag values into +- {{% code-placeholder-key %}}`'%Y-%m-%d'`{{% /code-placeholder-key %}}: [Rust strftime date and time](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#time-part-templates) string that specifies the time part in the partition template +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the [database](/influxdb3/cloud-dedicated/admin/databases/) to create the table in +- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name you want for the new table +{{% /tab-content %}} +{{% tab-content %}} + +_This example uses [cURL](https://curl.se/) to send a Management HTTP API request, but you can use any HTTP client._ -Use the `--template-tag`, `--template-tag-bucket`, and `--template-timeformat` -flags to define partition template parts used to generate partition keys for the table. -If no template flags are provided, the table uses the partition template of the -target database. -For more information, see [Manage data partitioning](/influxdb3/cloud-dedicated/admin/custom-partitions/). +1. If you haven't already, follow the instructions to [install cURL](https://everything.curl.dev/install/index.html) for your system. +2. 
In your terminal, use cURL to send a request to the following {{% product-name %}} endpoint: + +{{% api-endpoint method="POST" +endpoint="https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases/DATABASE_NAME/tables" +api-ref="/influxdb3/cloud-dedicated/api/management/#operation/CreateClusterDatabaseTable" %}} + +In the request body, include the `partitionTemplate` property and specify the [partition template parts](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#tag-part-templates) as an array of objects--for example: + +{{% code-placeholders "ACCOUNT_ID|CLUSTER_ID|DATABASE_NAME|MANAGEMENT_TOKEN|TABLE_NAME|(TAG_KEY(_\d)?)|100|300|%Y-%m-%d" %}} +```bash +# Create a table with custom partitioning +curl \ + --location "https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases/DATABASE_NAME/tables" \ + --request POST \ + --header "Authorization: Bearer MANAGEMENT_TOKEN" \ + --json '{ + "name": "TABLE_NAME", + "partitionTemplate": [ + { "type": "tag", "value": "TAG_KEY_1" }, + { "type": "tag", "value": "TAG_KEY_2" }, + { "type": "bucket", "value": { "tagName": "TAG_KEY_3", "numberOfBuckets": 100 } }, + { "type": "bucket", "value": { "tagName": "TAG_KEY_4", "numberOfBuckets": 300 } }, + { "type": "time", "value": "%Y-%m-%d" } + ] + }' +``` +{{% /code-placeholders %}} + +Replace the following: + +- {{% code-placeholder-key %}}`ACCOUNT_ID`{{% /code-placeholder-key %}}: the [account](/influxdb3/cloud-dedicated/admin/account/) ID for the cluster _(list details via the [Admin UI](/influxdb3/cloud-dedicated/admin/clusters/list/) or [CLI](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json))_ +- {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: the [cluster](/influxdb3/cloud-dedicated/admin/clusters/) ID _(list details via the [Admin UI](/influxdb3/cloud-dedicated/admin/clusters/list/) or 
[CLI](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json))_. +- {{% code-placeholder-key %}}`MANAGEMENT_TOKEN`{{% /code-placeholder-key %}}: a valid [management token](/influxdb3/cloud-dedicated/admin/tokens/management/) for your {{% product-name %}} cluster +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the [database](/influxdb3/cloud-dedicated/admin/databases/) to create the table in +- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name you want for the new table +- {{% code-placeholder-key %}}`TAG_KEY_1`, `TAG_KEY_2`{{% /code-placeholder-key %}}: [tag](/influxdb3/cloud-dedicated/reference/glossary/#tag) keys to partition by +- {{% code-placeholder-key %}}`TAG_KEY_3`, `TAG_KEY_4`{{% /code-placeholder-key %}}: [tag](/influxdb3/cloud-dedicated/reference/glossary/#tag) keys for bucketed partitioning +- {{% code-placeholder-key %}}`100`, `300`{{% /code-placeholder-key %}}: number of buckets to group tag values into +- {{% code-placeholder-key %}}`'%Y-%m-%d'`{{% /code-placeholder-key %}}: [Rust strftime date and time](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#time-part-templates) string that specifies the time part in the partition template +{{% /tab-content %}} +{{< /tabs-wrapper >}} + +## Partition template requirements and guidelines + +Always specify 1 time part in your template. +A template has a maximum of 8 parts: 1 time part and up to 7 total tag and tag bucket parts. + +For more information about partition template requirements and restrictions, see [Partition templates](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/). > [!Warning] > #### Partition templates can only be applied on create -> +> > You can only apply a partition template when creating a table. -> There is no way to update a partition template on an existing table. +> You can't update a partition template on an existing table. 
diff --git a/content/influxdb3/cloud-dedicated/admin/tables/list.md b/content/influxdb3/cloud-dedicated/admin/tables/list.md index 73737fe93..5533845e0 100644 --- a/content/influxdb3/cloud-dedicated/admin/tables/list.md +++ b/content/influxdb3/cloud-dedicated/admin/tables/list.md @@ -1,7 +1,7 @@ --- title: List tables description: > - Use the [`SHOW TABLES` SQL statement](/influxdb3/cloud-dedicated/query-data/sql/explore-schema/#list-measurements-in-a-database) + Use the Admin UI, the [`SHOW TABLES` SQL statement](/influxdb3/cloud-dedicated/query-data/sql/explore-schema/#list-measurements-in-a-database), or the [`SHOW MEASUREMENTS` InfluxQL statement](/influxdb3/cloud-dedicated/query-data/influxql/explore-schema/#list-measurements-in-a-database) to list tables in a database. menu: @@ -25,13 +25,53 @@ related: - /influxdb3/cloud-dedicated/query-data/influxql/explore-schema/ --- -Use the [`SHOW TABLES` SQL statement](/influxdb3/cloud-dedicated/query-data/sql/explore-schema/#list-measurements-in-a-database) +Use the Admin UI, the [`SHOW TABLES` SQL statement](/influxdb3/cloud-dedicated/query-data/sql/explore-schema/#list-measurements-in-a-database), or the [`SHOW MEASUREMENTS` InfluxQL statement](/influxdb3/cloud-dedicated/query-data/influxql/explore-schema/#list-measurements-in-a-database) to list tables in a database. > [!Note] > With {{< product-name >}}, tables and measurements are synonymous. +{{< tabs-wrapper >}} +{{% tabs %}} +[Admin UI](#admin-ui) +[influxctl](#influxctl) +{{% /tabs %}} +{{% tab-content %}} + +The InfluxDB Cloud Dedicated administrative UI includes a portal for managing +tables. You can view the list of tables associated with a database and +their details, including: + +- Name +- Table ID +- Table size (in bytes) + +1. To access the {{< product-name >}} Admin UI, visit the following URL in your browser: + +
+   https://console.influxdata.com
+   
+2. Use the credentials provided by InfluxData to log into the Admin UI. +   If you don't have login credentials, [contact InfluxData support](https://support.influxdata.com). + +   After you log in, the Account Management portal displays [account information](/influxdb3/cloud-dedicated/admin/account/) +   and lists all clusters associated with your account. +3. In the cluster list, find the cluster that contains the database and table. You can **Search** for clusters by name or ID to filter the list and use the sort button and column headers to sort the list. +4. Click the cluster row to view the list of databases associated with the cluster. +5. In the database list, find the database that contains the table. You can **Search** for databases by name or ID to filter the list and use the sort button and column headers to sort the list. +6. Click the database row to view the list of tables associated with the database. +7. The table list displays the following table details: +   - Name +   - Table ID +   - Table size (in bytes) +8. You can **Search** for tables by name or ID to filter the list and use the sort button and column headers to sort the list.
+ +{{% /tab-content %}} +{{% tab-content %}} + ###### SQL ```sql @@ -78,3 +118,5 @@ Replace the following: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Name of the database to query +{{% /tab-content %}} +{{< /tabs-wrapper >}} \ No newline at end of file diff --git a/content/influxdb3/cloud-dedicated/admin/tokens/_index.md b/content/influxdb3/cloud-dedicated/admin/tokens/_index.md index 3381e2412..994f5eaa4 100644 --- a/content/influxdb3/cloud-dedicated/admin/tokens/_index.md +++ b/content/influxdb3/cloud-dedicated/admin/tokens/_index.md @@ -16,34 +16,5 @@ aliases: InfluxDB uses token authentication to authorize access to data in your {{< product-name omit=" Clustered" >}} cluster. -There are two types of tokens: - -- [Database tokens](#database-tokens) -- [Management tokens](#management-tokens) - -#### Database tokens - -Database tokens grant read and write permissions to one or more databases -and allows for actions like writing and querying data. - -All read and write actions performed against time series data in your -{{< product-name omit=" Clustered" >}} cluster must be authorized using a database token. - -#### Management tokens - -Management tokens grant permission to perform administrative actions such as -managing users, databases, and database tokens. -Management tokens allow clients, such as the -[`influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/), -to perform administrative actions. - -> [!Note] -> #### Store secure tokens in a secret store -> -> Token strings are returned _only_ on token creation. -> We recommend storing database tokens in a **secure secret store**. -> For example, see how to [authenticate Telegraf using tokens in your OS secret store](https://github.com/influxdata/telegraf/tree/master/plugins/secretstores/os). 
- ---- {{< children hlevel="h2" readmore=true hr=true >}} diff --git a/content/influxdb3/cloud-dedicated/admin/tokens/database/create.md b/content/influxdb3/cloud-dedicated/admin/tokens/database/create.md index 5148268ec..ea40d4768 100644 --- a/content/influxdb3/cloud-dedicated/admin/tokens/database/create.md +++ b/content/influxdb3/cloud-dedicated/admin/tokens/database/create.md @@ -1,7 +1,7 @@ --- title: Create a database token description: > - Use the [`influxctl token create` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/token/create/) + Use the Admin UI, the [`influxctl token create` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/token/create/), or the [Management HTTP API](/influxdb3/cloud-dedicated/api/management/) to create a [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) for reading and writing data in your InfluxDB Cloud Dedicated cluster. Provide a token description and permissions for databases. @@ -52,16 +52,49 @@ related: - /influxdb3/cloud-dedicated/reference/api/ --- -Use the [`influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/) +Use the Admin UI, the [`influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/), or the [Management HTTP API](/influxdb3/cloud-dedicated/api/management/) to create a [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) with permissions for reading and writing data in your {{< product-name omit=" Clustered" >}} cluster. {{< tabs-wrapper >}} {{% tabs %}} +[Admin UI](#) [influxctl](#) [Management API](#) {{% /tabs %}} {{% tab-content %}} + +The InfluxDB Cloud Dedicated administrative UI includes a portal for creating and managing database tokens. + +1. To access the {{< product-name >}} Admin UI, visit the following URL in your browser: + +
+    https://{{< influxdb/host >}}
+    
+ +2. Use the credentials provided by InfluxData to log into the Admin UI. + If you don't have login credentials, [contact InfluxData support](https://support.influxdata.com). +3. After you log in, the Account Management portal displays [account information](/influxdb3/cloud-dedicated/admin/account/) + and the list of clusters associated with your account. +4. Find the cluster that you want to create a database token for. You can **Search** clusters by name or ID to filter the list and use the sort button and column headers to sort the list. +5. Click the row for the cluster. +6. Click the **Database Tokens** button in the upper right corner of the Cluster screen. +7. In the Database Tokens portal, click the **New Database Token** button. + The **Create Database Token** dialog displays. + + {{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-create-database-token.png" alt="Create database token dialog" />}} + +8. Add a token description. + The description is used to identify the token in the list of tokens. +9. To create a token that expires, enable the **Expiration** toggle, + and then select the expiration date and time. +10. Set the token permissions: + - Select the database or **All Databases** for the permission + - Use the **Read** and **Write** buttons under **Actions** to toggle these permissions on or off for the selected database. +11. Click the **Create Token** button. The dialog displays the **Token secret** string and the description you provided. 
+ +{{% /tab-content %}} +{{% tab-content %}} Use the [`influxctl token create` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/token/create/) diff --git a/content/influxdb3/cloud-dedicated/admin/tokens/database/list.md b/content/influxdb3/cloud-dedicated/admin/tokens/database/list.md index 832867852..a67f7f268 100644 --- a/content/influxdb3/cloud-dedicated/admin/tokens/database/list.md +++ b/content/influxdb3/cloud-dedicated/admin/tokens/database/list.md @@ -1,7 +1,7 @@ --- title: List database tokens description: > - Use the [`influxctl token list` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/token/list/) + Use the Admin UI, the [`influxctl token list` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/token/list/), or the [Management HTTP API](/influxdb3/cloud-dedicated/api/management/) to list database tokens in your InfluxDB Cloud Dedicated cluster. menu: @@ -35,7 +35,7 @@ related: - /influxdb3/cloud-dedicated/reference/api/ --- -Use the [`influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/) +Use the Admin UI, the [`influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/), or the [Management HTTP API](/influxdb3/cloud-dedicated/api/management/) to list database tokens in your {{< product-name omit=" Clustered" >}} cluster. @@ -46,10 +46,49 @@ to list database tokens in your {{< product-name omit=" Clustered" >}} cluster. {{< tabs-wrapper >}} {{% tabs %}} +[Admin UI](#admin-ui-list-tokens) [influxctl](#) [Management API](#) {{% /tabs %}} {{% tab-content %}} + + +The InfluxDB Cloud Dedicated administrative UI includes a portal for creating and managing database tokens. + +Administrators can use this portal to: + +- View token details +- Add read and write permissions for specific databases to a token +- Edit a token's existing read and write permissions for a database +- Create a database token +- Revoke a database token + +1. 
To access the {{< product-name >}} Admin UI, visit the following URL in your browser: + +
+    https://{{< influxdb/host >}}
+    
+ +2. Use the credentials provided by InfluxData to log into the Admin UI. + If you don't have login credentials, [contact InfluxData support](https://support.influxdata.com). +3. After you log in, the Account Management screen displays [account information](/influxdb3/cloud-dedicated/admin/account/) + and the list of clusters associated with your account. +4. Click the cluster row that you want to manage tokens for. You can **Search** clusters by name or ID to filter the list and use the sort button and column headers to sort the list. +5. Click the **Database Tokens** button in the upper right corner of the Cluster screen. + +The Database Tokens portal lists all database tokens associated with the cluster +and provides the following information about each token: + +- Token ID +- Description +- Databases +- Status (Active or Revoked) +- Created At date +- Expires At date + +You can **Search** tokens by description or ID to filter the list and use the sort button and column headers to sort the list. +{{% /tab-content %}} +{{% tab-content %}} 1. If you haven't already, [download and install the `influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/#download-and-install-influxctl), and then [configure an `influxctl` connection profile](/influxdb3/cloud-dedicated/reference/cli/influxctl/#configure-connection-profiles) for your cluster.2. 
In your terminal, run the `influxctl token list` command and provide the following: diff --git a/content/influxdb3/cloud-dedicated/admin/tokens/database/revoke.md b/content/influxdb3/cloud-dedicated/admin/tokens/database/revoke.md index 33c30cb85..df870c745 100644 --- a/content/influxdb3/cloud-dedicated/admin/tokens/database/revoke.md +++ b/content/influxdb3/cloud-dedicated/admin/tokens/database/revoke.md @@ -1,9 +1,9 @@ --- title: Revoke a database token description: > - Use the [`influxctl token revoke` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/token/revoke/) + Use the Admin UI, the [`influxctl token revoke` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/token/revoke/), or the [Management HTTP API](/influxdb3/cloud-dedicated/api/management/) - to revoke a database token associated with your {{% product-name omit-" Clustered" %}} + to revoke a database token associated with your {{% product-name omit=" Clustered" %}} cluster and remove all permissions associated with the token. Provide the ID of the database token you want to revoke. menu: @@ -39,10 +39,48 @@ to revoke a database token associated with your {{< tabs-wrapper >}} {{% tabs %}} +[Admin UI](#) [influxctl](#) [Management API](#) {{% /tabs %}} {{% tab-content %}} + +The InfluxDB Cloud Dedicated administrative UI includes a portal for creating +and managing database tokens. + +Administrators can use this portal to: + +- View token details +- Add read and write permissions for specific databases to a token +- Edit a token's existing read and write permissions for a database +- Create a database token +- Revoke a database token + +1. To access the {{< product-name >}} Admin UI, visit the following URL in your browser: + +
+    https://{{< influxdb/host >}}
+    
+ +2. Use the credentials provided by InfluxData to log into the Admin UI. + If you don't have login credentials, [contact InfluxData support](https://support.influxdata.com). +3. After you log in, the Account Management portal displays [account information](/influxdb3/cloud-dedicated/admin/account/) + and the [list of clusters](/influxdb3/cloud-dedicated/admin/clusters/list/) associated with your account. +4. Click the row for the cluster that contains the database you want to manage tokens for. You can **Search** clusters by name or ID to filter the list and use the sort button and column headers to sort the list. +5. Click the **Database Tokens** button in the upper right corner of the Cluster screen. +6. The Database Tokens portal displays the [list of database tokens](/influxdb3/cloud-dedicated/admin/tokens/database/list/) associated with the cluster. + Use the sort and filter options above the list to find a specific token. +7. Click the **Options** button (three vertical dots) to the right of the token you want to revoke. +8. In the options menu, click **Revoke Token**. + The **Revoke Database Token** dialog displays. + + {{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-revoke-database-token.png" alt="Revoke database token dialog" />}} + +9. Check the box to confirm that you understand the risk. +10. Click the **Revoke Token** button. + The token is revoked and filtered from the list of active tokens. 
+{{% /tab-content %}} +{{% tab-content %}} Use the [`influxctl token revoke` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/token/revoke/) diff --git a/content/influxdb3/cloud-dedicated/admin/tokens/database/update.md b/content/influxdb3/cloud-dedicated/admin/tokens/database/update.md index e4f968056..c43861077 100644 --- a/content/influxdb3/cloud-dedicated/admin/tokens/database/update.md +++ b/content/influxdb3/cloud-dedicated/admin/tokens/database/update.md @@ -1,9 +1,9 @@ --- title: Update a database token description: > - Use the [`influxctl token update` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/token/update/) + Use the Admin UI, the [`influxctl token update` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/token/update/), or the [Management HTTP API](/influxdb3/cloud-dedicated/api/management/) - to update a database token's permissions in your InfluxDB Cloud Dedicated cluster. + to update a database token's permissions in your {{< product-name omit=" Clustered" >}} cluster. menu: influxdb3_cloud_dedicated: parent: Database tokens @@ -52,17 +52,74 @@ related: - /influxdb3/cloud-dedicated/reference/api/ --- -Use the [`influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/) +Use the Admin UI, the [`influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/), or the [Management HTTP API](/influxdb3/cloud-dedicated/api/management/) to update a database token's permissions {{< product-name omit=" Clustered" >}} cluster. {{< tabs-wrapper >}} {{% tabs %}} +[Admin UI](#) [influxctl](#) [Management API](#) {{% /tabs %}} {{% tab-content %}} + +The InfluxDB Cloud Dedicated administrative UI includes a portal for creating +and managing database tokens. +Through this portal, administrators can edit a token's permissions to: + +- Add read and write permissions for specific databases +- Edit a token's existing read and write permissions for a database + +### Open the Edit Database Token dialog + +1. 
To access the {{< product-name >}} Admin UI, visit the following URL in your browser: + +
+    https://{{< influxdb/host >}}
+    
+ +2. Use the credentials provided by InfluxData to log into the Admin UI. + If you don't have login credentials, [contact InfluxData support](https://support.influxdata.com). +3. After you log in, the Account Management portal displays [account information](/influxdb3/cloud-dedicated/admin/account/) + and the [list of clusters](/influxdb3/cloud-dedicated/admin/clusters/list/) associated with your account. + Use the sort and filter options above the list to find a specific cluster. +4. Click the row for the cluster that contains the database you want to manage tokens for. +5. Click the **Database Tokens** button in the upper right corner of the Cluster screen. +6. The Database Tokens portal displays the [list of database tokens](/influxdb3/cloud-dedicated/admin/tokens/database/list/) associated with the cluster. + Use the sort and filter options above the list to find a specific token. +7. Click the **Options** button (three vertical dots) to the right of the token you want to edit. + + {{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-database-token-options-menu.png" alt="Database token option menu" />}} + +8. Click **Edit Token** in the dropdown menu. The **Edit Database Token** dialog displays. + + {{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-edit-database-token.png" alt="Edit Database Token dialog" />}} + +9. In the **Edit Database Token** dialog, you can edit the token's **Description** and permissions. + +### Edit token permissions + +1. [Open the Edit Database Token dialog](#open-the-edit-database-token-dialog) for the database token. + + The **Edit Database Token** dialog displays the token's existing permissions. + Each permission consists of: + + - A database (specific database name or **All Databases**) + - Action permissions (Read and Write) + +2. To change which database a permission applies to, click the **Database** dropdown and select a different database or **All Databases**. +3. 
To adjust the access level of the permission, use the **Read** and **Write** buttons under **Actions** to toggle these permissions on or off for the selected database. + +### Add token permissions + +1. [Open the Edit Database Token dialog](#open-the-edit-database-token-dialog) for the database token. +2. In the dialog, click **Add Permission**. +3. For the new permission, select a database from the dropdown. +4. Toggle the **Read** and **Write** buttons to set the access level. +{{% /tab-content %}} +{{% tab-content %}} Use the [`influxctl token update` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/token/update/) to update a database token's permissions in your {{< product-name omit=" Clustered" >}} cluster. diff --git a/content/influxdb3/cloud-dedicated/admin/tokens/management/create.md b/content/influxdb3/cloud-dedicated/admin/tokens/management/create.md index 27ae83db6..7c0de3fe2 100644 --- a/content/influxdb3/cloud-dedicated/admin/tokens/management/create.md +++ b/content/influxdb3/cloud-dedicated/admin/tokens/management/create.md @@ -1,7 +1,7 @@ --- title: Create a management token description: > - Use the [`influxctl management create` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/management/create) + Use the Admin UI or the [`influxctl management create` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/management/create) to manually create a management token. menu: influxdb3_cloud_dedicated: @@ -18,6 +18,8 @@ list_code_example: | --description "Example token description" ``` --- +Use the Admin UI or the [`influxctl management create` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/management/create) +to manually create a management token. By default, management tokens are short-lived tokens issued by an OAuth2 identity provider that grant a specific user administrative access to your @@ -37,6 +39,40 @@ interaction with your identity provider. 
> and authorized through your OAuth2 identity provider to manually create a > management token. +{{< tabs-wrapper >}} +{{% tabs %}} +[Admin UI](#admin-ui) +[influxctl](#influxctl) +{{% /tabs %}} +{{% tab-content %}} + +The InfluxDB Cloud Dedicated administrative UI includes a portal for creating +and managing management tokens. + +1. To access the {{< product-name >}} Admin UI, visit the following URL in your browser: + +
+   https://console.influxdata.com
+   
+ +2. Use the credentials provided by InfluxData to log into the Admin UI. +   If you don't have login credentials, [contact InfluxData support](https://support.influxdata.com). +3. Click the **Management Tokens** button in the upper right corner of the Account Management portal. +4. In the Management Tokens portal, click the **New Management Token** button. +   The **Create Management Token** dialog displays. + +   {{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-create-management-token.png" alt="Create management token dialog" />}} + +5. You can optionally set the following fields: +   - **Expiration date**: Set an expiration date for the token +   - **Expiration time**: Set an expiration time for the token +   - **Description**: Enter a description for the token + +   If an expiration isn't set, the token does not expire until revoked. +6. Click the **Create Token** button. The dialog displays the **Token secret** string and the description you provided. +{{% /tab-content %}} +{{% tab-content %}} + 1. If you haven't already, [download and install the `influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/#download-and-install-influxctl). 2. Use the [`influxctl management create` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/management/create/) to manually create a management token. Provide the following: @@ -61,6 +97,8 @@ Replace the following: `{{< datetime/current-date offset=1 >}}`. - {{% code-placeholder-key %}}`TOKEN_DESCRIPTION`{{% /code-placeholder-key %}}: Management token description. +{{% /tab-content %}} +{{< /tabs-wrapper >}} Once created, the command returns the management token string.
diff --git a/content/influxdb3/cloud-dedicated/admin/tokens/management/list.md b/content/influxdb3/cloud-dedicated/admin/tokens/management/list.md index b568e2c1f..a63ff2efd 100644 --- a/content/influxdb3/cloud-dedicated/admin/tokens/management/list.md +++ b/content/influxdb3/cloud-dedicated/admin/tokens/management/list.md @@ -1,7 +1,7 @@ --- title: List management tokens description: > - Use the [`influxctl management list` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/management/list/) + Use the Admin UI or the [`influxctl management list` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/management/list/) to list manually-created management tokens. menu: influxdb3_cloud_dedicated: @@ -17,9 +17,44 @@ list_code_example: | ``` --- -Use the [`influxctl management list` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/management/list) +Use the Admin UI or the [`influxctl management list` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/management/list) to list manually-created management tokens. +{{< tabs-wrapper >}} +{{% tabs %}} +[Admin UI](#admin-ui) +[influxctl](#influxctl) +{{% /tabs %}} +{{% tab-content %}} + +The InfluxDB Cloud Dedicated administrative UI includes a portal for creating +and managing management tokens. + +1. To access the {{< product-name >}} Admin UI, visit the following URL in your browser: + +
+   https://console.influxdata.com
+   
+ +2. Use the credentials provided by InfluxData to log into the Admin UI. + If you don't have login credentials, [contact InfluxData support](https://support.influxdata.com). + +3. To list management tokens, click the **Management Tokens** button in the upper right corner of the Account Management portal. + +The Management Tokens portal displays the following information about each token: + +- Status +- Description +- Token ID +- Created date +- Expiration date + +You can **Search** tokens by description or ID to filter the list and use the sort button and column headers to sort the list. + +{{% /tab-content %}} +{{% tab-content %}} + + 1. If you haven't already, [download and install the `influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/#download-and-install-influxctl). 2. Run `influxctl management list` with the following: @@ -40,6 +75,8 @@ influxctl management list --format json > Revoked tokens still appear when listing management tokens, but they are no > longer valid for any operations. +{{% /tab-content %}} +{{< /tabs-wrapper >}} ### Output formats The `influxctl management list` command supports two output formats: `table` and `json`. diff --git a/content/influxdb3/cloud-dedicated/admin/tokens/management/revoke.md b/content/influxdb3/cloud-dedicated/admin/tokens/management/revoke.md index 3886fbf52..8c17f6758 100644 --- a/content/influxdb3/cloud-dedicated/admin/tokens/management/revoke.md +++ b/content/influxdb3/cloud-dedicated/admin/tokens/management/revoke.md @@ -1,7 +1,7 @@ --- title: Revoke a management token description: > - Use the [`influxctl management revoke` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/management/revoke/) + Use the Admin UI or the [`influxctl management revoke` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/management/revoke/) to revoke a management token and remove all access associated with the token. Provide the ID of the management token you want to revoke. 
menu: @@ -16,9 +16,35 @@ list_code_example: | ``` --- -Use the [`influxctl management revoke` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/management/revoke/) +Use the Admin UI or the [`influxctl management revoke` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/management/revoke/) to revoke a management token and remove all access associated with the token. +{{< tabs-wrapper >}} +{{% tabs %}} +[Admin UI](#admin-ui) +[influxctl](#influxctl) +{{% /tabs %}} +{{% tab-content %}} + +The {{% product-name %}} administrative UI includes a portal for managing management tokens. + +1. To access the {{< product-name omit="InfluxDB " >}} Admin UI, visit the following URL in your browser: + +
+    https://{{< influxdb/host >}}
+    
+2. Use the credentials provided by InfluxData to log into the Admin UI. +   If you don't have login credentials, [contact InfluxData support](https://support.influxdata.com). +3. To revoke a management token, click the **Management Tokens** button in the upper right corner of the Account Management portal. +4. **Search** for the token or use the sort button and column headers to sort the token list and find the token you want to revoke. +5. Click the options button (three vertical dots) to the right of the token you want to revoke. +   The options menu displays. +6. In the options menu, click **Revoke Token**. +7. In the **Revoke Management Token** dialog, check the box to confirm you "Understand the risk of this action". +8. Click the **Revoke Token** button to revoke the token. +   The token is revoked and filtered from the list of active tokens. +{{% /tab-content %}} +{{% tab-content %}} 1. If you haven't already, [download and install the `influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/#download-and-install-influxctl). 2. Run the [`influxctl management list` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/management/list) to output tokens with their IDs. @@ -43,6 +69,8 @@ influxctl management revoke --force TOKEN_ID Replace {{% code-placeholder-key %}}`TOKEN_ID`{{% /code-placeholder-key %}} with the ID of the token you want to revoke. +{{% /tab-content %}} +{{< /tabs-wrapper >}} > [!Note] > #### Revoked tokens are included when listing management tokens diff --git a/content/influxdb3/cloud-dedicated/get-started/_index.md b/content/influxdb3/cloud-dedicated/get-started/_index.md index 7c5f09a36..a89b9a931 100644 --- a/content/influxdb3/cloud-dedicated/get-started/_index.md +++ b/content/influxdb3/cloud-dedicated/get-started/_index.md @@ -17,6 +17,8 @@ provides nearly unlimited series cardinality, improved query performance, and interoperability with widely used data processing tools and platforms.
+Run an {{% product-name %}} proof of concept (PoC) to evaluate it for your use case. + **Time series data** is a sequence of data points indexed in time order. Data points typically consist of successive measurements made from the same source and are used to track changes over time. Examples of time series data include: diff --git a/content/influxdb3/cloud-dedicated/reference/admin-ui.md b/content/influxdb3/cloud-dedicated/reference/admin-ui.md new file mode 100644 index 000000000..eef8a8cc6 --- /dev/null +++ b/content/influxdb3/cloud-dedicated/reference/admin-ui.md @@ -0,0 +1,100 @@ +--- +title: Administrative UI +seotitle: Administrative UI for {{% product-name %}} +description: > +  The Administrative (Admin) UI for {{% product-name %}} is a browser-based, no-code way to manage your {{% product-name %}} environment and perform administrative tasks, such as creating and managing clusters, databases, and tokens. +menu: +  influxdb3_cloud_dedicated: +    parent: Reference +weight: 105 +--- + +The Administrative (Admin) UI for {{% product-name %}} is a browser-based, no-code way to manage your {{% product-name %}} environment and perform administrative tasks, such as creating and managing clusters, databases, and tokens. + +- [Access the Admin UI](#access-the-admin-ui) +- [Account management](#account-management) +- [Resource management](#resource-management) +  - [Manage clusters](#manage-clusters) +  - [Manage databases](#manage-databases) +  - [Manage tables](#manage-tables) +  - [Manage database tokens](#manage-database-tokens) +  - [Additional Features](#additional-features) + + +## Access the Admin UI + +{{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-login.png" alt="InfluxDB Cloud Dedicated Admin UI login page" />}} + +Customers can access the Admin UI at [console.influxdata.com](https://console.influxdata.com) using the credentials provided by InfluxData. +If you don't have login credentials, [contact InfluxData support](https://support.influxdata.com).
+
+After you log in to the Admin UI, the Account Management portal provides an entrypoint to view your account information and manage your {{% product-name %}} resources.
+
+## Account management
+
+{{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-clusters-options.png" alt="InfluxDB Cloud Dedicated Admin UI account management cluster list" />}}
+
+- View account details and associated clusters
+- Create, view, and manage management tokens for account-level operations
+- Access contract information (status, start date)
+
+For more information, see the following:
+
+- [View account information](/influxdb3/cloud-dedicated/admin/account/)
+- [Manage management tokens](/influxdb3/cloud-dedicated/admin/tokens/)
+
+## Resource management
+
+The Admin UI lets you manage {{% product-name %}} resources, such as databases,
+tables, and tokens, associated with a cluster.
+
+- [Manage clusters](#manage-clusters)
+- [Manage databases](#manage-databases)
+- [Manage tables](#manage-tables)
+- [Manage database tokens](#manage-database-tokens)
+
+### Manage clusters
+
+{{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-clusters-options.png" alt="InfluxDB Cloud Dedicated Admin UI cluster options" />}}
+
+- View cluster IDs, statuses, creation date, and sizing information
+- Access Grafana dashboards for operational monitoring (if enabled for the account)
+- View and manage resources (such as databases, tables, and database tokens) associated with a cluster
+
+For more information, see [Manage clusters](/influxdb3/cloud-dedicated/admin/clusters/).
+
+### Manage databases
+
+{{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-databases.png" alt="InfluxDB Cloud Dedicated Admin UI cluster resources databases list" />}}
+
+- Create and delete databases
+- Update retention periods
+- Configure maximum tables and columns per table
+- View and manage tables associated with a database
+
+For more information, see [Manage databases](/influxdb3/cloud-dedicated/admin/databases/).
+ +### Manage tables + +{{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-tables.png" alt="InfluxDB Cloud Dedicated Admin UI database tables list" />}} + +- View tables associated with databases +- See table IDs and sizes +- Create new tables + +For more information, see [Manage tables](/influxdb3/cloud-dedicated/admin/tables/). + +### Manage database tokens + +{{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-database-tokens.png" alt="InfluxDB Cloud Dedicated Admin UI manage database tokens portal" />}} + +- Create and manage authentication tokens for database-level operations +- Edit permissions or revoke existing tokens +- Control access with granular read and write permissions + +For more information, see [Manage database tokens](/influxdb3/cloud-dedicated/admin/tokens/). + +### Additional Features + +- Help center for access to documentation +- One-click connections to InfluxData sales and support diff --git a/content/influxdb3/cloud-dedicated/reference/api/_index.md b/content/influxdb3/cloud-dedicated/reference/api/_index.md index f1ed454af..806c248ae 100644 --- a/content/influxdb3/cloud-dedicated/reference/api/_index.md +++ b/content/influxdb3/cloud-dedicated/reference/api/_index.md @@ -9,7 +9,7 @@ menu: influxdb3_cloud_dedicated: parent: Reference name: InfluxDB HTTP API -weight: 104 +weight: 105 influxdb3/cloud-dedicated/tags: [api] --- diff --git a/content/influxdb3/cloud-dedicated/reference/cli/_index.md b/content/influxdb3/cloud-dedicated/reference/cli/_index.md index 85d870138..1a9997f4d 100644 --- a/content/influxdb3/cloud-dedicated/reference/cli/_index.md +++ b/content/influxdb3/cloud-dedicated/reference/cli/_index.md @@ -10,10 +10,9 @@ menu: parent: Reference name: CLIs weight: 104 +# draft: true --- -InfluxDB provides command line tools designed to manage and work with your -InfluxDB Cloud Dedicated cluster from the command line. 
The following command line interfaces (CLIs) are available: {{< children >}} diff --git a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/_index.md b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/_index.md index 5c29cbc7a..7dd0ced1c 100644 --- a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/_index.md +++ b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/_index.md @@ -9,12 +9,16 @@ menu: name: influxctl parent: CLIs weight: 101 +aliases: + - /influxdb3/cloud-dedicated/reference/cli/ influxdb3/cloud-dedicated/tags: [cli] --- The `influxctl` command line interface (CLI) writes to, queries, and performs -administrative tasks in an {{< product-name omit=" Clustered" >}} cluster. +administrative tasks in an {{< product-name omit=" Clustered" >}} cluster and +provides a scriptable way to manage your {{% product-name %}} resources. +- [Key features](#key-features) - [Usage](#usage) - [Commands](#commands) - [Global flags](#global-flags) @@ -22,6 +26,12 @@ administrative tasks in an {{< product-name omit=" Clustered" >}} cluster. - [Configure connection profiles](#configure-connection-profiles) - [Authentication](#authentication) +## Key features + +- Authentication via environment variables or config file +- JSON output for scripting and automation +- Can be integrated into CI/CD pipelines + ## Usage ```sh diff --git a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/cluster/create.md b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/cluster/create.md index 217931389..e6e776475 100644 --- a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/cluster/create.md +++ b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/cluster/create.md @@ -9,42 +9,4 @@ weight: 301 draft: true --- -The `influxctl cluster create` command creates an {{% product-name omit=" Clustered" %}} cluster. 
- -## Usage - -```sh -influxctl cluster create [flags] -``` - -## Flags - -| Flag | | Description | -| :--- | ------------------------- | :-------------------------------------------------------------------------------------------- | -| | `--region` | {{< req >}}: Region to create cluster in | -| | `--category` | {{< req >}}: Cluster category (`contract`, `internal`, `unpaid_poc`, `paid_poc`, or `system`) | -| | `--ingestor-units` | Ingestor units _(default is 0)_ | -| | `--ingestor-granularity` | Ingestor granularity _(default is 0)_ | -| | `--compactor-units` | Compactor units _(default is 0)_ | -| | `--compactor-granularity` | Compactor granularity _(default is 0)_ | -| | `--query-units` | Query units _(default is 0)_ | -| | `--query-granularity` | Query granularity _(default is 0)_ | -| `-h` | `--help` | Output command help | - -{{% caption %}} -_Also see [`influxctl` global flags](/influxdb3/cloud-dedicated/reference/cli/influxctl/#global-flags)._ -{{% /caption %}} - -## Examples - -##### Create an InfluxDB Cloud Dedicated cluster - -```sh -influxctl cluster create \ - --region us-west-2 \ - --category internal \ - --ingestor-units 3 \ - --compactor-units 1 \ - --query-units 1 \ - example-cluster-name -``` + diff --git a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/cluster/update.md b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/cluster/update.md index 59ea3f7df..6d282b5e0 100644 --- a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/cluster/update.md +++ b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/cluster/update.md @@ -9,47 +9,4 @@ weight: 301 draft: true --- -The `influxctl cluster update` command updates an {{% product-name omit=" Clustered" %}} cluster. 
- -## Usage - -```sh -influxctl cluster update [flags] -``` - -## Arguments - -| Argument | Description | -| :------------- | :----------------------- | -| **CLUSTER_ID** | ID of the [cluster](/influxdb3/cloud-dedicated/reference/glossary/#cluster) to get | - -## Flags - -| Flag | | Description | -| :--- | ------------------------- | :-------------------------------------------------------------------------------------------- | -| | `--state` | {{< req >}}: Cluster state (`ready` or `deleted`) | -| | `--category` | {{< req >}}: Cluster category (`contract`, `internal`, `unpaid_poc`, `paid_poc`, or `system`) | -| | `--ingestor-units` | Ingestor units _(default is 0)_ | -| | `--ingestor-granularity` | Ingestor granularity _(default is 0)_ | -| | `--compactor-units` | Compactor units _(default is 0)_ | -| | `--compactor-granularity` | Compactor granularity _(default is 0)_ | -| | `--query-units` | Query units _(default is 0)_ | -| | `--query-granularity` | Query granularity _(default is 0)_ | -| `-h` | `--help` | Output command help | - -{{% caption %}} -_Also see [`influxctl` global flags](/influxdb3/cloud-dedicated/reference/cli/influxctl/#global-flags)._ -{{% /caption %}} - -## Examples - -##### Update an InfluxDB Cloud Dedicated cluster - -```sh -influxctl cluster update \ - --state ready \ - --category contract \ - --ingestor-units 3 \ - --compactor-units 1 \ - --query-units 1 \ -``` + \ No newline at end of file diff --git a/content/influxdb3/cloud-dedicated/reference/client-libraries/_index.md b/content/influxdb3/cloud-dedicated/reference/client-libraries/_index.md index 76fd68ac2..9771b37a0 100644 --- a/content/influxdb3/cloud-dedicated/reference/client-libraries/_index.md +++ b/content/influxdb3/cloud-dedicated/reference/client-libraries/_index.md @@ -4,7 +4,7 @@ description: > InfluxDB client libraries are language-specific tools that integrate with InfluxDB APIs. View the list of available client libraries. 
list_title: API client libraries -weight: 105 +weight: 106 aliases: - /influxdb3/cloud-dedicated/reference/api/client-libraries/ - /influxdb3/cloud-dedicated/tools/client-libraries/ diff --git a/content/influxdb3/cloud-dedicated/write-data/use-telegraf/_index.md b/content/influxdb3/cloud-dedicated/write-data/use-telegraf/_index.md index 28d0fa1d0..f00b102cd 100644 --- a/content/influxdb3/cloud-dedicated/write-data/use-telegraf/_index.md +++ b/content/influxdb3/cloud-dedicated/write-data/use-telegraf/_index.md @@ -39,7 +39,7 @@ Each Telegraf configuration must **have at least one input plugin and one output Telegraf input plugins retrieve metrics from different sources. Telegraf output plugins write those metrics to a destination. -Use the [`outputs.influxdb_v2`](/telegraf/v1/plugins/#output-influxdb_v2) plugin to write metrics collected by Telegraf to {{< product-name >}}. +Use the [`outputs.influxdb_v2`](/telegraf/v1/plugins/#output-influxdb_v2) plugin to write metrics collected by Telegraf to {{% product-name %}}. ```toml # ... diff --git a/content/influxdb3/cloud-dedicated/write-data/use-telegraf/configure/_index.md b/content/influxdb3/cloud-dedicated/write-data/use-telegraf/configure/_index.md index 8d68f0b14..3f7181e7d 100644 --- a/content/influxdb3/cloud-dedicated/write-data/use-telegraf/configure/_index.md +++ b/content/influxdb3/cloud-dedicated/write-data/use-telegraf/configure/_index.md @@ -115,7 +115,7 @@ For {{% product-name %}}, set this to an empty string (`""`). The name of the {{% product-name %}} database to write data to. > [!Note] -> ##### Write to InfluxDB v1.x and {{< product-name >}} +> ##### Write to InfluxDB v1.x and {{% product-name %}} > > If a Telegraf agent is already writing to an InfluxDB v1.x database, > enabling the InfluxDB v2 output plugin will write data to both v1.x and your {{< product-name omit="Clustered" >}} cluster. 
diff --git a/content/influxdb3/cloud-serverless/admin/billing/data-usage.md b/content/influxdb3/cloud-serverless/admin/billing/data-usage.md index 9db594efa..0ae2e30a9 100644 --- a/content/influxdb3/cloud-serverless/admin/billing/data-usage.md +++ b/content/influxdb3/cloud-serverless/admin/billing/data-usage.md @@ -9,8 +9,8 @@ menu: parent: Manage billing name: View data usage related: - - /flux/v0.x/stdlib/experimental/usage/from/ - - /flux/v0.x/stdlib/experimental/usage/limits/ + - /flux/v0/stdlib/experimental/usage/from/ + - /flux/v0/stdlib/experimental/usage/limits/ alt_links: cloud: /influxdb/cloud/account-management/data-usage/ aliases: diff --git a/content/influxdb3/cloud-serverless/admin/billing/limits.md b/content/influxdb3/cloud-serverless/admin/billing/limits.md index cc14d2fb5..2d5eca88a 100644 --- a/content/influxdb3/cloud-serverless/admin/billing/limits.md +++ b/content/influxdb3/cloud-serverless/admin/billing/limits.md @@ -9,8 +9,8 @@ menu: parent: Manage billing name: Adjustable quotas and limits related: - - /flux/v0.x/stdlib/experimental/usage/from/ - - /flux/v0.x/stdlib/experimental/usage/limits/ + - /flux/v0/stdlib/experimental/usage/from/ + - /flux/v0/stdlib/experimental/usage/limits/ - /influxdb3/cloud-serverless/write-data/best-practices/ alt_links: cloud: /influxdb/cloud/account-management/limits/ diff --git a/content/influxdb3/cloud-serverless/reference/syntax/delete-predicate.md b/content/influxdb3/cloud-serverless/reference/syntax/delete-predicate.md index 67eb5b02d..dcff10aac 100644 --- a/content/influxdb3/cloud-serverless/reference/syntax/delete-predicate.md +++ b/content/influxdb3/cloud-serverless/reference/syntax/delete-predicate.md @@ -5,7 +5,7 @@ description: > InfluxDB uses an InfluxQL-like predicate syntax to determine what data points to delete. 
menu: influxdb3_cloud_serverless: - parent: Syntax + parent: Other syntaxes name: Delete predicate weight: 104 influxdb3/cloud-serverless/tags: [syntax, delete] diff --git a/content/influxdb3/cloud-serverless/write-data/use-telegraf/_index.md b/content/influxdb3/cloud-serverless/write-data/use-telegraf/_index.md index d1ce67734..618b79969 100644 --- a/content/influxdb3/cloud-serverless/write-data/use-telegraf/_index.md +++ b/content/influxdb3/cloud-serverless/write-data/use-telegraf/_index.md @@ -40,7 +40,7 @@ Each Telegraf configuration must **have at least one input plugin and one output Telegraf input plugins retrieve metrics from different sources. Telegraf output plugins write those metrics to a destination. -Use the [`outputs.influxdb_v2`](/telegraf/v1/plugins/#output-influxdb_v2) plugin to write metrics collected by Telegraf to {{< product-name >}}. +Use the [`outputs.influxdb_v2`](/telegraf/v1/plugins/#output-influxdb_v2) plugin to write metrics collected by Telegraf to {{% product-name %}}. ```toml # ... diff --git a/content/influxdb3/cloud-serverless/write-data/use-telegraf/configure/_index.md b/content/influxdb3/cloud-serverless/write-data/use-telegraf/configure/_index.md index a207a9961..2e45a57e0 100644 --- a/content/influxdb3/cloud-serverless/write-data/use-telegraf/configure/_index.md +++ b/content/influxdb3/cloud-serverless/write-data/use-telegraf/configure/_index.md @@ -110,7 +110,7 @@ For {{% product-name %}}, set this to an empty string (`""`). The name of the {{% product-name %}} bucket to write data to. > [!Note] -> ##### Write to InfluxDB v1.x and {{< product-name >}} +> ##### Write to InfluxDB v1.x and {{% product-name %}} > > If a Telegraf agent is already writing to an InfluxDB v1.x database, > enabling the InfluxDB v2 output plugin will write data to both v1.x and your {{< product-name >}} bucket. 
diff --git a/content/influxdb3/clustered/_index.md b/content/influxdb3/clustered/_index.md index 718c4892b..64079c17a 100644 --- a/content/influxdb3/clustered/_index.md +++ b/content/influxdb3/clustered/_index.md @@ -18,7 +18,7 @@ The InfluxDB time series platform is designed to handle high write and query loa Learn how to use and leverage InfluxDB Clustered for your specific time series use case. -Contact InfluxData Sales +Run an {{% product-name %}} proof of concept (PoC) Get started with InfluxDB Clustered ## InfluxDB 3 diff --git a/content/influxdb3/clustered/admin/backup-restore.md b/content/influxdb3/clustered/admin/backup-restore.md index e5dfebd7c..d321c81c3 100644 --- a/content/influxdb3/clustered/admin/backup-restore.md +++ b/content/influxdb3/clustered/admin/backup-restore.md @@ -55,7 +55,12 @@ snapshot. When a snapshot is restored to the Catalog store, the Compactor A _soft delete_ refers to when, on compaction, the Compactor sets a `deleted_at` timestamp on the Parquet file entry in the Catalog. The Parquet file is no -longer queryable, but remains intact in the object store. +longer queryable, but remains intact in the object store. + +> [!Note] +> Soft deletes are a mechanism of the {{% product-name %}} Catalog, not of the +> underlying object storage provider. Soft deletes do not modify objects in the +> object store; only Catalog entries that reference objects in the object store. ## Hard delete @@ -219,6 +224,15 @@ written on or around the beginning of the next hour. Use the following process to restore your InfluxDB cluster to a recovery point using Catalog store snapshots: +> [!Warning] +> +> #### Use the same InfluxDB Clustered version used to generate the snapshot +> +> When restoring an InfluxDB cluster to a recovery point, use the same version +> of InfluxDB Clustered used to generate the Catalog store snapshot. +> You may need to [downgrade to a previous version](/influxdb3/clustered/admin/upgrade/) +> before restoring. + 1. 
**Install prerequisites:** - `kubectl` CLI for managing your Kubernetes deployment. @@ -273,7 +287,8 @@ using Catalog store snapshots: metadata: name: influxdb namespace: influxdb - pause: true + spec: + pause: true # ... ``` @@ -331,7 +346,8 @@ using Catalog store snapshots: metadata: name: influxdb namespace: influxdb - pause: false + spec: + pause: false # ... ``` @@ -349,8 +365,6 @@ Your InfluxDB cluster is now restored to the recovery point. When the Garbage Collector runs, it identifies what Parquet files are not associated with the recovery point and [soft deletes](#soft-delete) them. - - ## Resources ### prep\_pg\_dump.awk diff --git a/content/influxdb3/clustered/admin/databases/_index.md b/content/influxdb3/clustered/admin/databases/_index.md index f37eeb678..940c3ad01 100644 --- a/content/influxdb3/clustered/admin/databases/_index.md +++ b/content/influxdb3/clustered/admin/databases/_index.md @@ -52,7 +52,7 @@ never be removed by the retention enforcement service. You can customize [table (measurement) limits](#table-limit) and [table column limits](#column-limit) when you [create](#create-a-database) or -[update a database](#update-a-database) in {{< product-name >}}. +[update a database](#update-a-database) in {{% product-name %}}. ### Table limit diff --git a/content/influxdb3/clustered/admin/databases/delete.md b/content/influxdb3/clustered/admin/databases/delete.md index 72c42b14c..9f39e8fd5 100644 --- a/content/influxdb3/clustered/admin/databases/delete.md +++ b/content/influxdb3/clustered/admin/databases/delete.md @@ -22,9 +22,9 @@ to delete a database from your InfluxDB cluster. 1. If you haven't already, [download and install the `influxctl` CLI](/influxdb3/clustered/reference/cli/influxctl/#download-and-install-influxctl). 2. Run the `influxctl database delete` command and provide the following: - - Name of the database to delete + - The name of the database to delete -3. Confirm that you want to delete the database. +3. 
Confirm that you want to delete the database. {{% code-placeholders "DATABASE_NAME" %}} ```sh @@ -37,9 +37,12 @@ influxctl database delete DATABASE_NAME > > Once a database is deleted, data stored in that database cannot be recovered. > -> #### Cannot reuse database names -> -> After a database is deleted, you cannot reuse the same name for a new database. +> #### Wait before writing to a new database with the same name +> +> After deleting a database from your {{% product-name omit=" Clustered" %}} +> cluster, you can reuse the name to create a new database, but **wait two to +> three minutes** after deleting the previous database before writing to the new +> database to allow write caches to clear. > > #### Never directly modify the Catalog > diff --git a/content/influxdb3/clustered/admin/tokens/database/delete.md b/content/influxdb3/clustered/admin/tokens/database/delete.md deleted file mode 100644 index cef34cba3..000000000 --- a/content/influxdb3/clustered/admin/tokens/database/delete.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Delete a database token -description: > - Use the [`influxctl token delete` command](/influxdb3/clustered/reference/cli/influxctl/token/delete/) - to delete a token from your InfluxDB cluster and revoke all - permissions associated with the token. - Provide the ID of the token you want to delete. -menu: - influxdb3_clustered: - parent: Database tokens -weight: 203 -list_code_example: | - ```sh - influxctl token delete - ``` -aliases: - - /influxdb3/clustered/admin/tokens/delete/ ---- - -Use the [`influxctl token delete` command](/influxdb3/clustered/reference/cli/influxctl/token/delete/) -to delete a database token from your InfluxDB cluster and revoke -all permissions associated with the token. - -1. If you haven't already, [download and install the `influxctl` CLI](/influxdb3/clustered/reference/cli/influxctl/#download-and-install-influxctl). -2. 
Run the [`influxctl token list` command](/influxdb3/clustered/reference/cli/influxctl/token/list) - to output tokens with their IDs. - Copy the **token ID** of the token you want to delete. - - ```sh - influxctl token list - ``` - -3. Run the `influxctl token delete` command and provide the following: - - - Token ID to delete - -4. Confirm that you want to delete the token. - -{{% code-placeholders "TOKEN_ID" %}} -```sh -influxctl token delete TOKEN_ID -``` -{{% /code-placeholders %}} - -> [!Warning] -> #### Deleting a token is immediate and cannot be undone -> -> Deleting a database token is a destructive action that takes place immediately -> and cannot be undone. -> -> #### Rotate deleted tokens -> -> After deleting a database token, any clients using the deleted token need to be -> updated with a new database token to continue to interact with your InfluxDB -> cluster. diff --git a/content/influxdb3/clustered/admin/tokens/database/revoke.md b/content/influxdb3/clustered/admin/tokens/database/revoke.md new file mode 100644 index 000000000..90a3a71c9 --- /dev/null +++ b/content/influxdb3/clustered/admin/tokens/database/revoke.md @@ -0,0 +1,56 @@ +--- +title: Revoke a database token +description: > + Use the [`influxctl token revoke` command](/influxdb3/clustered/reference/cli/influxctl/token/revoke/) + to revoke a token from your InfluxDB cluster and disable all + permissions associated with the token. + Provide the ID of the token you want to revoke. +menu: + influxdb3_clustered: + parent: Database tokens +weight: 203 +list_code_example: | + ```sh + influxctl token revoke + ``` +aliases: + - /influxdb3/clustered/admin/tokens/delete/ + - /influxdb3/clustered/admin/tokens/database/delete/ +--- + +Use the [`influxctl token revoke` command](/influxdb3/clustered/reference/cli/influxctl/token/revoke/) +to revoke a database token from your {{< product-name omit=" Clustered" >}} cluster and disable +all permissions associated with the token. + +1. 
If you haven't already, [download and install the `influxctl` CLI](/influxdb3/clustered/reference/cli/influxctl/#download-and-install-influxctl). +2. Run the [`influxctl token list` command](/influxdb3/clustered/reference/cli/influxctl/token/list) + to output tokens with their IDs. + Copy the **token ID** of the token you want to delete. + + ```sh + influxctl token list + ``` + +3. Run the `influxctl token revoke` command and provide the following: + + - Token ID to revoke + +4. Confirm that you want to revoke the token. + +{{% code-placeholders "TOKEN_ID" %}} +```sh +influxctl token revoke TOKEN_ID +``` +{{% /code-placeholders %}} + +> [!Warning] +> #### Revoking a token is immediate and cannot be undone +> +> Revoking a database token is a destructive action that takes place immediately +> and cannot be undone. +> +> #### Rotate revoked tokens +> +> After revoking a database token, any clients using the revoked token need to +> be updated with a new database token to continue to interact with your +> {{% product-name omit=" Clustered" %}} cluster. diff --git a/content/influxdb3/clustered/admin/upgrade.md b/content/influxdb3/clustered/admin/upgrade.md index 5691fc4f0..26ad743ef 100644 --- a/content/influxdb3/clustered/admin/upgrade.md +++ b/content/influxdb3/clustered/admin/upgrade.md @@ -66,8 +66,8 @@ us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:PACKAGE_VERSION ### Identify the version to upgrade to -All available InfluxDB Clustered package versions are provided at -[oci.influxdata.com](https://oci.influxdata.com). +All available InfluxDB Clustered package versions are provided in the +[InfluxDB Clustered release notes](/influxdb3/clustered/reference/release-notes/clustered/). Find the package version you want to upgrade to and copy the version number. @@ -76,7 +76,7 @@ Find the package version you want to upgrade to and copy the version number. 
Some InfluxDB Clustered releases are _checkpoint releases_ that introduce a breaking change to an InfluxDB component. Checkpoint releases are only made when absolutely necessary and are clearly -identified at [oci.influxdata.com](https://oci.influxdata.com). +identified in the [InfluxDB Clustered release notes](/influxdb3/clustered/reference/release-notes/clustered/). **When upgrading, always upgrade to each checkpoint release first, before proceeding to newer versions.** diff --git a/content/influxdb3/clustered/reference/cli/influxctl/database/create.md b/content/influxdb3/clustered/reference/cli/influxctl/database/create.md index ce9d00750..7a71ec00d 100644 --- a/content/influxdb3/clustered/reference/cli/influxctl/database/create.md +++ b/content/influxdb3/clustered/reference/cli/influxctl/database/create.md @@ -68,17 +68,12 @@ Be sure to follow [partitioning best practices](/influxdb3/clustered/admin/custo > Otherwise, InfluxDB omits time from the partition template and won't compact partitions. > [!Warning] -> #### Cannot reuse deleted database names -> -> You cannot reuse the name of a deleted database when creating a new database. -> If you try to reuse the name, the API response status code -> is `400` and the `message` field contains the following: -> -> ```text -> 'iox_proxy.app.CreateDatabase failed to create database: \ -> rpc error: code = AlreadyExists desc = A namespace with the -> name `` already exists' -> ``` +> #### Wait before writing to a new database with the same name as a deleted database +> +> After deleting a database from your {{% product-name omit=" Clustered" %}} +> cluster, you can reuse the name to create a new database, but **wait two to +> three minutes** after deleting the previous database before writing to the new +> database to allow write caches to clear. 
## Usage diff --git a/content/influxdb3/clustered/reference/cli/influxctl/database/delete.md b/content/influxdb3/clustered/reference/cli/influxctl/database/delete.md index 21ee800f0..990a0b662 100644 --- a/content/influxdb3/clustered/reference/cli/influxctl/database/delete.md +++ b/content/influxdb3/clustered/reference/cli/influxctl/database/delete.md @@ -1,14 +1,16 @@ --- title: influxctl database delete description: > - The `influxctl database delete` command deletes a database from an InfluxDB cluster. + The `influxctl database delete` command deletes a database from an + {{% product-name omit=" Clustered" %}} cluster. menu: influxdb3_clustered: parent: influxctl database weight: 301 --- -The `influxctl database delete` command deletes a database from an InfluxDB cluster. +The `influxctl database delete` command deletes a database from an +{{< product-name omit=" Clustered" >}} cluster. ## Usage @@ -24,10 +26,12 @@ influxctl database delete [command options] [--force] [ > Deleting a database is a destructive action that cannot be undone. > -> #### Cannot reuse deleted database names -> -> After deleting a database, you cannot reuse the name of the deleted database -> when creating a new database. +> #### Wait before writing to a new database with the same name +> +> After deleting a database from your {{% product-name omit=" Clustered" %}} +> cluster, you can reuse the name to create a new database, but **wait two to +> three minutes** after deleting the previous database before writing to the new +> database to allow write caches to clear. 
## Arguments diff --git a/content/influxdb3/clustered/reference/release-notes/_index.md b/content/influxdb3/clustered/reference/release-notes/_index.md index 30397e407..28b4332a4 100644 --- a/content/influxdb3/clustered/reference/release-notes/_index.md +++ b/content/influxdb3/clustered/reference/release-notes/_index.md @@ -7,7 +7,7 @@ menu: influxdb3_clustered: name: Release notes parent: Reference -weight: 190 +weight: 101 --- View release notes and updates for products and tools related to diff --git a/content/influxdb3/clustered/reference/release-notes/clustered.md b/content/influxdb3/clustered/reference/release-notes/clustered.md index 1aaf9cef1..7c0d688fe 100644 --- a/content/influxdb3/clustered/reference/release-notes/clustered.md +++ b/content/influxdb3/clustered/reference/release-notes/clustered.md @@ -25,6 +25,30 @@ weight: 201 --- +## 20250508-1719206 {date="2025-05-08"} + +### Quickstart + +```yaml +spec: + package: + image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20250508-1719206 +``` + +### Changes + +#### Deployment + +- Expose the v0 REST API for the management and authorization service (Granite). + +#### Database Engine + +- Reuse database names after deletion. +- Create database tokens with expiration dates. +- Revoke database tokens rather than deleting them. + +--- + ## 20250212-1570743 {date="2025-02-12"} ### Quickstart diff --git a/content/influxdb3/clustered/write-data/use-telegraf/_index.md b/content/influxdb3/clustered/write-data/use-telegraf/_index.md index a0c1bc189..49d266833 100644 --- a/content/influxdb3/clustered/write-data/use-telegraf/_index.md +++ b/content/influxdb3/clustered/write-data/use-telegraf/_index.md @@ -39,7 +39,7 @@ Each Telegraf configuration must **have at least one input plugin and one output Telegraf input plugins retrieve metrics from different sources. Telegraf output plugins write those metrics to a destination. 
-Use the [`outputs.influxdb_v2`](/telegraf/v1/plugins/#output-influxdb_v2) plugin to write metrics collected by Telegraf to {{< product-name >}}. +Use the [`outputs.influxdb_v2`](/telegraf/v1/plugins/#output-influxdb_v2) plugin to write metrics collected by Telegraf to {{% product-name %}}. ```toml # ... diff --git a/content/influxdb3/clustered/write-data/use-telegraf/configure/_index.md b/content/influxdb3/clustered/write-data/use-telegraf/configure/_index.md index 2489bfa6b..f6d3c681e 100644 --- a/content/influxdb3/clustered/write-data/use-telegraf/configure/_index.md +++ b/content/influxdb3/clustered/write-data/use-telegraf/configure/_index.md @@ -112,7 +112,7 @@ For {{% product-name %}}, set this to an empty string (`""`). The name of the {{% product-name %}} database to write data to. > [!Note] -> ##### Write to InfluxDB v1.x and {{< product-name >}} +> ##### Write to InfluxDB v1.x and {{% product-name %}} > > If a Telegraf agent is already writing to an InfluxDB v1.x database, > enabling the InfluxDB v2 output plugin will write data to both v1.x and your {{< product-name omit="Clustered" >}} cluster. 
diff --git a/content/influxdb3/core/_index.md b/content/influxdb3/core/_index.md index 82dfea7d1..ef374524f 100644 --- a/content/influxdb3/core/_index.md +++ b/content/influxdb3/core/_index.md @@ -9,9 +9,10 @@ menu: influxdb3_core: name: InfluxDB 3 Core weight: 1 -source: /shared/v3-core-get-started/_index.md +source: /shared/influxdb3/_index.md --- \ No newline at end of file diff --git a/content/influxdb3/core/admin/distinct-value-cache/_index.md b/content/influxdb3/core/admin/distinct-value-cache/_index.md index cdf0b5837..5e9004280 100644 --- a/content/influxdb3/core/admin/distinct-value-cache/_index.md +++ b/content/influxdb3/core/admin/distinct-value-cache/_index.md @@ -16,4 +16,5 @@ source: /shared/influxdb3-admin/distinct-value-cache/_index.md --- +// SOURCE content/shared/influxdb3-admin/distinct-value-cache/_index.md +--> diff --git a/content/influxdb3/core/admin/last-value-cache/_index.md b/content/influxdb3/core/admin/last-value-cache/_index.md index 20013f73b..2560c1ead 100644 --- a/content/influxdb3/core/admin/last-value-cache/_index.md +++ b/content/influxdb3/core/admin/last-value-cache/_index.md @@ -17,4 +17,5 @@ source: /shared/influxdb3-admin/last-value-cache/_index.md --- +// SOURCE content/shared/influxdb3-admin/last-value-cache/_index.md +--> diff --git a/content/influxdb3/core/admin/tokens/_index.md b/content/influxdb3/core/admin/tokens/_index.md index 6ace87894..c4604b078 100644 --- a/content/influxdb3/core/admin/tokens/_index.md +++ b/content/influxdb3/core/admin/tokens/_index.md @@ -1,7 +1,7 @@ --- title: Manage tokens description: > - Manage tokens to authenticate and authorize access to resources and data in an {{< product-name >}} instance. + Manage tokens to authenticate and authorize access to server actions, resources, and data in an {{< product-name >}} instance. 
menu: influxdb3_core: parent: Administer InfluxDB @@ -11,4 +11,4 @@ source: /shared/influxdb3-admin/tokens/_index.md > \ No newline at end of file +--> \ No newline at end of file diff --git a/content/influxdb3/core/admin/tokens/admin/_index.md b/content/influxdb3/core/admin/tokens/admin/_index.md index ac5510003..f776fca98 100644 --- a/content/influxdb3/core/admin/tokens/admin/_index.md +++ b/content/influxdb3/core/admin/tokens/admin/_index.md @@ -11,9 +11,9 @@ menu: name: Admin tokens weight: 101 influxdb3/core/tags: [tokens] -source: /shared/influxdb3-admin/tokens/_index.md +source: /shared/influxdb3-admin/tokens/admin/_index.md --- \ No newline at end of file diff --git a/content/influxdb3/core/admin/tokens/admin/create.md b/content/influxdb3/core/admin/tokens/admin/create.md index fe1abbe5e..498c6cdef 100644 --- a/content/influxdb3/core/admin/tokens/admin/create.md +++ b/content/influxdb3/core/admin/tokens/admin/create.md @@ -2,7 +2,7 @@ title: Create an admin token description: > Use the [`influxdb3 create token --admin` command](/influxdb3/core/reference/cli/influxdb3/create/token/) - or the [HTTP API](/influxdb3/core/api/v3/) + or the HTTP API [`/api/v3/configure/token/admin`](/influxdb3/core/api/v3/#operation/PostCreateAdminToken) endpoint to create an [admin token](/influxdb3/core/admin/tokens/admin/) for your {{< product-name omit="Clustered" >}} instance. An admin token grants access to all actions on the server. 
menu: diff --git a/content/influxdb3/core/admin/tokens/admin/regenerate.md b/content/influxdb3/core/admin/tokens/admin/regenerate.md index fe7038826..f8e8d7ab9 100644 --- a/content/influxdb3/core/admin/tokens/admin/regenerate.md +++ b/content/influxdb3/core/admin/tokens/admin/regenerate.md @@ -2,10 +2,9 @@ title: Regenerate an admin token description: > Use the [`influxdb3 create token --admin` command](/influxdb3/core/reference/cli/influxdb3/create/token/) - or the [HTTP API](/influxdb3/core/api/v3/) - to regenerate an [admin token](/influxdb3/core/admin/tokens/admin/) for your {{< product-name omit="Clustered" >}} instance. - An admin token grants access to all actions on the server. - Regenerating an admin token deactivates the previous token. + or the HTTP API [`/api/v3/configure/token/admin/regenerate`](/influxdb3/core/api/v3/#operation/PostRegenerateAdminToken) endpoint + to regenerate an [operator token](/influxdb3/core/admin/tokens/admin/) for your {{< product-name omit="Clustered" >}} instance. + Regenerating an operator token deactivates the previous token. 
menu: influxdb3_core: parent: Admin tokens @@ -14,8 +13,15 @@ list_code_example: | ##### CLI ```bash influxdb3 create token --admin \ - --token ADMIN_TOKEN \ --regenerate + OPERATOR_TOKEN + ``` + #### HTTP API + ```bash + curl -X POST "http://{{< influxdb/host >}}/api/v3/configure/token/admin/regenerate" \ + --header 'Authorization: Bearer OPERATOR_TOKEN' \ + --header 'Accept: application/json' \ + --header 'Content-Type: application/json' ``` source: /shared/influxdb3-admin/tokens/admin/regenerate.md --- diff --git a/content/influxdb3/core/extend-plugin.md b/content/influxdb3/core/extend-plugin.md new file mode 100644 index 000000000..736672a07 --- /dev/null +++ b/content/influxdb3/core/extend-plugin.md @@ -0,0 +1,17 @@ +--- +title: Extend plugins with API features and state management +description: | + The Processing engine includes an API that allows your plugins to interact with your data, build and write line protocol, and maintain state between executions. +menu: + influxdb3_core: + name: Extend plugins + parent: Processing engine and Python plugins +weight: 4 +influxdb3/core/tags: [processing engine, plugins, API, python] +source: /shared/extended-plugin-api.md +--- + + + diff --git a/content/influxdb3/core/get-started/_index.md b/content/influxdb3/core/get-started/_index.md index fd81839af..16398f32f 100644 --- a/content/influxdb3/core/get-started/_index.md +++ b/content/influxdb3/core/get-started/_index.md @@ -13,7 +13,7 @@ related: - /influxdb3/core/admin/query-system-data/ - /influxdb3/core/write-data/ - /influxdb3/core/query-data/ -source: /shared/v3-core-get-started/_index.md +source: /shared/influxdb3-get-started/_index.md prepend: | > [!Note] > InfluxDB 3 Core is purpose-built for real-time data monitoring and recent data.
@@ -26,5 +26,5 @@ prepend: | diff --git a/content/influxdb3/core/install.md b/content/influxdb3/core/install.md index 6b6bf6540..a423ff48c 100644 --- a/content/influxdb3/core/install.md +++ b/content/influxdb3/core/install.md @@ -12,7 +12,7 @@ alt_links: - [System Requirements](#system-requirements) - [Quick install](#quick-install) -- [Download {{< product-name >}} binaries](#download-influxdb-3-{{< product-key >}}-binaries) +- [Download {{% product-name %}} binaries](#download-influxdb-3-{{< product-key >}}-binaries) - [Docker image](#docker-image) ## System Requirements @@ -79,7 +79,7 @@ source ~/.zshrc {{% /code-tab-content %}} {{< /code-tabs-wrapper >}} -## Download {{< product-name >}} binaries +## Download {{% product-name %}} binaries {{< tabs-wrapper >}} {{% tabs %}} @@ -175,7 +175,7 @@ influxdb:3-{{< product-key >}} container_name: influxdb3-{{< product-key >}} image: influxdb:3-{{< product-key >}} ports: - - 9999:9999 + - 8181:8181 command: - influxdb3 - serve diff --git a/content/influxdb3/core/query-data/execute-queries/influxdb3-cli.md b/content/influxdb3/core/query-data/execute-queries/influxdb3-cli.md index 43222ff08..af911f486 100644 --- a/content/influxdb3/core/query-data/execute-queries/influxdb3-cli.md +++ b/content/influxdb3/core/query-data/execute-queries/influxdb3-cli.md @@ -13,7 +13,7 @@ related: - /influxdb3/core/reference/cli/influxdb3/query/ - /influxdb3/core/reference/sql/ - /influxdb3/core/reference/influxql/ - # - /influxdb3/core/get-started/query/#execute-an-sql-query, Get started querying data + # - /influxdb3/core/query-data/execute-queries/, Get started querying data list_code_example: | ```sh influxdb3 query \ diff --git a/content/influxdb3/core/reference/cli/influxdb3/_index.md b/content/influxdb3/core/reference/cli/influxdb3/_index.md index efa782e42..221e4e654 100644 --- a/content/influxdb3/core/reference/cli/influxdb3/_index.md +++ b/content/influxdb3/core/reference/cli/influxdb3/_index.md @@ -103,7 +103,7 @@ influxdb3 -h 
influxdb3 --help ``` -### Run the {{< product-name >}} server with extra verbose logging +### Run the {{% product-name %}} server with extra verbose logging @@ -114,7 +114,7 @@ influxdb3 serve -v \ --node-id my-host-01 ``` -### Run {{< product-name >}} with debug logging using LOG_FILTER +### Run {{% product-name %}} with debug logging using LOG_FILTER diff --git a/content/influxdb3/core/reference/cli/influxdb3/create/plugin.md b/content/influxdb3/core/reference/cli/influxdb3/create/plugin.md deleted file mode 100644 index 84b793b53..000000000 --- a/content/influxdb3/core/reference/cli/influxdb3/create/plugin.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: influxdb3 create plugin -description: > - The `influxdb3 create plugin` command creates a new processing engine plugin. -menu: - influxdb3_core: - parent: influxdb3 create - name: influxdb3 create plugin -weight: 400 -source: /shared/influxdb3-cli/create/plugin.md ---- - - diff --git a/content/influxdb3/core/reference/cli/influxdb3/create/token.md b/content/influxdb3/core/reference/cli/influxdb3/create/token/_index.md similarity index 60% rename from content/influxdb3/core/reference/cli/influxdb3/create/token.md rename to content/influxdb3/core/reference/cli/influxdb3/create/token/_index.md index a27d89942..c85988446 100644 --- a/content/influxdb3/core/reference/cli/influxdb3/create/token.md +++ b/content/influxdb3/core/reference/cli/influxdb3/create/token/_index.md @@ -5,11 +5,11 @@ description: > menu: influxdb3_core: parent: influxdb3 create - name: influxdb3 create token + name: influxdb3 create token weight: 400 -source: /shared/influxdb3-cli/create/token.md +source: /shared/influxdb3-cli/create/token/_index.md --- \ No newline at end of file diff --git a/content/influxdb3/core/reference/cli/influxdb3/create/token/admin.md b/content/influxdb3/core/reference/cli/influxdb3/create/token/admin.md new file mode 100644 index 000000000..a2646e4ec --- /dev/null +++ 
b/content/influxdb3/core/reference/cli/influxdb3/create/token/admin.md @@ -0,0 +1,15 @@ +--- +title: influxdb3 create token admin +description: > + The `influxdb3 create token admin` command creates an operator token or named admin token with full administrative privileges for server actions. +menu: + influxdb3_core: + parent: influxdb3 create token + name: influxdb3 create token admin +weight: 400 +source: /shared/influxdb3-cli/create/token/admin.md +--- + + \ No newline at end of file diff --git a/content/influxdb3/core/reference/cli/influxdb3/delete/plugin.md b/content/influxdb3/core/reference/cli/influxdb3/delete/plugin.md deleted file mode 100644 index 9a151c04b..000000000 --- a/content/influxdb3/core/reference/cli/influxdb3/delete/plugin.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: influxdb3 delete plugin -description: > - The `influxdb3 delete plugin` command deletes a processing engine plugin. -menu: - influxdb3_core: - parent: influxdb3 delete - name: influxdb3 delete plugin -weight: 400 -source: /shared/influxdb3-cli/delete/last_cache.md ---- - - diff --git a/content/influxdb3/core/write-data/best-practices/optimize-writes.md b/content/influxdb3/core/write-data/best-practices/optimize-writes.md index fb4b43eb1..be2bb6b5f 100644 --- a/content/influxdb3/core/write-data/best-practices/optimize-writes.md +++ b/content/influxdb3/core/write-data/best-practices/optimize-writes.md @@ -16,5 +16,6 @@ source: /shared/influxdb3-write-guides/best-practices/optimize-writes.md --- diff --git a/content/influxdb3/core/write-data/best-practices/schema-design.md b/content/influxdb3/core/write-data/best-practices/schema-design.md index f6b24a7e6..4cbb8a9ec 100644 --- a/content/influxdb3/core/write-data/best-practices/schema-design.md +++ b/content/influxdb3/core/write-data/best-practices/schema-design.md @@ -16,5 +16,6 @@ source: /shared/influxdb3-write-guides/best-practices/schema-design.md --- diff --git a/content/influxdb3/enterprise/_index.md 
b/content/influxdb3/enterprise/_index.md index df990c211..bcf454928 100644 --- a/content/influxdb3/enterprise/_index.md +++ b/content/influxdb3/enterprise/_index.md @@ -9,9 +9,10 @@ menu: influxdb3_enterprise: name: InfluxDB 3 Enterprise weight: 1 -source: /shared/v3-enterprise-get-started/_index.md +source: /shared/influxdb3/_index.md --- diff --git a/content/influxdb3/enterprise/admin/license.md b/content/influxdb3/enterprise/admin/license.md index 38f6ac503..cb1f1bf37 100644 --- a/content/influxdb3/enterprise/admin/license.md +++ b/content/influxdb3/enterprise/admin/license.md @@ -153,7 +153,7 @@ existing license if it's still valid. environment variable 7. If no license is found, the server won't start -#### Example: Start the {{< product-name >}} server with your license email: +#### Example: Start the {{% product-name %}} server with your license email: {{< code-tabs-wrapper >}} {{% code-tabs %}} @@ -187,7 +187,7 @@ influxdb3 serve \ {{% /code-tab-content %}} {{< /code-tabs-wrapper >}} -#### Example: Start the {{< product-name >}} server with your license file: +#### Example: Start the {{% product-name %}} server with your license file: {{< code-tabs-wrapper >}} {{% code-tabs %}} diff --git a/content/influxdb3/enterprise/admin/tokens/_index.md b/content/influxdb3/enterprise/admin/tokens/_index.md index 85bfad732..6c9a079ea 100644 --- a/content/influxdb3/enterprise/admin/tokens/_index.md +++ b/content/influxdb3/enterprise/admin/tokens/_index.md @@ -1,7 +1,7 @@ --- title: Manage tokens description: > - Manage tokens to authenticate and authorize access to resources and data in an {{< product-name >}} instance. + Manage tokens to authenticate and authorize access to server actions, resources, and data in an {{< product-name >}} instance. 
menu: influxdb3_enterprise: parent: Administer InfluxDB diff --git a/content/influxdb3/enterprise/admin/tokens/admin/create.md b/content/influxdb3/enterprise/admin/tokens/admin/create.md index c073c622d..9c821b4ab 100644 --- a/content/influxdb3/enterprise/admin/tokens/admin/create.md +++ b/content/influxdb3/enterprise/admin/tokens/admin/create.md @@ -2,8 +2,8 @@ title: Create an admin token description: > Use the [`influxdb3 create token --admin` command](/influxdb3/enterprise/reference/cli/influxdb3/create/token/) - or the [HTTP API](/influxdb3/enterprise/api/v3/) - to create an [admin token](/influxdb3/enterprise/admin/tokens/admin/) for your {{< product-name omit="Clustered" >}} instance. + or the HTTP API [`/api/v3/configure/token/admin`](/influxdb3/enterprise/api/v3/#operation/PostCreateAdminToken) + endpoint to create an operator or named [admin token](/influxdb3/enterprise/admin/tokens/admin/) for your {{< product-name omit="Clustered" >}} instance. An admin token grants access to all actions on the server. 
menu: influxdb3_enterprise: @@ -12,13 +12,15 @@ weight: 201 list_code_example: | ##### CLI ```bash - influxdb3 create token --admin + influxdb3 create token --admin --name TOKEN_NAME ``` #### HTTP API ```bash - curl -X POST "http://{{< influxdb/host >}}/api/v3/enterprise/configure/token/admin" \ - --header 'Accept: application/json' \ - --header 'Content-Type: application/json' + curl -X POST "http://{{< influxdb/host >}}/api/v3/configure/token/admin" \ + --header 'Authorization: Bearer ADMIN_TOKEN' \ + --json '{ + "name": "TOKEN_NAME" + }' ``` alt_links: cloud-dedicated: /influxdb3/cloud-dedicated/admin/tokens/create-token/ diff --git a/content/influxdb3/enterprise/admin/tokens/admin/regenerate.md b/content/influxdb3/enterprise/admin/tokens/admin/regenerate.md index f595ce3b5..7da106f73 100644 --- a/content/influxdb3/enterprise/admin/tokens/admin/regenerate.md +++ b/content/influxdb3/enterprise/admin/tokens/admin/regenerate.md @@ -1,10 +1,9 @@ --- -title: Regenerate an admin token +title: Regenerate an operator admin token description: > Use the [`influxdb3 create token --admin` command](/influxdb3/enterprise/reference/cli/influxdb3/create/token/) or the [HTTP API](/influxdb3/enterprise/api/v3/) - to regenerate an [admin token](/influxdb3/enterprise/admin/tokens/admin/) for your {{< product-name omit="Clustered" >}} instance. - An admin token grants access to all actions on the server. + to regenerate an [operator token](/influxdb3/enterprise/admin/tokens/admin/) for your {{< product-name omit="Clustered" >}} instance. Regenerating an admin token deactivates the previous token.
menu: influxdb3_enterprise: @@ -14,9 +13,15 @@ list_code_example: | ##### CLI ```bash influxdb3 create token --admin \ - --token ADMIN_TOKEN \ + --token OPERATOR_TOKEN \ --regenerate ``` + + #### HTTP API + ```bash + curl -X POST "http://{{< influxdb/host >}}/api/v3/configure/token/admin/regenerate" \ + --header 'Authorization Bearer OPERATOR_TOKEN' + ``` source: /shared/influxdb3-admin/tokens/admin/regenerate.md --- diff --git a/content/influxdb3/enterprise/admin/tokens/resource/_index.md b/content/influxdb3/enterprise/admin/tokens/resource/_index.md index 2d66b6888..2e9aecb80 100644 --- a/content/influxdb3/enterprise/admin/tokens/resource/_index.md +++ b/content/influxdb3/enterprise/admin/tokens/resource/_index.md @@ -3,7 +3,7 @@ title: Manage resource tokens seotitle: Manage resource tokens in {{< product-name >}} description: > Manage resource tokens in your {{< product-name >}} instance. - Resource tokens grant fine-grained permissions on resources, such as databases + Resource tokens grant permissions on specific resources, such as databases and system information endpoints in your {{< product-name >}} instance. Database resource tokens allow for actions like writing and querying data. menu: @@ -15,13 +15,12 @@ influxdb3/enterprise/tags: [tokens] --- Manage resource tokens in your {{< product-name >}} instance. -Resource tokens grant fine-grained permissions on resources, such as databases -and system information endpoints in your {{< product-name >}} instance. - -- **Databases**: Database tokens allow for actions like writing and querying data. +Resource tokens provide scoped access to specific resources: -- **System resources**: System information tokens allow read access to server runtime statistics and health. - Access controls for system information API endpoints help prevent information leaks and attacks (such as DoS). 
+- **Database tokens**: provide access to specific databases for actions like writing and querying data +- **System tokens**: provide access to system-level resources, such as API endpoints for server runtime statistics and health. + +Resource tokens are user-defined and available only in {{% product-name %}}. {{< children depth="1" >}} diff --git a/content/influxdb3/enterprise/extend-plugin.md b/content/influxdb3/enterprise/extend-plugin.md new file mode 100644 index 000000000..c4752fa97 --- /dev/null +++ b/content/influxdb3/enterprise/extend-plugin.md @@ -0,0 +1,16 @@ +--- +title: Extend plugins with API features and state management +description: | + The Processing engine includes an API that allows your plugins to interact with your data, build and write line protocol, and maintain state between executions. +menu: + influxdb3_enterprise: + name: Extend plugins + parent: Processing engine and Python plugins +weight: 4 +influxdb3/enterprise/tags: [processing engine, plugins, API, python] +source: /shared/extended-plugin-api.md +--- + + \ No newline at end of file diff --git a/content/influxdb3/enterprise/get-started/_index.md b/content/influxdb3/enterprise/get-started/_index.md index 8255d737d..f14095083 100644 --- a/content/influxdb3/enterprise/get-started/_index.md +++ b/content/influxdb3/enterprise/get-started/_index.md @@ -13,10 +13,10 @@ related: - /influxdb3/enterprise/admin/query-system-data/ - /influxdb3/enterprise/write-data/ - /influxdb3/enterprise/query-data/ -source: /shared/v3-enterprise-get-started/_index.md +source: /shared/influxdb3-get-started/_index.md --- diff --git a/content/influxdb3/enterprise/install.md b/content/influxdb3/enterprise/install.md index 90ae37ae1..3893d08d1 100644 --- a/content/influxdb3/enterprise/install.md +++ b/content/influxdb3/enterprise/install.md @@ -12,7 +12,7 @@ alt_links: - [System Requirements](#system-requirements) - [Quick install](#quick-install) -- [Download {{< product-name >}} 
binaries](#download-influxdb-3-{{< product-key >}}-binaries) +- [Download {{% product-name %}} binaries](#download-influxdb-3-{{< product-key >}}-binaries) - [Docker image](#docker-image) ## System Requirements @@ -79,7 +79,7 @@ source ~/.zshrc {{% /code-tab-content %}} {{< /code-tabs-wrapper >}} -## Download {{< product-name >}} binaries +## Download {{% product-name %}} binaries {{< tabs-wrapper >}} {{% tabs %}} diff --git a/content/influxdb3/enterprise/plugins.md b/content/influxdb3/enterprise/plugins.md index 73ad8c3c3..6862a163f 100644 --- a/content/influxdb3/enterprise/plugins.md +++ b/content/influxdb3/enterprise/plugins.md @@ -5,7 +5,7 @@ description: | code on different events in an {{< product-name >}} instance. menu: influxdb3_enterprise: - name: Processing Engine and Python plugins + name: Processing engine and Python plugins weight: 4 influxdb3/enterprise/tags: [processing engine, python] related: diff --git a/content/influxdb3/enterprise/query-data/execute-queries/influxdb3-cli.md b/content/influxdb3/enterprise/query-data/execute-queries/influxdb3-cli.md index 8b19fdc9c..e3540b906 100644 --- a/content/influxdb3/enterprise/query-data/execute-queries/influxdb3-cli.md +++ b/content/influxdb3/enterprise/query-data/execute-queries/influxdb3-cli.md @@ -13,7 +13,7 @@ related: - /influxdb3/enterprise/reference/cli/influxdb3/query/ - /influxdb3/enterprise/reference/sql/ - /influxdb3/enterprise/reference/influxql/ - # - /influxdb3/enterprise/get-started/query/#execute-an-sql-query, Get started querying data + # - /influxdb3/enterprise/query-data/execute-queries/, Get started querying data list_code_example: | ```sh influxdb3 query \ diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/_index.md b/content/influxdb3/enterprise/reference/cli/influxdb3/_index.md index 46eaaff51..db57936cb 100644 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/_index.md +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/_index.md @@ -108,7 +108,7 @@ 
influxdb3 -h influxdb3 --help ``` -### Run the {{< product-name >}} server with extra verbose logging +### Run the {{% product-name %}} server with extra verbose logging @@ -120,7 +120,7 @@ influxdb3 serve -v \ --cluster-id my-cluster-01 ``` -### Run {{< product-name >}} with debug logging using LOG_FILTER +### Run {{% product-name %}} with debug logging using LOG_FILTER diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/create/plugin.md b/content/influxdb3/enterprise/reference/cli/influxdb3/create/plugin.md deleted file mode 100644 index 06f2d8f97..000000000 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/create/plugin.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: influxdb3 create plugin -description: > - The `influxdb3 create plugin` command creates a new processing engine plugin. -menu: - influxdb3_enterprise: - parent: influxdb3 create - name: influxdb3 create plugin -weight: 400 -source: /shared/influxdb3-cli/create/plugin.md ---- - - diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/create/token.md b/content/influxdb3/enterprise/reference/cli/influxdb3/create/token.md deleted file mode 100644 index 968bbde4c..000000000 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/create/token.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: influxdb3 create token -description: > - The `influxdb3 create token` command creates a new authentication token. 
-menu: - influxdb3_enterprise: - parent: influxdb3 create - name: influxdb3 create token -weight: 400 -source: /shared/influxdb3-cli/create/token.md ---- - - diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/create/token/_index.md b/content/influxdb3/enterprise/reference/cli/influxdb3/create/token/_index.md index daffcce7d..1411c22bf 100644 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/create/token/_index.md +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/create/token/_index.md @@ -1,19 +1,16 @@ --- title: influxdb3 create token description: > - The `influxdb3 create token` command creates an admin token or a resource (fine-grained - permissions) token for authenticating and authorizing actions in an {{% product-name %}} instance. + The `influxdb3 create token` command creates an admin token or a scoped resource token for authenticating and authorizing actions in an {{% product-name %}} instance. menu: influxdb3_enterprise: parent: influxdb3 name: influxdb3 create token weight: 300 -aliases: - - /influxdb3/enterprise/reference/cli/influxdb3/create/token/admin/ -source: /shared/influxdb3-cli/create/token.md +source: /shared/influxdb3-cli/create/token/_index.md --- \ No newline at end of file diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/create/token/admin.md b/content/influxdb3/enterprise/reference/cli/influxdb3/create/token/admin.md new file mode 100644 index 000000000..7c4bab6bc --- /dev/null +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/create/token/admin.md @@ -0,0 +1,15 @@ +--- +title: influxdb3 create token admin +description: > + The `influxdb3 create token admin` command creates an operator token or named admin token with full administrative privileges for server actions. 
+menu: + influxdb3_enterprise: + parent: influxdb3 create token + name: influxdb3 create token admin +weight: 400 +source: /shared/influxdb3-cli/create/token/admin.md +--- + + \ No newline at end of file diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/delete/plugin.md b/content/influxdb3/enterprise/reference/cli/influxdb3/delete/plugin.md deleted file mode 100644 index b72a3cee0..000000000 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/delete/plugin.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: influxdb3 delete plugin -description: > - The `influxdb3 delete plugin` command deletes a processing engine plugin. -menu: - influxdb3_enterprise: - parent: influxdb3 delete - name: influxdb3 delete plugin -weight: 400 -source: /shared/influxdb3-cli/delete/last_cache.md ---- - - diff --git a/content/influxdb3/enterprise/reference/internals/authentication.md b/content/influxdb3/enterprise/reference/internals/authentication.md index 95ebeac5a..1df9cc97f 100644 --- a/content/influxdb3/enterprise/reference/internals/authentication.md +++ b/content/influxdb3/enterprise/reference/internals/authentication.md @@ -14,5 +14,5 @@ source: /shared/influxdb3-internals-reference/authentication.md --- \ No newline at end of file +// SOURCE content/shared/influxdb3-internals-reference/authentication.md +--> \ No newline at end of file diff --git a/content/influxdb3/enterprise/write-data/best-practices/optimize-writes.md b/content/influxdb3/enterprise/write-data/best-practices/optimize-writes.md index 42966bfa2..88847adeb 100644 --- a/content/influxdb3/enterprise/write-data/best-practices/optimize-writes.md +++ b/content/influxdb3/enterprise/write-data/best-practices/optimize-writes.md @@ -16,5 +16,6 @@ source: /shared/influxdb3-write-guides/best-practices/optimize-writes.md --- diff --git a/content/influxdb3/enterprise/write-data/best-practices/schema-design.md b/content/influxdb3/enterprise/write-data/best-practices/schema-design.md index e9451b2f3..ed7c9d4b2 
100644 --- a/content/influxdb3/enterprise/write-data/best-practices/schema-design.md +++ b/content/influxdb3/enterprise/write-data/best-practices/schema-design.md @@ -16,5 +16,6 @@ source: /shared/influxdb3-write-guides/best-practices/schema-design.md --- diff --git a/content/influxdb3/explorer/_index.md b/content/influxdb3/explorer/_index.md new file mode 100644 index 000000000..5078f75bb --- /dev/null +++ b/content/influxdb3/explorer/_index.md @@ -0,0 +1,48 @@ +--- +title: InfluxDB 3 Explorer documentation +description: > + InfluxDB 3 Explorer is a standalone web-based interface for interacting with InfluxDB 3 Core and Enterprise. Visualize, query, and manage your time series data efficiently. +menu: + influxdb3_explorer: + name: InfluxDB 3 Explorer +weight: 1 +--- + +InfluxDB 3 Explorer is the standalone web application designed for visualizing, querying, and managing your data stored in InfluxDB 3 Core and Enterprise. +Explorer provides an intuitive interface for interacting with your time series data, streamlining database operations and enhancing data insights. 
+ +> [!Important] +> #### InfluxDB 3 Core or Enterprise v3.1.0 or later required +> +> InfluxDB 3 Explorer is compatible with the following: +> +> - [InfluxDB 3 Core v3.1.0 or later](/influxdb3/core/install/) +> - [InfluxDB 3 Enterprise v3.1.0 or later](/influxdb3/enterprise/install/) + +## Key features + +Use InfluxDB 3 Explorer for: + +- **Database and query management**: Create and manage InfluxDB 3 databases, admin and resource tokens, and configure new InfluxDB 3 Enterprise instances +- **Data visualization and analysis**: Query data with a built-in visualizer for enhanced data insights +- **Data ingestion**: Write new data and set up Telegraf configurations + +## Quick start + +Run the Docker image to start InfluxDB 3 Explorer: + +```sh +# Pull the Docker image +docker pull quay.io/influxdb/influxdb3-explorer:latest + +# Run the Docker container +docker run --detach \ + --name influxdb3-explorer \ + --publish 8888:80 \ + --publish 8889:8888 \ + quay.io/influxdb/influxdb3-explorer:latest \ + --mode=admin +``` + +Install and run InfluxDB 3 Explorer +Get started with InfluxDB 3 Explorer diff --git a/content/influxdb3/explorer/about/_index.md b/content/influxdb3/explorer/about/_index.md new file mode 100644 index 000000000..9054300a4 --- /dev/null +++ b/content/influxdb3/explorer/about/_index.md @@ -0,0 +1,28 @@ +--- +title: About the InfluxDB 3 Explorer project +description: > + Learn about InfluxDB 3 Explorer, the user interface and query tool for InfluxDB 3. +menu: + influxdb3_explorer: + name: About Explorer +weight: 10 +--- + +InfluxDB 3 Explorer is the user interface component of the InfluxDB 3 platform. +It provides visual management of databases and tokens and an easy way to query +your time series data. Explorer is fully-featured for [InfluxDB 3 Core](/influxdb3/core/) +and [Enterprise](/influxdb3/enterprise/).
In a future release it will also be able to +be used to query [InfluxDB Cloud Serverless](/influxdb3/cloud-serverless/), +[InfluxDB Cloud Dedicated](/influxdb3/cloud-dedicated/) +and [InfluxDB Clustered](/influxdb3/clustered/). + +## Third Party Software + +InfluxData products contain third party software, which means the copyrighted, +patented, or otherwise legally protected software of third parties that is +incorporated in InfluxData products. + +Third party suppliers make no representation nor warranty with respect to such +third party software or any portion thereof. Third party suppliers assume no +liability for any claim that might arise with respect to such third party software, +nor for a customer’s use of or inability to use the third party software. diff --git a/content/influxdb3/explorer/get-started/_index.md b/content/influxdb3/explorer/get-started/_index.md new file mode 100644 index 000000000..e15e4a3c4 --- /dev/null +++ b/content/influxdb3/explorer/get-started/_index.md @@ -0,0 +1,14 @@ +--- +title: Get started using InfluxDB 3 Explorer +description: Follow steps to get started using InfluxDB 3 Explorer. +menu: + influxdb3_explorer: + name: Get started +weight: 3 +--- + +Follow steps to get started using InfluxDB 3 Explorer. + +{{< youtube "zW2Hi1Ki4Eo" >}} + + diff --git a/content/influxdb3/explorer/get-started/connect.md b/content/influxdb3/explorer/get-started/connect.md new file mode 100644 index 000000000..7d717c024 --- /dev/null +++ b/content/influxdb3/explorer/get-started/connect.md @@ -0,0 +1,12 @@ +--- +title: Connect to a server +description: + Use InfluxDB 3 Explorer to connect to an InfluxDB 3 server. +menu: + influxdb3_explorer: + parent: Get started +weight: 101 +draft: true +--- + +Use InfluxDB 3 Explorer to connect to an InfluxDB 3 server. 
\ No newline at end of file diff --git a/content/influxdb3/explorer/install.md b/content/influxdb3/explorer/install.md new file mode 100644 index 000000000..a1858d2a2 --- /dev/null +++ b/content/influxdb3/explorer/install.md @@ -0,0 +1,222 @@ +--- +title: Install and run InfluxDB 3 Explorer +description: > + Use [Docker](https://docker.com) to install and run **InfluxDB 3 Explorer**. +menu: + influxdb3_explorer: + name: Install Explorer +weight: 2 +--- + +Use [Docker](https://docker.com) to install and run **InfluxDB 3 Explorer**. + + +- [Run the InfluxDB 3 Explorer Docker container](#run-the-influxdb-3-explorer-docker-container) +- [Enable TLS/SSL (HTTPS)](#enable-tlsssl-https) +- [Pre-configure InfluxDB connection settings](#pre-configure-influxdb-connection-settings) +- [Run in query or admin mode](#run-in-query-or-admin-mode) + - [Run in query mode](#run-in-query-mode) + - [Run in admin mode](#run-in-admin-mode) +- [Environment Variables](#environment-variables) +- [Volume Reference](#volume-reference) +- [Exposed Ports](#exposed-ports) + - [Custom port mapping](#custom-port-mapping) + + +## Run the InfluxDB 3 Explorer Docker container + +1. **Install Docker** + + If you haven't already, install [Docker](https://docs.docker.com/engine/) or + [Docker Desktop](https://docs.docker.com/desktop/). + +2. **Pull the {{% product-name %}} Docker image** + + ```bash + docker pull quay.io/influxdb/influxdb3-explorer:latest + ``` + +3. 
**Create local directories** _(optional)_ + + {{% product-name %}} can mount the following directories on your local + machine: + + | Directory | Description | Permissions | + | :--------- | :------------------------------------------------------------------------------------------------ | :---------: | + | `./db` | Stores {{% product-name %}} application data | 700 | + | `./config` | Stores [pre-configured InfluxDB connection settings](#pre-configure-influxdb-connection-settings) | 755 | + | `./ssl` | Stores TLS/SSL certificates _(Required when [using TLS/SSL](#enable-tlsssl-https))_ | 755 | + + > [!Important] + > If you don't create and mount a local `./db` directory, {{% product-name %}} + > stores application data in the container's file system. + > This data will be lost when the container is deleted. + + To create these directories with the appropriate permissions, run the + following commands from your current working directory: + + ```bash + mkdir -m 700 ./db + mkdir -m 755 ./config + mkdir -m 755 ./ssl + ``` + +4. **Run the {{% product-name %}} container** + + Use the `docker run` command to start the {{% product-name %}} container. + Include the following: + + - Port mappings: + - `8888` to `80` (or `443` if using TLS/SSL) + - `8889` to `8888` + - Mounted volumes: + - `$(pwd)/db:/db:rw` + - `$(pwd)/config:/app-root/config:ro` + - `$(pwd)/ssl:/etc/nginx/ssl:ro` + - Any of the available [environment variables](#environment-variables) + + ```bash + docker run --detach \ + --name influxdb3-explorer \ + --publish 8888:80 \ + --publish 8889:8888 \ + --volume $(pwd)/config:/app-root/config:ro \ + --volume $(pwd)/db:/db:rw \ + --volume $(pwd)/ssl:/etc/nginx/ssl:ro \ + quay.io/influxdb/influxdb3-explorer:latest \ + --mode=admin + ``` + +5. **Access the {{% product-name %}} user interface (UI) at **. + +--- + +## Enable TLS/SSL (HTTPS) + +To enable TLS/SSL, mount valid certificate and key files into the container: + +1. 
 **Place your TLS/SSL certificate files in your local `./ssl` directory**
+
+   Required files:
+
+   - Certificate: `server.crt` or `fullchain.pem`
+   - Private key: `server.key`
+
+2. **When running your container, mount the SSL directory and map port 443 to port 8888**
+
+   Include the following options when running your Docker container:
+
+   ```sh
+   --volume $(pwd)/ssl:/etc/nginx/ssl:ro \
+   --publish 8888:443
+   ```
+
+The nginx web server automatically uses certificate files when they are present
+in the mounted path.
+
+---
+
+## Pre-configure InfluxDB connection settings
+
+You can predefine InfluxDB connection settings using a `config.json` file.
+
+{{% code-placeholders "INFLUXDB3_HOST|INFLUXDB_DATABASE_NAME|INFLUXDB3_AUTH_TOKEN|INFLUXDB3_SERVER_NAME" %}}
+
+1. **Create a `config.json` file in your local `./config` directory**
+
+   ```json
+   {
+     "DEFAULT_INFLUX_SERVER": "INFLUXDB3_HOST",
+     "DEFAULT_INFLUX_DATABASE": "INFLUXDB_DATABASE_NAME",
+     "DEFAULT_API_TOKEN": "INFLUXDB3_AUTH_TOKEN",
+     "DEFAULT_SERVER_NAME": "INFLUXDB3_SERVER_NAME"
+   }
+   ```
+
+   > [!Important]
+   > If connecting to an InfluxDB 3 Core or Enterprise instance running on
+   > localhost (outside of the container), use the internal Docker network
+   > hostname in your InfluxDB 3 host value--for example:
+   >
+   > ```txt
+   > http://host.docker.internal:8181
+   > ```
+
+2. **Mount the configuration directory**
+
+   Include the following option when running your Docker container:
+
+   ```sh
+   --volume $(pwd)/config:/app-root/config:ro
+   ```
+
+{{% /code-placeholders %}}
+
+These settings will be used as defaults when the container starts.
+
+---
+
+## Run in query or admin mode
+
+{{% product-name %}} has the following operational modes:
+
+- **Query mode (default):** Read-only UI and query interface
+- **Admin mode:** Full UI and API access for administrators
+
+You can control the operational mode using the `--mode=` option in your
+`docker run` command (after the image name).
+ +### Run in query mode {note="(default)"} + +```sh +docker run \ + ... + --mode=query +``` + +### Run in admin mode + +```sh +docker run \ + ... + --mode=admin +``` + +If `--mode` is not set, the container defaults to query mode. + +--- + +## Environment Variables + +Use the following environment variables to customize {{% product-name %}} settings +in your container. + +| Variable | Description | Default | +|----------------|--------------------------------------------------|----------------------| +| `DATABASE_URL` | Path to SQLite DB inside container | `/db/sqlite.db` | + +--- + +## Volume Reference + +| Container Path | Purpose | Host Example | +|----------------------|------------------------------|----------------------------| +| `/db` | SQLite DB storage | `./db` | +| `/app-root/config` | JSON config for defaults | `./config` | +| `/etc/nginx/ssl` | SSL certs for HTTPS | `./ssl` | + +--- + +## Exposed Ports + +| Port | Protocol | Purpose | +|------|----------|-------------------------| +| 80 | HTTP | Web access (unencrypted) | +| 443 | HTTPS | Web access (encrypted) | + +### Custom port mapping + +```sh +# Map ports to custom host values +--publish 8888:80 --publish 8443:443 +``` diff --git a/content/kapacitor/v1/guides/anomaly_detection.md b/content/kapacitor/v1/guides/anomaly_detection.md index 1009c943b..f1a91261d 100644 --- a/content/kapacitor/v1/guides/anomaly_detection.md +++ b/content/kapacitor/v1/guides/anomaly_detection.md @@ -407,15 +407,15 @@ if __name__ == '__main__': agent.handler = h # Anything printed to STDERR from a UDF process gets captured into the Kapacitor logs. - print >> sys.stderr, "Starting agent for TTestHandler" + print("Starting agent for TTestHandler", file=sys.stderr) agent.start() agent.wait() - print >> sys.stderr, "Agent finished" + print("Agent finished", file=sys.stderr) ``` That was a lot, but now we are ready to configure Kapacitor to run our -code. Create a scratch dir for working through the rest of this +code. 
Make sure that `scipy` is installed (`$ pip3 install scipy`). Create a scratch dir for working through the rest of this guide: ```bash @@ -434,7 +434,7 @@ Add this snippet to your Kapacitor configuration file (typically located at `/et [udf.functions] [udf.functions.tTest] # Run python - prog = "/usr/bin/python2" + prog = "/usr/bin/python3" # Pass args to python # -u for unbuffered STDIN and STDOUT # and the path to the script @@ -468,8 +468,8 @@ correctly: service kapacitor restart ``` -Check the logs (`/var/log/kapacitor/`) to make sure you see a -*Listening for signals* line and that no errors occurred. If you +Check the logs (`/var/log/kapacitor/` or `journalctl -f -n 256 -u kapacitor.service`) to make sure you see a +_Listening for signals_ line and that no errors occurred. If you don't see the line, it's because the UDF process is hung and not responding. It should be killed after a timeout, so give it a moment to stop properly. Once stopped, you can fix any errors and try again. @@ -544,6 +544,20 @@ the Kapacitor task: kapacitor define print_temps -tick print_temps.tick ``` +Ensure that the task is enabled: + +```bash +kapacitor enable print_temps +``` + +And then list the tasks: + +```bash +kapacitor list tasks +ID Type Status Executing Databases and Retention Policies +print_temps stream enabled true ["printer"."autogen"] +``` + ### Generating test data To simulate our printer for testing, we will write a simple Python @@ -557,7 +571,7 @@ to use real data for testing our TICKscript and UDF, but this is faster (and much cheaper than a 3D printer). 
 ```python
-#!/usr/bin/python2
+#!/usr/bin/env python
 
 from numpy import random
 from datetime import timedelta, datetime
@@ -672,7 +686,11 @@ fake data so that we can easily iterate on the task:
 
 ```sh
 # Start the recording in the background
 kapacitor record stream -task print_temps -duration 24h -no-wait
-# Grab the ID from the output and store it in a var
+# List recordings to find the ID
+kapacitor list recordings
+ID                                      Type    Status    Size      Date
+7bd3ced5-5e95-4a67-a0e1-f00860b1af47    stream  running   0 B       04 May 16 11:34 MDT
+# Copy the ID and store it in a variable
 rid=7bd3ced5-5e95-4a67-a0e1-f00860b1af47
 # Run our python script to generate data
 chmod +x ./printer_data.py
diff --git a/content/kapacitor/v1/reference/about_the_project/release-notes.md b/content/kapacitor/v1/reference/about_the_project/release-notes.md
index 4fb722a07..beb05a02a 100644
--- a/content/kapacitor/v1/reference/about_the_project/release-notes.md
+++ b/content/kapacitor/v1/reference/about_the_project/release-notes.md
@@ -9,6 +9,22 @@ aliases:
   - /kapacitor/v1/about_the_project/releasenotes-changelog/
 ---
 
+## v1.7.7 {date="2025-05-27"}
+
+> [!Warning]
+> #### Python 2 UDFs deprecated
+>
+> **Python 2-based UDFs are deprecated** as of Kapacitor 1.7.7 and will be removed in **Kapacitor 1.8.0**.
+>
+> In preparation for Kapacitor 1.8.0, update your User-Defined Functions (UDFs) to be Python 3-compatible.
+> This required change aligns with modern security practices and ensures your custom functions will continue to work after upgrading.
+
+### Dependency updates
+
+- Upgrade Go to 1.22.12.
+
+---
+
 ## v1.7.6 {date="2024-10-28"}
 
 ### Features
@@ -105,7 +121,7 @@ aliases:
 ### Bug fixes
 
 - Update the `Kafka` client to fix a bug regarding write latency.
-- Update to [Flux v0.171.0](/flux/v0.x/release-notes/#v01710) to fix "interface {} is nil, not string" issue.
+- Update to [Flux v0.171.0](/flux/v0/release-notes/#v01710) to fix "interface {} is nil, not string" issue.
--- diff --git a/content/resources/how-to-guides/assigning-more-than-four-states.md b/content/resources/how-to-guides/assigning-more-than-four-states.md index 8b54d8c9e..eda43b7b1 100644 --- a/content/resources/how-to-guides/assigning-more-than-four-states.md +++ b/content/resources/how-to-guides/assigning-more-than-four-states.md @@ -8,7 +8,7 @@ menu: weight: 101 --- ## Problem -You may want to use the [`monitor` package](/flux/v0.x/stdlib/influxdata/influxdb/monitor/) and take advantage of functions like [monitor.stateChangesOnly()](/flux/v0.x/stdlib/influxdata/influxdb/monitor/statechangesonly/). However, `monitor.stateChangesOnly()` only allows you to monitor four states: "crit", "warn", "ok", and "info". What if you want to be able to assign and monitor state changes across custom states or more than four states? +You may want to use the [`monitor` package](/flux/v0/stdlib/influxdata/influxdb/monitor/) and take advantage of functions like [monitor.stateChangesOnly()](/flux/v0/stdlib/influxdata/influxdb/monitor/statechangesonly/). However, `monitor.stateChangesOnly()` only allows you to monitor four states: "crit", "warn", "ok", and "info". What if you want to be able to assign and monitor state changes across custom states or more than four states? ## Solution Define your own custom `stateChangesOnly()` function. Use the function from the source code here and alter it to accommodate more than four levels. Here we account for six different levels instead of just four. 
@@ -42,7 +42,7 @@ stateChangesOnly = (tables=<-) => { } ``` -Construct some example data with [`array.from()`](/flux/v0.x/stdlib/array/from/) and map custom levels to it: +Construct some example data with [`array.from()`](/flux/v0/stdlib/array/from/) and map custom levels to it: ```js array.from( diff --git a/content/resources/how-to-guides/reduce-to-construct-JSON.md b/content/resources/how-to-guides/reduce-to-construct-JSON.md index 904a80c0a..ee19ec22d 100644 --- a/content/resources/how-to-guides/reduce-to-construct-JSON.md +++ b/content/resources/how-to-guides/reduce-to-construct-JSON.md @@ -9,9 +9,9 @@ weight: 105 --- ## Send data in JSON body with `http.post()` -Use the [reduce()](/flux/v0.x/stdlib/universe/reduce/) function to create a JSON object to include as the body with `http.post()`. +Use the [reduce()](/flux/v0/stdlib/universe/reduce/) function to create a JSON object to include as the body with `http.post()`. -1. Import both the [array](/flux/v0.x/stdlib/array/) package to query data and construct table(s), and the [http package](/flux/v0.x/stdlib/http/) to transfer JSON over http. +1. Import both the [array](/flux/v0/stdlib/array/) package to query data and construct table(s), and the [http package](/flux/v0/stdlib/http/) to transfer JSON over http. 2. Use `array.from()` to query data and construct a table. Or, use another method [to query data with Flux](/influxdb/v2/query-data/flux/). 3. Use the `reduce()` function to construct a JSON object, and then use `yield()` to store the output of reduce. This table looks like: @@ -19,7 +19,7 @@ Use the [reduce()](/flux/v0.x/stdlib/universe/reduce/) function to create a JSON | :-------------------- | :----------------------------- | | example-field:["3"4"1 | {example-tag-key:["bar"bar"bar | -4. Use the [map()](/flux/v0.x/stdlib/universe/map/) function to combine the two components together into a JSON object, and then use a second `yield()` function to store this object as `final JSON`. 
This table looks like: +4. Use the [map()](/flux/v0/stdlib/universe/map/) function to combine the two components together into a JSON object, and then use a second `yield()` function to store this object as `final JSON`. This table looks like: | field | tag | final | | :-------------------- | :----------------------------- | :------------------------------------------------------- | diff --git a/content/resources/how-to-guides/select-hours-from-data.md b/content/resources/how-to-guides/select-hours-from-data.md index 7f35f3f95..32f462d93 100644 --- a/content/resources/how-to-guides/select-hours-from-data.md +++ b/content/resources/how-to-guides/select-hours-from-data.md @@ -12,7 +12,7 @@ weight: 102 You may want to select data from specific hours of the day. For example, you may only want data within normal business hours (9am - 5pm). ## Solution 1 -Use [hourSelection()](/flux/v0.x/stdlib/universe/hourselection/) to filter data by a specific hour range in each day. +Use [hourSelection()](/flux/v0/stdlib/universe/hourselection/) to filter data by a specific hour range in each day. ```js import "date" @@ -26,7 +26,7 @@ from(bucket: "example-bucket") ## Solution 2 -Use [date.hour()](/flux/v0.x/stdlib/date/hour/) to evaluate hours in a `filter()` predicate. +Use [date.hour()](/flux/v0/stdlib/date/hour/) to evaluate hours in a `filter()` predicate. ```js import "date" @@ -37,4 +37,4 @@ from(bucket: "example-bucket") |> filter(fn: (r) => r["_field"] == "example-field") |> filter(fn: (r) => date.hour(t: r["_time"]) > 9 and date.hour(t: r["_time"]) < 17) -This solution also applies if you to select data from certain seconds in a minute, minutes in an hour, days in the month, months in the year, etc. Use the [Flux `date` package](/flux/v0.x/stdlib/date/) to assign integer representations to your data and filter for your desired schedule. 
\ No newline at end of file
+This solution also applies if you want to select data from certain seconds in a minute, minutes in an hour, days in the month, months in the year, etc. Use the [Flux `date` package](/flux/v0/stdlib/date/) to assign integer representations to your data and filter for your desired schedule.
\ No newline at end of file
diff --git a/content/resources/how-to-guides/state-changes-across-task-executions.md b/content/resources/how-to-guides/state-changes-across-task-executions.md
index 30b053541..31ca6ea4f 100644
--- a/content/resources/how-to-guides/state-changes-across-task-executions.md
+++ b/content/resources/how-to-guides/state-changes-across-task-executions.md
@@ -17,7 +17,7 @@ It's common to use [InfluxDB tasks](/influxdb/cloud/process-data/) to evaluate a
 Explicitly assign levels to your data based on thresholds.
 
 ### Solution Advantages
-This is the easiest solution to understand if you have never written a task with the [`monitor` package](/flux/v0.x/stdlib/influxdata/influxdb/monitor/).
+This is the easiest solution to understand if you have never written a task with the [`monitor` package](/flux/v0/stdlib/influxdata/influxdb/monitor/).
 
 ### Solution Disadvantages
 You have to explicitly define your thresholds, which potentially requires more code.
@@ -36,9 +36,9 @@ Create a task where you:
 ### Solution Explained
 
 1. Import packages and define task options and secrets. Import the following packages:
-    - [Flux Telegram package](/flux/v0.x/stdlib/contrib/sranka/telegram/): This package
-    - [Flux InfluxDB secrets package](/flux/v0.x/stdlib/influxdata/influxdb/secrets/): This package contains the [secrets.get()](/flux/v0.x/stdlib/influxdata/influxdb/secrets/get/) function which allows you to retrieve secrets from the InfluxDB secret store. Learn how to [manage secrets](/influxdb/v2/admin/secrets/) in InfluxDB to use this package.
- - [Flux InfluxDB monitoring package](https://docs.influxdata.com/flux/v0.x/stdlib/influxdata/influxdb/monitor/): This package contains functions and tools for monitoring your data. + - [Flux Telegram package](/flux/v0/stdlib/contrib/sranka/telegram/): This package + - [Flux InfluxDB secrets package](/flux/v0/stdlib/influxdata/influxdb/secrets/): This package contains the [secrets.get()](/flux/v0/stdlib/influxdata/influxdb/secrets/get/) function which allows you to retrieve secrets from the InfluxDB secret store. Learn how to [manage secrets](/influxdb/v2/admin/secrets/) in InfluxDB to use this package. + - [Flux InfluxDB monitoring package](https://docs.influxdata.com/flux/v0/stdlib/influxdata/influxdb/monitor/): This package contains functions and tools for monitoring your data. ```js @@ -88,7 +88,7 @@ Create a task where you: | example-measurement | example-tag-value | example-field | 50.0 | crit | 2022-01-01T00:01:00Z | -4. Write “states” back to InfluxDB. You can write the data to a new measurement or to a new bucket. To write the data to a new measurement, use [`set()`](/flux/v0.x/stdlib/universe/set/) to update the value of the `_measurement` column in your “states” data. +4. Write “states” back to InfluxDB. You can write the data to a new measurement or to a new bucket. To write the data to a new measurement, use [`set()`](/flux/v0/stdlib/universe/set/) to update the value of the `_measurement` column in your “states” data. ```js states @@ -115,7 +115,7 @@ Create a task where you: | :------------------ | :---------------- | :------------ | -----: | :----- | :------------------- | | example-measurement | example-tag-value | example-field | 55.0 | crit | 2021-12-31T23:59:00Z | -6. Union “states” and “last_state_previous_task”. Store this data in a variable “unioned_states”. Use [`sort()`](/flux/v0.x/stdlib/universe/sort/) to ensure rows are ordered by time. +6. Union “states” and “last_state_previous_task”. Store this data in a variable “unioned_states”. 
Use [`sort()`](/flux/v0/stdlib/universe/sort/) to ensure rows are ordered by time. ```js unioned_states = @@ -131,7 +131,7 @@ Create a task where you: | example-measurement | example-tag-value | example-field | 30.0 | ok | 2022-01-01T00:00:00Z | | example-measurement | example-tag-value | example-field | 50.0 | crit | 2022-01-01T00:01:00Z | -7. Use [`monitor.stateChangesOnly()`](/flux/v0.x/stdlib/influxdata/influxdb/monitor/statechangesonly/) to return only rows where the state changed in “unioned_states”. Store this data in a variable, “state_changes”. +7. Use [`monitor.stateChangesOnly()`](/flux/v0/stdlib/influxdata/influxdb/monitor/statechangesonly/) to return only rows where the state changed in “unioned_states”. Store this data in a variable, “state_changes”. ```js state_changes = diff --git a/content/resources/videos/Flux-Data-Structure.md b/content/resources/videos/Flux-Data-Structure.md index 228e7a4aa..d7a753552 100644 --- a/content/resources/videos/Flux-Data-Structure.md +++ b/content/resources/videos/Flux-Data-Structure.md @@ -1,7 +1,7 @@ --- title: Flux Data Structure description: > - [Flux](/flux/v0.x/) is the native data language for the InfluxDB platform. Here, Scott Anderson discusses the 'stream of tables' concept, and how that relates to Flux's data structure. + [Flux](/flux/v0/) is the native data language for the InfluxDB platform. Here, Scott Anderson discusses the 'stream of tables' concept, and how that relates to Flux's data structure. menu: resources: parent: Videos diff --git a/content/resources/videos/Flux-Functions.md b/content/resources/videos/Flux-Functions.md index f40c2563a..3d0044f5c 100644 --- a/content/resources/videos/Flux-Functions.md +++ b/content/resources/videos/Flux-Functions.md @@ -1,7 +1,7 @@ --- title: Flux Functions description: > - Functions are the building blocks of the Flux scripting language. 
Here, Scott Anderson describes what [Flux functions](/flux/v0.x/stdlib/all-functions/) are, how they work, and how to use them. + Functions are the building blocks of the Flux scripting language. Here, Scott Anderson describes what [Flux functions](/flux/v0/stdlib/all-functions/) are, how they work, and how to use them. menu: resources: parent: Videos diff --git a/content/shared/extended-plugin-api.md b/content/shared/extended-plugin-api.md new file mode 100644 index 000000000..58ea1a932 --- /dev/null +++ b/content/shared/extended-plugin-api.md @@ -0,0 +1,323 @@ +The Processing Engine includes a shared API that your plugins can use to interact with data, write new records in line protocol format, and maintain state between executions. These capabilities let you build plugins that transform, analyze, and respond to time series data as it flows through your database. + +The plugin API lets you: + +- [Write data](#write-data) +- [Query data](#query-data) +- [Log messages for monitoring and debugging](#log-messages-for-monitoring-and-debugging) +- [Maintain state with the in-memory cache](#maintain-state-with-in-memory-cache) + - [Store and retrieve cached data](#store-and-retrieve-cached-data) + - [Use TTL appropriately](#use-ttl-appropriately) + - [Share data across plugins](#share-data-across-plugins) + - [Build a counter](#building-a-counter) +- [Guidelines for in-memory caching](#guidelines-for-in-memory-caching) + - [Consider cache limitations](#consider-cache-limitations) + +## Get started with the shared API + +Each plugin automatically has access to the shared API through the `influxdb3_local` object. You don’t need to import any libraries. The API becomes available as soon as your plugin runs. 
+ +## Write data + +To write data into your database, use the `LineBuilder` API to create line protocol data: + +```python +# Create a line protocol entry +line = LineBuilder("weather") +line.tag("location", "us-midwest") +line.float64_field("temperature", 82.5) +line.time_ns(1627680000000000000) + +# Write the data to the database +influxdb3_local.write(line) +``` + +InfluxDB 3 buffers your writes while the plugin runs and flushes them when the plugin completes. + +{{% expand-wrapper %}} +{{% expand "View the `LineBuilder` Python implementation" %}} + +```python +from typing import Optional +from collections import OrderedDict + +class InfluxDBError(Exception): + """Base exception for InfluxDB-related errors""" + pass + +class InvalidMeasurementError(InfluxDBError): + """Raised when measurement name is invalid""" + pass + +class InvalidKeyError(InfluxDBError): + """Raised when a tag or field key is invalid""" + pass + +class InvalidLineError(InfluxDBError): + """Raised when a line protocol string is invalid""" + pass + +class LineBuilder: + def __init__(self, measurement: str): + if ' ' in measurement: + raise InvalidMeasurementError("Measurement name cannot contain spaces") + self.measurement = measurement + self.tags: OrderedDict[str, str] = OrderedDict() + self.fields: OrderedDict[str, str] = OrderedDict() + self._timestamp_ns: Optional[int] = None + + def _validate_key(self, key: str, key_type: str) -> None: + """Validate that a key does not contain spaces, commas, or equals signs.""" + if not key: + raise InvalidKeyError(f"{key_type} key cannot be empty") + if ' ' in key: + raise InvalidKeyError(f"{key_type} key '{key}' cannot contain spaces") + if ',' in key: + raise InvalidKeyError(f"{key_type} key '{key}' cannot contain commas") + if '=' in key: + raise InvalidKeyError(f"{key_type} key '{key}' cannot contain equals signs") + + def tag(self, key: str, value: str) -> 'LineBuilder': + """Add a tag to the line protocol.""" + self._validate_key(key, "tag") + 
self.tags[key] = str(value) + return self + + def uint64_field(self, key: str, value: int) -> 'LineBuilder': + """Add an unsigned integer field to the line protocol.""" + self._validate_key(key, "field") + if value < 0: + raise ValueError(f"uint64 field '{key}' cannot be negative") + self.fields[key] = f"{value}u" + return self + + def int64_field(self, key: str, value: int) -> 'LineBuilder': + """Add an integer field to the line protocol.""" + self._validate_key(key, "field") + self.fields[key] = f"{value}i" + return self + + def float64_field(self, key: str, value: float) -> 'LineBuilder': + """Add a float field to the line protocol.""" + self._validate_key(key, "field") + # Check if value has no decimal component + self.fields[key] = f"{int(value)}.0" if value % 1 == 0 else str(value) + return self + + def string_field(self, key: str, value: str) -> 'LineBuilder': + """Add a string field to the line protocol.""" + self._validate_key(key, "field") + # Escape quotes and backslashes in string values + escaped_value = value.replace('"', '\\"').replace('\\', '\\\\') + self.fields[key] = f'"{escaped_value}"' + return self + + def bool_field(self, key: str, value: bool) -> 'LineBuilder': + """Add a boolean field to the line protocol.""" + self._validate_key(key, "field") + self.fields[key] = 't' if value else 'f' + return self + + def time_ns(self, timestamp_ns: int) -> 'LineBuilder': + """Set the timestamp in nanoseconds.""" + self._timestamp_ns = timestamp_ns + return self + + def build(self) -> str: + """Build the line protocol string.""" + # Start with measurement name (escape commas only) + line = self.measurement.replace(',', '\\,') + + # Add tags if present + if self.tags: + tags_str = ','.join( + f"{k}={v}" for k, v in self.tags.items() + ) + line += f",{tags_str}" + + # Add fields (required) + if not self.fields: + raise InvalidLineError(f"At least one field is required: {line}") + + fields_str = ','.join( + f"{k}={v}" for k, v in self.fields.items() + ) + 
line += f" {fields_str}" + + # Add timestamp if present + if self._timestamp_ns is not None: + line += f" {self._timestamp_ns}" + + return line +``` +{{% /expand %}} +{{% /expand-wrapper %}} + +## Query data + +Your plugins can execute SQL queries and process results directly: + +```python +# Simple query +results = influxdb3_local.query("SELECT * FROM metrics WHERE time > now() - INTERVAL '1 hour'") + +# Parameterized query for safer execution +params = {"table": "metrics", "threshold": 90} +results = influxdb3_local.query("SELECT * FROM $table WHERE value > $threshold", params) +``` + +Query results are a `List` of `Dict[String, Any]`, where each dictionary represents a row. Column names are keys, and column values are the corresponding values. + +## Log messages for monitoring and debugging + +Use the shared API's `info`, `warn`, and `error` functions to log messages from your plugin. Each function accepts one or more arguments, converts them to strings, and logs them as a space-separated message. + +Add logging to monitor plugin execution and assist with debugging: + +```python +influxdb3_local.info("Starting data processing") +influxdb3_local.warn("Could not process some records") +influxdb3_local.error("Failed to connect to external API") + +# Log structured data +obj_to_log = {"records": 157, "errors": 3} +influxdb3_local.info("Processing complete", obj_to_log) +``` + +The system writes all log messages to the server logs and stores them in [system tables](/influxdb3/version/reference/cli/influxdb3/show/system/summary/), where you can query them using SQL. + +## Maintain state with the in-memory cache + +The Processing Engine provides an in-memory cache that enables your plugins to persist and retrieve data between executions. 
+ +Access the cache using the `cache` property of the shared API: + +```python +# Basic usage pattern +influxdb3_local.cache.METHOD(PARAMETERS) +``` + +`cache` provides the following methods to retrieve and manage cached values: + +| Method | Parameters | Returns | Description | +|--------|------------|---------|-------------| +| `put` | `key` (str): The key to store the value under
`value` (Any): Any Python object to cache
`ttl` (Optional[float], default=None): Time in seconds before expiration
`use_global` (bool, default=False): If True, uses global namespace | None | Stores a value in the cache with an optional time-to-live | +| `get` | `key` (str): The key to retrieve
`default` (Any, default=None): Value to return if key not found
`use_global` (bool, default=False): If True, uses global namespace | Any | Retrieves a value from the cache or returns default if not found | +| `delete` | `key` (str): The key to delete
`use_global` (bool, default=False): If True, uses global namespace | bool | Deletes a value from the cache. Returns True if deleted, False if not found | + +### Understanding cache namespaces + +The cache system offers two distinct namespaces: + +| Namespace | Scope | Best For | +| --- | --- | --- | +| **Trigger-specific** (default) | Isolated to a single trigger | Plugin state, counters, timestamps specific to one plugin | +| **Global** | Shared across all triggers | Configuration, lookup tables, service states that should be available to all plugins | + +### Common cache operations + +- [Store and retrieve cached data](#store-and-retrieve-cached-data) +- [Store cached data with expiration](#store-cached-data-with-expiration) +- [Share data across plugins](#share-data-across-plugins) +- [Build a counter](#build-a-counter) + +### Store and retrieve cached data + +```python +# Store a value +influxdb3_local.cache.put("last_run_time", time.time()) + +# Retrieve a value with a default if not found +last_time = influxdb3_local.cache.get("last_run_time", default=0) + +# Delete a cached value +influxdb3_local.cache.delete("temporary_data") +``` + +### Store cached data with expiration + +```python +# Cache with a 5-minute TTL (time-to-live) +influxdb3_local.cache.put("api_response", response_data, ttl=300) +``` + +### Share data across plugins + +```python +# Store in the global namespace +influxdb3_local.cache.put("config", {"version": "1.0"}, use_global=True) + +# Retrieve from the global namespace +config = influxdb3_local.cache.get("config", use_global=True) +``` + +### Building a counter + +You can track how many times a plugin has run: + +```python +# Get current counter or default to 0 +counter = influxdb3_local.cache.get("execution_count", default=0) + +# Increment counter +counter += 1 + +# Store the updated value +influxdb3_local.cache.put("execution_count", counter) + +influxdb3_local.info(f"This plugin has run {counter} times") +``` + +## Guidelines for 
in-memory caching + +To get the most out of the in-memory cache, follow these guidelines: + +- [Use the trigger-specific namespace](#use-the-trigger-specific-namespace) +- [Use TTL appropriately](#use-ttl-appropriately) +- [Cache computation results](#cache-computation-results) +- [Warm the cache](#warm-the-cache) +- [Consider cache limitations](#consider-cache-limitations) + +### Use the trigger-specific namespace + +The Processing Engine provides a cache that supports stateful operations while maintaining isolation between different triggers. For most use cases, use the trigger-specific namespace to keep plugin state isolated. Use the global namespace only when you need to share data across triggers. + +### Use TTL appropriately + +Set appropriate expiration times based on how frequently your data changes: + +```python +# Cache external API responses for 5 minutes +influxdb3_local.cache.put("weather_data", api_response, ttl=300) +``` + +### Cache computation results + +Store the results of expensive calculations that you frequently utilize: + +```python +# Cache aggregated statistics +influxdb3_local.cache.put("daily_stats", calculate_statistics(data), ttl=3600) +``` + +### Warm the cache + +For critical data, prime the cache at startup. This can be especially useful for global namespace data where multiple triggers need the data: + +```python +# Check if cache needs to be initialized +if not influxdb3_local.cache.get("lookup_table"): + influxdb3_local.cache.put("lookup_table", load_lookup_data()) +``` + +### Consider cache limitations + +- **Memory Usage**: Since the system stores cache contents in memory, monitor your memory usage when caching large datasets. +- **Server Restarts**: Because the server clears the cache on restart, design your plugins to handle cache initialization (as noted above). +- **Concurrency**: Be cautious of accessing inaccurate or out-of-date data when multiple trigger instances might simultaneously update the same cache key. 
+
+## Next Steps
+
+With an understanding of the InfluxDB 3 Shared Plugin API, you can start building data workflows that transform, analyze, and respond to your time series data.
+
+To find example plugins you can extend, visit the [influxdb3_plugins repository](https://github.com/influxdata/influxdb3_plugins) on GitHub.
\ No newline at end of file
diff --git a/content/shared/influxctl/release-notes.md b/content/shared/influxctl/release-notes.md
index acf26063e..4e35c655a 100644
--- a/content/shared/influxctl/release-notes.md
+++ b/content/shared/influxctl/release-notes.md
@@ -1,3 +1,22 @@
+## v2.10.1 {date="2025-05-30"}
+
+### Features
+
+- Implement `clustered generate` subcommand.
+- Support setting the management token using an environment variable.
+- Support setting profile name using an environment variable.
+
+### Dependency updates
+
+- Update `github.com/apache/arrow-go/v18` from 18.2.0 to 18.3.0.
+- Update `github.com/containerd/containerd` from 1.7.12 to 1.7.27.
+- Update `github.com/go-git/go-git/v5` from 5.15.0 to 5.16.0.
+- Update `golang.org/x/oauth2` from 0.29.0 to 0.30.0.
+- Update `google.golang.org/grpc` from 1.71.1 to 1.72.1.
+- Update `helm.sh/helm/v3` from 3.14.2 to 3.17.3.
+
+---
+
 ## v2.10.0 {date="2025-04-04"}
 
 ### Features
diff --git a/content/shared/influxdb-client-libraries-reference/flight/csharp-flight.md b/content/shared/influxdb-client-libraries-reference/flight/csharp-flight.md
index 1163b3a2e..ad0984cfd 100644
--- a/content/shared/influxdb-client-libraries-reference/flight/csharp-flight.md
+++ b/content/shared/influxdb-client-libraries-reference/flight/csharp-flight.md
@@ -8,5 +8,5 @@ For more information, see the [C# client example on GitHub](https://github.com/a
 > We recommend using the [`influxdb3-csharp` C# client library](/influxdb3/version/reference/client-libraries/v3/csharp/) for integrating InfluxDB 3 with your C# application code.
> > [InfluxDB 3 client libraries](/influxdb3/version/reference/client-libraries/v3/) wrap Apache Arrow Flight clients -> and provide convenient methods for [writing](/influxdb3/version/get-started/write/#write-line-protocol-to-influxdb), [querying](/influxdb3/version/get-started/query/#execute-an-sql-query), and processing data stored in {{% product-name %}}. +> and provide convenient methods for [writing](/influxdb3/version/write-data/api-client-libraries/), [querying](/influxdb3/version/query-data/execute-queries/), and processing data stored in {{% product-name %}}. > Client libraries can query using SQL or InfluxQL. diff --git a/content/shared/influxdb-client-libraries-reference/flight/go-flight.md b/content/shared/influxdb-client-libraries-reference/flight/go-flight.md index cc1b97109..93e588f95 100644 --- a/content/shared/influxdb-client-libraries-reference/flight/go-flight.md +++ b/content/shared/influxdb-client-libraries-reference/flight/go-flight.md @@ -6,7 +6,7 @@ > We recommend using the [`influxdb3-go` Go client library](/influxdb3/version/reference/client-libraries/v3/go/) for integrating InfluxDB 3 with your Go application code. > > [InfluxDB 3 client libraries](/influxdb3/version/reference/client-libraries/v3/) wrap Apache Arrow Flight clients -> and provide convenient methods for [writing](/influxdb3/version/get-started/write/#write-line-protocol-to-influxdb), [querying](/influxdb3/version/get-started/query/#execute-an-sql-query), and processing data stored in {{% product-name %}}. +> and provide convenient methods for [writing](/influxdb3/version/write-data/api-client-libraries/), [querying](/influxdb3/version/query-data/execute-queries/), and processing data stored in {{% product-name %}}. > Client libraries can query using SQL or InfluxQL. 
## Flight SQL client diff --git a/content/shared/influxdb-client-libraries-reference/flight/java-flightsql.md b/content/shared/influxdb-client-libraries-reference/flight/java-flightsql.md index b0a57d52e..e29b31da5 100644 --- a/content/shared/influxdb-client-libraries-reference/flight/java-flightsql.md +++ b/content/shared/influxdb-client-libraries-reference/flight/java-flightsql.md @@ -6,7 +6,7 @@ > We recommend using the [`influxdb3-java` Java client library](/influxdb3/version/reference/client-libraries/v3/java/) for integrating InfluxDB 3 with your Java application code. > > [InfluxDB 3 client libraries](/influxdb3/version/reference/client-libraries/v3/) wrap Apache Arrow Flight clients -> and provide convenient methods for [writing](/influxdb3/version/get-started/write/#write-line-protocol-to-influxdb), [querying](/influxdb3/version/get-started/query/#execute-an-sql-query), and processing data stored in {{% product-name %}}. +> and provide convenient methods for [writing](/influxdb3/version/write-data/api-client-libraries/), [querying](/influxdb3/version/query-data/execute-queries/), and processing data stored in {{% product-name %}}. > Client libraries can query using SQL or InfluxQL. diff --git a/content/shared/influxdb-client-libraries-reference/flight/python-flight.md b/content/shared/influxdb-client-libraries-reference/flight/python-flight.md index 5fa77472c..207f9bec7 100644 --- a/content/shared/influxdb-client-libraries-reference/flight/python-flight.md +++ b/content/shared/influxdb-client-libraries-reference/flight/python-flight.md @@ -6,21 +6,20 @@ > We recommend using the [`influxdb3-python` Python client library](/influxdb3/version/reference/client-libraries/v3/python/) for integrating InfluxDB 3 with your Python application code. 
> > [InfluxDB 3 client libraries](/influxdb3/version/reference/client-libraries/v3/) wrap Apache Arrow Flight clients -> and provide convenient methods for [writing](/influxdb3/version/get-started/write/#write-line-protocol-to-influxdb), [querying](/influxdb3/version/get-started/query/#execute-an-sql-query), and processing data stored in {{% product-name %}}. +> and provide convenient methods for [writing](/influxdb3/version/write-data/api-client-libraries/), [querying](/influxdb3/version/query-data/execute-queries/), and processing data stored in {{% product-name %}}. > Client libraries can query using SQL or InfluxQL. The following examples show how to use the `pyarrow.flight` and `pandas` Python modules to query and format data stored in an {{% product-name %}} database: -{{% code-tabs-wrapper %}} +{{< code-tabs-wrapper >}} {{% code-tabs %}} [SQL](#sql-python) [InfluxQL](#influxql-python) {{% /code-tabs %}} - {{% code-tab-content %}} {{% code-placeholders "DATABASE_NAME|DATABASE_TOKEN" %}} -```py +```python # Using pyarrow>=12.0.0 FlightClient from pyarrow.flight import FlightClient, Ticket, FlightCallOptions import json @@ -62,7 +61,7 @@ print(data_frame.to_markdown()) {{% code-tab-content %}} {{% code-placeholders "DATABASE_NAME|DATABASE_TOKEN" %}} -```py +```python # Using pyarrow>=12.0.0 FlightClient from pyarrow.flight import FlightClient, Ticket, FlightCallOptions import json @@ -97,6 +96,7 @@ print(data_frame.to_markdown()) {{% /code-placeholders %}} {{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} Replace the following: @@ -104,5 +104,3 @@ Replace the following: - {{% code-placeholder-key %}}`DATABASE_TOKEN`{{% /code-placeholder-key %}}: a [database token](/influxdb3/version/admin/tokens/database/) with sufficient permissions to the specified database - -{{% /code-tabs-wrapper %}} diff --git a/content/shared/influxdb-client-libraries-reference/flight/python-flightsql-dbapi.md 
b/content/shared/influxdb-client-libraries-reference/flight/python-flightsql-dbapi.md index 2350510c5..61d75e8da 100644 --- a/content/shared/influxdb-client-libraries-reference/flight/python-flightsql-dbapi.md +++ b/content/shared/influxdb-client-libraries-reference/flight/python-flightsql-dbapi.md @@ -6,7 +6,7 @@ The [Python `flightsql-dbapi` Flight SQL DBAPI library](https://github.com/influ > We recommend using the [`influxdb3-python` Python client library](/influxdb3/version/reference/client-libraries/v3/python/) for integrating InfluxDB 3 with your Python application code. > > [InfluxDB 3 client libraries](/influxdb3/version/reference/client-libraries/v3/) wrap Apache Arrow Flight clients -> and provide convenient methods for [writing](/influxdb3/version/get-started/write/#write-line-protocol-to-influxdb), [querying](/influxdb3/version/get-started/query/#execute-an-sql-query), and processing data stored in {{% product-name %}}. +> and provide convenient methods for [writing](/influxdb3/version/write-data/api-client-libraries/), [querying](/influxdb3/version/query-data/execute-queries/), and processing data stored in {{% product-name %}}. > Client libraries can query using SQL or InfluxQL. ## Installation @@ -32,7 +32,7 @@ from flightsql import FlightSQLClient ``` - `flightsql.FlightSQLClient` class: an interface for [initializing -a client](#initialization) and interacting with a Flight SQL server. +a client](#initialize-a-client) and interacting with a Flight SQL server. ## API reference @@ -41,11 +41,11 @@ a client](#initialization) and interacting with a Flight SQL server. 
- [Initialize a client](#initialize-a-client) - [Instance methods](#instance-methods) - [FlightSQLClient.execute](#flightsqlclientexecute) - - [Syntax {#execute-query-syntax}](#syntax-execute-query-syntax) - - [Example {#execute-query-example}](#example-execute-query-example) + - [Syntax](#execute-query-syntax) + - [Example](#execute-query-example) - [FlightSQLClient.do_get](#flightsqlclientdo_get) - - [Syntax {#retrieve-data-syntax}](#syntax-retrieve-data-syntax) - - [Example {#retrieve-data-example}](#example-retrieve-data-example) + - [Syntax](#retrieve-data-syntax) + - [Example](#retrieve-data-example) ## Class FlightSQLClient diff --git a/content/shared/influxdb-v2/write-data/troubleshoot.md b/content/shared/influxdb-v2/write-data/troubleshoot.md new file mode 100644 index 000000000..fa1d04166 --- /dev/null +++ b/content/shared/influxdb-v2/write-data/troubleshoot.md @@ -0,0 +1,345 @@ + +Learn how to avoid unexpected results and recover from errors when writing to InfluxDB. + +{{% show-in "v2,cloud" %}} + +- [Handle `write` and `delete` responses](#handle-write-and-delete-responses) +- [Troubleshoot failures](#troubleshoot-failures) +- [Troubleshoot rejected points](#troubleshoot-rejected-points) + +{{% /show-in %}} + +## Handle `write` and `delete` responses + +{{% show-in "cloud" %}} + +In InfluxDB Cloud, writes and deletes are asynchronous and eventually consistent. +Once InfluxDB validates your request and [queues](/influxdb/cloud/reference/internals/durability/#backup-on-write) the write or delete, it sends a _success_ response (HTTP `204` status code) as an acknowledgement. +To ensure that InfluxDB handles writes and deletes in the order you request them, wait for the acknowledgement before you send the next request. +Because writes are asynchronous, keep the following in mind: + +- Data might not yet be queryable when you receive _success_ (HTTP `204` status code). +- InfluxDB may still reject points after you receive _success_ (HTTP `204` status code). 
+ +{{% /show-in %}} + +{{% show-in "v2" %}} + +{{% product-name %}} does the following when you send a write request: + + 1. Validates the request. + 2. If successful, attempts to [ingest data](/influxdb/v2/reference/internals/durability/#data-ingest) from the request body; otherwise, responds with an [error status](#review-http-status-codes). + 3. Ingests or rejects data from the batch and returns one of the following HTTP status codes: + + - `204 No Content`: All of the data is ingested and queryable. + - `422 Unprocessable Entity`: Some or all of the data has been rejected. Data that has not been rejected is ingested and queryable. + + The response body contains error details about [rejected points](#troubleshoot-rejected-points). + + Writes are synchronous--the response status indicates the final status of the write and all ingested data is queryable. + + To ensure that InfluxDB handles writes in the order you request them, + wait for the response before you send the next request. + +{{% /show-in %}} + +### Review HTTP status codes + +InfluxDB uses conventional HTTP status codes to indicate the success or failure of a request. 
+Write requests return the following status codes: + +{{% show-in "cloud" %}} + +| HTTP response code | Message | Description | +| :-------------------------------| :--------------------------------------------------------------- | :------------- | +| `204 "Success"` | | If InfluxDB validated the request data format and queued the data for writing to the bucket | +| `400 "Bad request"` | `message` contains the first malformed line | If data is malformed | +| `401 "Unauthorized"` | | If the [`Authorization: Token` header](/influxdb/cloud/api-guide/api_intro/#authentication) is missing or malformed or if the [API token](/influxdb/cloud/api-guide/api_intro/#authentication) doesn't have [permission](/influxdb/cloud/admin/tokens/) to write to the bucket | +| `404 "Not found"` | requested **resource type** (for example, "organization") and **resource name** | If a requested resource, such as an organization or bucket, wasn't found | +| `413 "Request too large"` | cannot read data: points in batch is too large | If a **write** request exceeds the maximum [global limit](/influxdb/cloud/account-management/limits/#global-limits) | +| `429 “Too many requests”` | `Retry-After` header: xxx (seconds to wait before retrying the request) | If a **read** or **write** request exceeds your plan's [adjustable service quotas](/influxdb/cloud/account-management/limits/#adjustable-service-quotas) or if a **delete** request exceeds the maximum [global limit](/influxdb/cloud/account-management/limits/#global-limits) | +| `500 "Internal server error"` | | Default status for an error | +| `503 “Service unavailable“` | Series cardinality exceeds your plan's service quota | If **series cardinality** exceeds your plan's [adjustable service quotas](/influxdb/cloud/account-management/limits/#adjustable-service-quotas) | + +{{% /show-in %}} + +{{% show-in "v2" %}} + +- `204` **Success**: All request data was written to the bucket. 
+- `400` **Bad request**: + The response body contains the first malformed line in the data. All request data was rejected and not written. +- `401` **Unauthorized**: May indicate one of the following: + - [`Authorization: Token` header](/influxdb/v2/api-guide/api_intro/#authentication) is missing or malformed. + - [API token](/influxdb/v2/api-guide/api_intro/#authentication) value is missing from the header. + - API token does not have sufficient permissions to write to the organization and the bucket. For more information about token types and permissions, see [Manage API tokens](/influxdb/v2/admin/tokens/). +- `404` **Not found**: A requested resource, such as an organization or bucket, was not found. The response body contains the requested resource type (for example, "organization") and resource name. +- `413` **Request entity too large**: All request data was rejected and not written. InfluxDB OSS only returns this error if the [Go (golang) `ioutil.ReadAll()`](https://pkg.go.dev/io/ioutil#ReadAll) function raises an error. +- `422` **Unprocessable entity**: The request was well-formed, but some or all the points were rejected due to semantic errors--for example, schema conflicts or retention policy violations. +- `500` **Internal server error**: Default HTTP status for an error. +- `503` **Service unavailable**: Server is temporarily unavailable to accept writes. The `Retry-After` header describes when to try the write again. + +{{% /show-in %}} + +The `message` property of the response body may contain additional details about the error. +If some of your data did not write to the bucket, see how to [troubleshoot rejected points](#troubleshoot-rejected-points). + +{{% show-in "cloud" %}} + +### Troubleshoot partial writes + +For example, a partial write may occur when InfluxDB writes all points that conform to the bucket schema, but rejects points that have the wrong data type in a field. 
+To check for writes that fail asynchronously, create a [task](/influxdb/cloud/process-data/manage-tasks/) to [check the _monitoring bucket for rejected points](#review-rejected-points). +To resolve partial writes and rejected points, see [troubleshoot failures](#troubleshoot-failures). + +{{% /show-in %}} + +## Troubleshoot failures + +{{% show-in "v2" %}} + +If you notice data is missing in your bucket, do the following: + +- Check the [HTTP status code](#review-http-status-codes) in the response. +- Check the `message` property in the response body for details about the error--for example, `partial write` indicates [rejected points](#troubleshoot-rejected-points). +- Verify all lines contain valid syntax ([line protocol](/influxdb/v2/reference/syntax/line-protocol/) or [CSV](/influxdb/v2/reference/syntax/annotated-csv/)). +- Verify the timestamps match the [precision parameter](/influxdb/v2/write-data/#timestamp-precision) in your request. +- Minimize payload size and network errors by [optimizing writes](/influxdb/v2/write-data/best-practices/optimize-writes/). + +{{% /show-in %}} + +{{% show-in "cloud" %}} +If you notice data is missing in your bucket, do the following: + +- Check the `message` property in the response body for details about the error--for example, `partial write error` indicates [rejected points](#troubleshoot-rejected-points). +- Check for [rejected points](#troubleshoot-rejected-points) in your organization's `_monitoring` bucket. +- Verify all lines contain valid syntax ([line protocol](/influxdb/cloud/reference/syntax/line-protocol/) or [CSV](/influxdb/cloud/reference/syntax/annotated-csv/)). See how to [find parsing errors](#find-parsing-errors). +- Verify the data types match the [series](/influxdb/cloud/reference/key-concepts/data-elements/#series) or [bucket schema](/influxdb/cloud/admin/buckets/bucket-schema/). See how to resolve [explicit schema rejections](#resolve-explicit-schema-rejections). 
+- Verify the timestamps match the [precision parameter](/influxdb/cloud/write-data/#timestamp-precision). +- Minimize payload size and network errors by [optimizing writes](/influxdb/cloud/write-data/best-practices/optimize-writes/). + +{{% /show-in %}} + +## Troubleshoot rejected points + +{{% show-in "v2" %}} + +When writing points from a batch, InfluxDB rejects points that have syntax errors or schema conflicts. + +If InfluxDB processes the data in your batch and then rejects points, the [HTTP response](#handle-write-responses) body contains the following properties that describe rejected points: + +- `code`: `"unprocessable entity"` +- `message`: a string that describes the reason points were rejected and may provide details, such as database, retention policy, and which bound was violated. + +For example, the following `message` indicates that points were rejected because the timestamps fall outside the `1d` retention policy: + +```text +failure writing points to database: partial write: dropped 4 points outside retention policy of duration 24h0m0s - oldest point home,room=Living\\ Room at 1970-01-01T00:00:01.541Z dropped because it violates a Retention Policy Lower Bound at 2025-05-20T19:06:17.612973Z, newest point home,room=Living\\ Room at 1970-01-01T00:00:01.5410006Z dropped because it violates a Retention Policy Lower Bound at 2025-05-20T19:06:17.612973Z dropped=4 for database: 9f282d63c7d3a5c0 for retention policy: autogen +``` + +InfluxDB rejects points for the following reasons: + +- a line protocol parsing error +- an invalid timestamp +- a schema conflict +- retention policy violation + +Schema conflicts occur when you try to write data that contains any of the following: + +- The **batch** contains another point with the same series, but one of the fields has a different value type. +- The **bucket** contains another point with the same series, but one of the fields has a different value type. 
+ +Check for [field type](/influxdb/v2/reference/key-concepts/data-elements/#field-value) differences between the missing data point and other points that have the same [series](/influxdb/v2/reference/key-concepts/data-elements/#series)--for example, did you attempt to write `string` data to an `int` field? + +{{% /show-in %}} + +{{% show-in "cloud" %}} + +When you receive an HTTP `204` (Success) status code, InfluxDB has validated your request format and queued your data for writing. +However, {{% product-name %}} processes data asynchronously, which means points may still be rejected after you receive a success response. + +InfluxDB may reject points for several reasons: +- Line protocol parsing errors +- Invalid timestamps +- Data type conflicts with existing schema +- Retention policy violations +- Series cardinality exceeding your plan's limits + +To verify if your data was successfully written, query your data or check the `_monitoring` bucket for rejected points. + +- [Review rejected points](#review-rejected-points) + - [Find parsing errors](#find-parsing-errors) + - [Find data type conflicts and schema rejections](#find-data-type-conflicts-and-schema-rejections) +- [Resolve data type conflicts](#resolve-data-type-conflicts) +- [Resolve explicit schema rejections](#resolve-explicit-schema-rejections) + +### Review rejected points + +To get a log of rejected points, query the [`rejected_points` measurement](/influxdb/cloud/reference/internals/system-buckets/#_monitoring-bucket-schema) in your organization's `_monitoring` bucket. +To more quickly locate `rejected_points`, keep the following in mind: + +- If your line protocol batch contains single lines with multiple [fields](/influxdb/cloud/reference/syntax/line-protocol/#field-set), InfluxDB logs an entry for each point (each unique field) that is rejected. +- Each entry contains a `reason` tag that describes why the point was rejected. 
+- Entries for [data type conflicts and schema rejections](#find-data-type-conflicts-and-schema-rejections) have a `count` field value of `1`.
+- Entries for [parsing errors](#find-parsing-errors) contain an `error` field (and don't contain a `count` field).
+
+#### rejected_points schema
+
+| Name | Value |
+|:------ |:----- |
+| `_measurement`| `rejected_points` |
+| `_field` | [`count`](#find-data-type-conflicts-and-schema-rejections) or [`error`](#find-parsing-errors) |
+| `_value` | [`1`](#find-data-type-conflicts-and-schema-rejections) or [error details](#find-parsing-errors) |
+| `bucket` | ID of the bucket that rejected the point |
+| `measurement` | Measurement name of the point |
+| `field` | Name of the field that caused the rejection |
+| `reason` | Brief description of the problem. See specific reasons in [data type conflicts and schema rejections](#find-data-type-conflicts-and-schema-rejections) |
+| `gotType` | Received [field](/influxdb/cloud/reference/key-concepts/data-elements/#field-value) type: `Boolean`, `Float`, `Integer`, `String`, or `UnsignedInteger` |
+| `wantType` | Expected [field](/influxdb/cloud/reference/key-concepts/data-elements/#field-value) type: `Boolean`, `Float`, `Integer`, `String`, or `UnsignedInteger` |
+| `_time` | Time the rejected point was logged |
+
+#### Find parsing errors
+
+If InfluxDB can't parse a line (for example, due to syntax problems), the response `message` might not provide details.
+To find parsing error details, query `rejected_points` entries that contain the `error` field.
+
+```js
+from(bucket: "_monitoring")
+  |> range(start: -1h)
+  |> filter(fn: (r) => r._measurement == "rejected_points")
+  |> filter(fn: (r) => r._field == "error")
+```
+
+#### Find data type conflicts and schema rejections
+
+To find `rejected_points` caused by [data type conflicts](#resolve-data-type-conflicts) or [schema rejections](#resolve-explicit-schema-rejections),
+query for the `count` field. 
+ +```js +from(bucket: "_monitoring") + |> range(start: -1h) + |> filter(fn: (r) => r._measurement == "rejected_points") + |> filter(fn: (r) => r._field == "count") +``` + +### Resolve data type conflicts + +When you write to a bucket that has the `implicit` schema type, InfluxDB compares new points to points that have the same [series](/influxdb/cloud/reference/key-concepts/data-elements/#series). +If a point has a field with a different data type than the series, InfluxDB rejects the point and logs a `rejected_points` entry. +The `rejected_points` entry contains one of the following reasons: + +| Reason | Meaning | +|:------ |:------- | +| `type conflict in batch write` | The **batch** contains another point with the same series, but one of the fields has a different value type. | +| `type conflict with existing data` | The **bucket** contains another point with the same series, but one of the fields has a different value type. | + +### Resolve explicit schema rejections + +If you write to a bucket with an +[explicit schema](/influxdb/cloud/admin/buckets/bucket-schema/), +the data must conform to the schema. Otherwise, InfluxDB rejects the data. + +Do the following to interpret explicit schema rejections: + +- [Detect a measurement mismatch](#detect-a-measurement-mismatch) +- [Detect a field type mismatch](#detect-a-field-type-mismatch) + +#### Detect a measurement mismatch + +InfluxDB rejects a point if the [measurement](/influxdb/cloud/reference/key-concepts/data-elements/#measurement) doesn't match the **name** of a [bucket schema](/influxdb/cloud/admin/buckets/bucket-schema/). +The `rejected_points` entry contains the following `reason` tag value: + +| Reason | Meaning | +|:------ |:------- +| `measurement not allowed by schema` | The **bucket** is configured to use explicit schemas and none of the schemas matches the **measurement** of the point. | + +Consider the following [line protocol](/influxdb/cloud/reference/syntax/line-protocol) data. 
+ +``` +airSensors,sensorId=TLM0201 temperature=73.97,humidity=35.23,co=0.48 1637014074 +``` + +The line has an `airSensors` measurement and three fields (`temperature`, `humidity`, and `co`). +If you try to write this data to a bucket that has the [`explicit` schema type](/influxdb/cloud/admin/buckets/bucket-schema/) and doesn't have an `airSensors` schema, the `/api/v2/write` InfluxDB API returns an error and the following data: + +```json +{ + "code": "invalid", + "message": "3 out of 3 points rejected (check rejected_points in your _monitoring bucket for further information)" +} +``` + +InfluxDB logs three `rejected_points` entries, one for each field. + +| _measurement | _field | _value | field | measurement | reason | +|:----------------|:-------|:-------|:------------|:------------|:----------------------------------| +| rejected_points | count | 1 | humidity | airSensors | measurement not allowed by schema | +| rejected_points | count | 1 | co | airSensors | measurement not allowed by schema | +| rejected_points | count | 1 | temperature | airSensors | measurement not allowed by schema | + +#### Detect a field type mismatch + +InfluxDB rejects a point if the [measurement](/influxdb/cloud/reference/key-concepts/data-elements/#measurement) matches the **name** of a bucket schema and the field data types don't match. +The `rejected_points` entry contains the following reason: + +| Reason | Meaning | +|:------------------------------------|:-----------------------------------------------------------------------------------------------------| +| `field type mismatch with schema` | The point has the same measurement as a configured schema and they have different field value types. 
| + +Consider a bucket that has the following `airSensors` [`explicit bucket schema`](/influxdb/cloud/admin/buckets/bucket-schema/): + +```json +{ + "name": "airSensors", + "columns": [ + { + "name": "time", + "type": "timestamp" + }, + { + "name": "sensorId", + "type": "tag" + }, + { + "name": "temperature", + "type": "field", + "dataType": "float" + }, + { + "name": "humidity", + "type": "field", + "dataType": "float" + }, + { + "name": "co", + "type": "field", + "dataType": "float" + } + ] +} +``` + +The following [line protocol](/influxdb/cloud/reference/syntax/line-protocol/) data has an `airSensors` measurement, a `sensorId` tag, and three fields (`temperature`, `humidity`, and `co`). + +``` +airSensors,sensorId=L1 temperature=90.5,humidity=70.0,co=0.2 1637014074 +airSensors,sensorId=L1 temperature="90.5",humidity=70.0,co=0.2 1637014074 +``` + +In the example data above, the second point has a `temperature` field value with the _string_ data type. +Because the `airSensors` schema requires `temperature` to have the _float_ data type, +InfluxDB returns a `400` error and a message that describes the result: + +```json +{ + "code": "invalid", + "message": "partial write error (5 accepted): 1 out of 6 points rejected (check rejected_points in your _monitoring bucket for further information)" +} +``` + +InfluxDB logs the following `rejected_points` entry to the `_monitoring` bucket: + +| _measurement | _field | _value | bucket | field | gotType | measurement | reason | wantType | +|:------------------|:-------|:-------|:-------------------|:--------------|:---------|:------------|:----------------------------------|:---------| +| rejected_points | count | 1 | a7d5558b880a93da | temperature | String | airSensors | field type mismatch with schema | Float | + +{{% /show-in %}} \ No newline at end of file diff --git a/content/shared/influxdb3-admin/distinct-value-cache/_index.md b/content/shared/influxdb3-admin/distinct-value-cache/_index.md index bd3e42e2b..0a5ef6324 
100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/_index.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/_index.md @@ -4,7 +4,7 @@ values of one or more columns in a table, improving the performance of queries that return distinct tag and field values. The DVC is an in-memory cache that stores distinct values for specific columns -in a table. When you create an DVC, you can specify what columns' distinct +in a table. When you create a DVC, you can specify what columns' distinct values to cache, the maximum number of distinct value combinations to cache, and the maximum age of cached values. A DVC is associated with a table, which can have multiple DVCs. @@ -15,9 +15,6 @@ have multiple DVCs. {{% show-in "core" %}} - [Distinct Value Caches are flushed when the server stops](#distinct-value-caches-are-flushed-when-the-server-stops) {{% /show-in %}} - {{% show-in "enterprise" %}} - - [Distinct Value Caches are rebuilt on restart](#distinct-value-caches-are-rebuilt-on-restart) - {{% /show-in %}} Consider a dataset with the following schema: @@ -71,13 +68,16 @@ similar to this: DVCs are stored in memory; the larger the cache, the more memory your InfluxDB 3 node requires to maintain it. Consider the following: +- [Cache data loading](#cache-data-loading) - [High cardinality limits](#high-cardinality-limits) {{% show-in "core" %}} - [Distinct Value Caches are flushed when the server stops](#distinct-value-caches-are-flushed-when-the-server-stops) {{% /show-in %}} -{{% show-in "enterprise" %}} -- [Distinct Value Caches are rebuilt on restart](#distinct-value-caches-are-rebuilt-on-restart) -{{% /show-in %}} + +## Cache data loading + +On cache creation, {{% product-name %}} loads historical data into the cache. +On restart, the server automatically reloads cache data. ### High cardinality limits @@ -96,11 +96,3 @@ stops. 
After a server restart, {{% product-name %}} only writes new values to the DVC when you write data, so there may be a period of time when some values are unavailable in the DVC. {{% /show-in %}} - -{{% show-in "enterprise" %}} -### Distinct Value Caches are rebuilt on restart - -Because the DVC is an in-memory cache, the cache is flushed any time the server -stops. After a server restarts, {{< product-name >}} uses persisted data to -rebuild the DVC. -{{% /show-in %}} diff --git a/content/shared/influxdb3-admin/distinct-value-cache/create.md b/content/shared/influxdb3-admin/distinct-value-cache/create.md index d0e4e69ea..c897c0dbf 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/create.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/create.md @@ -95,12 +95,10 @@ Replace the following: - {{% code-placeholder-key %}}`DVC_NAME`{{% /code-placeholder-key %}}: a unique name for the DVC -> [!Note] -> #### Values are cached on write -> -> Values are cached on write. When you create a cache, it will not cache -> previously written points, only newly written points. -> + +The cache imports the distinct values from the table and starts caching them. + +> [!Important] > #### DVC size and persistence > > The DVC is stored in memory, so it's important to consider the size and diff --git a/content/shared/influxdb3-admin/last-value-cache/_index.md b/content/shared/influxdb3-admin/last-value-cache/_index.md index 6be6100d8..7b473c2d3 100644 --- a/content/shared/influxdb3-admin/last-value-cache/_index.md +++ b/content/shared/influxdb3-admin/last-value-cache/_index.md @@ -17,9 +17,6 @@ An LVC is associated with a table, which can have multiple LVCs. 
{{% show-in "core" %}} - [Last Value Caches are flushed when the server stops](#last-value-caches-are-flushed-when-the-server-stops) {{% /show-in %}} - {{% show-in "enterprise" %}} - - [Last Value Caches are rebuilt on restart](#last-value-caches-are-rebuilt-on-restart) - {{% /show-in %}} - [Defining value columns](#defining-value-columns) Consider a dataset with the following schema (similar to the @@ -84,15 +81,17 @@ similar to the following: LVCs are stored in memory; the larger the cache, the more memory your InfluxDB 3 node requires to maintain it. Consider the following: +- [Cache data loading](#cache-data-loading) - [High cardinality key columns](#high-cardinality-key-columns) - [Value count](#value-count) {{% show-in "core" %}} - [Last Value Caches are flushed when the server stops](#last-value-caches-are-flushed-when-the-server-stops) {{% /show-in %}} -{{% show-in "enterprise" %}} -- [Last Value Caches are rebuilt on restart](#last-value-caches-are-rebuilt-on-restart) -{{% /show-in %}} -- [Defining value columns](#defining-value-columns) + +## Cache data loading + +On cache creation, {{% product-name %}} loads historical data into the cache. +On restart, the server automatically reloads cache data. ### High cardinality key columns @@ -141,14 +140,6 @@ you write data, so there may be a period of time when some values are unavailable in the LVC. {{% /show-in %}} -{{% show-in "enterprise" %}} -### Last Value Caches are rebuilt on restart - -Because the LVC is an in-memory cache, the cache is flushed any time the server -stops. After a server restarts, {{< product-name >}} uses persisted data to -rebuild the LVC. 
-{{% /show-in %}} - ### Defining value columns When creating an LVC, if you include the `--value-columns` options to specify diff --git a/content/shared/influxdb3-admin/last-value-cache/create.md b/content/shared/influxdb3-admin/last-value-cache/create.md index 4a439ffc4..febc66f83 100644 --- a/content/shared/influxdb3-admin/last-value-cache/create.md +++ b/content/shared/influxdb3-admin/last-value-cache/create.md @@ -109,12 +109,9 @@ Replace the following: - {{% code-placeholder-key %}}`LVC_NAME`{{% /code-placeholder-key %}}: a unique name for the LVC -> [!Note] -> #### Values are cached on write -> -> Values are cached on write. When you create a cache, it will not cache -> previously written points, only newly written points. -> +The cache imports the distinct values from the table and starts caching them. + +> [!Important] > #### LVC size and persistence > > The LVC is stored in memory, so it's important to consider the size and persistence diff --git a/content/shared/influxdb3-admin/tokens/_index.md b/content/shared/influxdb3-admin/tokens/_index.md index dd9c51ea4..2ab0675ae 100644 --- a/content/shared/influxdb3-admin/tokens/_index.md +++ b/content/shared/influxdb3-admin/tokens/_index.md @@ -1,4 +1,70 @@ -Manage tokens to authenticate and authorize access to resources and data in your -{{< product-name >}} instance. +Manage tokens to authenticate and authorize access to server actions, resources, and data in your {{< product-name >}} instance. + +## Provide your token + +If you start the {{< product-name >}} server with authentication enabled (the default), future server actions (CLI commands and HTTP API requests) require a valid token for authorization. + +The first admin token you create is the _operator_ token (named `_admin`), which has full administrative privileges. +You can use the operator token to authenticate your requests and manage additional authorization tokens. 
+ +The mechanism for providing your token depends on the client you use to interact with {{% product-name %}}--for example: + +{{< tabs-wrapper >}} +{{% tabs %}} +[influxdb3 CLI](#influxdb3-cli-auth) +[cURL](#curl-auth) +{{% /tabs %}} +{{% tab-content %}} + +When using the `influxdb3` CLI, you can set the `INFLUXDB3_AUTH_TOKEN` environment variable to automatically provide your +authorization token to all `influxdb3` commands--for example: + +{{% code-placeholders "YOUR_AUTH_TOKEN" %}} +```bash +# Export your token as an environment variable +export INFLUXDB3_AUTH_TOKEN=YOUR_AUTH_TOKEN + +# Run an influxdb3 command +influxdb3 query \ + --database DATABASE_NAME \ + "SELECT * FROM 'DATABASE_NAME' WHERE time > now() - INTERVAL '10 minutes'" +``` +{{% /code-placeholders %}} + +To specify a token in the command and override the environment variable, pass the `--token` option with your authorization token--for example: + +{{% code-placeholders "YOUR_AUTH_TOKEN" %}} +```bash +# Include the --token option in your influxdb3 command +influxdb3 query \ + --token YOUR_AUTH_TOKEN \ + --database DATABASE_NAME \ + "SELECT * FROM 'DATABASE_NAME' WHERE time > now() - INTERVAL '10 minutes'" +``` +{{% /code-placeholders %}} + +You can also set the `INFLUXDB3_AUTH_TOKEN` environment variable to automatically provide your +authorization token to all `influxdb3` commands. 
+ +{{% /tab-content %}} +{{% tab-content %}} + +{{% code-placeholders "YOUR_AUTH_TOKEN" %}} +```bash +# Add your token to the HTTP Authorization header +curl "http://{{< influxdb/host >}}/api/v3/query_sql" \ + --header "Authorization: Bearer YOUR_AUTH_TOKEN" \ + --data-urlencode "db=DATABASE_NAME" \ + --data-urlencode "q=SELECT * FROM 'DATABASE_NAME' WHERE time > now() - INTERVAL '10 minutes'" +``` +{{% /code-placeholders %}} + +{{% /tab-content %}} +{{< /tabs-wrapper >}} + +Replace the following with your values: + +- {{% code-placeholder-key %}}`YOUR_AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link %}} +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database you want to query {{< children hlevel="h2" readmore=true hr=true >}} diff --git a/content/shared/influxdb3-admin/tokens/admin/_index.md b/content/shared/influxdb3-admin/tokens/admin/_index.md index da3e84112..ba8b9ac1d 100644 --- a/content/shared/influxdb3-admin/tokens/admin/_index.md +++ b/content/shared/influxdb3-admin/tokens/admin/_index.md @@ -1,2 +1,27 @@ - -{{< children hlevel="h2" readmore=true hr=true >}} \ No newline at end of file + +Manage {{< product-name omit="Clustered" >}} admin tokens to authorize server actions, `influxdb3` CLI commands, and HTTP API endpoints for your {{< product-name omit="Clustered" >}} instance. +Administrative (_admin_) tokens provide full system access and management capabilities for your {{< product-name omit="Clustered" >}} instance. +{{% show-in "core" %}} +Admin tokens can create, edit, and delete other admin tokens. +{{% /show-in %}} +{{% show-in "enterprise" %}} +Admin tokens can create, edit, and delete other admin tokens, as well as manage [resource tokens](/influxdb3/version/admin/tokens/resource/). +{{% /show-in %}} + +{{% product-name omit="Clustered" %}} supports two types of admin tokens: + +- **Operator token**: A system-generated administrative token with the name `_admin`. 
+ - Cannot be edited or deleted + - Never expires + - Cannot be recreated if lost (future functionality) + - Can be regenerated using the CLI + +- **Named admin token**: User-defined administrative tokens with full admin permissions. + - Can be created, edited, and deleted + - Support expiration dates + - Cannot modify or remove the operator token + +An {{% product-name omit="Clustered" %}} instance can have one operator token and unlimited named admin tokens. + +{{< children hlevel="h2" readmore=true hr=true >}} diff --git a/content/shared/influxdb3-admin/tokens/admin/create.md b/content/shared/influxdb3-admin/tokens/admin/create.md index cba3c7def..d63014c26 100644 --- a/content/shared/influxdb3-admin/tokens/admin/create.md +++ b/content/shared/influxdb3-admin/tokens/admin/create.md @@ -1,18 +1,36 @@ - Use the [`influxdb3 create token --admin` subcommand](/influxdb3/version/reference/cli/influxdb3/create/token/) -or the [HTTP API](/influxdb3/version/api/v3/) -to create an [admin token](/influxdb3/version/admin/tokens/admin/) for your {{< product-name omit="Clustered" >}} instance. -An admin token grants full access to all actions for your InfluxDB 3 instance. +with the `--name` option or the HTTP API [`/api/v3/configure/token/admin`](/influxdb3/version/api/v3/) endpoint +to create an admin token for your {{< product-name omit="Clustered" >}} instance. +An admin token grants full access to all actions for your InfluxDB 3 instance and can be referenced by its name. + +{{% product-name omit="Clustered" %}} supports two types of admin tokens: +- **Operator token**: A system-generated administrative token with the name `_admin`. + - Cannot be edited or deleted + - Never expires + - Cannot be recreated if lost (future functionality) + - Can be regenerated using the CLI +- **Named admin token**: User-defined administrative tokens with full admin permissions. 
+  - Can be created, edited, and deleted
+  - Support expiration dates
+  - Cannot modify or remove the operator token
+
+An {{% product-name omit="Clustered" %}} instance can have one operator token and unlimited named admin tokens.
+
+[Create an operator token](#create-an-operator-token)
+[Create a named admin token](#create-a-named-admin-token)
 
 > [!Note]
 > #### Store secure tokens in a secret store
 >
 > Token strings are returned _only_ on token creation.
 > We recommend storing database tokens in a **secure secret store**.
-> Anyone with access to the admin token has full control over your {{< product-name >}} instance.
-> If you lose the admin token string, you must regenerate the token.
+> Anyone with access to the named admin token has full control over your {{< product-name >}} instance.
+> If you lose the named admin token string, you must regenerate the token.
 
-## Create an admin token
+## Create an operator token
+
+The first admin token you create for your {{% product-name %}}
+instance is the operator token.
 
 {{< tabs-wrapper >}}
 {{% tabs %}}
@@ -20,38 +38,71 @@ An admin token grants full access to all actions for your InfluxDB 3 instance.
 [HTTP API](#use-the-http-api)
 {{% /tabs %}}
 {{% tab-content %}}
-
-Use the `influxdb3 create token --admin` command:
+Use the `influxdb3 create token --admin` command without a token name:
 
 ```bash
 influxdb3 create token --admin
 ```
 
-The output contains the token string in plain text.
-
-To use the token as the default for later commands, and to persist the token
-across sessions, assign the token string to the `INFLUXDB3_AUTH_TOKEN` environment variable.
{{% /tab-content %}} {{% tab-content %}} -Use the following endpoint to create an admin token: +Use the following endpoint to create an operator token: -{{% show-in "core" %}} {{% api-endpoint method="POST" endpoint="/api/v3/configure/token/admin" api-ref="/influxdb3/version/api/v3/#operation/PostCreateAdminToken" %}} ```bash curl -X POST "http://{{< influxdb/host >}}/api/v3/configure/token/admin" \ ---header 'Accept: application/json' \ ---header 'Content-Type: application/json' ``` -{{% /show-in %}} -{{% show-in "enterprise" %}} -{{% api-endpoint method="POST" endpoint="/api/v3/enterprise/configure/token/admin" api-ref="/influxdb3/version/api/v3/#operation/PostCreateAdminToken" %}} -```bash -curl -X POST "http://{{< influxdb/host >}}/api/v3/{{< product-key >}}/configure/token/admin" \ ---header 'Accept: application/json' \ ---header 'Content-Type: application/json' -``` -{{% /show-in %}} {{% /tab-content %}} {{< /tabs-wrapper >}} + +The output contains the token string in plain text. + +## Create a named admin token + +{{< tabs-wrapper >}} +{{% tabs %}} +[CLI](#use-the-influxdb3-cli) +[HTTP API](#use-the-http-api) +{{% /tabs %}} +{{% tab-content %}} +Use the `influxdb3 create token --admin` command with a token name: + +{{% code-placeholders "TOKEN_NAME|ADMIN_TOKEN" %}} +```bash +influxdb3 create token --admin --token ADMIN_TOKEN --name "TOKEN_NAME" +``` +{{% /code-placeholders %}} + +Replace the following with your values: + +- {{% code-placeholder-key %}}`ADMIN_TOKEN`{{% /code-placeholder-key %}}: your existing operator or named admin token +- {{% code-placeholder-key %}}`TOKEN_NAME`{{% /code-placeholder-key %}}: the name you want to assign to the new admin token + +The output contains the token string in plain text. 
+
+{{% /tab-content %}}
+{{% tab-content %}}
+Use the following endpoint to create a named admin token:
+
+{{% api-endpoint method="POST" endpoint="/api/v3/configure/token/admin" api-ref="/influxdb3/version/api/v3/#operation/PostCreateAdminToken" %}}
+
+```bash
+curl -X POST "http://{{< influxdb/host >}}/api/v3/configure/token/admin" \
+  --header 'Authorization: Bearer ADMIN_TOKEN' \
+  --json '{
+    "name": "TOKEN_NAME"
+  }'
+```
+Replace the following with your values:
+
+- {{% code-placeholder-key %}}`ADMIN_TOKEN`{{% /code-placeholder-key %}}: your existing operator or named admin token
+- {{% code-placeholder-key %}}`TOKEN_NAME`{{% /code-placeholder-key %}}: the name you want to assign to the new admin token
+
+The response body contains the token string in plain text.
+{{% /tab-content %}}
+{{< /tabs-wrapper >}}
+
+_To use the token as the default for later commands, and to persist the token
+across sessions, assign the token string to the `INFLUXDB3_AUTH_TOKEN` environment variable._
diff --git a/content/shared/influxdb3-admin/tokens/admin/list.md b/content/shared/influxdb3-admin/tokens/admin/list.md
index 4fe7eebc5..ac2732cff 100644
--- a/content/shared/influxdb3-admin/tokens/admin/list.md
+++ b/content/shared/influxdb3-admin/tokens/admin/list.md
@@ -8,8 +8,11 @@ data and resources in your InfluxDB 3 instance.
 > Token metadata includes the hashed token string.
 > InfluxDB 3 does not store the raw token string.
 
-In the following examples, replace {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}} with your InfluxDB {{% token-link "admin" %}}
-{{% show-in "enterprise" %}} or a token with read permission on the `_internal` system database`{{% /show-in %}}.
+> [!Important]
+> #### Required permissions
+>
+> Listing admin tokens requires a valid InfluxDB {{% token-link "admin" %}}{{% show-in "enterprise" %}} or a token with read access to the `_internal` system database{{% /show-in %}}.
+> For more information, see how to [provide your token](/influxdb3/version/admin/tokens/#provide-your-token). ## List all tokens diff --git a/content/shared/influxdb3-admin/tokens/admin/regenerate.md b/content/shared/influxdb3-admin/tokens/admin/regenerate.md index 8ab8b0b0e..76ac054c5 100644 --- a/content/shared/influxdb3-admin/tokens/admin/regenerate.md +++ b/content/shared/influxdb3-admin/tokens/admin/regenerate.md @@ -1,54 +1,45 @@ -Use the `influxdb3` CLI or the HTTP API to regenerate an admin token. +Use the `influxdb3` CLI or the HTTP API to regenerate the operator (`_admin`) token for your {{% product-name %}} instance. Regenerate a token to rotate it as part of your security practices or if you suspect the token has been compromised. -{{< show-in "enterprise" >}} -Regenerating an admin token deactivates the previous token, +Regenerating the operator token deactivates the previous token, stores the SHA512 hash and metadata of the new token, and returns the new token string. -{{< /show-in >}} -{{< show-in "core" >}} -Regenerating the admin token deactivates the previous token, updates the `_admin` token -SHA512 hash and metadata, and returns the new token string. -{{< /show-in >}} - -An admin token grants access to all actions on the server. ## Prerequisite -To regenerate a token, you need the current token string. +To regenerate an operator token, you need the current token string. -## Use the CLI or HTTP API to regenerate an admin token +## Use the CLI or HTTP API to regenerate the operator token > [!Important] -> #### Securely store your token -> -> InfluxDB lets you view the token string only when you create the token. -> Store your token in a secure location, as you cannot retrieve it from the database later. -> InfluxDB 3 stores only the token's hash and metadata in the catalog. +> #### Regenerating the operator token +> Regenerating the operator token invalidates the previous token. 
+> Make sure to update any applications or scripts that use the operator token.
+
+To regenerate the operator token, use the [`influxdb3 create token`](/influxdb3/version/reference/cli/influxdb3/create/token/) command with the `--admin` and `--regenerate` flags:
 
 {{< tabs-wrapper >}}
 {{% tabs %}}
-[CLI](#cli-regenerate-admin-token)
-[HTTP API](#http-api-regenerate-admin-token)
+[CLI](#cli-regenerate)
+[HTTP API](#http-api-regenerate)
 {{% /tabs %}}
 {{% tab-content %}}
 
 Use the `--regenerate` flag with the
-`influxdb3 create token --admin` subcommand--for example:
+[`influxdb3 create token --admin`](/influxdb3/version/reference/cli/influxdb3/create/token/) subcommand--for example:
 
-{{% code-placeholders "ADMIN_TOKEN" %}}
+{{% code-placeholders "OPERATOR_TOKEN" %}}
 ```bash
 influxdb3 create token --admin \
-  --token ADMIN_TOKEN \
   --regenerate
+  OPERATOR_TOKEN
 ```
 {{% /code-placeholders %}}
 
 In your command,
-replace {{% code-placeholder-key %}}`ADMIN_TOKEN`{{% /code-placeholder-key %}}
-with the current token string.
+replace {{% code-placeholder-key %}}`OPERATOR_TOKEN`{{% /code-placeholder-key %}}
+with the current operator (`_admin`) token string.
 
-The CLI asks for confirmation before regenerating the token.
 The output contains the new token string and InfluxDB deactivates the previous token string.
{{% /tab-content %}} @@ -56,40 +47,33 @@ The output contains the new token string and InfluxDB deactivates the previous t Use the following HTTP API endpoint: -{{% show-in "core" %}} {{% api-endpoint method="POST" endpoint="/api/v3/configure/token/admin/regenerate" api-ref="/influxdb3/version/api/v3/configure/token/admin/regenerate" %}} -{{% /show-in %}} -{{% show-in "enterprise" %}} -{{% api-endpoint method="POST" endpoint="/api/v3/enterprise/configure/token/admin/regenerate" api-ref="/influxdb3/version/api/v3/enterprise/configure/token/admin" %}} -{{% /show-in %}} - -In your request, send an `Authorization` header with your current admin token string +In your request, send an `Authorization` header with your current operator token string --for example: -{{% show-in "core" %}} -{{% code-placeholders "ADMIN_TOKEN" %}} +{{% code-placeholders "OPERATOR_TOKEN" %}} ```bash -curl -X POST "http://{{< influxdb/host >}}/api/v3/configure/token/admin" \ - --header "Authorization: Bearer ADMIN_TOKEN" \ +curl -X POST "http://{{< influxdb/host >}}/api/v3/configure/token/admin/regenerate" \ + --header "Authorization: Bearer OPERATOR_TOKEN" \ --header "Accept: application/json" ``` {{% /code-placeholders %}} -{{% /show-in %}} -{{% show-in "enterprise" %}} -{{% code-placeholders "ADMIN_TOKEN" %}} -```bash -curl -X POST "http://{{< influxdb/host >}}/api/v3/enterprise/configure/token/admin" \ - --header "Authorization: Bearer ADMIN_TOKEN" \ - --header "Accept: application/json" -``` -{{% /code-placeholders %}} -{{% /show-in %}} +In your command, replace {{% code-placeholder-key %}}`OPERATOR_TOKEN`{{% /code-placeholder-key %}} with the current token string. -In your command, replace {{% code-placeholder-key %}}`ADMIN_TOKEN`{{% /code-placeholder-key %}} with the current token string. - -The output contains the new token string and InfluxDB deactivates the previous token string. 
+The response body contains the new operator token string in plain text, and InfluxDB deactivates the previous token string. {{% /tab-content %}} {{< /tabs-wrapper >}} + +To use the token as the default for later commands, and to persist the token +across sessions, assign the token string to the `INFLUXDB3_AUTH_TOKEN` environment variable. + +## Important considerations + +- Regenerating the operator token invalidates the previous token. +- If you lose the operator token, there is no recovery mechanism. +- `--regenerate` only works for the operator token. You can't use the `--regenerate` flag with the `influxdb3 create token --admin` command to regenerate a named admin token. +- Ensure that you update any applications or scripts that use the operator token with the new token string. +- Always store your operator token securely and consider implementing proper secret management practices. diff --git a/content/shared/influxdb3-cli/create/_index.md b/content/shared/influxdb3-cli/create/_index.md index cf436cf11..3c9fa9ae2 100644 --- a/content/shared/influxdb3-cli/create/_index.md +++ b/content/shared/influxdb3-cli/create/_index.md @@ -18,7 +18,6 @@ influxdb3 create | [file_index](/influxdb3/version/reference/cli/influxdb3/create/file_index/) | Create a new file index for a database or table | | [last_cache](/influxdb3/version/reference/cli/influxdb3/create/last_cache/) | Create a new last value cache | | [distinct_cache](/influxdb3/version/reference/cli/influxdb3/create/distinct_cache/) | Create a new distinct value cache | -| [plugin](/influxdb3/version/reference/cli/influxdb3/create/plugin/) | Create a new processing engine plugin | | [table](/influxdb3/version/reference/cli/influxdb3/create/table/) | Create a new table in a database | | [token](/influxdb3/version/reference/cli/influxdb3/create/token/) | Create a new authentication token | | [trigger](/influxdb3/version/reference/cli/influxdb3/create/trigger/) | Create a new trigger for the processing engine | diff 
--git a/content/shared/influxdb3-cli/create/database.md b/content/shared/influxdb3-cli/create/database.md index 3b3a99c91..630546c14 100644 --- a/content/shared/influxdb3-cli/create/database.md +++ b/content/shared/influxdb3-cli/create/database.md @@ -1,5 +1,6 @@ +The `influxdb3 create database` command creates a new database in your {{< product-name >}} instance. -The `influxdb3 create database` command creates a new database. +Provide a database name and, optionally, specify connection settings and authentication credentials using flags or environment variables. ## Usage @@ -11,11 +12,10 @@ influxdb3 create database [OPTIONS] ## Arguments -- **DATABASE_NAME**: The name of the database to create. - Valid database names are alphanumeric and start with a letter or number. - Dashes (`-`) and underscores (`_`) are allowed. + +- **`DATABASE_NAME`**: The name of the database to create. Valid database names are alphanumeric and start with a letter or number. Dashes (-) and underscores (_) are allowed. - Environment variable: `INFLUXDB3_DATABASE_NAME` +You can also set the database name using the `INFLUXDB3_DATABASE_NAME` environment variable. ## Options @@ -29,7 +29,7 @@ influxdb3 create database [OPTIONS] ### Option environment variables -You can use the following environment variables to set command options: +You can use the following environment variables instead of providing CLI options directly: | Environment Variable | Option | | :------------------------ | :----------- | @@ -38,11 +38,9 @@ You can use the following environment variables to set command options: ## Examples -- [Create a new database](#create-a-new-database) -- [Create a new database while specifying the token inline](#create-a-new-database-while-specifying-the-token-inline) - -In the examples below, replace the following: +The following examples show how to create a database. 
+In your commands replace the following: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Database name - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: @@ -50,7 +48,9 @@ In the examples below, replace the following: {{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} -### Create a new database +### Create a database (default) + +Creates a database using settings from environment variables and defaults. @@ -58,7 +58,10 @@ In the examples below, replace the following: influxdb3 create database DATABASE_NAME ``` -### Create a new database while specifying the token inline +### Create a database with an authentication token + +Creates a database using the specified arguments. +Flags override their associated environment variables. diff --git a/content/shared/influxdb3-cli/create/distinct_cache.md b/content/shared/influxdb3-cli/create/distinct_cache.md index 8fc124e1b..e1434c411 100644 --- a/content/shared/influxdb3-cli/create/distinct_cache.md +++ b/content/shared/influxdb3-cli/create/distinct_cache.md @@ -1,5 +1,6 @@ +The `influxdb3 create distinct_cache` command creates a new distinct value cache for a specific table and column set in your {{< product-name >}} instance. -The `influxdb3 create distinct_cache` command creates a new distinct value cache. +Use this command to configure a cache that tracks unique values in specified columns. You must provide the database, token, table, and columns. Optionally, you can specify a name for the cache. ## Usage @@ -16,10 +17,9 @@ influxdb3 create distinct_cache [OPTIONS] \ ## Arguments -- **CACHE_NAME**: _(Optional)_ Name for the cache. - If not provided, the command automatically generates a name. +- **`CACHE_NAME`**: _(Optional)_ A name to assign to the cache. If omitted, the CLI generates a name automatically. 
-## Options +## Options | Option | | Description | | :----- | :------------------ | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | @@ -52,4 +52,69 @@ You can use the following environment variables to set command options: | `INFLUXDB3_DATABASE_NAME` | `--database` | | `INFLUXDB3_AUTH_TOKEN` | `--token` | - + +## Prerequisites + +Before creating a distinct value cache, make sure you: + +1. [Create a database](/influxdb3/version/reference/cli/influxdb3/create/database/) + +2. [Create a table](/influxdb3/version/reference/cli/influxdb3/create/table/) that includes the columns you want to cache + +3. Have a valid authentication token + +## Examples + +Before running the following commands, replace the placeholder values with your own: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: + The database name +- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: + The name of the table to cache values from +- {{% code-placeholder-key %}}`CACHE_NAME`{{% /code-placeholder-key %}}: + The name of the distinct value cache to create +- {{% code-placeholder-key %}}`COLUMN_NAME`{{% /code-placeholder-key %}}: The column to +cache distinct values from + +You can also set environment variables (such as `INFLUXDB3_AUTH_TOKEN`) instead of passing options inline. + +{{% code-placeholders "(DATABASE|TABLE|COLUMN|CACHE)_NAME" %}} + +### Create a distinct cache for one column + +Track unique values from a single column. This setup is useful for testing or simple use cases. + + + +```bash +influxdb3 create distinct_cache \ + --database DATABASE_NAME \ + --table TABLE_NAME \ + --column COLUMN_NAME \ + CACHE_NAME +``` + +### Create a hierarchical cache with constraints + +Create a distinct value cache for multiple columns. 
The following example tracks unique combinations of `room` and `sensor_id`, and sets limits on the number of entries and their maximum age. + + + +```bash +influxdb3 create distinct_cache \ + --database my_test_db \ + --table my_sensor_table \ + --columns room,sensor_id \ + --max-cardinality 1000 \ + --max-age 30d \ + my_sensor_distinct_cache +``` + +{{% /code-placeholders %}} + +## Common pitfalls + +- `--column` is not valid. Use `--columns`. +- Tokens must be included explicitly unless set via `INFLUXDB3_AUTH_TOKEN` +- Table and column names must already exist or be recognized by the engine + diff --git a/content/shared/influxdb3-cli/create/last_cache.md b/content/shared/influxdb3-cli/create/last_cache.md index 8aa3a402d..8e1eef91a 100644 --- a/content/shared/influxdb3-cli/create/last_cache.md +++ b/content/shared/influxdb3-cli/create/last_cache.md @@ -1,18 +1,23 @@ - -The `influxdb3 create last_cache` command creates a new last value cache. +The `influxdb3 create last_cache` command creates a last value cache, which stores the most recent values for specified columns in a table. Use this to efficiently retrieve the latest values based on key column combinations. ## Usage +{{% code-placeholders "DATABASE_NAME|TABLE_NAME|AUTH_TOKEN|CACHE_NAME" %}} + ```bash -influxdb3 create last_cache [OPTIONS] --database --table [CACHE_NAME] +influxdb3 create last_cache [OPTIONS] \ + --database DATABASE_NAME \ + --table TABLE_NAME \ + --token AUTH_TOKEN \ + CACHE_NAME ``` +{{% /code-placeholders %}} ## Arguments -- **CACHE_NAME**: _(Optional)_ Name for the cache. - If not provided, the command automatically generates a name. +- **CACHE_NAME**: _(Optional)_ Name for the cache. If omitted, InfluxDB automatically generates one. ## Options @@ -32,7 +37,7 @@ influxdb3 create last_cache [OPTIONS] --database --table
### Option environment variables -You can use the following environment variables to set command options: +You can use the following environment variables as substitutes for CLI options: | Environment Variable | Option | | :------------------------ | :----------- | @@ -40,4 +45,59 @@ You can use the following environment variables to set command options: | `INFLUXDB3_DATABASE_NAME` | `--database` | | `INFLUXDB3_AUTH_TOKEN` | `--token` | - +## Prerequisites + +Before creating a last value cache, ensure you’ve done the following: + +- Create a [database](/influxdb3/version/reference/cli/influxdb3/create/database/). +- Create a [table](/influxdb3/version/reference/cli/influxdb3/create/table/) with the columns you want to cache. +- Have a valid authentication token. + +## Examples + +A last value cache stores the most recent values from specified columns in a table. + +### Create a basic last value cache for one column + +The following example shows how to track the most recent value for a single key (the last temperature for each room): + + + +```bash +influxdb3 create last_cache \ + --database DATABASE_NAME \ + --table my_sensor_table \ + --token AUTH_TOKEN \ + --key-columns room \ + --value-columns temp \ + my_temp_cache +``` + +### Create a last value cache with multiple keys and values + +The following example shows how to: + +- Use multiple columns as a composite key +- Track several values per key combination +- Set a cache entry limit with `--count` +- Configure automatic expiry with `--ttl` + + + +```bash +influxdb3 create last_cache \ + --database DATABASE_NAME \ + --table my_sensor_table \ + --token AUTH_TOKEN \ + --key-columns room,sensor_id \ + --value-columns temp,hum \ + --count 10 \ + --ttl 1h \ + my_sensor_cache +``` + +## Usage notes + +- Define the table schema to include all specified key and value columns. +- Pass tokens using `--token`, unless you've set one through an environment variable. 
+- Specify `--count` and `--ttl` to override the defaults; otherwise, the system uses default values. \ No newline at end of file diff --git a/content/shared/influxdb3-cli/create/plugin.md b/content/shared/influxdb3-cli/create/plugin.md deleted file mode 100644 index 33161b8af..000000000 --- a/content/shared/influxdb3-cli/create/plugin.md +++ /dev/null @@ -1,45 +0,0 @@ - -The `influxdb3 create plugin` command creates a new processing engine plugin. - -## Usage - - - -```bash -influxdb3 create plugin [OPTIONS] \ - --database \ - --token \ - --filename \ - --entry-point \ - -``` - -## Arguments - -- **PLUGIN_NAME**: The name of the plugin to create. - -## Options - -| Option | | Description | -| :----- | :-------------- | :--------------------------------------------------------------------------------------- | -| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) | -| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on | -| | `--token` | _({{< req >}})_ Authentication token | -| | `--filename` | _({{< req >}})_ Name of the plugin Python file in the plugin directory | -| | `--entry-point` | _({{< req >}})_ Entry point function name for the plugin | -| | `--plugin-type` | Type of trigger the plugin processes (default is `wal_rows`) | -| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) | -| `-h` | `--help` | Print help information | -| | `--help-all` | Print detailed help information | - -### Option environment variables - -You can use the following environment variables to set command options: - -| Environment Variable | Option | -| :------------------------ | :----------- | -| `INFLUXDB3_HOST_URL` | `--host` | -| `INFLUXDB3_DATABASE_NAME` | `--database` | -| `INFLUXDB3_AUTH_TOKEN` | `--token` | - - diff --git a/content/shared/influxdb3-cli/create/table.md b/content/shared/influxdb3-cli/create/table.md index 53d54b3e9..e3b858970 100644 --- 
a/content/shared/influxdb3-cli/create/table.md +++ b/content/shared/influxdb3-cli/create/table.md @@ -1,5 +1,10 @@ -The `influxdb3 create table` command creates a table in a database. +The `influxdb3 create table` command creates a new table in a specified database. Tables must include at least one tag column and can optionally include field columns with defined data types. + +> [!Note] +> InfluxDB automatically creates tables when you write line protocol data. Use this command +> only if you need to define a custom schema or apply a custom partition template before +> writing data. ## Usage @@ -39,7 +44,7 @@ influxdb3 create table [OPTIONS] \ ### Option environment variables -You can use the following environment variables to set command options: +You can use the following environment variables to set options instead of passing them via CLI flags: | Environment Variable | Option | | :------------------------ | :----------- | @@ -49,21 +54,20 @@ You can use the following environment variables to set command options: ## Examples -- [Create a table](#create-a-table) -- [Create a table with tag and field columns](#create-a-table-with-tag-and-field-columns) - -In the examples below, replace the following: +In the following examples, replace each placeholder with your actual values: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: - Database name + The database name - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: Authentication token - {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: - Table name + A name for the new table -{{% code-placeholders "(DATABASE|TABLE)_NAME" %}} +{{% code-placeholders "DATABASE_NAME|TABLE_NAME|AUTH_TOKEN" %}} -### Create a table +### Create an empty table + + ```bash influxdb3 create table \ @@ -86,4 +90,31 @@ influxdb3 create table \ TABLE_NAME ``` +### Verification + +Use the `SHOW TABLES` query to verify that the table was created successfully: + + + +```bash 
+influxdb3 query \ + --database DATABASE_NAME \ + --token AUTH_TOKEN \ + "SHOW TABLES" + +Example output: + ++---------------+--------------------+----------------------------+------------+ +| table_catalog | table_schema | table_name | table_type | ++---------------+--------------------+----------------------------+------------+ +| public | iox | my_sensor_table | BASE TABLE | +| public | system | distinct_caches | BASE TABLE | +| public | system | last_caches | BASE TABLE | +| public | system | parquet_files | BASE TABLE | ++---------------+--------------------+----------------------------+------------+ +``` + +> [!Note] +> `SHOW TABLES` is an SQL query. It isn't supported in InfluxQL. + {{% /code-placeholders %}} diff --git a/content/shared/influxdb3-cli/create/token.md b/content/shared/influxdb3-cli/create/token.md deleted file mode 100644 index f9da6f1c1..000000000 --- a/content/shared/influxdb3-cli/create/token.md +++ /dev/null @@ -1,32 +0,0 @@ - -The `influxdb3 create token` command creates a new authentication token. - -## Usage - - - -```bash -influxdb3 create token [OPTIONS] -``` - -## Commands - -| Command | Description | -| :----- | :----------- | :------------------------------ | -| `--admin` | Create an admin token for the {{< product-name >}} server. | -{{% show-in "enterprise" %}}| [`--permission`](/influxdb3/enterprise/reference/cli/influxdb3/create/token/permission/) | Create a resource token with fine-grained access permissions.
|{{% /show-in %}} - -## Options - -| Option | | Description | -| :----- | :----------- | :------------------------------ | -| `-h` | `--help` | Print help information | -| | `--help-all` | Print detailed help information | - -## Examples - -### Create an admin token - -```bash -influxdb3 create token --admin -``` \ No newline at end of file diff --git a/content/shared/influxdb3-cli/create/token/_index.md b/content/shared/influxdb3-cli/create/token/_index.md new file mode 100644 index 000000000..d1cb2139f --- /dev/null +++ b/content/shared/influxdb3-cli/create/token/_index.md @@ -0,0 +1,28 @@ +The `influxdb3 create token` command creates a new authentication token. This returns the raw token string. Use it to authenticate future CLI commands and API requests. + +> [!Important] +> InfluxDB displays the raw token string only once. Be sure to copy and securely store it. + +## Usage + + + +```bash +influxdb3 create token +``` + +## Commands + +| Command | Description | +| :----- | :------------------------------ | +| [`--admin`](/influxdb3/version/reference/cli/influxdb3/create/token/admin/) | Create an operator or named admin token for the {{< product-name >}} server. | +{{% show-in "enterprise" %}}| [`--permission`](/influxdb3/enterprise/reference/cli/influxdb3/create/token/permission/) | Create a resource token with fine-grained access permissions. |{{% /show-in %}} + +## Options + +| Option | | Description | +| :----- | :------- | :--------------------- | +| | `--admin` | Create an admin token | +| `-h` | `--help` | Print help information | + + diff --git a/content/shared/influxdb3-cli/create/token/admin.md b/content/shared/influxdb3-cli/create/token/admin.md new file mode 100644 index 000000000..84d79b033 --- /dev/null +++ b/content/shared/influxdb3-cli/create/token/admin.md @@ -0,0 +1,85 @@ + +Create an operator token or named admin token.
+ +## Usage + +``` +influxdb3 create token --admin [OPTIONS] +``` + +## Options + +| Option | Description | +|:-------|:------------| +| `--regenerate` | Regenerates the operator token. Requires `--token` and the current operator token | +| `--name ` | Name of the token | +| `--expiry ` | Expires in `duration`--for example, `10d` for 10 days or `1y` for 1 year | +| `--host ` | The host URL of the running InfluxDB 3 server [env: `INFLUXDB3_HOST_URL=`] [default: `http://127.0.0.1:8181`] | +| `--token ` | An existing admin token for the InfluxDB 3 server | +| `--tls-ca ` | Path to a custom TLS certificate authority (for testing or self-signed certificates) | +| `--format ` | Output format for token [possible values: `json`, `text`] | +| `-h`, `--help` | Print help information | +| `--help-all` | Print more detailed help information | + +## Examples + +### Create an operator token + +The operator token is a special token that has full administrative privileges on the InfluxDB server and doesn't expire. +The first admin token you create becomes the operator token for the instance. +You can create an operator token using the `--admin` flag without any additional options. + + + +```bash +influxdb3 create token --admin +``` + +The output is the raw token string you can use to authenticate future CLI commands and API requests. +For CLI commands, use the `--token` option or the `INFLUXDB3_AUTH_TOKEN` environment variable to pass the token string.
+ +### Use the operator token to create a named admin token + +{{% code-placeholders "OPERATOR_TOKEN|TOKEN_NAME|DURATION" %}} + + +```bash +influxdb3 create token \ + --admin \ + --token OPERATOR_TOKEN \ + --name TOKEN_NAME \ + --expiry DURATION +``` +{{% /code-placeholders %}} + +Replace the following: + +- {{% code-placeholder-key %}}`OPERATOR_TOKEN`{{% /code-placeholder-key %}}: Your operator token for the server +- {{% code-placeholder-key %}}`TOKEN_NAME`{{% /code-placeholder-key %}}: Name for your new admin token +- {{% code-placeholder-key %}}`DURATION`{{% /code-placeholder-key %}}: Duration for the token to remain valid, in [humantime](https://docs.rs/humantime/latest/humantime/fn.parse_duration.html) format (for example, `10d` for 10 days or `1y` for 1 year). + +### Use the token to create a database + +{{% code-placeholders "ADMIN_TOKEN|DATABASE_NAME" %}} + + + +```bash +influxdb3 create database \ + --token ADMIN_TOKEN \ + DATABASE_NAME +``` +{{% /code-placeholders %}} + +Replace the following: + +- {{% code-placeholder-key %}}`ADMIN_TOKEN`{{% /code-placeholder-key %}}: Your InfluxDB admin token +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Name for your new database + +> [!Note] +> #### Use CLI environment variables +> Set the token as an environment variable to simplify repeated CLI commands: +> +> ```bash +> export INFLUXDB3_AUTH_TOKEN=ADMIN_TOKEN +> ``` diff --git a/content/shared/influxdb3-cli/create/trigger.md b/content/shared/influxdb3-cli/create/trigger.md index 5c6e7a763..dbb223128 100644 --- a/content/shared/influxdb3-cli/create/trigger.md +++ b/content/shared/influxdb3-cli/create/trigger.md @@ -10,7 +10,7 @@ processing engine.
influxdb3 create trigger [OPTIONS] \ --database \ --token \ - --plugin \ + --plugin-filename \ --trigger-spec \ ``` @@ -21,17 +21,21 @@ influxdb3 create trigger [OPTIONS] \ ## Options -| Option | | Description | -| :----- | :--------------- | :--------------------------------------------------------------------------------------- | -| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) | -| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on | -| | `--token` | _({{< req >}})_ Authentication token | -| | `--plugin` | Plugin to execute when the trigger fires | -| | `--trigger-spec` | Trigger specification--for example `table:` or `all_tables` | -| | `--disabled` | Create the trigger in disabled state | -| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) | -| `-h` | `--help` | Print help information | -| | `--help-all` | Print detailed help information | +| Option | | Description | +| :----- | :------------------ | :------------------------------------------------------------------------------------------------------- | +| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) | +| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on | +| | `--token` | _({{< req >}})_ Authentication token | +| | `--plugin-filename` | _({{< req >}})_ Name of the file, stored in the server's `plugin-dir`, that contains the Python plugin code to run | +| | `--trigger-spec` | Trigger specification--for example `table:` or `all_tables` | +| | `--disabled` | Create the trigger in disabled state | +| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) | +| `-h` | `--help` | Print help information | +| | `--help-all` | Print detailed help information | + +If you want to use a plugin from the [Plugin 
Library](https://github.com/influxdata/influxdb3_plugins) repo, use the url path with `gh:` specified as the prefix. +For example, to use the [System Metrics](https://github.com/influxdata/influxdb3_plugins/blob/main/examples/schedule/system_metrics/system_metrics.py) plugin, the plugin filename is `gh:examples/schedule/system_metrics/system_metrics.py`. + ### Option environment variables @@ -43,4 +47,69 @@ You can use the following environment variables to set command options: | `INFLUXDB3_HOST_URL` | `--host` | | `INFLUXDB3_DATABASE_NAME` | `--database` | | `INFLUXDB3_AUTH_TOKEN` | `--token` | - +## Examples + +The following examples show how to use the `influxdb3 create trigger` command to create triggers in different scenarios. + + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Database name +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: Authentication token +- {{% code-placeholder-key %}}`PLUGIN_FILENAME`{{% /code-placeholder-key %}}: Python plugin filename +- {{% code-placeholder-key %}}`TRIGGER_NAME`{{% /code-placeholder-key %}}: +Name of the trigger to create +- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: +Name of the table to trigger on + +{{% code-placeholders "(DATABASE|TRIGGER)_NAME|AUTH_TOKEN|TABLE_NAME|PLUGIN_FILENAME" %}} + +### Create a trigger for a specific table + +Create a trigger that processes data from a specific table. + + + +```bash +influxdb3 create trigger \ + --database DATABASE_NAME \ + --token AUTH_TOKEN \ + --plugin-filename PLUGIN_FILENAME \ + --trigger-spec table:TABLE_NAME \ + TRIGGER_NAME +``` + +### Create a trigger for all tables + +Create a trigger that applies to all tables in the specified database. + + + +```bash +influxdb3 create trigger \ + --database DATABASE_NAME \ + --token AUTH_TOKEN \ + --plugin-filename PLUGIN_FILENAME \ + --trigger-spec all_tables \ + TRIGGER_NAME +``` + +This is useful when you want a trigger to apply to any table in the database, regardless of name.
+ +### Create a disabled trigger + +Create a trigger in a disabled state. + + + +```bash +influxdb3 create trigger \ + --disabled \ + --database DATABASE_NAME \ + --token AUTH_TOKEN \ + --plugin-filename PLUGIN_FILENAME \ + --trigger-spec table:TABLE_NAME \ + TRIGGER_NAME +``` + +Creating a trigger in a disabled state prevents it from running immediately. You can enable it later when you're ready to activate it. + +{{% /code-placeholders %}} diff --git a/content/shared/influxdb3-cli/delete/database.md b/content/shared/influxdb3-cli/delete/database.md index f83e8238d..6d675b414 100644 --- a/content/shared/influxdb3-cli/delete/database.md +++ b/content/shared/influxdb3-cli/delete/database.md @@ -11,7 +11,7 @@ influxdb3 delete database [OPTIONS] ## Arguments -- **DATABASE_NAME**: The name of the database to delete. +- **DATABASE_NAME**: The name of the database to delete. Valid database names are alphanumeric and start with a letter or number. Dashes (`-`) and underscores (`_`) are allowed. Environment variable: `INFLUXDB3_DATABASE_NAME` diff --git a/content/shared/influxdb3-cli/delete/plugin.md b/content/shared/influxdb3-cli/delete/plugin.md deleted file mode 100644 index 9aca888af..000000000 --- a/content/shared/influxdb3-cli/delete/plugin.md +++ /dev/null @@ -1,61 +0,0 @@ - -The `influxdb3 delete plugin` command deletes a processing engine plugin. - -## Usage - - - -```bash -influxdb3 delete plugin [OPTIONS] --database -``` - -## Arguments - -- **PLUGIN_NAME**: The name of the plugin to delete.
- -## Options - -| Option | | Description | -| :----- | :----------- | :--------------------------------------------------------------------------------------- | -| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) | -| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on | -| | `--token` | _({{< req >}})_ Authentication token | -| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) | -| `-h` | `--help` | Print help information | -| | `--help-all` | Print detailed help information | - -### Option environment variables - -You can use the following environment variables to set command options: - -| Environment Variable | Option | -| :------------------------ | :----------- | -| `INFLUXDB3_HOST_URL` | `--host` | -| `INFLUXDB3_DATABASE_NAME` | `--database` | -| `INFLUXDB3_AUTH_TOKEN` | `--token` | - -## Examples - -### Delete a plugin - -{{% code-placeholders "(DATABASE|PLUGIN)_NAME|AUTH_TOKEN" %}} - - - -```bash -influxdb3 delete plugin \ - --database DATABASE_NAME \ - --token AUTH_TOKEN \ - PLUGIN_NAME -``` - -{{% /code-placeholders %}} - -In the example above, replace the following: - -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: - Database name -- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: - Authentication token -- {{% code-placeholder-key %}}`PLUGIN_NAME`{{% /code-placeholder-key %}}: - Name of the plugin to delete diff --git a/content/shared/v3-enterprise-get-started/_index.md b/content/shared/influxdb3-get-started/_index.md similarity index 65% rename from content/shared/v3-enterprise-get-started/_index.md rename to content/shared/influxdb3-get-started/_index.md index 50116db52..b2d12227a 100644 --- a/content/shared/v3-enterprise-get-started/_index.md +++ b/content/shared/influxdb3-get-started/_index.md @@ -1,37 +1,13 @@ -InfluxDB is a database built to collect, process, transform, 
and store event and time series data, and is ideal for use cases that require real-time ingest and fast query response times to build user interfaces, monitoring, and automation solutions. -Common use cases include: - -- Monitoring sensor data -- Server monitoring -- Application performance monitoring -- Network monitoring -- Financial market and trading analytics -- Behavioral analytics - -InfluxDB is optimized for scenarios where near real-time data monitoring is essential and queries need to return quickly to support user experiences such as dashboards and interactive user interfaces. - -{{% product-name %}} is built on InfluxDB 3 Core, the InfluxDB 3 open source release. -Core's feature highlights include: - -* Diskless architecture with object storage support (or local disk with no dependencies) -* Fast query response times (under 10ms for last-value queries, or 30ms for distinct metadata) -* Embedded Python VM for plugins and triggers -* Parquet file persistence -* Compatibility with InfluxDB 1.x and 2.x write APIs - -The Enterprise version adds the following features to Core: - -* Historical query capability and single series indexing -* High availability -* Read replicas -* Enhanced security (coming soon) -* Row-level delete support (coming soon) -* Integrated admin UI (coming soon) ### What's in this guide +{{% show-in "enterprise" %}} This guide covers Enterprise as well as InfluxDB 3 Core, including the following topics: +{{% /show-in %}} +{{% show-in "core" %}} +This guide covers InfluxDB 3 Core (the open source release), including the following topics: +{{% /show-in %}} - [Install and startup](#install-and-startup) - [Authentication and authorization](#authentication-and-authorization) @@ -42,12 +18,21 @@ This guide covers Enterprise as well as InfluxDB 3 Core, including the following - [Last values cache](#last-values-cache) - [Distinct values cache](#distinct-values-cache) - [Python plugins and the processing 
engine](#python-plugins-and-the-processing-engine) +{{% show-in "enterprise" %}} - [Multi-server setups](#multi-server-setup) +{{% /show-in %}} + +> [!Tip] +> #### Find support for {{% product-name %}} +> +> The [InfluxDB Discord server](https://discord.gg/9zaNCW2PRT) is the best place to find support for {{% product-name %}}. +> For other InfluxDB versions, see the [Support and feedback](#bug-reports-and-feedback) options. ### Install and startup {{% product-name %}} runs on **Linux**, **macOS**, and **Windows**. +{{% show-in "enterprise" %}} {{% tabs-wrapper %}} {{% tabs %}} [Linux or macOS](#linux-or-macos) @@ -101,18 +86,67 @@ Pull the image: docker pull influxdb:3-enterprise ``` -##### InfluxDB 3 Explorer -- Query Interface (beta) + +{{% /tab-content %}} +{{% /tabs-wrapper %}} +{{% /show-in %}} -You can download the new InfluxDB 3 Explorer query interface using Docker. -Explorer is currently in beta. Pull the image: +{{% show-in "core" %}} +{{% tabs-wrapper %}} +{{% tabs %}} +[Linux or macOS](#linux-or-macos) +[Windows](#windows) +[Docker](#docker) +{{% /tabs %}} +{{% tab-content %}} + +To get started quickly, download and run the install script--for example, using [curl](https://curl.se/download.html): + ```bash -docker pull quay.io/influxdb/influxdb3-explorer:latest +curl -O https://www.influxdata.com/d/install_influxdb3.sh \ +&& sh install_influxdb3.sh +``` +Or, download and install [build artifacts](/influxdb3/core/install/#download-influxdb-3-core-binaries): + +- [Linux | AMD64 (x86_64) | GNU](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_amd64.tar.gz) + • + [sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_amd64.tar.gz.sha256) +- [Linux | ARM64 (AArch64) | GNU](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_arm64.tar.gz) + • + 
[sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_arm64.tar.gz.sha256) +- [macOS | Silicon (ARM64)](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_darwin_arm64.tar.gz) + • + [sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_darwin_arm64.tar.gz.sha256) + +> [!Note] +> macOS Intel builds are coming soon. + + +{{% /tab-content %}} +{{% tab-content %}} + +Download and install the {{% product-name %}} [Windows (AMD64, x86_64) binary](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}-windows_amd64.zip) + • +[sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}-windows_amd64.zip.sha256) + +{{% /tab-content %}} +{{% tab-content %}} + +The [`influxdb:3-core` image](https://hub.docker.com/_/influxdb/tags?tag=3-core&name=3-core) +is available for x86_64 (AMD64) and ARM64 architectures. + +Pull the image: + + +```bash +docker pull influxdb:3-core ``` {{% /tab-content %}} {{% /tabs-wrapper %}} +{{% /show-in %}} _Build artifacts and images update with every merge into the {{% product-name %}} `main` branch._ @@ -131,23 +165,28 @@ If your system doesn't locate `influxdb3`, then `source` the configuration file source ~/.zshrc ``` + #### Start InfluxDB -To start your InfluxDB instance, use the `influxdb3 serve` command -and provide the following: +To start your InfluxDB instance, use the `influxdb3 serve` command and provide the following: -- `--object-store`: Specifies the type of Object store to use. +- `--object-store`: Specifies the type of object store to use. InfluxDB supports the following: local file system (`file`), `memory`, S3 (and compatible services like Ceph or Minio) (`s3`), Google Cloud Storage (`google`), and Azure Blob Storage (`azure`). The default is `file`. 
Depending on the object store type, you may need to provide additional options for your object store configuration. -- `--cluster-id`: A string identifier that determines part of the storage path hierarchy. All nodes within the same cluster share this identifier. The storage path follows the pattern `//`. In a multi-node setup, this ID is used to reference the entire cluster. +{{% show-in "enterprise" %}} - `--node-id`: A string identifier that distinguishes individual server instances within the cluster. This forms the final part of the storage path: `//`. In a multi-node setup, this ID is used to reference specific nodes. +- `--cluster-id`: A string identifier that determines part of the storage path hierarchy. All nodes within the same cluster share this identifier. The storage path follows the pattern `//`. In a multi-node setup, this ID is used to reference the entire cluster. +{{% /show-in %}} +{{% show-in "core" %}} +- `--node-id`: A string identifier that distinguishes individual server instances. + This forms the final part of the storage path: `/`. +{{% /show-in %}} -> [!Note] -> The combined path structure `//` ensures proper organization of data in your object store, allowing for clean separation between clusters and individual nodes. +The following examples show how to start {{% product-name %}} with different object store configurations. > [!Note] > #### Diskless architecture @@ -156,17 +195,19 @@ and provide the following: > storage alone, eliminating the need for locally attached disks. > {{% product-name %}} can also work with only local disk storage when needed. -The following examples show how to start InfluxDB 3 with different object store configurations: +{{% show-in "enterprise" %}} +> [!Note] +> The combined path structure `//` ensures proper organization of data in your object store, allowing for clean separation between clusters and individual nodes. 
+{{% /show-in %}} -```bash -# Memory object store -# Stores data in RAM; doesn't persist data -influxdb3 serve \ ---node-id host01 \ ---cluster-id cluster01 \ ---object-store memory -``` +##### Filesystem object store +Store data in a specified directory on the local filesystem. +This is the default object store type. + +Replace the following with your values: + +{{% show-in "enterprise" %}} ```bash # Filesystem object store # Provide the filesystem directory @@ -176,20 +217,28 @@ influxdb3 serve \ --object-store file \ --data-dir ~/.influxdb3 ``` +{{% /show-in %}} +{{% show-in "core" %}} +```bash +# File system object store +# Provide the file system directory +influxdb3 serve \ + --node-id host01 \ + --object-store file \ + --data-dir ~/.influxdb3 +``` +{{% /show-in %}} -To run the [Docker image](/influxdb3/enterprise/install/#docker-image) and persist data to the filesystem, mount a volume for the Object store-for example, pass the following options: +To run the [Docker image](/influxdb3/version/install/#docker-image) and persist data to the file system, mount a volume for the object store-for example, pass the following options: -- `-v /path/on/host:/path/in/container`: Mounts a directory from your filesystem to the container +- `-v /path/on/host:/path/in/container`: Mounts a directory from your file system to the container - `--object-store file --data-dir /path/in/container`: Uses the mount for server storage -> [!Note] -> -> The {{% product-name %}} Docker image exposes port `8181`, the `influxdb3` server default for HTTP connections. -> To map the exposed port to a different port when running a container, see the Docker guide for [Publishing and exposing ports](https://docs.docker.com/get-started/docker-concepts/running-containers/publishing-ports/). 
+{{% show-in "enterprise" %}} ```bash -# Filesystem object store with Docker +# File system object store with Docker # Create a mount # Provide the mount path docker run -it \ @@ -200,19 +249,47 @@ docker run -it \ --object-store file \ --data-dir /path/in/container ``` +{{% /show-in %}} +{{% show-in "core" %}} + +```bash +# File system object store with Docker +# Create a mount +# Provide the mount path +docker run -it \ + -v /path/on/host:/path/in/container \ + influxdb:3-core influxdb3 serve \ + --node-id my_host \ + --object-store file \ + --data-dir /path/in/container +``` +{{% /show-in %}} +> [!Note] +> +> The {{% product-name %}} Docker image exposes port `8181`, the `influxdb3` server default for HTTP connections. +> To map the exposed port to a different port when running a container, see the Docker guide for [Publishing and exposing ports](https://docs.docker.com/get-started/docker-concepts/running-containers/publishing-ports/). + +##### S3 object store + +Store data in an S3-compatible object store. +This is useful for production deployments that require high availability and durability. +Provide your bucket name and credentials to access the S3 object store. 
+ +{{% show-in "enterprise" %}} ```bash # S3 object store (default is the us-east-1 region) -# Specify the Object store type and associated options +# Specify the object store type and associated options influxdb3 serve \ --node-id host01 \ --cluster-id cluster01 \ --object-store s3 \ - --bucket BUCKET \ - --aws-access-key-id AWS_ACCESS_KEY_ID \ + --bucket OBJECT_STORE_BUCKET \ + --aws-access-key AWS_ACCESS_KEY_ID \ --aws-secret-access-key AWS_SECRET_ACCESS_KEY ``` + ```bash # Minio or other open source object store # (using the AWS S3 API with additional parameters) @@ -221,22 +298,84 @@ influxdb3 serve \ --node-id host01 \ --cluster-id cluster01 \ --object-store s3 \ - --bucket BUCKET \ + --bucket OBJECT_STORE_BUCKET \ --aws-access-key-id AWS_ACCESS_KEY_ID \ --aws-secret-access-key AWS_SECRET_ACCESS_KEY \ --aws-endpoint ENDPOINT \ --aws-allow-http ``` +{{% /show-in %}} +{{% show-in "core" %}} +```bash +# S3 object store (default is the us-east-1 region) +# Specify the object store type and associated options +influxdb3 serve \ + --node-id host01 \ + --object-store s3 \ + --bucket OBJECT_STORE_BUCKET \ + --aws-access-key AWS_ACCESS_KEY_ID \ + --aws-secret-access-key AWS_SECRET_ACCESS_KEY +``` -For more information about server options, use the CLI help: +```bash +# Minio or other open source object store +# (using the AWS S3 API with additional parameters) +# Specify the object store type and associated options +influxdb3 serve \ + --node-id host01 \ + --object-store s3 \ + --bucket OBJECT_STORE_BUCKET \ + --aws-access-key-id AWS_ACCESS_KEY_ID \ + --aws-secret-access-key AWS_SECRET_ACCESS_KEY \ + --aws-endpoint ENDPOINT \ + --aws-allow-http +``` +{{% /show-in %}} + +#### Memory object store + +Store data in RAM without persisting it on shutdown. +It's useful for rapid testing and development. 
+ +{{% show-in "enterprise" %}} +```bash +# Memory object store +# Stores data in RAM; doesn't persist data +influxdb3 serve \ +--node-id host01 \ +--cluster-id cluster01 \ +--object-store memory +``` +{{% /show-in %}} +{{% show-in "core" %}} +```bash +# Memory object store +# Stores data in RAM; doesn't persist data +influxdb3 serve \ +--node-id host01 \ +--object-store memory +``` +{{% /show-in %}} + +For more information about server options, use the CLI help or view the [InfluxDB 3 CLI reference](/influxdb3/version/reference/cli/influxdb3/serve/): ```bash influxdb3 serve --help ``` +> [!Tip] +> #### Run the InfluxDB 3 Explorer query interface (beta) +> +> InfluxDB 3 Explorer (currently in beta) is the web-based query and +> administrative interface for InfluxDB 3. +> It provides visual management of databases and tokens and an easy way to query your time series data. +> +> For more information, see the [InfluxDB 3 Explorer documentation](/influxdb3/explorer/). + +{{% show-in "enterprise" %}} #### Licensing -When first starting a new instance, InfluxDB prompts you to select a license type. +When first starting a new instance, {{% product-name %}} prompts you to select a license type. InfluxDB 3 Enterprise licenses authorize the use of the InfluxDB 3 Enterprise software and apply to a single cluster. Licenses are primarily based on the number of CPUs InfluxDB can use, but there are other limitations depending on the license type. The following InfluxDB 3 Enterprise license types are available: @@ -245,141 +384,127 @@ InfluxDB 3 Enterprise licenses authorize the use of the InfluxDB 3 Enterprise so - **Commercial**: Commercial license with full access to InfluxDB 3 Enterprise capabilities. You can learn more on managing your InfluxDB 3 Enterprise license on the [Manage your license](https://docs.influxdata.com/influxdb3/enterprise/admin/license/)page. 
+{{% /show-in %}} ### Authentication and authorization -After you have [started the server](#start-influxdb), you can create and manage tokens using the `influxdb3` CLI or the HTTP API. -{{% product-name %}} uses token-based authentication and authorization which is enabled by default when you start the server. -With authentication enabled, you must provide a token to access server actions. +{{% product-name %}} uses token-based authentication and authorization, which is enabled by default when you start the server. + +With authentication enabled, you must provide a token with `influxdb3` CLI commands and HTTP API requests. + +{{% show-in "enterprise" %}} {{% product-name %}} supports the following types of tokens: - **admin token**: Grants access to all CLI actions and API endpoints. A server can have one admin token. -- **resource tokens**: Fine-grained permissions tokens that grant read and write access to specific resources (databases and system information endpoints) on the server. +- **resource tokens**: Tokens that grant read and write access to specific resources (databases and system information endpoints) on the server. - A database token grants access to write and query data in a database - A system token grants read access to system information endpoints and metrics for the server +{{% /show-in %}} +{{% show-in "core" %}} +{{% product-name %}} supports _admin_ tokens, which grant access to all CLI actions and API endpoints. +{{% /show-in %}} -InfluxDB 3 supports the `*` resource name wildcard to grant permissions to all -resources of a specific type. -You can create multiple resource tokens for different resources. +For more information about tokens and authorization, see [Manage tokens](/influxdb3/version/admin/tokens/). -When you create a token, InfluxDB 3 returns a token string in plain text -that you use to authenticate CLI commands and API requests. 
+#### Create an operator token -To have the `influxdb3` CLI use your admin token automatically, assign it to the -`INFLUXDB3_AUTH_TOKEN` environment variable. +After you start the server, create your first admin token. +The first admin token you create is the _operator_ token for the server. + +Use the `influxdb3` CLI or the HTTP API to create your operator token. > [!Important] -> #### Securely store your token -> -> InfluxDB lets you view the token string only when you create the token. -> Store your token in a secure location, as you cannot retrieve it from the database later. -> InfluxDB 3 stores only the token's hash and metadata in the catalog. +> **Store your token securely** +> +> InfluxDB displays the token string only when you create it. +> Store your token securely—you cannot retrieve it from the database later. -#### Create an admin token - -To create an admin token, use the `influxdb3 create token --admin` subcommand--for example: +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[CLI](#) +[Docker](#) +{{% /code-tabs %}} +{{% code-tab-content %}} ```bash -influxdb3 create token --admin \ - --host http://{{< influxdb/host >}} +influxdb3 create token --admin ``` + +{{% /code-tab-content %}} +{{% code-tab-content %}} + +{{% code-placeholders "CONTAINER_NAME" %}} ```bash -# With Docker -- In a new terminal, run: +# With Docker — in a new terminal: docker exec -it CONTAINER_NAME influxdb3 create token --admin ``` - -The command returns a token string that you can use to authenticate CLI commands and API requests. - -After you have created an admin token, you can use it to create database tokens and system tokens. - -For more information, see how to [Manage admin tokens](/influxdb3/version/admin/tokens/admin/). 
- -#### Create a database token - -To create a database token, use the `influxdb3 create token` subcommand and pass the following: - -- `--permission`: Create a token with fine-grained permissions -- `--name`: A unique name for the token -- _Options_, for example: - - `--expiry` option with the token expiration time as a [duration](/influxdb3/enterprise/reference/glossary/#duration). - If an expiration isn't set, the token does not expire until revoked. - - `--token` option with the admin token to use for authentication -- Token permissions as a string literal in the `RESOURCE_TYPE:RESOURCE_NAMES:ACTIONS` format--for example: - - `"db:mydb:read,write"` - - `db:`: The `db` resource type, which specifies the token is for a database - - `mydb`: The name of the database to grant permissions to. This part supports the `*` wildcard, which grants permissions to all databases. - - `read,write`: A comma-separated list of permissions to grant to the token. - -The following example shows how to create a database token that expires in 90 days and has read and write permissions for all databases on the server: - -{{% code-placeholders "ADMIN_TOKEN" %}} -```bash -influxdb3 create token \ - --permission \ - --expiry 90d \ - --token ADMIN_TOKEN \ - --host http://{{< influxdb/host >}} \ - --name "rw all databases" \ - "db:*:read,write" -``` {{% /code-placeholders %}} -In your command, replace {{% code-placeholder-key %}} `ADMIN_TOKEN`{{% /code-placeholder-key %}} -with the admin token you created earlier. +Replace {{% code-placeholder-key %}}`CONTAINER_NAME`{{% /code-placeholder-key %}} with the name of your running Docker container. -#### Create a system token +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} -A _system token_ grants read access to system information and metrics for the server, including the following HTTP API endpoints: +The command returns a token string for authenticating CLI commands and API requests. 
+Store your token securely—you cannot retrieve it from the database later. -- `/health` -- `/metrics` -- `/ping` +#### Set your token for authentication -To create a system token, use the `influxdb3 create token` subcommand and pass the following: -- `--permission`: Create a token with fine-grained permissions -- `--name`: A unique name for the token -- _Options_, for example: - - `--expiry` option with the token expiration time as a [duration](/influxdb3/enterprise/reference/glossary/#duration). - If an expiration isn't set, the token does not expire until revoked. - - `--token` option with the admin token to use for authentication - - `--host` option with the server host -- Token permissions as a string literal in the `RESOURCE_TYPE:RESOURCE_NAMES:ACTIONS` format--for example: - - `"system:health:read"` or `"system:*:read"` - - `system:`: The `system` resource type, which specifies the token is for a database. - - `health`: The list of system resources (endpoints) to grant permissions to. - This part supports the `*` wildcard, which grants permissions to all endpoints. - - `read`: The list of permissions to grant. _Only `read` is supported for system resources._ +Use your operator token to authenticate server actions in {{% product-name %}}, +such as creating additional tokens, performing administrative tasks, and writing and querying data. -The following example shows how to create a system token that expires in 1 year and has read permissions for all system endpoints on the server: +Use one of the following methods to provide your token and authenticate `influxdb3` CLI commands. +In your command, replace {{% code-placeholder-key %}}`YOUR_AUTH_TOKEN`{{% /code-placeholder-key %}} with your token string (for example, the [operator token](#create-an-operator-token) from the previous step). 
+ +{{< tabs-wrapper >}} +{{% tabs %}} +[Environment variable (recommended)](#) +[Command option](#) +{{% /tabs %}} +{{% tab-content %}} + +Set the `INFLUXDB3_AUTH_TOKEN` environment variable to have the CLI use your token automatically: + +{{% code-placeholders "YOUR_AUTH_TOKEN" %}} ```bash -influxdb3 create token \ - --permission \ - --expiry 1y \ - --token ADMIN_TOKEN \ - --host http://{{< influxdb/host >}} \ - --name "all system endpoints" \ - "system:*:read" +export INFLUXDB3_AUTH_TOKEN=YOUR_AUTH_TOKEN ``` +{{% /code-placeholders %}} -For more information, see how to [Manage resource tokens](/influxdb3/version/admin/tokens/resource/). +{{% /tab-content %}} +{{% tab-content %}} -#### Use tokens to authorize CLI commands and API requests +Include the `--token` option with CLI commands: -- To authenticate `influxdb3` CLI commands, use the `--token` option or assign your - token to the `INFLUXDB3_AUTH_TOKEN` environment variable for `influxdb3` to use it automatically. -- To authenticate HTTP API requests, include `Bearer ` in the `Authorization` header value--for example: - - ```bash - curl "http://{{< influxdb/host >}}/health" \ - --header "Authorization: Bearer SYSTEM_TOKEN" - ``` +{{% code-placeholders "YOUR_AUTH_TOKEN" %}} +```bash +influxdb3 show databases --token YOUR_AUTH_TOKEN +``` +{{% /code-placeholders %}} - In your request, replace - {{% code-placeholder-key %}}`SYSTEM_TOKEN`{{% /code-placeholder-key %}} with the system token you created earlier. 
+{{% /tab-content %}} +{{< /tabs-wrapper >}} + +For HTTP API requests, include your token in the `Authorization` header--for example: + +{{% code-placeholders "AUTH_TOKEN" %}} +```bash +curl "http://{{< influxdb/host >}}/api/v3/configure/database" \ + --header "Authorization: Bearer AUTH_TOKEN" +``` +{{% /code-placeholders %}} + +#### Learn more about tokens and permissions + +- [Manage admin tokens](/influxdb3/version/admin/tokens/admin/) - Understand and manage operator and named admin tokens +{{% show-in "enterprise" %}} +- [Manage resource tokens](/influxdb3/version/admin/tokens/resource/) - Create, list, and delete resource tokens +{{% /show-in %}} +- [Authentication](/influxdb3/version/reference/internals/authentication/) - Understand authentication, authorizations, and permissions in {{% product-name %}} ### Data model @@ -397,17 +522,18 @@ This tutorial covers many of the recommended tools. | Tool | Administration | Write | Query | | :------------------------------------------------------------------------------------------------ | :----------------------: | :----------------------: | :----------------------: | -| [Chronograf](/chronograf/v1/) | - | - | **{{< icon "check" >}}** | -| `influx` CLI | - | - | - | -| [`influxdb3` CLI](#influxdb3-cli){{< req text="\* " color="magenta" >}} | **{{< icon "check" >}}** | **{{< icon "check" >}}** | **{{< icon "check" >}}** | -| `influxctl` CLI | - | - | - | -| [InfluxDB HTTP API](#influxdb-http-api){{< req text="\* " color="magenta" >}} | **{{< icon "check" >}}** | **{{< icon "check" >}}** | **{{< icon "check" >}}** | -| InfluxDB user interface | - | - | - | +| **`influxdb3` CLI** {{< req text="\* " color="magenta" >}} | **{{< icon "check" >}}** | **{{< icon "check" >}}** | **{{< icon "check" >}}** | +| **InfluxDB HTTP API** {{< req text="\* " color="magenta" >}} | **{{< icon "check" >}}** | **{{< icon "check" >}}** | **{{< icon "check" >}}** | +| **InfluxDB 3 Explorer** {{< req text="\* " color="magenta" >}} | **{{< 
icon "check" >}}** | - | **{{< icon "check" >}}** | | [InfluxDB 3 client libraries](/influxdb3/version/reference/client-libraries/v3/) | - | **{{< icon "check" >}}** | **{{< icon "check" >}}** | | [InfluxDB v2 client libraries](/influxdb3/version/reference/client-libraries/v2/) | - | **{{< icon "check" >}}** | - | | [InfluxDB v1 client libraries](/influxdb3/version/reference/client-libraries/v1/) | - | **{{< icon "check" >}}** | **{{< icon "check" >}}** | -| [InfluxDB 3 Processing engine](#python-plugins-and-the-processing-engine){{< req text="\* " color="magenta" >}} | | **{{< icon "check" >}}** | **{{< icon "check" >}}** | +| [InfluxDB 3 processing engine](#python-plugins-and-the-processing-engine){{< req text="\* " color="magenta" >}} | | **{{< icon "check" >}}** | **{{< icon "check" >}}** | | [Telegraf](/telegraf/v1/) | - | **{{< icon "check" >}}** | - | +| [Chronograf](/chronograf/v1/) | - | - | - | +| `influx` CLI | - | - | - | +| `influxctl` CLI | - | - | - | +| InfluxDB v2.x user interface | - | - | - | | **Third-party tools** | | | | | Flight SQL clients | - | - | **{{< icon "check" >}}** | | [Grafana](/influxdb3/version/visualize-data/grafana/) | - | - | **{{< icon "check" >}}** | @@ -422,6 +548,15 @@ InfluxDB is a schema-on-write database. You can start writing data and InfluxDB After a schema is created, InfluxDB validates future write requests against it before accepting the data. Subsequent requests can add new fields on-the-fly, but can't add new tags. +{{% show-in "core" %}} +> [!Note] +> #### Core is optimized for recent data +> +> {{% product-name %}} is optimized for recent data but accepts writes from any time period. +> The system persists data to Parquet files for historical analysis with [InfluxDB 3 Enterprise](/influxdb3/enterprise/get-started/) or third-party tools. +> For extended historical queries and optimized data organization, consider using [InfluxDB 3 Enterprise](/influxdb3/enterprise/get-started/). 
+{{% /show-in %}} + #### Write data in line protocol syntax {{% product-name %}} accepts data in [line protocol](/influxdb3/version/reference/syntax/line-protocol/) syntax. @@ -462,24 +597,18 @@ Use the `influxdb3 write` command to write data to a database. In the code samples, replace the following placeholders with your values: -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: The name of the [database](/influxdb3/version/admin/databases/) to write to. -{{% show-in "core" %}} -- {{% code-placeholder-key %}}`TOKEN`{{% /code-placeholder-key %}}: A [token](/influxdb3/version/admin/tokens/) for your {{% product-name %}} server. -{{% /show-in %}} -{{% show-in "enterprise" %}} -- {{% code-placeholder-key %}}`TOKEN`{{% /code-placeholder-key %}}: A [token](/influxdb3/version/admin/tokens/) - with permission to write to the specified database. -{{% /show-in %}} +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the [database](/influxdb3/version/admin/databases/) to write to. 
+- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to write to the specified database{{% /show-in %}} ##### Write data via stdin Pass data as quoted line protocol via standard input (stdin)--for example: -{{% code-placeholders "DATABASE_NAME|TOKEN" %}} +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} ```bash influxdb3 write \ --database DATABASE_NAME \ - --token TOKEN \ + --token AUTH_TOKEN \ --precision ns \ --accept-partial \ 'cpu,host=Alpha,region=us-west,application=webserver val=1i,usage_percent=20.5,status="OK" @@ -497,17 +626,21 @@ Pass the `--file` option to write line protocol you have saved to a file--for ex [sample line protocol](#write-data-in-line-protocol-syntax) to a file named `server_data` and then enter the following command: -{{% code-placeholders "DATABASE_NAME|TOKEN" %}} +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} ```bash influxdb3 write \ --database DATABASE_NAME \ - --token TOKEN \ + --token AUTH_TOKEN \ --precision ns \ --accept-partial \ - --file server_data + --file path/to/server_data ``` {{% /code-placeholders %}} +Replace the following placeholders with your values: +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the [database](/influxdb3/version/admin/databases/) to write to. +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to write to the specified database{{% /show-in %}} + ### Write data using the HTTP API {{% product-name %}} provides three write API endpoints that respond to HTTP `POST` requests. @@ -539,7 +672,7 @@ and supports the following parameters: - `?precision=`: Specify the precision of the timestamp. The default is nanosecond precision. - request body: The line protocol data to write. 
-For more information about the parameters, see [Write data](/influxdb3/core/write-data/). +For more information about the parameters, see [Write data](/influxdb3/version/write-data/). ##### Example: write data using the /api/v3 HTTP API @@ -552,6 +685,7 @@ With `accept_partial=true` (default): ```bash curl -v "http://{{< influxdb/host >}}/api/v3/write_lp?db=sensors&precision=auto" \ + --header 'Authorization: Bearer apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0==' \ --data-raw 'home,room=Sunroom temp=96 home,room=Sunroom temp="hi"' ``` @@ -582,6 +716,7 @@ With `accept_partial=false`: ```bash curl -v "http://{{< influxdb/host >}}/api/v3/write_lp?db=sensors&precision=auto&accept_partial=false" \ + --header 'Authorization: Bearer apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0==' \ --data-raw 'home,room=Sunroom temp=96 home,room=Sunroom temp="hi"' ``` @@ -635,17 +770,17 @@ The `/write` InfluxDB v1 compatibility endpoint provides backwards compatibility #### Write responses -By default, InfluxDB acknowledges writes after flushing the WAL file to the Object store (occurring every second). +By default, InfluxDB acknowledges writes after flushing the WAL file to the object store (occurring every second). For high write throughput, you can send multiple concurrent write requests. #### Use no_sync for immediate write responses To reduce the latency of writes, use the `no_sync` write option, which acknowledges writes _before_ WAL persistence completes. -When `no_sync=true`, InfluxDB validates the data, writes the data to the WAL, and then immediately responds to the client, without waiting for persistence to the Object store. +When `no_sync=true`, InfluxDB validates the data, writes the data to the WAL, and then immediately responds to the client, without waiting for persistence to the object store. Using `no_sync=true` is best when prioritizing high-throughput writes over absolute durability. 
-- Default behavior (`no_sync=false`): Waits for data to be written to the Object store before acknowledging the write. Reduces the risk of data loss, but increases the latency of the response. +- Default behavior (`no_sync=false`): Waits for data to be written to the object store before acknowledging the write. Reduces the risk of data loss, but increases the latency of the response. - With `no_sync=true`: Reduces write latency, but increases the risk of data loss in case of a crash before WAL persistence. ##### Immediate write using the HTTP API @@ -654,30 +789,27 @@ The `no_sync` parameter controls when writes are acknowledged--for example: ```bash curl "http://{{< influxdb/host >}}/api/v3/write_lp?db=sensors&precision=auto&no_sync=true" \ + --header 'Authorization: Bearer apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0==' \ --data-raw "home,room=Sunroom temp=96" ``` -##### Immediate write using the influxdb3 CLI - -The `no_sync` CLI option controls when writes are acknowledged--for example: - -```bash -influxdb3 write \ - --bucket mydb \ - --org my_org \ - --token my-token \ - --no-sync -``` - ### Create a database or table To create a database without writing data, use the `create` subcommand--for example: +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} ```bash -influxdb3 create database mydb +influxdb3 create database DATABASE_NAME \ + --token AUTH_TOKEN ``` +{{% /code-placeholders %}} -To learn more about a subcommand, use the `-h, --help` flag: +Replace the following placeholders with your values: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to create +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: the {{% token-link "admin" %}} for your {{% product-name %}} server + +To learn more about a subcommand, use the `-h, --help` flag or view the [InfluxDB 3 CLI reference](/influxdb3/version/reference/cli/influxdb3/create): ```bash influxdb3 create -h @@ -685,7 +817,15 @@ influxdb3 create 
 -h ### Query data -InfluxDB 3 now supports native SQL for querying, in addition to InfluxQL, an SQL-like language customized for time series queries. +InfluxDB 3 supports native SQL for querying, in addition to InfluxQL, an +SQL-like language customized for time series queries. + +{{% show-in "core" %}} +{{< product-name >}} limits +query time ranges to 72 hours (both recent and historical) to ensure query performance. +For more information about the 72-hour limitation, see the +[update on InfluxDB 3 Core’s 72-hour limitation](https://www.influxdata.com/blog/influxdb3-open-source-public-alpha-jan-27/). +{{% /show-in %}} > [!Note] > Flux, the language introduced in InfluxDB 2.0, is **not** supported in InfluxDB 3. @@ -745,12 +885,20 @@ $ influxdb3 query --database servers "SELECT DISTINCT usage_percent, time FROM c To query using InfluxQL, enter the `influxdb3 query` subcommand and specify `influxql` in the language option--for example: +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} ```bash influxdb3 query \ - --database servers \ + --database DATABASE_NAME \ + --token AUTH_TOKEN \ --language influxql \ "SELECT DISTINCT usage_percent FROM cpu WHERE time >= now() - 1d" ``` +{{% /code-placeholders %}} + +Replace the following placeholders with your values: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}} ### Query using the API @@ -766,18 +914,35 @@ Use the `format` parameter to specify the response format: `pretty`, `jsonl`, `p The following example sends an HTTP `GET` request with a URL-encoded SQL query: +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} ```bash -curl -v "http://{{< influxdb/host 
>}}/api/v3/query_sql" \ + --header 'Authorization: Bearer AUTH_TOKEN' \ + --data-urlencode "db=DATABASE_NAME" \ + --data-urlencode "q=select * from cpu limit 5" ``` +{{% /code-placeholders %}} + +Replace the following placeholders with your values: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}} ##### Example: Query passing JSON parameters The following example sends an HTTP `POST` request with parameters in a JSON payload: +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} ```bash curl http://{{< influxdb/host >}}/api/v3/query_sql \ - --data '{"db": "server", "q": "select * from cpu limit 5"}' + --data '{"db": "DATABASE_NAME", "q": "select * from cpu limit 5"}' ``` +{{% /code-placeholders %}} + +Replace the following placeholders with your values: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}} ### Query using the Python client @@ -792,24 +957,35 @@ pip install influxdb3-python From here, you can connect to your database with the client library using just the **host** and **database name: +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} ```python from influxdb_client_3 import InfluxDBClient3 client = InfluxDBClient3( + token='AUTH_TOKEN', host='http://{{< influxdb/host >}}', - database='servers' + database='DATABASE_NAME' ) ``` +{{% /code-placeholders %}} + +Replace the following placeholders with your values: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query +- {{% 
code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}} The following example shows how to query using SQL, and then -use PyArrow to explore the schema and process results: +use PyArrow to explore the schema and process results. +To authorize the query, the example retrieves the {{% token-link "database" %}} +from the `INFLUXDB3_AUTH_TOKEN` environment variable. ```python from influxdb_client_3 import InfluxDBClient3 +import os client = InfluxDBClient3( + token=os.environ.get('INFLUXDB3_AUTH_TOKEN'), host='http://{{< influxdb/host >}}', - database='servers' ) @@ -834,38 +1010,35 @@ print(table.group_by('cpu').aggregate([('time_system', 'mean')])) For more information about the Python client library, see the [`influxdb3-python` repository](https://github.com/InfluxCommunity/influxdb3-python) in GitHub. - ### Query using InfluxDB 3 Explorer (Beta) -You can use the InfluxDB 3 Explorer query interface by downloading the Docker image. - -```bash -docker pull quay.io/influxdb/influxdb3-explorer:latest -``` - -Run the interface using: - -```bash -docker run --name influxdb3-explorer -p 8086:8888 quay.io/influxdb/influxdb3-explorer:latest -``` - -With the default settings above, you can access the UI at http://localhost:8086. -Set your expected database connection details on the Settings page. -From there, you can query data, browser your database schema, and do basic -visualization of your time series data. - +You can use the InfluxDB 3 Explorer web-based interface to query and visualize data, +and administer your {{% product-name %}} instance. +For more information, see how to [install InfluxDB 3 Explorer (Beta)](/influxdb3/explorer/install/) using Docker +and get started querying your data. 
### Last values cache {{% product-name %}} supports a **last-n values cache** which stores the last N values in a series or column hierarchy in memory. This gives the database the ability to answer these kinds of queries in under 10 milliseconds. -You can use the `influxdb3` CLI to create a last value cache. +You can use the `influxdb3` CLI to [create a last value cache](/influxdb3/version/reference/cli/influxdb3/create/last_cache/). + +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TABLE_NAME|CACHE_NAME" %}} ```bash influxdb3 create last_cache \ - -d \ - -t
 \ - [CACHE_NAME] + --token AUTH_TOKEN \ + --database DATABASE_NAME \ + --table TABLE_NAME \ + CACHE_NAME ``` +{{% /code-placeholders %}} + +Replace the following placeholders with your values: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to create the last values cache in +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}} +- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name of the table to create the last values cache in +- {{% code-placeholder-key %}}`CACHE_NAME`{{% /code-placeholder-key %}}: Optionally, a name for the new cache Consider the following `cpu` sample table: @@ -881,6 +1054,7 @@ The following command creates a last value cache named `cpuCache`: ```bash influxdb3 create last_cache \ + --token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \ --database servers \ --table cpu \ --key-columns host,application \ @@ -892,10 +1066,11 @@ _You can create a last values cache per time series, but be mindful of high card #### Query a last values cache -To use the LVC, call it using the `last_cache()` function in your query--for example: +To query data from the LVC, use the [`last_cache()`](/influxdb3/version/reference/sql/functions/cache/#last_cache) function in your query--for example: ```bash influxdb3 query \ + --token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \ --database servers \ "SELECT * FROM last_cache('cpu', 'cpuCache') WHERE host = 'Bravo';" ``` @@ -903,32 +1078,53 @@ influxdb3 query \ > [!Note] > #### Only works with SQL > -> The Last values cache only works with SQL, not InfluxQL; SQL is the default language. +> The last values cache only works with SQL, not InfluxQL; SQL is the default language. 
-#### Delete a Last values cache +#### Delete a last values cache Use the `influxdb3` CLI to [delete a last values cache](/influxdb3/version/reference/cli/influxdb3/delete/last_cache/) +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TABLE_NAME|CACHE_NAME" %}} ```bash influxdb3 delete last_cache \ - --database \ - --table
 \ - --cache-name + --token AUTH_TOKEN \ + --database DATABASE_NAME \ + --table TABLE_NAME \ + --cache-name CACHE_NAME ``` +{{% /code-placeholders %}} + +Replace the following placeholders with your values: + +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}} +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to delete the last values cache from +- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name of the table to delete the last values cache from +- {{% code-placeholder-key %}}`CACHE_NAME`{{% /code-placeholder-key %}}: the name of the last values cache to delete ### Distinct values cache -Similar to the Last values cache, the database can cache in RAM the distinct values for a single column in a table or a hierarchy of columns. This is useful for fast metadata lookups, which can return in under 30 milliseconds. Many of the options are similar to the last value cache. +Similar to the [last values cache](#last-values-cache), the database can cache in RAM the distinct values for a single column in a table or a hierarchy of columns. +This is useful for fast metadata lookups, which can return in under 30 milliseconds. +Many of the options are similar to the last value cache. You can use the `influxdb3` CLI to [create a distinct values cache](/influxdb3/version/reference/cli/influxdb3/create/distinct_cache/). +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TABLE_NAME|CACHE_NAME" %}} ```bash influxdb3 create distinct_cache \ - --database \ - --table
 \ - --columns \ - [CACHE_NAME] + --token AUTH_TOKEN \ + --database DATABASE_NAME \ + --table TABLE_NAME \ + --columns COLUMNS \ + CACHE_NAME ``` +{{% /code-placeholders %}} +Replace the following placeholders with your values: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to create the distinct values cache in +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}} +- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name of the table to create the distinct values cache in +- {{% code-placeholder-key %}}`CACHE_NAME`{{% /code-placeholder-key %}}: Optionally, a name for the new cache Consider the following `cpu` sample table: @@ -944,6 +1140,7 @@ The following command creates a distinct values cache named `cpuDistinctCache`: ```bash influxdb3 create distinct_cache \ + --token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \ --database servers \ --table cpu \ --columns host,application \ @@ -952,10 +1149,11 @@ influxdb3 create distinct_cache \ #### Query a distinct values cache -To use the distinct values cache, call it using the `distinct_cache()` function in your query--for example: +To query data from the distinct values cache, use the [`distinct_cache()`](/influxdb3/version/reference/sql/functions/cache/#distinct_cache) function in your query--for example: ```bash influxdb3 query \ + --token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \ --database servers \ "SELECT * FROM distinct_cache('cpu', 'cpuDistinctCache')" ``` @@ -969,19 +1167,28 @@ influxdb3 query \ Use the `influxdb3` CLI to [delete a distinct values cache](/influxdb3/version/reference/cli/influxdb3/delete/distinct_cache/) +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TABLE_NAME|CACHE_NAME" %}} ```bash influxdb3 delete distinct_cache \ - --database \ - --table
 \ - --cache-name + --token AUTH_TOKEN \ + --database DATABASE_NAME \ + --table TABLE_NAME \ + --cache-name CACHE_NAME ``` +{{% /code-placeholders %}} -### Python plugins and the Processing engine +Replace the following placeholders with your values: +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}} +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to delete the distinct values cache from +- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name of the table to delete the distinct values cache from +- {{% code-placeholder-key %}}`CACHE_NAME`{{% /code-placeholder-key %}}: the name of the distinct values cache to delete -The InfluxDB 3 Processing engine is an embedded Python VM for running code inside the database to process and transform data. +### Python plugins and the processing engine -To activate the Processing engine, pass the `--plugin-dir ` option when starting the {{% product-name %}} server. -`PLUGIN_DIR` is your filesystem location for storing [plugin](#plugin) files for the Processing engine to run. +The InfluxDB 3 processing engine is an embedded Python VM for running code inside the database to process and transform data. + +To activate the processing engine, pass the `--plugin-dir ` option when starting the {{% product-name %}} server. +`PLUGIN_DIR` is your filesystem location for storing [plugin](#plugin) files for the processing engine to run. #### Plugin @@ -998,7 +1205,7 @@ InfluxDB 3 provides the following types of triggers, each with specific trigger- - **On WAL flush**: Sends a batch of written data (for a specific table or all tables) to a plugin (by default, every second). - **On Schedule**: Executes a plugin on a user-configured schedule (using a crontab or a duration); useful for data collection and deadman monitoring. -- **On Request**: Binds a plugin to a custom HTTP API endpoint at `/api/v3/engine/`. 
+- **On Request**: Binds a plugin to a custom HTTP API endpoint at `/api/v3/engine/`. The plugin receives the HTTP request headers and content, and can then parse, process, and send the data into the database or to third-party services. ### Test, create, and trigger plugin code @@ -1089,19 +1296,30 @@ To test a plugin, do the following: 1. Create a _plugin directory_--for example, `/path/to/.influxdb/plugins` 2. [Start the InfluxDB server](#start-influxdb) and include the `--plugin-dir ` option. -3. Save the [example plugin code](#example-python-plugin-for-wal-flush) to a plugin file inside of the plugin directory. If you haven't yet written data to the table in the example, comment out the lines where it queries. +3. Save the [example plugin code](#example-python-plugin-for-wal-rows) to a plugin file inside of the plugin directory. If you haven't yet written data to the table in the example, comment out the lines where it queries. 4. To run the test, enter the following command with the following options: - `--lp` or `--file`: The line protocol to test - Optional: `--input-arguments`: A comma-delimited list of `=` arguments for your plugin code - ```bash - influxdb3 test wal_plugin \ - --lp \ - --input-arguments "arg1=foo,arg2=bar" \ - --database \ - - ``` +{{% code-placeholders "INPUT_LINE_PROTOCOL|INPUT_ARGS|DATABASE_NAME|AUTH_TOKEN|PLUGIN_FILENAME" %}} +```bash +influxdb3 test wal_plugin \ +--lp INPUT_LINE_PROTOCOL \ +--input-arguments INPUT_ARGS \ +--database DATABASE_NAME \ +--token AUTH_TOKEN \ +PLUGIN_FILENAME +``` +{{% /code-placeholders %}} + +Replace the following placeholders with your values: + +- {{% code-placeholder-key %}}`INPUT_LINE_PROTOCOL`{{% /code-placeholder-key %}}: the line protocol to test +- Optional: {{% code-placeholder-key %}}`INPUT_ARGS`{{% /code-placeholder-key %}}: a comma-delimited list of `=` arguments for your plugin code--for example, `arg1=hello,arg2=world` +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key 
%}}: the name of the database to test against +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: the {{% token-link "admin" %}} for your {{% product-name %}} server +- {{% code-placeholder-key %}}`PLUGIN_FILENAME`{{% /code-placeholder-key %}}: the name of the plugin file to test The command runs the plugin code with the test data, yields the data to the plugin code, and then responds with the plugin result. You can quickly see how the plugin behaves, what data it would have written to the database, and any errors. @@ -1126,7 +1344,8 @@ trigger: # Test a plugin influxdb3 test wal_plugin \ --lp "my_measure,tag1=asdf f1=1.0 123" \ - --database mydb \ + --token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \ + --database sensors \ --input-arguments "arg1=hello,arg2=world" \ test.py ``` @@ -1134,7 +1353,8 @@ influxdb3 test wal_plugin \ ```bash # Create a trigger that runs the plugin influxdb3 create trigger \ - -d mydb \ + --token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \ + --database sensors \ --plugin test_plugin \ --trigger-spec "table:foo" \ --trigger-arguments "arg1=hello,arg2=world" \ @@ -1144,12 +1364,33 @@ influxdb3 create trigger \ After you have created a plugin and trigger, enter the following command to enable the trigger and have it run the plugin as you write data: +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TRIGGER_NAME" %}} ```bash -influxdb3 enable trigger --database mydb trigger1 +influxdb3 enable trigger \ + --token AUTH_TOKEN \ + --database DATABASE_NAME \ + TRIGGER_NAME +``` +{{% /code-placeholders %}} + +Replace the following placeholders with your values: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to enable the trigger in +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}} +- {{% code-placeholder-key %}}`TRIGGER_NAME`{{% /code-placeholder-key %}}: the name of the trigger to enable + +For example, to enable the trigger 
named `trigger1` in the `sensors` database: + +```bash +influxdb3 enable trigger \ + --token apiv3_0xxx0o0XxXxx00Xxxx000xXXxoo0== \ + --database sensors \ + trigger1 ``` For more information, see [Python plugins and the Processing engine](/influxdb3/version/plugins/). +{{% show-in "enterprise" %}} ### Multi-server setup {{% product-name %}} is built to support multi-node setups for high availability, read replicas, and flexible implementations depending on use case. @@ -1196,6 +1437,7 @@ influxdb3 serve \ --http-bind {{< influxdb/host >}} \ --aws-access-key-id \ --aws-secret-access-key +``` ```bash ## NODE 2 @@ -1212,8 +1454,8 @@ influxdb3 serve \ --object-store s3 \ --bucket influxdb-3-enterprise-storage \ --http-bind localhost:8282 \ - --aws-access-key-id \ - --aws-secret-access-key + --aws-access-key-id AWS_ACCESS_KEY_ID \ + --aws-secret-access-key AWS_SECRET_ACCESS_KEY ``` After the nodes have started, querying either node returns data for both nodes, and _NODE 1_ runs compaction. @@ -1311,7 +1553,7 @@ For a robust and effective setup for managing time-series data, you can run inge --mode ingest \ --object-store s3 \ --bucket influxdb-3-enterprise-storage \ - -- http-bind {{< influxdb/host >}} \ + --http-bind {{< influxdb/host >}} \ --aws-access-key-id \ --aws-secret-access-key ``` @@ -1373,7 +1615,7 @@ For a robust and effective setup for managing time-series data, you can run inge --mode query \ --object-store s3 \ --bucket influxdb-3-enterprise-storage \ - -- http-bind localhost:8383 \ + --http-bind localhost:8383 \ --aws-access-key-id \ --aws-secret-access-key ``` @@ -1392,7 +1634,7 @@ For a robust and effective setup for managing time-series data, you can run inge --mode query \ --object-store s3 \ --bucket influxdb-3-enterprise-storage \ - -- http-bind localhost:8484 \ + --http-bind localhost:8484 \ --aws-access-key-id \ ``` @@ -1413,28 +1655,43 @@ You can use the default port `8181` for any write or query, without changing any > > When running multiple 
local instances for testing or separate nodes in production, specifying the host ensures writes and queries are routed to the correct instance. +{{% code-placeholders "(http://localhost:8585)|AUTH_TOKEN|DATABASE_NAME|QUERY" %}} ```bash -# Example variables on a query +# Example querying a specific host # HTTP-bound Port: 8585 -influxdb3 query http://localhost:8585 --database "" +influxdb3 query \ + --host http://localhost:8585 \ + --token AUTH_TOKEN \ + --database DATABASE_NAME "QUERY" ``` +{{% /code-placeholders %}} + +Replace the following placeholders with your values: + +- {{% code-placeholder-key %}}`http://localhost:8585`{{% /code-placeholder-key %}}: the host and port of the node to query +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}} +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query +- {{% code-placeholder-key %}}`QUERY`{{% /code-placeholder-key %}}: the SQL or InfluxQL query to run against the database ### File index settings To accelerate performance on specific queries, you can define non-primary keys to index on, which helps improve performance for single-series queries. -This feature is only available in Enterprise and is not available in Core. +This feature is only available in {{% product-name %}} and is not available in Core. #### Create a file index +{{% code-placeholders "AUTH_TOKEN|DATABASE|TABLE|COLUMNS" %}} + ```bash # Example variables on a query # HTTP-bound Port: 8585 influxdb3 create file_index \ --host http://localhost:8585 \ - --database \ - --table
\ - + --token AUTH_TOKEN \ + --database DATABASE_NAME \ + --table TABLE_NAME \ + COLUMNS ``` #### Delete a file index @@ -1442,6 +1699,15 @@ influxdb3 create file_index \ ```bash influxdb3 delete file_index \ --host http://localhost:8585 \ - --database \ - --table
\ + --database DATABASE_NAME \ + --table TABLE_NAME ``` +{{% /code-placeholders %}} + +Replace the following placeholders with your values: + +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}} +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to create the file index in +- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name of the table to create the file index in +- {{% code-placeholder-key %}}`COLUMNS`{{% /code-placeholder-key %}}: a comma-separated list of columns to index on, for example, `host,application` +{{% /show-in %}} \ No newline at end of file diff --git a/content/shared/influxdb3-internals-reference/authentication.md b/content/shared/influxdb3-internals-reference/authentication.md index 843fc7c6b..491238dce 100644 --- a/content/shared/influxdb3-internals-reference/authentication.md +++ b/content/shared/influxdb3-internals-reference/authentication.md @@ -1,7 +1,7 @@ {{% product-name %}} uses an Attribute-Based Access Control (ABAC) model to -manage permissions. +manage permissions and supports multiple token types for different authentication scenarios. {{% show-in "enterprise" %}} This model allows for fine-grained control over access to resources and actions @@ -39,5 +39,10 @@ The ABAC model includes the following components: {{% /show-in %}} - **Resource**: The objects that can be accessed or manipulated. + Resources have attributes such as identifier and name. In {{% product-name %}}, resources include databases and system information endpoints. - Resources have attributes such as identifier and name. \ No newline at end of file + {{% show-in "enterprise" %}} + - Database tokens provide access to specific databases for actions like writing and querying data. + - System tokens provide access to system-level resources, such as API endpoints for server runtime statistics and health. 
+ Access controls for system information API endpoints help prevent information leaks and attacks (such as DoS). + {{% /show-in %}} \ No newline at end of file diff --git a/content/shared/influxdb3-write-guides/best-practices/optimize-writes.md b/content/shared/influxdb3-write-guides/best-practices/optimize-writes.md index aad783214..6abfdd5c7 100644 --- a/content/shared/influxdb3-write-guides/best-practices/optimize-writes.md +++ b/content/shared/influxdb3-write-guides/best-practices/optimize-writes.md @@ -3,7 +3,8 @@ Use these tips to optimize performance and system overhead when writing data to {{< product-name >}}. - [Batch writes](#batch-writes) -- [Sort tags by key](#sort-tags-by-key) +{{% hide-in "enterprise,core" %}}- [Sort tags by key](#sort-tags-by-key){{% /hide-in %}} +{{% show-in "enterprise,core" %}}- [On first write, sort tags by query priority](#on-first-write-sort-tags-by-query-priority){{% /show-in %}} - [Use the coarsest time precision possible](#use-the-coarsest-time-precision-possible) - [Use gzip compression](#use-gzip-compression) - [Enable gzip compression in Telegraf](#enable-gzip-compression-in-telegraf) @@ -34,6 +35,8 @@ Write data in batches to minimize network overhead when writing data to InfluxDB > The optimal batch size is 10,000 lines of line protocol or 10 MBs, whichever > threshold is met first. +{{% hide-in "enterprise,core" %}} + ## Sort tags by key Before writing data points to InfluxDB, sort tags by key in lexicographic order. @@ -49,6 +52,31 @@ measurement,tagC=therefore,tagE=am,tagA=i,tagD=i,tagB=think fieldKey=fieldValue measurement,tagA=i,tagB=think,tagC=therefore,tagD=i,tagE=am fieldKey=fieldValue 1562020262 ``` +{{% /hide-in %}} + +{{% show-in "enterprise,core" %}} + +## On first write, sort tags by query priority + +The first write to a table in {{% product-name %}} determines the physical column +order in storage, and that order has a direct impact on query performance. 
+Columns that appear earlier are typically faster to filter and access during +query execution. + +Sort your tags by query priority when performing the initial write to a table. +Place the most commonly queried tags first—those you frequently use in `WHERE` +clauses or joins—followed by less frequently queried ones. For example, if most +of your queries filter by `region` and then by `host`, structure your first +write so that `region` comes before `host`. + +> [!Important] +> Column order is determined on the first write and cannot be changed afterward. +> Tags added after the first write are added last in the column sort order. +> Plan your schema with your query workload in mind to ensure the best long-term +> performance. + +{{% /show-in %}} + ## Use the coarsest time precision possible {{< product-name >}} supports up to nanosecond timestamp precision. However, diff --git a/content/shared/influxdb3-write-guides/best-practices/schema-design.md b/content/shared/influxdb3-write-guides/best-practices/schema-design.md index d17ad27fb..5d6dc40f3 100644 --- a/content/shared/influxdb3-write-guides/best-practices/schema-design.md +++ b/content/shared/influxdb3-write-guides/best-practices/schema-design.md @@ -9,6 +9,7 @@ for simpler and more performant queries. - [Do not use duplicate names for tags and fields](#do-not-use-duplicate-names-for-tags-and-fields) - [Maximum number of columns per table](#maximum-number-of-columns-per-table) - [Design for performance](#design-for-performance) + {{% show-in "enterprise,core" %}}- [Sort tags by query priority](#sort-tags-by-query-priority){{% /show-in %}} - [Avoid wide schemas](#avoid-wide-schemas) - [Avoid sparse schemas](#avoid-sparse-schemas) - [Table schemas should be homogenous](#table-schemas-should-be-homogenous) @@ -135,11 +136,35 @@ the performance of queries against that table. 
The following guidelines help to optimize query performance: +{{% show-in "enterprise,core" %}}- [Sort tags by query priority](#sort-tags-by-query-priority){{% /show-in %}} - [Avoid wide schemas](#avoid-wide-schemas) - [Avoid sparse schemas](#avoid-sparse-schemas) - [Table schemas should be homogenous](#table-schemas-should-be-homogenous) - [Use the best data type for your data](#use-the-best-data-type-for-your-data) +{{% show-in "enterprise,core" %}} + +### Sort tags by query priority + +The first write to a table in {{% product-name %}} determines the physical column +order in storage, and that order has a direct impact on query performance. +Columns that appear earlier are typically faster to filter and access during +query execution. + +Sort your tags by query priority when performing the initial write to a table. +Place the most commonly queried tags first—those you frequently use in `WHERE` +clauses or joins—followed by less frequently queried ones. For example, if most +of your queries filter by `region` and then by `host`, structure your first +write so that `region` comes before `host`. + +> [!Important] +> Column order is determined on the first write and cannot be changed afterward. +> Tags added after the first write are added last in the column sort order. +> Plan your schema with your query workload in mind to ensure the best long-term +> performance. + +{{% /show-in %}} + ### Avoid wide schemas A wide schema refers to a schema with a large number of columns (tags and fields). 
diff --git a/content/shared/influxdb3-write-guides/troubleshoot.md b/content/shared/influxdb3-write-guides/troubleshoot.md index 1db4781c5..3d9691d2d 100644 --- a/content/shared/influxdb3-write-guides/troubleshoot.md +++ b/content/shared/influxdb3-write-guides/troubleshoot.md @@ -41,7 +41,7 @@ Write requests return the following status codes: | :-------------------------------| :--------------------------------------------------------------- | :------------- | | `204 "Success"` | | If InfluxDB ingested the data | | `400 "Bad request"` | error details about rejected points, up to 100 points: `line` contains the first rejected line, `message` describes rejections | If some or all request data isn't allowed (for example, if it is malformed or falls outside of the bucket's retention period)--the response body indicates whether a partial write has occurred or if all data has been rejected | -| `401 "Unauthorized"` | | If the `Authorization` header is missing or malformed or if the [token](/influxdb3/version/admin/tokens/) doesn't have [permission](/influxdb3/version/reference/cli/influxctl/token/create/#examples) to write to the database. See [examples using credentials](/influxdb3/version/get-started/write/#write-line-protocol-to-influxdb) in write requests. | +| `401 "Unauthorized"` | | If the `Authorization` header is missing or malformed or if the [token](/influxdb3/version/admin/tokens/) doesn't have [permission](/influxdb3/version/reference/cli/influxctl/token/create/#examples) to write to the database. See [examples using credentials](/influxdb3/version/write-data/api-client-libraries/) in write requests. | | `404 "Not found"` | requested **resource type** (for example, "organization" or "database"), and **resource name** | If a requested resource (for example, organization or database) wasn't found | | `500 "Internal server error"` | | Default status for an error | | `503` "Service unavailable" | | If the server is temporarily unavailable to accept writes. 
The `Retry-After` header describes when to try the write again. diff --git a/content/shared/influxdb3/_index.md b/content/shared/influxdb3/_index.md new file mode 100644 index 000000000..505e32a12 --- /dev/null +++ b/content/shared/influxdb3/_index.md @@ -0,0 +1,45 @@ + +{{% product-name %}} is a database built to collect, process, transform, and store event and time series data, and is ideal for use cases that require real-time ingest and fast query response times to build user interfaces, monitoring, and automation solutions. + +Common use cases include: + +- Monitoring sensor data +- Server monitoring +- Application performance monitoring +- Network monitoring +- Financial market and trading analytics +- Behavioral analytics + +InfluxDB is optimized for scenarios where near real-time data monitoring is essential and queries need to return quickly to support user experiences such as dashboards and interactive user interfaces. + +{{% show-in "enterprise" %}} +{{% product-name %}} is built on InfluxDB 3 Core, the InfluxDB 3 open source release. +{{% /show-in %}} +{{% show-in "core" %}} +{{% product-name %}} is the InfluxDB 3 open source release. 
+{{% /show-in %}} + +Core's feature highlights include: + +- Diskless architecture with object storage support (or local disk with no dependencies) +- Fast query response times (under 10ms for last-value queries, or 30ms for distinct metadata) +- Embedded Python VM for plugins and triggers +- Parquet file persistence +- Compatibility with InfluxDB 1.x and 2.x write APIs + +{{% show-in "core" %}} +[Get started with Core](/influxdb3/version/get-started/) +{{% /show-in %}} + +The Enterprise version adds the following features to Core: + +- Historical query capability and single series indexing +- High availability +- Read replicas +- Enhanced security (coming soon) +- Row-level delete support (coming soon) +- Integrated admin UI (coming soon) + +{{% show-in "core" %}} +For more information, see how to [get started with Enterprise](/influxdb3/enterprise/get-started/). +{{% /show-in %}} \ No newline at end of file diff --git a/content/shared/v3-core-enterprise-release-notes/_index.md b/content/shared/v3-core-enterprise-release-notes/_index.md index 236a67b5e..ff46e3d03 100644 --- a/content/shared/v3-core-enterprise-release-notes/_index.md +++ b/content/shared/v3-core-enterprise-release-notes/_index.md @@ -5,6 +5,116 @@ > All updates to Core are automatically included in Enterprise. > The Enterprise sections below only list updates exclusive to Enterprise. 
+## v3.1.0 {date="2025-05-29"} +**Core**: revision 482dd8aac580c04f37e8713a8fffae89ae8bc264 + +**Enterprise**: revision 2cb23cf32b67f9f0d0803e31b356813a1a151b00 + +### Core + +#### Token and Security Updates +- Named admin tokens can now be created, with configurable expirations +- `health`, `ping`, and `metrics` endpoints can now be opted out of authorization +- `Basic $TOKEN` is now supported for all APIs +- Additional info available when creating a new token +- Additional info available when starting InfluxDB using `--without-auth` + +#### Additional Updates +- New catalog metrics available for count operations +- New object store metrics available for transfer latencies and transfer sizes +- New query duration metrics available for Last Value caches +- `/ping` API now contains versioning headers +- Other performance improvements + +#### Fixes +- New tags are now backfilled with NULL instead of empty strings +- Bitcode deserialization error fixed +- Series key metadata not persisting to Parquet is now fixed +- Other general fixes and corrections + +### Enterprise + +#### Token and Security Updates +- Resource tokens now use resource names in `show tokens` +- Tokens can now be granted `CREATE` permission for creating databases + +#### Additional Updates +- Last value caches populate on creation and reload on restart +- Distinct value caches populate on creation and reload on restart +- Other performance improvements +- Replaces remaining "INFLUXDB_IOX" Dockerfile environment variables with the following: + - `ENV INFLUXDB3_OBJECT_STORE=file` + - `ENV INFLUXDB3_DB_DIR=/var/lib/influxdb3` + +#### Fixes +- Improvements and fixes for license validations +- False positive fixed for catalog error on shutdown +- UX improvements for error and onboarding messages +- Other general fixes and corrections + +## v3.0.3 {date="2025-05-16"} +**Core**: revision 384c457ef5f0d5ca4981b22855e411d8cac2688e + +**Enterprise**: revision 34f4d28295132b9efafebf654e9f6decd1a13caf + +### Core 
+ +#### Fixes + +- Prevent operator token, `_admin`, from being deleted. + +### Enterprise + +#### Fixes + +- Fix object store info digest that is output during onboarding. +- Fix issues with false positive catalog error on shutdown. +- Fix licensing validation issues. +- Other fixes and performance improvements. + + + +## v3.0.2 {date="2025-05-01"} +**Core**: revision d80d6cd60049c7b266794a48c97b1b6438ac5da9 + +**Enterprise**: revision e9d7e03c2290d0c3e44d26e3eeb60aaf12099f29 + +### Core + +#### Security updates + +- Generate testing TLS certificates on the fly. +- Set the TLS CA via the INFLUXDB3_TLS_CA environment variable. +- Enforce a minimum TLS version for enhanced security. +- Allow CORS requests from browsers. + +#### General updates + +- Support the `--format json` option in the token creation output. +- Remove the Last Values Cache size limitation to improve performance and flexibility. +- Incorporate additional performance improvements. + +#### Fixes + +- Fix a counting bug in the distinct cache. +- Fix how the distinct cache handles rows with null values. +- Fix handling of `group by` tag columns that use escape quotes. +- Sort the IOx table schema consistently in the `SHOW TABLES` command. + +### Enterprise + +#### Updates + +- Introduce a command and system table to list cluster nodes. +- Support multiple custom permission argument matches. +- Improve overall performance. + +#### Fixes + +- Initialize the object store only once. +- Prevent the Home license server from crashing on restart. +- Enforce the `--num-cores` thread allocation limit. 
+ ## v3.0.1 {date="2025-04-16"} **Core**: revision d7c071e0c4959beebc7a1a433daf8916abd51214 diff --git a/content/shared/v3-core-get-started/_index.md b/content/shared/v3-core-get-started/_index.md deleted file mode 100644 index f6f3bfa69..000000000 --- a/content/shared/v3-core-get-started/_index.md +++ /dev/null @@ -1,1047 +0,0 @@ -InfluxDB is a database built to collect, process, transform, and store event and time series data, and is ideal for use cases that require real-time ingest and fast query response times to build user interfaces, monitoring, and automation solutions. - -Common use cases include: - -- Monitoring sensor data -- Server monitoring -- Application performance monitoring -- Network monitoring -- Financial market and trading analytics -- Behavioral analytics - -InfluxDB is optimized for scenarios where near real-time data monitoring is essential and queries need to return quickly to support user experiences such as dashboards and interactive user interfaces. - -{{% product-name %}} is the InfluxDB 3 open source release. -Core's feature highlights include: - -* Diskless architecture with object storage support (or local disk with no dependencies) -* Fast query response times (under 10ms for last-value queries, or 30ms for distinct metadata) -* Embedded Python VM for plugins and triggers -* Parquet file persistence -* Compatibility with InfluxDB 1.x and 2.x write APIs - -The Enterprise version adds the following features to Core: - -* Historical query capability and single series indexing -* High availability -* Read replicas -* Enhanced security (coming soon) -* Row-level delete support (coming soon) -* Integrated admin UI (coming soon) - -For more information, see how to [get started with Enterprise](/influxdb3/enterprise/get-started/). 
- -### What's in this guide - -This guide covers InfluxDB 3 Core (the open source release), including the following topics: - -- [Install and startup](#install-and-startup) -- [Authentication and authorization](#authentication-and-authorization) -- [Data Model](#data-model) -- [Tools to use](#tools-to-use) -- [Write data](#write-data) -- [Query data](#query-data) -- [Last values cache](#last-values-cache) -- [Distinct values cache](#distinct-values-cache) -- [Python plugins and the processing engine](#python-plugins-and-the-processing-engine) - -### Install and startup - -{{% product-name %}} runs on **Linux**, **macOS**, and **Windows**. - -{{% tabs-wrapper %}} -{{% tabs %}} -[Linux or macOS](#linux-or-macos) -[Windows](#windows) -[Docker](#docker) -{{% /tabs %}} -{{% tab-content %}} - -To get started quickly, download and run the install script--for example, using [curl](https://curl.se/download.html): - - -```bash -curl -O https://www.influxdata.com/d/install_influxdb3.sh \ -&& sh install_influxdb3.sh -``` - -Or, download and install [build artifacts](/influxdb3/core/install/#download-influxdb-3-core-binaries): - -- [Linux | AMD64 (x86_64) | GNU](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_amd64.tar.gz) - • - [sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_amd64.tar.gz.sha256) -- [Linux | ARM64 (AArch64) | GNU](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_arm64.tar.gz) - • - [sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_arm64.tar.gz.sha256) -- [macOS | Silicon (ARM64)](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_darwin_arm64.tar.gz) - • - [sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_darwin_arm64.tar.gz.sha256) - -> 
[!Note] -> macOS Intel builds are coming soon. - - -{{% /tab-content %}} -{{% tab-content %}} - -Download and install the {{% product-name %}} [Windows (AMD64, x86_64) binary](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}-windows_amd64.zip) - • -[sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}-windows_amd64.zip.sha256) - -{{% /tab-content %}} -{{% tab-content %}} - - -The [`influxdb:3-core` image](https://hub.docker.com/_/influxdb/tags?tag=3-core&name=3-core) -is available for x86_64 (AMD64) and ARM64 architectures. - -Pull the image: - - -```bash -docker pull influxdb:3-core -``` - -##### InfluxDB 3 Explorer -- Query Interface (Beta) - -You can download the new InfluxDB 3 Explorer query interface using Docker. -Explorer is currently in beta. Pull the image: - -```bash -docker pull quay.io/influxdb/influxdb3-explorer:latest -``` - - -{{% /tab-content %}} -{{% /tabs-wrapper %}} - -_Build artifacts and images update with every merge into the {{% product-name %}} `main` branch._ - -#### Verify the install - -After you have installed {{% product-name %}}, enter the following command to verify that it completed successfully: - -```bash -influxdb3 --version -``` - -If your system doesn't locate `influxdb3`, then `source` the configuration file (for example, .bashrc, .zshrc) for your shell--for example: - - -```zsh -source ~/.zshrc -``` - -#### Start InfluxDB - -To start your InfluxDB instance, use the `influxdb3 serve` command -and provide the following: - -- `--object-store`: Specifies the type of Object store to use. - InfluxDB supports the following: local file system (`file`), `memory`, - S3 (and compatible services like Ceph or Minio) (`s3`), - Google Cloud Storage (`google`), and Azure Blob Storage (`azure`). - The default is `file`. - Depending on the object store type, you may need to provide additional options - for your object store configuration. 
-- `--node-id`: A string identifier that distinguishes individual server instances within the cluster. - This forms the final part of the storage path: `/`. - In a multi-node setup, this ID is used to reference specific nodes. - -> [!Note] -> #### Diskless architecture -> -> InfluxDB 3 supports a diskless architecture that can operate with object -> storage alone, eliminating the need for locally attached disks. -> {{% product-name %}} can also work with only local disk storage when needed. - -The following examples show how to start InfluxDB 3 with different object store configurations: - -```bash -# Memory object store -# Stores data in RAM; doesn't persist data -influxdb3 serve \ ---node-id host01 \ ---object-store memory -``` - -```bash -# Filesystem object store -# Provide the filesystem directory -influxdb3 serve \ - --node-id host01 \ - --object-store file \ - --data-dir ~/.influxdb3 -``` - -To run the [Docker image](/influxdb3/core/install/#docker-image) and persist data to the filesystem, mount a volume for the Object store-for example, pass the following options: - -- `-v /path/on/host:/path/in/container`: Mounts a directory from your filesystem to the container -- `--object-store file --data-dir /path/in/container`: Uses the mount for server storage - -> [!Note] -> -> The {{% product-name %}} Docker image exposes port `8181`, the `influxdb3` server default for HTTP connections. -> To map the exposed port to a different port when running a container, see the Docker guide for [Publishing and exposing ports](https://docs.docker.com/get-started/docker-concepts/running-containers/publishing-ports/). 
- - -```bash -# Filesystem object store with Docker -# Create a mount -# Provide the mount path -docker run -it \ - -v /path/on/host:/path/in/container \ - influxdb:3-core influxdb3 serve \ - --node-id my_host \ - --object-store file \ - --data-dir /path/in/container -``` - -```bash -# S3 object store (default is the us-east-1 region) -# Specify the Object store type and associated options -influxdb3 serve \ - --node-id host01 \ - --object-store s3 \ - --bucket BUCKET \ - --aws-access-key AWS_ACCESS_KEY_ID \ - --aws-secret-access-key AWS_SECRET_ACCESS_KEY -``` - -```bash -# Minio or other open source object store -# (using the AWS S3 API with additional parameters) -# Specify the object store type and associated options -influxdb3 serve \ - --node-id host01 \ - --object-store s3 \ - --bucket BUCKET \ - --aws-access-key-id AWS_ACCESS_KEY_ID \ - --aws-secret-access-key AWS_SECRET_ACCESS_KEY \ - --aws-endpoint ENDPOINT \ - --aws-allow-http -``` - -For more information about server options, use the CLI help: - -```bash -influxdb3 serve --help -``` - -### Authentication and authorization - -After you have [started the server](#start-influxdb), you can create and manage tokens using the `influxdb3` CLI or the HTTP API. -{{% product-name %}} uses token-based authentication and authorization which is enabled by default when you start the server. -With authentication enabled, you must provide a token to access server actions. -An {{% product-name %}} instance can have one _admin token_, which grants access to all CLI actions and API endpoints. - -When you create a token, InfluxDB 3 returns a token string in plain text -that you use to authenticate CLI commands and API requests. - -To have the `influxdb3` CLI use your admin token automatically, assign it to the -`INFLUXDB3_AUTH_TOKEN` environment variable. - -> [!Important] -> #### Securely store your token -> -> InfluxDB lets you view the token string only when you create the token. 
-> Store your token in a secure location, as you cannot retrieve it from the database later. -> InfluxDB 3 stores only the token's hash and metadata in the catalog. - -#### Create an admin token - -To create an admin token, use the `influxdb3 create token --admin` subcommand--for example: - -```bash -influxdb3 create token --admin \ - --host http://{{< influxdb/host >}} -``` -```bash -# With Docker -- In a new terminal, run: -docker exec -it CONTAINER_NAME influxdb3 create token --admin -``` - -The command returns a token string that you can use to authenticate CLI commands and API requests. - -For more information, see how to [Manage admin tokens](/influxdb3/version/admin/tokens/admin/). - -### Data model - -The database server contains logical databases, which have tables, which have columns. Compared to previous versions of InfluxDB you can think of a database as a `bucket` in v2 or as a `db/retention_policy` in v1. A `table` is equivalent to a `measurement`, which has columns that can be of type `tag` (a string dictionary), `int64`, `float64`, `uint64`, `bool`, or `string` and finally every table has a `time` column that is a nanosecond precision timestamp. - -In InfluxDB 3, every table has a primary key--the ordered set of tags and the time--for its data. -This is the sort order used for all Parquet files that get created. When you create a table, either through an explicit call or by writing data into a table for the first time, it sets the primary key to the tags in the order they arrived. This is immutable. Although InfluxDB is still a _schema-on-write_ database, the tag column definitions for a table are immutable. - -Tags should hold unique identifying information like `sensor_id`, or `building_id` or `trace_id`. All other data should be kept in fields. You will be able to add fast last N value and distinct value lookups later for any column, whether it is a field or a tag. 
- -### Tools to use - -The following table compares tools that you can use to interact with {{% product-name %}}. -This tutorial covers many of the recommended tools. - -| Tool | Administration | Write | Query | -| :------------------------------------------------------------------------------------------------ | :----------------------: | :----------------------: | :----------------------: | -| [Chronograf](/chronograf/v1/) | - | - | **{{< icon "check" >}}** | -| `influx` CLI | - | - | - | -| [`influxdb3` CLI](#influxdb3-cli){{< req text="\* " color="magenta" >}} | **{{< icon "check" >}}** | **{{< icon "check" >}}** | **{{< icon "check" >}}** | -| `influxctl` CLI | - | - | - | -| [InfluxDB HTTP API](#influxdb-http-api){{< req text="\* " color="magenta" >}} | **{{< icon "check" >}}** | **{{< icon "check" >}}** | **{{< icon "check" >}}** | -| InfluxDB user interface | - | - | - | -| [InfluxDB 3 client libraries](/influxdb3/version/reference/client-libraries/v3/) | - | **{{< icon "check" >}}** | **{{< icon "check" >}}** | -| [InfluxDB v2 client libraries](/influxdb3/version/reference/client-libraries/v2/) | - | **{{< icon "check" >}}** | - | -| [InfluxDB v1 client libraries](/influxdb3/version/reference/client-libraries/v1/) | - | **{{< icon "check" >}}** | **{{< icon "check" >}}** | -| [InfluxDB 3 Processing engine](#python-plugins-and-the-processing-engine){{< req text="\* " color="magenta" >}} | | **{{< icon "check" >}}** | **{{< icon "check" >}}** | -| [Telegraf](/telegraf/v1/) | - | **{{< icon "check" >}}** | - | -| **Third-party tools** | | | | -| Flight SQL clients | - | - | **{{< icon "check" >}}** | -| [Grafana](/influxdb3/version/visualize-data/grafana/) | - | - | **{{< icon "check" >}}** | - -{{< caption >}} -{{< req type="key" text="Covered in this guide" color="magenta" >}} -{{< /caption >}} - -### Write data - -InfluxDB is a schema-on-write database. 
You can start writing data and InfluxDB creates the logical database, tables, and their schemas on the fly. -After a schema is created, InfluxDB validates future write requests against it before accepting the data. -Subsequent requests can add new fields on-the-fly, but can't add new tags. - -> [!Note] -> #### Core is optimized for recent data -> -> {{% product-name %}} is optimized for recent data but accepts writes from any time period. -> The system persists data to Parquet files for historical analysis with [InfluxDB 3 Enterprise](/influxdb3/enterprise/get-started/) or third-party tools. -> For extended historical queries and optimized data organization, consider using [InfluxDB 3 Enterprise](/influxdb3/enterprise/get-started/). - -#### Write data in line protocol syntax - -{{% product-name %}} accepts data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) syntax. -The following code block is an example of time series data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) syntax: - -- `cpu`: the table name. -- `host`, `region`, `applications`: the tags. A tag set is an ordered, comma-separated list of key/value pairs where the values are strings. -- `val`, `usage_percent`, `status`: the fields. A field set is a comma-separated list of key/value pairs. -- timestamp: If you don't specify a timestamp, InfluxData uses the time when data is written. - The default precision is a nanosecond epoch. - To specify a different precision, pass the `precision` parameter in your CLI command or API request. 
- -``` -cpu,host=Alpha,region=us-west,application=webserver val=1i,usage_percent=20.5,status="OK" -cpu,host=Bravo,region=us-east,application=database val=2i,usage_percent=55.2,status="OK" -cpu,host=Charlie,region=us-west,application=cache val=3i,usage_percent=65.4,status="OK" -cpu,host=Bravo,region=us-east,application=database val=4i,usage_percent=70.1,status="Warn" -cpu,host=Bravo,region=us-central,application=database val=5i,usage_percent=80.5,status="OK" -cpu,host=Alpha,region=us-west,application=webserver val=6i,usage_percent=25.3,status="Warn" -``` - -### Write data using the CLI - -To quickly get started writing data, you can use the `influxdb3` CLI. - -> [!Note] -> For batching and higher-volume write workloads, we recommend using the [HTTP API](#write-data-using-the-http-api). -> -> #### Write data using InfluxDB API client libraries -> -> InfluxDB provides supported client libraries that integrate with your code -> to construct data as time series points and write the data as line protocol to your {{% product-name %}} database. -> For more information, see how to [use InfluxDB client libraries to write data](/influxdb3/version/write-data/api-client-libraries/). - -##### Example: write data using the influxdb3 CLI - -Use the `influxdb3 write` command to write data to a database. - -In the code samples, replace the following placeholders with your values: - -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: The name of the [database](/influxdb3/version/admin/databases/) to write to. -{{% show-in "core" %}} -- {{% code-placeholder-key %}}`TOKEN`{{% /code-placeholder-key %}}: A [token](/influxdb3/version/admin/tokens/) for your {{% product-name %}} server. -{{% /show-in %}} -{{% show-in "enterprise" %}} -- {{% code-placeholder-key %}}`TOKEN`{{% /code-placeholder-key %}}: A [token](/influxdb3/version/admin/tokens/) - with permission to write to the specified database. 
-{{% /show-in %}} - -##### Write data via stdin - -Pass data as quoted line protocol via standard input (stdin)--for example: - -{{% code-placeholders "DATABASE_NAME|TOKEN" %}} -```bash -influxdb3 write \ - --database DATABASE_NAME \ - --token TOKEN \ - --precision ns \ - --accept-partial \ -'cpu,host=Alpha,region=us-west,application=webserver val=1i,usage_percent=20.5,status="OK" -cpu,host=Bravo,region=us-east,application=database val=2i,usage_percent=55.2,status="OK" -cpu,host=Charlie,region=us-west,application=cache val=3i,usage_percent=65.4,status="OK" -cpu,host=Bravo,region=us-east,application=database val=4i,usage_percent=70.1,status="Warn" -cpu,host=Bravo,region=us-central,application=database val=5i,usage_percent=80.5,status="OK" -cpu,host=Alpha,region=us-west,application=webserver val=6i,usage_percent=25.3,status="Warn"' -``` -{{% /code-placeholders %}} - -##### Write data from a file - -Pass the `--file` option to write line protocol you have saved to a file--for example, save the -[sample line protocol](#write-data-in-line-protocol-syntax) to a file named `server_data` -and then enter the following command: - -{{% code-placeholders "DATABASE_NAME|TOKEN" %}} -```bash -influxdb3 write \ - --database DATABASE_NAME \ - --token TOKEN \ - --precision ns \ - --accept-partial \ - --file server_data -``` -{{% /code-placeholders %}} - -### Write data using the HTTP API - -{{% product-name %}} provides three write API endpoints that respond to HTTP `POST` requests. -The `/api/v3/write_lp` endpoint is the recommended endpoint for writing data and -provides additional options for controlling write behavior. - -If you need to write data using InfluxDB v1.x or v2.x tools, use the compatibility API endpoints. -Compatibility APIs work with [Telegraf](/telegraf/v1/), InfluxDB v2.x and v1.x [API client libraries](/influxdb3/version/reference/client-libraries), and other tools that support the v1.x or v2.x APIs. 
- -{{% tabs-wrapper %}} -{{% tabs %}} -[/api/v3/write_lp](#) -[v2 compatibility](#) -[v1 compatibility](#) -{{% /tabs %}} -{{% tab-content %}} - -{{% product-name %}} adds the `/api/v3/write_lp` endpoint. - -{{}} - -This endpoint accepts the same line protocol syntax as previous versions, -and supports the following parameters: - -- `?accept_partial=`: Accept or reject partial writes (default is `true`). -- `?no_sync=`: Control when writes are acknowledged: - - `no_sync=true`: Acknowledges writes before WAL persistence completes. - - `no_sync=false`: Acknowledges writes after WAL persistence completes (default). -- `?precision=`: Specify the precision of the timestamp. The default is nanosecond precision. -- request body: The line protocol data to write. - -For more information about the parameters, see [Write data](/influxdb3/core/write-data/). - -##### Example: write data using the /api/v3 HTTP API - -The following examples show how to write data using `curl` and the `/api/3/write_lp` HTTP endpoint. -To show the difference between accepting and rejecting partial writes, line `2` in the example contains a `string` value (`"hi"`) for a `float` field (`temp`). - -###### Partial write of line protocol occurred - -With `accept_partial=true` (default): - -```bash -curl -v "http://{{< influxdb/host >}}/api/v3/write_lp?db=sensors&precision=auto" \ - --data-raw 'home,room=Sunroom temp=96 -home,room=Sunroom temp="hi"' -``` - -The response is the following: - -``` -< HTTP/1.1 400 Bad Request -... -{ - "error": "partial write of line protocol occurred", - "data": [ - { - "original_line": "home,room=Sunroom temp=hi", - "line_number": 2, - "error_message": "invalid column type for column 'temp', expected iox::column_type::field::float, got iox::column_type::field::string" - } - ] -} -``` - -Line `1` is written and queryable. 
-The response is an HTTP error (`400`) status, and the response body contains the error message `partial write of line protocol occurred` with details about the problem line. - -###### Parsing failed for write_lp endpoint - -With `accept_partial=false`: - -```bash -curl -v "http://{{< influxdb/host >}}/api/v3/write_lp?db=sensors&precision=auto&accept_partial=false" \ - --data-raw 'home,room=Sunroom temp=96 -home,room=Sunroom temp="hi"' -``` - -The response is the following: - -``` -< HTTP/1.1 400 Bad Request -... -{ - "error": "parsing failed for write_lp endpoint", - "data": { - "original_line": "home,room=Sunroom temp=hi", - "line_number": 2, - "error_message": "invalid column type for column 'temp', expected iox::column_type::field::float, got iox::column_type::field::string" - } -} -``` - -InfluxDB rejects all points in the batch. -The response is an HTTP error (`400`) status, and the response body contains `parsing failed for write_lp endpoint` and details about the problem line. - -For more information about the ingest path and data flow, see [Data durability](/influxdb3/version/reference/internals/durability/). - -{{% /tab-content %}} -{{% tab-content %}} - -The `/api/v2/write` InfluxDB v2 compatibility endpoint provides backwards compatibility with clients (such as [Telegraf's InfluxDB v2 output plugin](/telegraf/v1/plugins/#output-influxdb_v2) and [InfluxDB v2 API client libraries](/influxdb3/version/reference/client-libraries/v2/)) that can write data to InfluxDB OSS v2.x and Cloud 2 (TSM). - -{{}} - -{{% /tab-content %}} - -{{% tab-content %}} - -The `/write` InfluxDB v1 compatibility endpoint provides backwards compatibility for clients that can write data to InfluxDB v1.x. 
- -{{}} - - -{{% /tab-content %}} -{{% /tabs-wrapper %}} - -> [!Note] -> #### Compatibility APIs differ from native APIs -> -> Keep in mind that the compatibility APIs differ from the v1 and v2 APIs in previous versions in the following ways: -> -> - Tags in a table (measurement) are _immutable_ -> - A tag and a field can't have the same name within a table. - -#### Write responses - -By default, InfluxDB acknowledges writes after flushing the WAL file to the Object store (occurring every second). -For high write throughput, you can send multiple concurrent write requests. - -#### Use no_sync for immediate write responses - -To reduce the latency of writes, use the `no_sync` write option, which acknowledges writes _before_ WAL persistence completes. -When `no_sync=true`, InfluxDB validates the data, writes the data to the WAL, and then immediately responds to the client, without waiting for persistence to the Object store. - -Using `no_sync=true` is best when prioritizing high-throughput writes over absolute durability. - -- Default behavior (`no_sync=false`): Waits for data to be written to the Object store before acknowledging the write. Reduces the risk of data loss, but increases the latency of the response. -- With `no_sync=true`: Reduces write latency, but increases the risk of data loss in case of a crash before WAL persistence. 
- -##### Immediate write using the HTTP API - -The `no_sync` parameter controls when writes are acknowledged--for example: - -```bash -curl "http://{{< influxdb/host >}}/api/v3/write_lp?db=sensors&precision=auto&no_sync=true" \ - --data-raw "home,room=Sunroom temp=96" -``` - -##### Immediate write using the influxdb3 CLI - -The `no_sync` CLI option controls when writes are acknowledged--for example: - -```bash -influxdb3 write \ - --bucket mydb \ - --org my_org \ - --token my-token \ - --no-sync -``` - -### Create a database or table - -To create a database without writing data, use the `create` subcommand--for example: - -```bash -influxdb3 create database mydb -``` - -To learn more about a subcommand, use the `-h, --help` flag: - -```bash -influxdb3 create -h -``` - -### Query data - -InfluxDB 3 now supports native SQL for querying, in addition to InfluxQL, an -SQL-like language customized for time series queries. - -{{< product-name >}} limits -query time ranges to 72 hours (both recent and historical) to ensure query performance. -For more information about the 72-hour limitation, see the -[update on InfluxDB 3 Core’s 72-hour limitation](https://www.influxdata.com/blog/influxdb3-open-source-public-alpha-jan-27/). - -> [!Note] -> Flux, the language introduced in InfluxDB 2.0, is **not** supported in InfluxDB 3. - -The quickest way to get started querying is to use the `influxdb3` CLI (which uses the Flight SQL API over HTTP2). - -The `query` subcommand includes options to help ensure that the right database is queried with the correct permissions. Only the `--database` option is required, but depending on your specific setup, you may need to pass other options, such as host, port, and token. 
- -| Option | Description | Required | -|---------|-------------|--------------| -| `--host` | The host URL of the server [default: `http://127.0.0.1:8181`] to query | No | -| `--database` | The name of the database to operate on | Yes | -| `--token` | The authentication token for the {{% product-name %}} server | No | -| `--language` | The query language of the provided query string [default: `sql`] [possible values: `sql`, `influxql`] | No | -| `--format` | The format in which to output the query [default: `pretty`] [possible values: `pretty`, `json`, `jsonl`, `csv`, `parquet`] | No | -| `--output` | The path to output data to | No | - -#### Example: query `“SHOW TABLES”` on the `servers` database: - -```console -$ influxdb3 query --database servers "SHOW TABLES" -+---------------+--------------------+--------------+------------+ -| table_catalog | table_schema | table_name | table_type | -+---------------+--------------------+--------------+------------+ -| public | iox | cpu | BASE TABLE | -| public | information_schema | tables | VIEW | -| public | information_schema | views | VIEW | -| public | information_schema | columns | VIEW | -| public | information_schema | df_settings | VIEW | -| public | information_schema | schemata | VIEW | -+---------------+--------------------+--------------+------------+ -``` - -#### Example: query the `cpu` table, limiting to 10 rows: - -```console -$ influxdb3 query --database servers "SELECT DISTINCT usage_percent, time FROM cpu LIMIT 10" -+---------------+---------------------+ -| usage_percent | time | -+---------------+---------------------+ -| 63.4 | 2024-02-21T19:25:00 | -| 25.3 | 2024-02-21T19:06:40 | -| 26.5 | 2024-02-21T19:31:40 | -| 70.1 | 2024-02-21T19:03:20 | -| 83.7 | 2024-02-21T19:30:00 | -| 55.2 | 2024-02-21T19:00:00 | -| 80.5 | 2024-02-21T19:05:00 | -| 60.2 | 2024-02-21T19:33:20 | -| 20.5 | 2024-02-21T18:58:20 | -| 85.2 | 2024-02-21T19:28:20 | -+---------------+---------------------+ -``` - -### Query using the 
CLI for InfluxQL - -[InfluxQL](/influxdb3/version/reference/influxql/) is an SQL-like language developed by InfluxData with specific features tailored for leveraging and working with InfluxDB. It’s compatible with all versions of InfluxDB, making it a good choice for interoperability across different InfluxDB installations. - -To query using InfluxQL, enter the `influxdb3 query` subcommand and specify `influxql` in the language option--for example: - -```bash -influxdb3 query \ - --database servers \ - --language influxql \ - "SELECT DISTINCT usage_percent FROM cpu WHERE time >= now() - 1d" -``` - -### Query using the API - -InfluxDB 3 supports Flight (gRPC) APIs and an HTTP API. -To query your database using the HTTP API, send a request to the `/api/v3/query_sql` or `/api/v3/query_influxql` endpoints. -In the request, specify the database name in the `db` parameter -and a query in the `q` parameter. -You can pass parameters in the query string or inside a JSON object. - -Use the `format` parameter to specify the response format: `pretty`, `jsonl`, `parquet`, `csv`, and `json`. Default is `json`. - -##### Example: Query passing URL-encoded parameters - -The following example sends an HTTP `GET` request with a URL-encoded SQL query: - -```bash -curl -v "http://{{< influxdb/host >}}/api/v3/query_sql?db=servers&q=select+*+from+cpu+limit+5" -``` - -##### Example: Query passing JSON parameters - -The following example sends an HTTP `POST` request with parameters in a JSON payload: - -```bash -curl http://{{< influxdb/host >}}/api/v3/query_sql \ - --data '{"db": "server", "q": "select * from cpu limit 5"}' -``` - -### Query using the Python client - -Use the InfluxDB 3 Python library to interact with the database and integrate with your application. -We recommend installing the required packages in a Python virtual environment for your specific project. - -To get started, install the `influxdb3-python` package. 
- -```bash -pip install influxdb3-python -``` - -From here, you can connect to your database with the client library using just the **host** and **database name: - -```python -from influxdb_client_3 import InfluxDBClient3 - -client = InfluxDBClient3( - host='http://{{< influxdb/host >}}', - database='servers' -) -``` - -The following example shows how to query using SQL, and then -use PyArrow to explore the schema and process results: - -```python -from influxdb_client_3 import InfluxDBClient3 - -client = InfluxDBClient3( - host='http://{{< influxdb/host >}}', - - database='servers' -) - -# Execute the query and return an Arrow table -table = client.query( - query="SELECT * FROM cpu LIMIT 10", - language="sql" -) - -print("\n#### View Schema information\n") -print(table.schema) - -print("\n#### Use PyArrow to read the specified columns\n") -print(table.column('usage_active')) -print(table.select(['host', 'usage_active'])) -print(table.select(['time', 'host', 'usage_active'])) - -print("\n#### Use PyArrow compute functions to aggregate data\n") -print(table.group_by('host').aggregate([])) -print(table.group_by('cpu').aggregate([('time_system', 'mean')])) -``` - -For more information about the Python client library, see the [`influxdb3-python` repository](https://github.com/InfluxCommunity/influxdb3-python) in GitHub. - - -### Query using InfluxDB 3 Explorer (Beta) - -You can use the InfluxDB 3 Explorer query interface by downloading the Docker image. - -```bash -docker pull quay.io/influxdb/influxdb3-explorer:latest -``` - -Run the interface using: - -```bash -docker run --name influxdb3-explorer -p 8086:8888 quay.io/influxdb/influxdb3-explorer:latest -``` - -With the default settings above, you can access the UI at http://localhost:8086. -Set your expected database connection details on the Settings page. -From there, you can query data, browser your database schema, and do basic -visualization of your time series data. 
- -### Last values cache - -{{% product-name %}} supports a **last-n values cache** which stores the last N values in a series or column hierarchy in memory. This gives the database the ability to answer these kinds of queries in under 10 milliseconds. -You can use the `influxdb3` CLI to [create a last value cache](/influxdb3/version/reference/cli/influxdb3/create/last_cache/). - -```bash -influxdb3 create last_cache \ - -d \ - -t
\ - [CACHE_NAME] -``` - -Consider the following `cpu` sample table: - -| host | application | time | usage\_percent | status | -| ----- | ----- | ----- | ----- | ----- | -| Bravo | database | 2024-12-11T10:00:00 | 55.2 | OK | -| Charlie | cache | 2024-12-11T10:00:00 | 65.4 | OK | -| Bravo | database | 2024-12-11T10:01:00 | 70.1 | Warn | -| Bravo | database | 2024-12-11T10:01:00 | 80.5 | OK | -| Alpha | webserver | 2024-12-11T10:02:00 | 25.3 | Warn | - -The following command creates a last value cache named `cpuCache`: - -```bash -influxdb3 create last_cache \ - --database servers \ - --table cpu \ - --key-columns host,application \ - --value-columns usage_percent,status \ - --count 5 cpuCache -``` - -_You can create a last values cache per time series, but be mindful of high cardinality tables that could take excessive memory._ - -#### Query a last values cache - -To use the LVC, call it using the `last_cache()` function in your query--for example: - -```bash -influxdb3 query \ - --database servers \ - "SELECT * FROM last_cache('cpu', 'cpuCache') WHERE host = 'Bravo';" -``` - -> [!Note] -> #### Only works with SQL -> -> The Last values cache only works with SQL, not InfluxQL; SQL is the default language. - -#### Delete a Last values cache - -Use the `influxdb3` CLI to [delete a last values cache](/influxdb3/version/reference/cli/influxdb3/delete/last_cache/) - -```bash -influxdb3 delete last_cache \ - --database \ - --table
\ - --cache-name -``` - -### Distinct values cache - -Similar to the Last values cache, the database can cache in RAM the distinct values for a single column in a table or a hierarchy of columns. This is useful for fast metadata lookups, which can return in under 30 milliseconds. Many of the options are similar to the last value cache. - -You can use the `influxdb3` CLI to [create a distinct values cache](/influxdb3/version/reference/cli/influxdb3/create/distinct_cache/). - -```bash -influxdb3 create distinct_cache \ - --database \ - --table
\ - --columns \ - [CACHE_NAME] -``` - -Consider the following `cpu` sample table: - -| host | application | time | usage\_percent | status | -| ----- | ----- | ----- | ----- | ----- | -| Bravo | database | 2024-12-11T10:00:00 | 55.2 | OK | -| Charlie | cache | 2024-12-11T10:00:00 | 65.4 | OK | -| Bravo | database | 2024-12-11T10:01:00 | 70.1 | Warn | -| Bravo | database | 2024-12-11T10:01:00 | 80.5 | OK | -| Alpha | webserver | 2024-12-11T10:02:00 | 25.3 | Warn | - -The following command creates a distinct values cache named `cpuDistinctCache`: - -```bash -influxdb3 create distinct_cache \ - --database servers \ - --table cpu \ - --columns host,application \ - cpuDistinctCache -``` - -#### Query a distinct values cache - -To use the distinct values cache, call it using the `distinct_cache()` function in your query--for example: - -```bash -influxdb3 query \ - --database servers \ - "SELECT * FROM distinct_cache('cpu', 'cpuDistinctCache')" -``` - -> [!Note] -> #### Only works with SQL -> -> The distinct cache only works with SQL, not InfluxQL; SQL is the default language. - -#### Delete a distinct values cache - -Use the `influxdb3` CLI to [delete a distinct values cache](/influxdb3/version/reference/cli/influxdb3/delete/distinct_cache/) - -```bash -influxdb3 delete distinct_cache \ - --database \ - --table
\ - --cache-name -``` - -### Python plugins and the Processing engine - -The InfluxDB 3 Processing engine is an embedded Python VM for running code inside the database to process and transform data. - -To activate the Processing engine, pass the `--plugin-dir ` option when starting the {{% product-name %}} server. -`PLUGIN_DIR` is your filesystem location for storing [plugin](#plugin) files for the Processing engine to run. - -#### Plugin - -A plugin is a Python function that has a signature compatible with a Processing engine [trigger](#trigger). - -#### Trigger - -When you create a trigger, you specify a [plugin](#plugin), a database, optional arguments, -and a _trigger-spec_, which defines when the plugin is executed and what data it receives. - -##### Trigger types - -InfluxDB 3 provides the following types of triggers, each with specific trigger-specs: - -- **On WAL flush**: Sends a batch of written data (for a specific table or all tables) to a plugin (by default, every second). -- **On Schedule**: Executes a plugin on a user-configured schedule (using a crontab or a duration); useful for data collection and deadman monitoring. -- **On Request**: Binds a plugin to a custom HTTP API endpoint at `/api/v3/engine/`. - The plugin receives the HTTP request headers and content, and can then parse, process, and send the data into the database or to third-party services. - -### Test, create, and trigger plugin code - -##### Example: Python plugin for WAL rows - -```python -# This is the basic structure for Python plugin code that runs in the -# InfluxDB 3 Processing engine. - -# When creating a trigger, you can provide runtime arguments to your plugin, -# allowing you to write generic code that uses variables such as monitoring -thresholds, environment variables, and host names. -# -# Use the following exact signature to define a function for the WAL flush -# trigger. 
-# When you create a trigger for a WAL flush plugin, you specify the database -# and tables that the plugin receives written data from on every WAL flush -# (default is once per second). -def process_writes(influxdb3_local, table_batches, args=None): - # here you can see logging. for now this won't do anything, but soon - # we'll capture this so you can query it from system tables - if args and "arg1" in args: - influxdb3_local.info("arg1: " + args["arg1"]) - - # here we're using arguments provided at the time the trigger was set up - # to feed into paramters that we'll put into a query - query_params = {"host": "foo"} - # here's an example of executing a parameterized query. Only SQL is supported. - # It will query the database that the trigger is attached to by default. We'll - # soon have support for querying other DBs. - query_result = influxdb3_local.query("SELECT * FROM cpu where host = '$host'", query_params) - # the result is a list of Dict that have the column name as key and value as - # value. If you run the WAL test plugin with your plugin against a DB that - # you've written data into, you'll be able to see some results - influxdb3_local.info("query result: " + str(query_result)) - - # this is the data that is sent when the WAL is flushed of writes the server - # received for the DB or table of interest. One batch for each table (will - # only be one if triggered on a single table) - for table_batch in table_batches: - # here you can see that the table_name is available. - influxdb3_local.info("table: " + table_batch["table_name"]) - - # example to skip the table we're later writing data into - if table_batch["table_name"] == "some_table": - continue - - # and then the individual rows, which are Dict with keys of the column names and values - for row in table_batch["rows"]: - influxdb3_local.info("row: " + str(row)) - - # this shows building a line of LP to write back to the database. 
tags must go first and - # their order is important and must always be the same for each individual table. Then - # fields and lastly an optional time, which you can see in the next example below - line = LineBuilder("some_table")\ - .tag("tag1", "tag1_value")\ - .tag("tag2", "tag2_value")\ - .int64_field("field1", 1)\ - .float64_field("field2", 2.0)\ - .string_field("field3", "number three") - - # this writes it back (it actually just buffers it until the completion of this function - # at which point it will write everything back that you put in) - influxdb3_local.write(line) - - # here's another example, but with us setting a nanosecond timestamp at the end - other_line = LineBuilder("other_table") - other_line.int64_field("other_field", 1) - other_line.float64_field("other_field2", 3.14) - other_line.time_ns(1302) - - # and you can see that we can write to any DB in the server - influxdb3_local.write_to_db("mytestdb", other_line) - - # just some log output as an example - influxdb3_local.info("done") -``` - -##### Test a plugin on the server - -Test your InfluxDB 3 plugin safely without affecting written data. During a plugin test: - -- A query executed by the plugin queries against the server you send the request to. -- Writes aren't sent to the server but are returned to you. - -To test a plugin, do the following: - -1. Create a _plugin directory_--for example, `/path/to/.influxdb/plugins` -2. [Start the InfluxDB server](#start-influxdb) and include the `--plugin-dir ` option. -3. Save the [example plugin code](#example-python-plugin-for-wal-flush) to a plugin file inside of the plugin directory. If you haven't yet written data to the table in the example, comment out the lines where it queries. -4. 
To run the test, enter the following command with the following options: - - - `--lp` or `--file`: The line protocol to test - - Optional: `--input-arguments`: A comma-delimited list of `=` arguments for your plugin code - - ```bash - influxdb3 test wal_plugin \ - --lp \ - --input-arguments "arg1=foo,arg2=bar" \ - --database \ - - ``` - -The command runs the plugin code with the test data, yields the data to the plugin code, and then responds with the plugin result. -You can quickly see how the plugin behaves, what data it would have written to the database, and any errors. -You can then edit your Python code in the plugins directory, and rerun the test. -The server reloads the file for every request to the `test` API. - -For more information, see [`influxdb3 test wal_plugin`](/influxdb3/version/reference/cli/influxdb3/test/wal_plugin/) or run `influxdb3 test wal_plugin -h`. - -With the plugin code inside the server plugin directory, and a successful test, -you're ready to create a plugin and a trigger to run on the server. 
- -##### Example: Test, create, and run a plugin - -The following example shows how to test a plugin, and then create the plugin and -trigger: - -```bash -# Test and create a plugin -# Requires: -# - A database named `mydb` with a table named `foo` -# - A Python plugin file named `test.py` -# Test a plugin -influxdb3 test wal_plugin \ - --lp "my_measure,tag1=asdf f1=1.0 123" \ - --database mydb \ - --input-arguments "arg1=hello,arg2=world" \ - test.py -``` - -```bash -# Create a trigger that runs the plugin -influxdb3 create trigger \ - -d mydb \ - --plugin test_plugin \ - --trigger-spec "table:foo" \ - --trigger-arguments "arg1=hello,arg2=world" \ - trigger1 -``` - -After you have created a plugin and trigger, enter the following command to -enable the trigger and have it run the plugin as you write data: - -```bash -influxdb3 enable trigger --database mydb trigger1 -``` - -For more information, see [Python plugins and the Processing engine](/influxdb3/version/plugins/). diff --git a/content/shared/v3-core-plugins/_index.md b/content/shared/v3-core-plugins/_index.md index 2ab099cd9..0b5d748f9 100644 --- a/content/shared/v3-core-plugins/_index.md +++ b/content/shared/v3-core-plugins/_index.md @@ -1,54 +1,72 @@ -Use the InfluxDB 3 Processing engine to run Python code directly in your -{{% product-name %}} database to automatically process data and respond to database events. +Use the Processing Engine in {{% product-name %}} to extend your database with custom Python code. Trigger your code on write, on a schedule, or on demand to automate workflows, transform data, and create API endpoints. -The Processing engine is an embedded Python VM that runs inside your InfluxDB 3 database and lets you: +## What is the Processing Engine? 
-- Process data as it's written to the database -- Run code on a schedule -- Create API endpoints that execute Python code -- Maintain state between executions with an in-memory cache +The Processing Engine is an embedded Python virtual machine that runs inside your {{% product-name %}} database. You configure _triggers_ to run your Python _plugin_ code in response to: -Learn how to create, configure, run, and extend Python plugins that execute when specific events occur. +- **Data writes** - Process and transform data as it enters the database +- **Scheduled events** - Run code at defined intervals or specific times +- **HTTP requests** - Expose custom API endpoints that execute your code -1. [Set up the Processing engine](#set-up-the-processing-engine) -2. [Add a Processing engine plugin](#add-a-processing-engine-plugin) - - [Get example plugins](#get-example-plugins) - - [Create a plugin](#create-a-plugin) -3. [Create a trigger to run a plugin](#create-a-trigger-to-run-a-plugin) - - [Create a trigger for data writes](#create-a-trigger-for-data-writes) - - [Create a trigger for scheduled events](#create-a-trigger-for-scheduled-events) - - [Create a trigger for HTTP requests](#create-a-trigger-for-http-requests) - - [Use community plugins from GitHub](#use-community-plugins-from-github) +You can use the Processing Engine's in-memory cache to manage state between executions and build stateful applications directly in your database. + +This guide walks you through setting up the Processing Engine, creating your first plugin, and configuring triggers that execute your code on specific events. + +## Before you begin + +Ensure you have: +- A working {{% product-name %}} instance +- Access to command line +- Python installed if you're writing your own plugin +- Basic knowledge of the InfluxDB CLI + +Once you have all the prerequisites in place, follow these steps to implement the Processing Engine for your data automation needs. + +1. 
[Set up the Processing Engine](#set-up-the-processing-engine) +2. [Add a Processing Engine plugin](#add-a-processing-engine-plugin) + - [Use example plugins](#use-example-plugins) + - [Create a custom plugin](#create-a-custom-plugin) +3. [Set up a trigger](#set-up-a-trigger) + - [Understand trigger types](#understand-trigger-types) + - [Use the create trigger command](#use-the-create-trigger-command) + - [Trigger specification examples](#trigger-specification-examples) +4. [Advanced trigger configuration](#advanced-trigger-configuration) + - [Access community plugins from GitHub](#access-community-plugins-from-github) - [Pass arguments to plugins](#pass-arguments-to-plugins) - [Control trigger execution](#control-trigger-execution) - [Configure error handling for a trigger](#configure-error-handling-for-a-trigger) -- [Extend plugins with API features and state management](#extend-plugins-with-api-features-and-state-management) -- [Install Python dependencies](#install-python-dependencies) + - [Install Python dependencies](#install-python-dependencies) -## Set up the Processing engine +## Set up the Processing Engine -To enable the Processing engine, start your InfluxDB server with the `--plugin-dir` option: +To activate the Processing Engine, start your {{% product-name %}} server with the `--plugin-dir` flag. This flag tells InfluxDB where to load your plugin files. + +{{% code-placeholders "NODE_ID|OBJECT_STORE_TYPE|PLUGIN_DIR" %}} ```bash influxdb3 serve \ - --node-id node0 \ - --object-store [OBJECT_STORE_TYPE] \ - --plugin-dir /path/to/plugins + --NODE_ID \ + --object-store OBJECT_STORE_TYPE \ + --plugin-dir PLUGIN_DIR ``` -Replace `/path/to/plugins` with the directory where you want to store your Python plugin files. All plugin files must be located in this directory or its subdirectories. 
+{{% /code-placeholders %}} +In the example above, replace the following: +- {{% code-placeholder-key %}}`NODE_ID`{{% /code-placeholder-key %}}: Unique identifier for your instance +- {{% code-placeholder-key %}}`OBJECT_STORE_TYPE`{{% /code-placeholder-key %}}: Type of object store (for example, file or s3) +- {{% code-placeholder-key %}}`PLUGIN_DIR`{{% /code-placeholder-key %}}: Absolute path to the directory where plugin files are stored. Store all plugin files in this directory or its subdirectories. ### Configure distributed environments -If you're running multiple {{% product-name %}} instances (distributed deployment): +When running {{% product-name %}} in a distributed setup, follow these steps to configure the Processing Engine: -1. Decide where plugins should run +1. Decide where each plugin should run - Data processing plugins, such as WAL plugins, run on ingester nodes - HTTP-triggered plugins run on nodes handling API requests - Scheduled plugins can run on any configured node -2. Enable plugins on selected instances +2. Enable plugins on the correct instance 3. Maintain identical plugin files across all instances where plugins run - Use shared storage or file synchronization tools to keep plugins consistent @@ -57,43 +75,58 @@ If you're running multiple {{% product-name %}} instances (distributed deploymen > > Configure your plugin directory on the same system as the nodes that run the triggers and plugins. +## Add a Processing Engine plugin -## Add a Processing engine plugin +A plugin is a Python script that defines a specific function signature for a trigger (_trigger spec_). When the specified event occurs, InfluxDB runs the plugin. -A plugin is a Python file that contains a specific function signature that corresponds to a trigger type. 
-Plugins: +### Choose a plugin strategy -- Receive plugin-specific arguments (such as written data, call time, or an HTTP request) -- Can receive keyword arguments (as `args`) from _trigger arguments_ -- Can access the `influxdb3_local` shared API for writing, querying, and managing state +You have two main options for adding plugins to your InfluxDB instance: -Get started using example plugins or create your own: +- [Use example plugins](#use-example-plugins) - Quickly get started with prebuilt plugins +- [Create a custom plugin](#create-a-custom-plugin) - Build your own for specialized use cases -- [Get example plugins](#get-example-plugins) -- [Create a plugin](#create-a-plugin) +### Use example plugins -### Get example plugins +InfluxData provides a public repository of example plugins that you can use immediately. -InfluxData maintains a repository of contributed plugins that you can use as-is or as a starting point for your own plugin. +#### Browse plugin examples -#### From local files +Visit the [influxdb3_plugins repository](https://github.com/influxdata/influxdb3_plugins) to find examples for: -You can copy example plugins from the [influxdb3_plugins repository](https://github.com/influxdata/influxdb3_plugins) to your local plugin directory: + - **Data transformation**: Process and transform incoming data + - **Alerting**: Send notifications based on data thresholds + - **Aggregation**: Calculate statistics on time series data + - **Integration**: Connect to external services and APIs + - **System monitoring**: Track resource usage and health metrics + +#### Add example plugins + +You can either copy a plugin or retrieve it directly from the repository: + +{{< code-tabs-wrapper >}} + +{{% code-tabs %}} +[Copy locally](#) +[Fetch via gh:](#) +{{% /code-tabs %}} + +{{% code-tab-content %}} ```bash # Clone the repository git clone https://github.com/influxdata/influxdb3_plugins.git - -# Copy example plugins to your plugin directory -cp -r 
influxdb3_plugins/examples/wal_plugin/* /path/to/plugins/ + +# Copy a plugin to your configured plugin directory +cp influxdb3_plugins/examples/schedule/system_metrics/system_metrics.py /path/to/plugins/ ``` +{{% /code-tab-content %}} -#### Directly from GitHub - -You can use plugins directly from GitHub without downloading them first by using the `gh:` prefix in the plugin filename: - +{{% code-tab-content %}} + ```bash -# Use a plugin directly from GitHub +# To retrieve and use a plugin directly from GitHub, +# use the `gh:` prefix in the plugin filename: influxdb3 create trigger \ --trigger-spec "every:1m" \ --plugin-filename "gh:examples/schedule/system_metrics/system_metrics.py" \ @@ -101,26 +134,61 @@ influxdb3 create trigger \ system_metrics ``` -> [!Note] -> #### Find and contribute plugins -> -> The plugins repository includes examples for various use cases: -> -> - **Data transformation**: Process and transform incoming data -> - **Alerting**: Send notifications based on data thresholds -> - **Aggregation**: Calculate statistics on time series data -> - **Integration**: Connect to external services and APIs -> - **System monitoring**: Track resource usage and health metrics -> -> Visit [influxdata/influxdb3_plugins](https://github.com/influxdata/influxdb3_plugins) -> to browse available plugins or contribute your own. +{{% /code-tab-content %}} -### Create a plugin +{{< /code-tabs-wrapper >}} -1. Create a `.py` file in your plugins directory -2. 
Define a function with one of the following signatures: +Plugins have various functions such as: -#### For data write events +- Receive plugin-specific arguments (such as written data, call time, or an HTTP request) +- Access keyword arguments (as `args`) passed from _trigger arguments_ configurations +- Access the `influxdb3_local` shared API to write data, query data, and managing state between executions + +For more information about available functions, arguments, and how plugins interact with InfluxDB, see how to [Extend plugins](/influxdb3/version/extend-plugin/). + +### Create a custom plugin + +To build custom functionality, you can create your own Processing Engine plugin. + +#### Prerequisites + +Before you begin, make sure: + +- The Processing Engine is enabled on your {{% product-name %}} instance. +- You’ve configured the `--plugin-dir` where plugin files are stored. +- You have access to that plugin directory. + +#### Steps to create a plugin: + +- [Choose your plugin type](#choose-your-plugin-type) +- [Create your plugin file](#create-your-plugin-file) +- [Next Steps](#next-steps) + +#### Choose your plugin type + +Choose a plugin type based on your automation goals: + +| Plugin Type | Best For | Trigger Type | +|-------------|----------|-------------| +| **Data write** | Processing data as it arrives | `table:` or `all_tables` | +| **Scheduled** | Running code at specific times | `every:` or `cron:` | +| **HTTP request** | Creating API endpoints | `path:` | + +#### Create your plugin file + +- Create a `.py` file in your plugins directory +- Add the appropriate function signature based on your chosen plugin type +- Write your processing logic inside the function + +After writing your plugin, [create a trigger](#use-the-create-trigger-command) to connect it to a database event and define when it runs. + +#### Create a data write plugin + +Use a data write plugin to process data as it's written to the database. 
Ideal use cases include: + +- Data transformation and enrichment +- Alerting on incoming values +- Creating derived metrics ```python def process_writes(influxdb3_local, table_batches, args=None): @@ -139,7 +207,13 @@ def process_writes(influxdb3_local, table_batches, args=None): influxdb3_local.write(line) ``` -#### For scheduled events +#### Create a scheduled plugin + +Scheduled plugins run at defined intervals. Use them for: + +- Periodic data aggregation +- Report generation +- System health checks ```python def process_scheduled_call(influxdb3_local, call_time, args=None): @@ -155,7 +229,13 @@ def process_scheduled_call(influxdb3_local, call_time, args=None): influxdb3_local.warn("No recent metrics found") ``` -#### For HTTP requests +#### Create an HTTP request plugin + +HTTP request plugins respond to API calls. Use them for: + +- Creating custom API endpoints +- Webhooks for external integrations +- User interfaces for data interaction ```python def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None): @@ -174,25 +254,55 @@ def process_request(influxdb3_local, query_parameters, request_headers, request_ return {"status": "success", "message": "Request processed"} ``` -After adding your plugin, you can [install Python dependencies](#install-python-dependencies) or learn how to [extend plugins with API features and state management](#extend-plugins-with-api-features-and-state-management). +#### Next steps -## Create a trigger to run a plugin +After writing your plugin: -A trigger connects your plugin to a specific database event. -The plugin function signature in your plugin file determines which _trigger specification_ -you can choose for configuring and activating your plugin. 
+- [Create a trigger](#use-the-create-trigger-command) to connect your plugin to database events +- [Install any Python dependencies](#install-python-dependencies) your plugin requires +- Learn how to [extend plugins with the API](/influxdb3/version/extend-plugin/) -Create a trigger with the `influxdb3 create trigger` command. +## Set up a trigger + +### Understand trigger types + +| Plugin Type | Trigger Specification | When Plugin Runs | +|------------|----------------------|-----------------| +| Data write | `table:` or `all_tables` | When data is written to tables | +| Scheduled | `every:` or `cron:` | At specified time intervals | +| HTTP request | `path:` | When HTTP requests are received | + +### Use the create trigger command + +Use the `influxdb3 create trigger` command with the appropriate trigger specification: + +{{% code-placeholders "SPECIFICATION|PLUGIN_FILE|DATABASE_NAME|TRIGGER_NAME" %}} + +```bash +influxdb3 create trigger \ + --trigger-spec SPECIFICATION \ + --plugin-filename PLUGIN_FILE \ + --database DATABASE_NAME \ + TRIGGER_NAME + ``` + +{{% /code-placeholders %}} + +In the example above, replace the following: + +- {{% code-placeholder-key %}}`SPECIFICATION`{{% /code-placeholder-key %}}: Trigger specification +- {{% code-placeholder-key %}}`PLUGIN_FILE`{{% /code-placeholder-key %}}: Plugin filename relative to your configured plugin directory +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Name of the database +- {{% code-placeholder-key %}}`TRIGGER_NAME`{{% /code-placeholder-key %}}: Name of the new trigger > [!Note] > When specifying a local plugin file, the `--plugin-filename` parameter > _is relative to_ the `--plugin-dir` configured for the server. > You don't need to provide an absolute path. 
-### Create a trigger for data writes +### Trigger specification examples -Use the `table:` or the `all_tables` trigger specification to configure -and run a [plugin for data write events](#for-data-write-events)--for example: +#### Data write example ```bash # Trigger on writes to a specific table @@ -211,15 +321,11 @@ influxdb3 create trigger \ all_data_processor ``` -The trigger runs when the database flushes ingested data for the specified tables -to the Write-Ahead Log (WAL) in the Object store (default is every second). +The trigger runs when the database flushes ingested data for the specified tables to the Write-Ahead Log (WAL) in the Object store (default is every second). The plugin receives the written data and table information. -### Create a trigger for scheduled events - -Use the `every:` or the `cron:` trigger specification -to configure and run a [plugin for scheduled events](#for-scheduled-events)--for example: +#### Scheduled events example ```bash # Run every 5 minutes @@ -239,9 +345,7 @@ influxdb3 create trigger \ The plugin receives the scheduled call time. -### Create a trigger for HTTP requests - -For an [HTTP request plugin](#for-http-requests), use the `request:` trigger specification to configure and enable a [plugin for HTTP requests](#for-http-requests)--for example: +#### HTTP requests example ```bash # Create an endpoint at /api/v3/engine/webhook @@ -252,7 +356,7 @@ influxdb3 create trigger \ webhook_processor ``` -The trigger makes your endpoint available at `/api/v3/engine/`. +Access your endpoint available at `/api/v3/engine/`. To run the plugin, send a `GET` or `POST` request to the endpoint--for example: ```bash @@ -261,22 +365,10 @@ curl http://{{% influxdb/host %}}/api/v3/engine/webhook The plugin receives the HTTP request object with methods, headers, and body. 
-### Use community plugins from GitHub - -You can reference plugins directly from the GitHub repository by using the `gh:` prefix: - -```bash -# Create a trigger using a plugin from GitHub -influxdb3 create trigger \ - --trigger-spec "every:1m" \ - --plugin-filename "gh:examples/schedule/system_metrics/system_metrics.py" \ - --database my_database \ - system_metrics -``` - ### Pass arguments to plugins Use trigger arguments to pass configuration from a trigger to the plugin it runs. You can use this for: + - Threshold values for monitoring - Connection properties for external services - Configuration settings for plugin behavior @@ -344,300 +436,91 @@ influxdb3 create trigger \ auto_disable_processor ``` -## Extend plugins with API features and state management +## Advanced trigger configuration -The Processing engine includes API capabilities that allow your plugins to -interact with InfluxDB data and maintain state between executions. -These features let you build more sophisticated plugins that can transform, analyze, and respond to data. +After creating basic triggers, you can enhance your plugins with these advanced features: -### Use the shared API +### Access community plugins from GitHub -All plugins have access to the shared API to interact with the database. 
+Skip downloading plugins by referencing them directly from GitHub: -#### Write data +```bash +# Create a trigger using a plugin from GitHub +influxdb3 create trigger \ + --trigger-spec "every:1m" \ + --plugin-filename "gh:examples/schedule/system_metrics/system_metrics.py" \ + --database my_database \ + system_metrics +``` -Use the `LineBuilder` API to create line protocol data: +This approach: + +- Ensures you're using the latest version +- Simplifies updates and maintenance +- Reduces local storage requirements + +### Configure your triggers + +#### Pass configuration arguments + +Provide runtine configuration to your plugins: + +```bash +# Pass threshold and email settings to a plugin +Provide runtime configuration to your plugins: + --trigger-spec "every:1h" \ + --plugin-filename "threshold_check.py" \ + --trigger-arguments threshold=90,notify_email=admin@example.com \ + --database my_database \ + threshold_monitor +``` + +Your plugin accesses these values through the `args` parameter: ```python -# Create a line protocol entry -line = LineBuilder("weather") -line.tag("location", "us-midwest") -line.float64_field("temperature", 82.5) -line.time_ns(1627680000000000000) - -# Write the data to the database -influxdb3_local.write(line) +def process_scheduled_call(influxdb3_local, call_time, args=None): + if args and "threshold" in args: + threshold = float(args["threshold"]) + email = args.get("notify_email", "default@example.com") + + # Use the arguments in your logic + influxdb3_local.info(f"Checking threshold {threshold}, will notify {email}") ``` -Writes are buffered while the plugin runs and are flushed when the plugin completes. 
+#### Set execution mode -{{% expand-wrapper %}} -{{% expand "View the `LineBuilder` Python implementation" %}} +Choose between synchronous (default) or asynchronous execution: -```python -from typing import Optional -from collections import OrderedDict - -class InfluxDBError(Exception): - """Base exception for InfluxDB-related errors""" - pass - -class InvalidMeasurementError(InfluxDBError): - """Raised when measurement name is invalid""" - pass - -class InvalidKeyError(InfluxDBError): - """Raised when a tag or field key is invalid""" - pass - -class InvalidLineError(InfluxDBError): - """Raised when a line protocol string is invalid""" - pass - -class LineBuilder: - def __init__(self, measurement: str): - if ' ' in measurement: - raise InvalidMeasurementError("Measurement name cannot contain spaces") - self.measurement = measurement - self.tags: OrderedDict[str, str] = OrderedDict() - self.fields: OrderedDict[str, str] = OrderedDict() - self._timestamp_ns: Optional[int] = None - - def _validate_key(self, key: str, key_type: str) -> None: - """Validate that a key does not contain spaces, commas, or equals signs.""" - if not key: - raise InvalidKeyError(f"{key_type} key cannot be empty") - if ' ' in key: - raise InvalidKeyError(f"{key_type} key '{key}' cannot contain spaces") - if ',' in key: - raise InvalidKeyError(f"{key_type} key '{key}' cannot contain commas") - if '=' in key: - raise InvalidKeyError(f"{key_type} key '{key}' cannot contain equals signs") - - def tag(self, key: str, value: str) -> 'LineBuilder': - """Add a tag to the line protocol.""" - self._validate_key(key, "tag") - self.tags[key] = str(value) - return self - - def uint64_field(self, key: str, value: int) -> 'LineBuilder': - """Add an unsigned integer field to the line protocol.""" - self._validate_key(key, "field") - if value < 0: - raise ValueError(f"uint64 field '{key}' cannot be negative") - self.fields[key] = f"{value}u" - return self - - def int64_field(self, key: str, value: int) -> 
'LineBuilder': - """Add an integer field to the line protocol.""" - self._validate_key(key, "field") - self.fields[key] = f"{value}i" - return self - - def float64_field(self, key: str, value: float) -> 'LineBuilder': - """Add a float field to the line protocol.""" - self._validate_key(key, "field") - # Check if value has no decimal component - self.fields[key] = f"{int(value)}.0" if value % 1 == 0 else str(value) - return self - - def string_field(self, key: str, value: str) -> 'LineBuilder': - """Add a string field to the line protocol.""" - self._validate_key(key, "field") - # Escape quotes and backslashes in string values - escaped_value = value.replace('"', '\\"').replace('\\', '\\\\') - self.fields[key] = f'"{escaped_value}"' - return self - - def bool_field(self, key: str, value: bool) -> 'LineBuilder': - """Add a boolean field to the line protocol.""" - self._validate_key(key, "field") - self.fields[key] = 't' if value else 'f' - return self - - def time_ns(self, timestamp_ns: int) -> 'LineBuilder': - """Set the timestamp in nanoseconds.""" - self._timestamp_ns = timestamp_ns - return self - - def build(self) -> str: - """Build the line protocol string.""" - # Start with measurement name (escape commas only) - line = self.measurement.replace(',', '\\,') - - # Add tags if present - if self.tags: - tags_str = ','.join( - f"{k}={v}" for k, v in self.tags.items() - ) - line += f",{tags_str}" - - # Add fields (required) - if not self.fields: - raise InvalidLineError(f"At least one field is required: {line}") - - fields_str = ','.join( - f"{k}={v}" for k, v in self.fields.items() - ) - line += f" {fields_str}" - - # Add timestamp if present - if self._timestamp_ns is not None: - line += f" {self._timestamp_ns}" - - return line -``` -{{% /expand %}} -{{% /expand-wrapper %}} - -#### Query data - -Execute SQL queries and get results: - -```python -# Simple query -results = influxdb3_local.query("SELECT * FROM metrics WHERE time > now() - INTERVAL '1 hour'") - -# 
Parameterized query for safer execution -params = {"table": "metrics", "threshold": 90} -results = influxdb3_local.query("SELECT * FROM $table WHERE value > $threshold", params) +```bash +# Allow multiple trigger instances to run simultaneously +influxdb3 create trigger \ + --trigger-spec "table:metrics" \ + --plugin-filename "heavy_process.py" \ + --run-asynchronous \ + --database my_database \ + async_processor ``` -The shared API `query` function returns results as a `List` of `Dict[String, Any]`, where the key is the column name and the value is the column value. +Use asynchronous execution when: -#### Log information +- Processing might take longer than the trigger interval +- Multiple events need to be handled simultaneously +- Performance is more important than sequential execution -The shared API `info`, `warn`, and `error` functions accept multiple arguments, -convert them to strings, and log them as a space-separated message to the database log, -which is output in the server logs and captured in system tables that you can -query using SQL. +#### Configure error handling -Add logging to track plugin execution: - -```python -influxdb3_local.info("Starting data processing") -influxdb3_local.warn("Could not process some records") -influxdb3_local.error("Failed to connect to external API") - -# Log structured data -obj_to_log = {"records": 157, "errors": 3} -influxdb3_local.info("Processing complete", obj_to_log) +Control how your trigger responds to errors: +```bash +# Automatically retry on error +influxdb3 create trigger \ + --trigger-spec "table:important_data" \ + --plugin-filename "critical_process.py" \ + --error-behavior retry \ + --database my_database \ + critical_processor ``` -#### Use the in-memory cache - -The Processing engine provides an in-memory cache system that enables plugins to persist and retrieve data between executions. - -Use the shared API `cache` property to access the cache API. 
- -```python -# Basic usage pattern -influxdb3_local.cache.METHOD(PARAMETERS) -``` - -| Method | Parameters | Returns | Description | -|--------|------------|---------|-------------| -| `put` | `key` (str): The key to store the value under
`value` (Any): Any Python object to cache
`ttl` (Optional[float], default=None): Time in seconds before expiration
`use_global` (bool, default=False): If True, uses global namespace | None | Stores a value in the cache with an optional time-to-live | -| `get` | `key` (str): The key to retrieve
`default` (Any, default=None): Value to return if key not found
`use_global` (bool, default=False): If True, uses global namespace | Any | Retrieves a value from the cache or returns default if not found | -| `delete` | `key` (str): The key to delete
`use_global` (bool, default=False): If True, uses global namespace | bool | Deletes a value from the cache. Returns True if deleted, False if not found | - -##### Cache namespaces - -The cache system offers two distinct namespaces: - -| Namespace | Scope | Best For | -| --- | --- | --- | -| **Trigger-specific** (default) | Isolated to a single trigger | Plugin state, counters, timestamps specific to one plugin | -| **Global** | Shared across all triggers | Configuration, lookup tables, service states that should be available to all plugins | - -##### Store and retrieve cached data - -```python -# Store a value -influxdb3_local.cache.put("last_run_time", time.time()) - -# Retrieve a value with a default if not found -last_time = influxdb3_local.cache.get("last_run_time", default=0) - -# Delete a cached value -influxdb3_local.cache.delete("temporary_data") -``` - -##### Store cached data with expiration - -```python -# Cache with a 5-minute TTL (time-to-live) -influxdb3_local.cache.put("api_response", response_data, ttl=300) -``` - -##### Share data across plugins - -```python -# Store in the global namespace -influxdb3_local.cache.put("config", {"version": "1.0"}, use_global=True) - -# Retrieve from the global namespace -config = influxdb3_local.cache.get("config", use_global=True) -``` - -##### Track state between executions - -```python -# Get current counter or default to 0 -counter = influxdb3_local.cache.get("execution_count", default=0) - -# Increment counter -counter += 1 - -# Store the updated value -influxdb3_local.cache.put("execution_count", counter) - -influxdb3_local.info(f"This plugin has run {counter} times") -``` - -#### Best practices for in-memory caching - -- [Use the trigger-specific namespace](#use-the-trigger-specific-namespace) -- [Use TTL appropriately](#use-ttl-appropriately) -- [Cache computation results](#cache-computation-results) -- [Warm the cache](#warm-the-cache) -- [Consider cache limitations](#consider-cache-limitations) - -##### 
Use the trigger-specific namespace - -The cache is designed to support stateful operations while maintaining isolation between different triggers. Use the trigger-specific namespace for most operations and the global namespace only when data sharing across triggers is necessary. - -##### Use TTL appropriately -Set realistic expiration times based on how frequently data changes. - -```python -# Cache external API responses for 5 minutes -influxdb3_local.cache.put("weather_data", api_response, ttl=300) -``` - -##### Cache computation results -Store the results of expensive calculations that need to be utilized frequently. -```python -# Cache aggregated statistics -influxdb3_local.cache.put("daily_stats", calculate_statistics(data), ttl=3600) -``` - -##### Warm the cache -For critical data, prime the cache at startup. This can be especially useful for global namespace data where multiple triggers need the data. - -```python -# Check if cache needs to be initialized -if not influxdb3_local.cache.get("lookup_table"): - influxdb3_local.cache.put("lookup_table", load_lookup_data()) -``` - -##### Consider cache limitations - -- **Memory Usage**: Since cache contents are stored in memory, monitor your memory usage when caching large datasets. -- **Server Restarts**: Because the cache is cleared when the server restarts, design your plugins to handle cache initialization (as noted above). -- **Concurrency**: Be cautious of accessing inaccurate or out-of-date data when multiple trigger instances might simultaneously update the same cache key. - -## Install Python dependencies +### Install Python dependencies If your plugin needs additional Python packages, use the `influxdb3 install` command: @@ -654,6 +537,7 @@ docker exec -it CONTAINER_NAME influxdb3 install package pandas This creates a Python virtual environment in your plugins directory with the specified packages installed. 
{{% show-in "enterprise" %}} + ### Connect Grafana to your InfluxDB instance When configuring Grafana to connect to an InfluxDB 3 Enterprise instance: diff --git a/content/shared/v3-distributed-admin-custom-partitions/define-custom-partitions.md b/content/shared/v3-distributed-admin-custom-partitions/define-custom-partitions.md index c09edf5da..cf266a8d6 100644 --- a/content/shared/v3-distributed-admin-custom-partitions/define-custom-partitions.md +++ b/content/shared/v3-distributed-admin-custom-partitions/define-custom-partitions.md @@ -1,4 +1,4 @@ -Use the [`influxctl` CLI](/influxdb/version/reference/cli/influxctl/) +Use the Admin UI, the [`influxctl` CLI](/influxdb/version/reference/cli/influxctl/), or the [Management HTTP API](/influxdb/version/api/management/) to define custom partition strategies when creating a database or table. By default, {{< product-name >}} partitions data by day. @@ -12,52 +12,27 @@ table. - [Create a database with a custom partition template](#create-a-database-with-a-custom-partition-template) - [Create a table with a custom partition template](#create-a-table-with-a-custom-partition-template) +- [Partition template requirements and guidelines](#partition-template-requirements-and-guidelines) - [Example partition templates](#example-partition-templates) -> [!Warning] -> -> #### Partition templates can only be applied on create -> -> You can only apply a partition template when creating a database or table. -> You can't update a partition template on an existing resource. - -Use the following command flags to identify -[partition template parts](/influxdb/version/admin/custom-partitions/partition-templates/#tag-part-templates): - -- `--template-tag`: An [InfluxDB tag](/influxdb/version/reference/glossary/#tag) - to use in the partition template. -- `--template-tag-bucket`: An [InfluxDB tag](/influxdb/version/reference/glossary/#tag) - and number of "buckets" to group tag values into. 
- Provide the tag key and the number of buckets to bucket tag values into - separated by a comma: `tagKey,N`. -- `--template-timeformat`: A [Rust strftime date and time](/influxdb/version/admin/custom-partitions/partition-templates/#time-part-templates) - string that specifies the time format in the partition template and determines - the time interval to partition by. - -> [!Note] -> A partition template can include up to 7 total tag and tag bucket parts -> and only 1 time part. -> -> _View [partition template part restrictions](/influxdb/version/admin/custom-partitions/partition-templates/#restrictions)._ - -> [!Important] -> #### Always provide a time format when using custom partitioning -> -> When defining a custom partition template for your database or table using any -> of the `influxctl` `--template-*` flags, always include the `--template-timeformat` -> flag with a time format to use in your partition template. -> Otherwise, InfluxDB omits time from the partition template and won't compact partitions. 
- ## Create a database with a custom partition template -The following example creates a new `example-db` database and applies a partition +The following examples show how to create a new `example-db` database and apply a partition template that partitions by distinct values of two tags (`room` and `sensor-type`), bucketed values of the `customerID` tag, and by day using the time format `%Y-%m-%d`: - +{{< tabs-wrapper >}} +{{% tabs %}} +[influxctl](#) +[Management API](#) +{{% /tabs %}} + +{{% tab-content %}} + + -```sh +```bash influxctl database create \ --template-tag room \ --template-tag sensor-type \ @@ -66,33 +41,161 @@ influxctl database create \ example-db ``` +The following command flags identify +[partition template parts](/influxdb/version/admin/custom-partitions/partition-templates/#tag-part-templates): + +- `--template-timeformat`: A [Rust strftime date and time](/influxdb/version/admin/custom-partitions/partition-templates/#time-part-templates) + string that specifies the time part in the partition template and determines + the time interval to partition by. + Use one of the following: + + - `%Y-%m-%d` (daily) + - `%Y-%m` (monthly) + - `%Y` (annually) +- `--template-tag`: An [InfluxDB tag](/influxdb/version/reference/glossary/#tag) + to use in the partition template. +- `--template-tag-bucket`: An [InfluxDB tag](/influxdb/version/reference/glossary/#tag) + and number of "buckets" to group tag values into. + Provide the tag key and the number of buckets to bucket tag values into + separated by a comma: `tagKey,N`. 
+ + +{{% /tab-content %}} +{{% tab-content %}} + + + + + +{{% code-placeholders "ACCOUNT_ID|CLUSTER_ID|MANAGEMENT_TOKEN" %}} +```bash +curl \ + --location "https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases" \ + --header "Authorization: Bearer MANAGEMENT_TOKEN" \ + --json '{ + "name": "example-db", + "maxTables": 500, + "maxColumnsPerTable": 250, + "retentionPeriod": 2592000000000, + "partitionTemplate": [ + { "type": "tag", "value": "room" }, + { "type": "tag", "value": "sensor-type" }, + { "type": "bucket", "value": { "tagName": "customerID", "numberOfBuckets": 500 } }, + { "type": "time", "value": "%Y-%m-%d" } + ] + }' +``` +{{% /code-placeholders %}} + +Replace the following in your request: + +- {{% code-placeholder-key %}}`ACCOUNT_ID`{{% /code-placeholder-key %}}: the [account](/influxdb3/cloud-dedicated/admin/account/) ID for the cluster _(list details via the [Admin UI](/influxdb3/cloud-dedicated/admin/clusters/list/) or [CLI](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json))_ +- {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: the [cluster](/influxdb3/cloud-dedicated/admin/clusters/) ID _(list details via the [Admin UI](/influxdb3/cloud-dedicated/admin/clusters/list/) or [CLI](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json))_. +- {{% code-placeholder-key %}}`MANAGEMENT_TOKEN`{{% /code-placeholder-key %}}: a valid [management token](/influxdb3/cloud-dedicated/admin/tokens/management/) for your {{% product-name %}} cluster + +The `partitionTemplate` property in the request body +is an array of JSON objects that identify the [partition template parts](/influxdb/version/admin/custom-partitions/partition-templates/#tag-part-templates).
+ + +{{% /tab-content %}} +{{< /tabs-wrapper >}} + ## Create a table with a custom partition template -The following example creates a new `example-table` table in the specified -database and applies a partition template that partitions by distinct values of +The following example creates a new `example-table` table in the `example-db` database and applies a partition template that partitions by distinct values of two tags (`room` and `sensor-type`), bucketed values of the `customerID` tag, and by month using the time format `%Y-%m`: - - +{{< tabs-wrapper >}} +{{% tabs %}} +[Admin UI](#) +[influxctl](#) +[Management API](#) +{{% /tabs %}} +{{% tab-content %}} + +The {{< product-name >}} Admin UI lets you apply a custom partition template when creating a table. +1. To access the {{< product-name >}} Admin UI, visit the following URL in your browser: -{{% code-placeholders "DATABASE_NAME" %}} +
+   https://console.influxdata.com
+   
+2. In the cluster list, click the cluster you want to manage. +3. Create the `example-db` database or click the row of an existing database. +4. Click the **New Table** button above the table list. -```sh +In the **Create Table** dialog: + +1. Set **Table name** to `example-table`. +2. If the **Use default partitioning** toggle is on, turn it off to enable custom partitioning. +3. Under **Custom partition template time format**, set the time format to `%Y-%m`. +4. Under **Custom partition template parts**: +5. In the **Partition template part type** dropdown, click **Tag**, set **Tag name** to `room`. +6. Click **Add Tag**. +7. In the **Partition template part type** dropdown, click **Tag**, set **Tag name** to `sensor-type`. +8. Click **Add Tag**. +9. In the **Partition template part type** dropdown, click **Bucket**, set **Tag name** to `customerID` and **Buckets** to `500`. +10. Click **Create Table** to apply the template. + +{{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-create-custom-partitioned-table.png" alt="Create table dialog with custom partitioning example values" />}} +{{% /tab-content %}} +{{% tab-content %}} + +```bash influxctl table create \ --template-tag room \ --template-tag sensor-type \ --template-tag-bucket customerID,500 \ --template-timeformat '%Y-%m' \ - DATABASE_NAME \ + example-db \ example-table ``` + +{{% /tab-content %}} +{{% tab-content %}} + + +{{% code-placeholders "ACCOUNT_ID|CLUSTER_ID|MANAGEMENT_TOKEN" %}} +```bash +curl \ + --location "https://console.influxdata.com/api/v0/accounts/ACCOUNT_ID/clusters/CLUSTER_ID/databases/example-db/tables" \ + --request POST \ + --header "Authorization: Bearer MANAGEMENT_TOKEN" \ + --json '{ + "name": "example-table", + "partitionTemplate": [ + { "type": "tag", "value": "room" }, + { "type": "tag", "value": "sensor-type" }, + { "type": "bucket", "value": { "tagName": "customerID", "numberOfBuckets": 500 } }, + { "type": "time", "value": "%Y-%m" } + ] + }' +``` {{% /code-placeholders 
%}} -Replace the following in your command: +Replace the following in your request: -- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: your {{% product-name %}} [database](/influxdb/version/admin/databases/) +- {{% code-placeholder-key %}}`ACCOUNT_ID`{{% /code-placeholder-key %}}: the [account](/influxdb3/cloud-dedicated/admin/account/) ID for the cluster _(list details via the [Admin UI](/influxdb3/cloud-dedicated/admin/clusters/list/) or [CLI](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json))_ +- {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: the [cluster](/influxdb3/cloud-dedicated/admin/clusters/) ID _(list details via the [Admin UI](/influxdb3/cloud-dedicated/admin/clusters/list/) or [CLI](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json))_. +- {{% code-placeholder-key %}}`MANAGEMENT_TOKEN`{{% /code-placeholder-key %}}: a valid [management token](/influxdb3/cloud-dedicated/admin/tokens/management/) for your {{% product-name %}} cluster + + +{{% /tab-content %}} +{{< /tabs-wrapper >}} + +## Partition template requirements and guidelines + +Always specify 1 time part in your template. +A template has a maximum of 8 parts: 1 time part and up to 7 total tag and tag bucket parts. + +For more information about partition template requirements and restrictions, see [Partition templates](/influxdb/version/admin/custom-partitions/partition-templates/). + +> [!Warning] +> #### Partition templates can only be applied on create +> +> You can only apply a partition template when creating a database or table. +> You can't update a partition template on an existing database or table.
+- [Template part types](#template-part-types) +- [Requirements and guidelines](#requirements-and-guidelines) - [Restrictions](#restrictions) - [Template part size limit](#template-part-size-limit) + - [Partition key size limit](#partition-key-size-limit) - [Reserved keywords](#reserved-keywords) - [Reserved Characters](#reserved-characters) - [Tag part templates](#tag-part-templates) - [Tag bucket part templates](#tag-bucket-part-templates) - [Time part templates](#time-part-templates) - + - [Date specifiers](#date-specifiers) + +## Template part types + +InfluxDB supports three types of partition template parts: + +- **Tag part**: Partitions data by the unique values of an [InfluxDB tag](/influxdb/version/reference/glossary/#tag). + For example, using `region` as a tag part creates separate partitions for each region value (us-west, us-east, eu-central). + +- **Tag bucket part**: Partitions data by "buckets" of [InfluxDB tag](/influxdb/version/reference/glossary/#tag) values. + Instead of creating a partition for every unique tag value, tag values are hashed and grouped into a specified number of buckets. + Use this for high-cardinality tags or when the number of distinct values is unknown. + +- {{< req type="key" >}} **Time part**: Partitions data by time intervals using a Rust strftime date and time format string. + The smallest time unit in your format determines the granularity of time partitioning (yearly with `%Y`, + monthly with `%Y-%m`, or daily with `%Y-%m-%d`). + +## Requirements and guidelines + +When creating a partition template: + +1. 
**Include exactly one time part** + - Always specify a [time part](#time-part-templates) in your template + - With `influxctl`, always include `--template-timeformat` with a valid format + - Without a time part, InfluxDB won't compact partitions, impacting performance + - If you include more than one time part, InfluxDB uses the smallest unit of time + - Use one of the following Rust strftime date and time strings: + + - `%Y-%m-%d` (daily) + - `%Y-%m` (monthly) + - `%Y` (annually) + +2. **Tag and tag bucket limitations** + - Include up to seven [tag](#tag-part-templates) and [tag bucket](#tag-bucket-part-templates) parts + - Don't use the same tag key in both a tag part and a tag bucket part--for example, + if your template uses `region` as a tag part, you cannot use `region` as a tag bucket part + +3. **Maximum template parts**: 8 total (1 time part + up to 7 tag and tag bucket parts) ## Restrictions @@ -59,18 +85,17 @@ characters must be [percent encoded](https://developer.mozilla.org/en-US/docs/Gl ## Tag part templates -Tag part templates consist of a _tag key_ to partition by. +Tag part templates consist of a [_tag key_](/influxdb3/cloud-dedicated/reference/glossary/#tag) to partition by. Generated partition keys include the unique _tag value_ specific to each partition. A partition template may include a given tag key only once in template parts -that operate on tags (tag value and tag bucket)--for example: - -If a template partitions on unique values of `tag_A`, then +that operate on tags (tag value and tag bucket)--for example, +if a template partitions on unique values of `tag_A`, then you can't use `tag_A` as a tag bucket part. ## Tag bucket part templates -Tag bucket part templates consist of a _tag key_ to partition by and the +Tag bucket part templates consist of a [_tag key_](/influxdb3/cloud-dedicated/reference/glossary/#tag) to partition by and the _number of "buckets" to partition tag values into_--for example: ``` @@ -95,9 +120,8 @@ each partition. 
> unknown number of distinct values. A partition template may include a given tag key only once in template parts -that operate on tags (tag value and tag bucket)--for example: - -If a template partitions on unique values of `tag_A`, then +that operate on tags (tag value and tag bucket)--for example, +if a template partitions on unique values of `tag_A`, then you can't use `tag_A` as a tag bucket part. ## Time part templates diff --git a/content/telegraf/v1/release-notes.md b/content/telegraf/v1/release-notes.md index 44925f7e0..a5cd55648 100644 --- a/content/telegraf/v1/release-notes.md +++ b/content/telegraf/v1/release-notes.md @@ -11,7 +11,7 @@ menu: weight: 60 --- -## v1.34.1 [2025-03-24] +## v1.34.1 {date="2025-03-24"} ### Bugfixes @@ -40,7 +40,7 @@ menu: - [#16653](https://github.com/influxdata/telegraf/pull/16653) `deps` Bump k8s.io/api from 0.32.1 to 0.32.3 - [#16659](https://github.com/influxdata/telegraf/pull/16659) `deps` Bump tj-actions/changed-files from v45 to v46.0.1 -## v1.34.0 [2025-03-10] +## v1.34.0 {date="2025-03-10"} ### New Plugins @@ -94,7 +94,7 @@ menu: - [#16575](https://github.com/influxdata/telegraf/pull/16575) `deps` Bump github.com/tidwall/wal from 1.1.7 to 1.1.8 - [#16578](https://github.com/influxdata/telegraf/pull/16578) `deps` Bump super-linter/super-linter from 7.2.1 to 7.3.0 -## v1.33.3 [2025-02-25] +## v1.33.3 {date="2025-02-25"} ### Important Changes @@ -128,7 +128,7 @@ menu: - [#16504](https://github.com/influxdata/telegraf/pull/16504) `deps` Bump golang.org/x/net from 0.34.0 to 0.35.0 - [#16512](https://github.com/influxdata/telegraf/pull/16512) `deps` Bump golangci-lint from v1.63.4 to v1.64.5 -## v1.33.2 [2025-02-10] +## v1.33.2 {date="2025-02-10"} ### Important Changes @@ -177,7 +177,7 @@ menu: - [#16482](https://github.com/influxdata/telegraf/pull/16482) `deps` Update Apache arrow from 0.0-20240716144821-cf5d7c7ec3cf to 18.1.0 - [#16423](https://github.com/influxdata/telegraf/pull/16423) `deps` Update ClickHouse SQL driver 
from 1.5.4 to 2.30.1 -## v1.33.1 [2025-01-10] +## v1.33.1 {date="2025-01-10"} ### Important Changes diff --git a/cypress.config.js b/cypress.config.js index 6bc148d05..f1b1655c8 100644 --- a/cypress.config.js +++ b/cypress.config.js @@ -1,10 +1,17 @@ -const { defineConfig } = require('cypress'); -const process = require('process'); +import { defineConfig } from 'cypress'; +import { cwd as _cwd } from 'process'; +import * as fs from 'fs'; +import * as yaml from 'js-yaml'; +import { + BROKEN_LINKS_FILE, + FIRST_BROKEN_LINK_FILE, + initializeReport, + readBrokenLinksReport, +} from './cypress/support/link-reporter.js'; -module.exports = defineConfig({ +export default defineConfig({ e2e: { - // Automatically prefix cy.visit() and cy.request() commands with a baseUrl. - baseUrl: 'http://localhost:1313', + baseUrl: 'http://localhost:1315', defaultCommandTimeout: 10000, pageLoadTimeout: 30000, responseTimeout: 30000, @@ -12,34 +19,177 @@ module.exports = defineConfig({ numTestsKeptInMemory: 5, projectId: 'influxdata-docs', setupNodeEvents(on, config) { - // implement node event listeners here + // Browser setup on('before:browser:launch', (browser, launchOptions) => { if (browser.name === 'chrome' && browser.isHeadless) { - // Force Chrome to use a less memory-intensive approach launchOptions.args.push('--disable-dev-shm-usage'); launchOptions.args.push('--disable-gpu'); launchOptions.args.push('--disable-extensions'); return launchOptions; } }); + on('task', { // Fetch the product list configured in /data/products.yml getData(filename) { return new Promise((resolve, reject) => { - const yq = require('js-yaml'); - const fs = require('fs'); - const cwd = process.cwd(); + const cwd = _cwd(); try { resolve( - yq.load(fs.readFileSync(`${cwd}/data/$(unknown).yml`, 'utf8')) + yaml.load( + fs.readFileSync(`${cwd}/data/$(unknown).yml`, 'utf8') + ) ); } catch (e) { reject(e); } }); }, + + // Log task for reporting + log(message) { + if (typeof message === 'object') { + if
(message.type === 'error') { + console.error(`\x1b[31m${message.message}\x1b[0m`); // Red + } else if (message.type === 'warning') { + console.warn(`\x1b[33m${message.message}\x1b[0m`); // Yellow + } else if (message.type === 'success') { + console.log(`\x1b[32m${message.message}\x1b[0m`); // Green + } else if (message.type === 'divider') { + console.log(`\x1b[90m${message.message}\x1b[0m`); // Gray + } else { + console.log(message.message || message); + } + } else { + console.log(message); + } + return null; + }, + + // File tasks + writeFile({ path, content }) { + try { + fs.writeFileSync(path, content); + return true; + } catch (error) { + console.error(`Error writing to file ${path}: ${error.message}`); + return { error: error.message }; + } + }, + + readFile(path) { + try { + return fs.existsSync(path) ? fs.readFileSync(path, 'utf8') : null; + } catch (error) { + console.error(`Error reading file ${path}: ${error.message}`); + return { error: error.message }; + } + }, + + // Broken links reporting tasks + initializeBrokenLinksReport() { + return initializeReport(); + }, + + // Special case domains are now handled directly in the test without additional reporting + // This task is kept for backward compatibility but doesn't do anything special + reportSpecialCaseLink(linkData) { + console.log( + `✅ Expected status code: ${linkData.url} (status: ${linkData.status}) is valid for this domain` + ); + return true; + }, + + reportBrokenLink(linkData) { + try { + // Validate link data + if (!linkData || !linkData.url || !linkData.page) { + console.error('Invalid link data provided'); + return false; + } + + // Read current report + const report = readBrokenLinksReport(); + + // Find or create entry for this page + let pageReport = report.find((r) => r.page === linkData.page); + if (!pageReport) { + pageReport = { page: linkData.page, links: [] }; + report.push(pageReport); + } + + // Check if link is already in the report to avoid duplicates + const isDuplicate = 
pageReport.links.some( + (link) => link.url === linkData.url && link.type === linkData.type + ); + + if (!isDuplicate) { + // Add the broken link to the page's report + pageReport.links.push({ + url: linkData.url, + status: linkData.status, + type: linkData.type, + linkText: linkData.linkText, + }); + + // Write updated report back to file + fs.writeFileSync( + BROKEN_LINKS_FILE, + JSON.stringify(report, null, 2) + ); + + // Store first broken link if not already recorded + const firstBrokenLinkExists = + fs.existsSync(FIRST_BROKEN_LINK_FILE) && + fs.readFileSync(FIRST_BROKEN_LINK_FILE, 'utf8').trim() !== ''; + + if (!firstBrokenLinkExists) { + // Store first broken link with complete information + const firstBrokenLink = { + url: linkData.url, + status: linkData.status, + type: linkData.type, + linkText: linkData.linkText, + page: linkData.page, + time: new Date().toISOString(), + }; + + fs.writeFileSync( + FIRST_BROKEN_LINK_FILE, + JSON.stringify(firstBrokenLink, null, 2) + ); + + console.error( + `🔴 FIRST BROKEN LINK: ${linkData.url} (${linkData.status}) - ${linkData.type} on page ${linkData.page}` + ); + } + + // Log the broken link immediately to console + console.error( + `❌ BROKEN LINK: ${linkData.url} (${linkData.status}) - ${linkData.type} on page ${linkData.page}` + ); + } + + return true; + } catch (error) { + console.error(`Error reporting broken link: ${error.message}`); + // Even if there's an error, we want to ensure the test knows there was a broken link + return true; + } + }, + }); + + // Load plugins file using dynamic import for ESM compatibility + return import('./cypress/plugins/index.js').then((module) => { + return module.default(on, config); }); - return config; }, + specPattern: 'cypress/e2e/**/*.cy.{js,jsx,ts,tsx}', + supportFile: 'cypress/support/e2e.js', + viewportWidth: 1280, + viewportHeight: 720, + }, + env: { + test_subjects: '', }, }); diff --git a/cypress/downloads/downloads.html b/cypress/downloads/downloads.html new file mode 
100644 index 000000000..523cdaa3e Binary files /dev/null and b/cypress/downloads/downloads.html differ diff --git a/cypress/e2e/content/article-links.cy.js b/cypress/e2e/content/article-links.cy.js index 3aca5298c..3b9ef7b01 100644 --- a/cypress/e2e/content/article-links.cy.js +++ b/cypress/e2e/content/article-links.cy.js @@ -1,11 +1,17 @@ /// -describe('Article links', () => { +describe('Article', () => { const subjects = Cypress.env('test_subjects').split(','); // Always use HEAD for downloads to avoid timeouts const useHeadForDownloads = true; - // Helper function to identify download links - improved + // Set up initialization for tests + before(() => { + // Initialize the broken links report + cy.task('initializeBrokenLinksReport'); + }); + + // Helper function to identify download links function isDownloadLink(href) { // Check for common download file extensions const downloadExtensions = [ @@ -45,130 +51,192 @@ describe('Article links', () => { } // Helper function to make appropriate request based on link type - function testLink(href) { + function testLink(href, linkText = '', pageUrl) { + // Common request options for both methods + const requestOptions = { + failOnStatusCode: true, + timeout: 15000, // Increased timeout for reliability + followRedirect: true, // Explicitly follow redirects + retryOnNetworkFailure: true, // Retry on network issues + retryOnStatusCodeFailure: true, // Retry on 5xx errors + }; + + function handleFailedLink(url, status, type, redirectChain = '') { + // Report the broken link + cy.task('reportBrokenLink', { + url: url + redirectChain, + status, + type, + linkText, + page: pageUrl, + }); + + // Throw error for broken links + throw new Error( + `BROKEN ${type.toUpperCase()} LINK: ${url} (status: ${status})${redirectChain} on ${pageUrl}` + ); + } + if (useHeadForDownloads && isDownloadLink(href)) { cy.log(`** Testing download link with HEAD: ${href} **`); cy.request({ method: 'HEAD', url: href, + ...requestOptions, 
}).then((response) => { - const message = `Link is broken: ${href} (status: ${response.status})`; - try { - expect(response.status).to.be.lt(400); - } catch (e) { - // Log the broken link with the URL for better visibility in reports - cy.log(`❌ BROKEN LINK: ${href} (${response.status})`); - throw new Error(message); + // Check final status after following any redirects + if (response.status >= 400) { + // Build redirect info string if available + const redirectInfo = + response.redirects && response.redirects.length > 0 + ? ` (redirected to: ${response.redirects.join(' -> ')})` + : ''; + + handleFailedLink(href, response.status, 'download', redirectInfo); } }); } else { cy.log(`** Testing link: ${href} **`); + cy.log(JSON.stringify(requestOptions)); cy.request({ url: href, - failOnStatusCode: false, - timeout: 10000, // 10 second timeout for regular links + ...requestOptions, }).then((response) => { - const message = `Link is broken: ${href} (status: ${response.status})`; - try { - expect(response.status).to.be.lt(400); - } catch (e) { - // Log the broken link with the URL for better visibility in reports - cy.log(`❌ BROKEN LINK: ${href} (${response.status})`); - throw new Error(message); + // Check final status after following any redirects + if (response.status >= 400) { + // Build redirect info string if available + const redirectInfo = + response.redirects && response.redirects.length > 0 + ? ` (redirected to: ${response.redirects.join(' -> ')})` + : ''; + + handleFailedLink(href, response.status, 'regular', redirectInfo); } }); } } + // Test implementation for subjects subjects.forEach((subject) => { - it(`contains valid internal links on ${subject}`, function () { - cy.visit(`${subject}`); + it(`${subject} has valid internal links`, function () { + cy.visit(`${subject}`, { timeout: 20000 }); + // Test internal links - // 1. Timeout and fail the test if article is not found - // 2. Check each link. - // 3. 
If no links are found, continue without failing - cy.get('article').then(($article) => { + cy.get('article, .api-content').then(($article) => { // Find links without failing the test if none are found const $links = $article.find('a[href^="/"]'); if ($links.length === 0) { cy.log('No internal links found on this page'); return; } + + // Now test each link cy.wrap($links).each(($a) => { const href = $a.attr('href'); - testLink(href); + const linkText = $a.text().trim(); + testLink(href, linkText, subject); }); }); }); - it(`checks anchor links on ${subject} (with warnings for missing targets)`, function () { + it(`${subject} has valid anchor links`, function () { cy.visit(`${subject}`); - // Track missing anchors for summary - const missingAnchors = []; + // Define selectors for anchor links to ignore, such as behavior triggers + const ignoreLinks = ['.tabs a[href^="#"]', '.code-tabs a[href^="#"]']; - // Process anchor links individually - cy.get('article').then(($article) => { - const $anchorLinks = $article.find('a[href^="#"]'); + const anchorSelector = + 'a[href^="#"]:not(' + ignoreLinks.join('):not(') + ')'; + + cy.get('article, .api-content').then(($article) => { + const $anchorLinks = $article.find(anchorSelector); if ($anchorLinks.length === 0) { cy.log('No anchor links found on this page'); return; } + cy.wrap($anchorLinks).each(($a) => { - const href = $a.prop('href'); - if (href && href.length > 1) { - // Skip empty anchors (#) - // Get just the fragment part - const url = new URL(href); - const anchorId = url.hash.substring(1); // Remove the # character + const href = $a.prop('href'); + const linkText = $a.text().trim(); - if (!anchorId) { - cy.log(`Skipping empty anchor in ${href}`); - return; + if (href && href.length > 1) { + // Get just the fragment part + const url = new URL(href); + const anchorId = url.hash.substring(1); // Remove the # character + + if (!anchorId) { + cy.log(`Skipping empty anchor in ${href}`); + return; + } + + // Use DOM to 
check if the element exists + cy.window().then((win) => { + const element = win.document.getElementById(anchorId); + if (!element) { + cy.task('reportBrokenLink', { + url: `#${anchorId}`, + status: 404, + type: 'anchor', + linkText, + page: subject, + }); + cy.log(`⚠️ Missing anchor target: #${anchorId}`); } - - // Use DOM to check if the element exists, but don't fail if missing - cy.window().then((win) => { - const element = win.document.getElementById(anchorId); - if (element) { - cy.log(`✅ Anchor target exists: #${anchorId}`); - } else { - // Just warn about the missing anchor - cy.log(`⚠️ WARNING: Missing anchor target: #${anchorId}`); - missingAnchors.push(anchorId); - } - }); - } - }) - .then(() => { - // After checking all anchors, log a summary - if (missingAnchors.length > 0) { - cy.log( - `⚠️ Found ${missingAnchors.length} missing anchor targets: ${missingAnchors.join(', ')}` - ); - } else { - cy.log('✅ All anchor targets are valid'); - } - }); - }); - - it(`contains valid external links on ${subject}`, function () { - cy.visit(`${subject}`); - // Test external links - // 1. Timeout and fail the test if article is not found - // 2. Check each link. - // 3. 
If no links are found, continue without failing - cy.get('article').then(($article) => { - // Find links without failing the test if none are found - const $links = $article.find('a[href^="http"]'); - if ($links.length === 0) { - cy.log('No external links found on this page'); - return; + }); } - cy.wrap($links).each(($a) => { - const href = $a.attr('href'); - testLink(href); - }); + }); + }); + }); + + it(`${subject} has valid external links`, function () { + // Check if we should skip external links entirely + if (Cypress.env('skipExternalLinks') === true) { + cy.log( + 'Skipping all external links as configured by skipExternalLinks' + ); + return; + } + + cy.visit(`${subject}`); + + // Define allowed external domains to test + const allowedExternalDomains = ['github.com', 'kapa.ai']; + + // Test external links + cy.get('article, .api-content').then(($article) => { + // Find links without failing the test if none are found + const $links = $article.find('a[href^="http"]'); + if ($links.length === 0) { + cy.log('No external links found on this page'); + return; + } + + // Filter links to only include allowed domains + const $allowedLinks = $links.filter((_, el) => { + const href = el.getAttribute('href'); + try { + const url = new URL(href); + return allowedExternalDomains.some( + (domain) => + url.hostname === domain || url.hostname.endsWith(`.${domain}`) + ); + } catch (e) { + return false; + } + }); + + if ($allowedLinks.length === 0) { + cy.log('No links to allowed external domains found on this page'); + return; + } + + cy.log( + `Found ${$allowedLinks.length} links to allowed external domains to test` + ); + cy.wrap($allowedLinks).each(($a) => { + const href = $a.attr('href'); + const linkText = $a.text().trim(); + testLink(href, linkText, subject); }); }); }); diff --git a/cypress/e2e/content/example.cy.js b/cypress/e2e/content/example.cy.js new file mode 100644 index 000000000..e69de29bb diff --git a/cypress/e2e/content/stable-version-callout.cy.js 
b/cypress/e2e/content/stable-version-callout.cy.js new file mode 100644 index 000000000..f8c6d7f20 --- /dev/null +++ b/cypress/e2e/content/stable-version-callout.cy.js @@ -0,0 +1,107 @@ +/// + +describe('Stable version', function () { + before(function () { + // Track JavaScript errors + cy.on('uncaught:exception', (err, runnable) => { + // Log the error to the Cypress command log + cy.log(`JavaScript error: ${err.message}`); + + // Add the error to the test failure message + Cypress.failures = Cypress.failures || []; + Cypress.failures.push(err.message); + + // Return false to prevent Cypress from failing the test + return false; + }); + }); + + beforeEach(function () { + // Clear any stored failures before each test + Cypress.failures = []; + }); + + it('should show InfluxDB 3 Core as successor product in InfluxDB v2 page', function () { + // Visit the v2 documentation page + cy.visit('/influxdb/v1/introduction/install/'); + + // Check for the warning block that appears for older versions + cy.get('.warn.block.old-version').should('exist'); + + // Verify that the warning message references original product name + cy.get('.warn.block.old-version p').should( + 'contain', + 'This page documents an earlier version of InfluxDB OSS' + ); + + // Check for the link to the successor product + cy.get('.warn.block.old-version a') + .first() + .should('contain', 'InfluxDB 3 Core') + .and('have.attr', 'href', '/influxdb3/core/'); + + // Verify no JavaScript errors were recorded + cy.wrap(Cypress.failures).should( + 'be.empty', + 'The following JavaScript errors were detected:\n' + + (Cypress.failures || []).join('\n') + ); + }); + + it('should show InfluxDB 3 Core as successor product in InfluxDB v1 page', function () { + // Visit the v1 documentation page + cy.visit('/influxdb/v1/'); + + // Check for the warning block that appears for older versions + cy.get('.warn.block.old-version').should('exist'); + + // Verify that the warning message references original product name + 
cy.get('.warn.block.old-version p').should( + 'contain', + 'This page documents an earlier version of InfluxDB OSS' + ); + + // Check for the link to the latest stable version (successor product) + cy.get('.warn.block.old-version a') + .first() + .should('contain', 'InfluxDB 3 Core') + .and('have.attr', 'href', '/influxdb3/core/'); + + // Verify no JavaScript errors were recorded + cy.wrap(Cypress.failures).should( + 'be.empty', + 'The following JavaScript errors were detected:\n' + + (Cypress.failures || []).join('\n') + ); + }); + + it('should verify the product succeeded_by relationship is configured correctly', function () { + // Get the product data to verify succeeded_by field + cy.task('getData', 'products').then((productData) => { + // Check succeeded_by relationship in products.yml + expect(productData.influxdb).to.have.property( + 'succeeded_by', + 'influxdb3_core' + ); + + // Verify successor product exists + expect(productData).to.have.property('influxdb3_core'); + expect(productData.influxdb3_core).to.have.property( + 'name', + 'InfluxDB 3 Core' + ); + }); + }); + + it('should verify behavior if the stable-version.html template changes', function () { + // Visit a page that shouldn't have a successor redirect + cy.visit('/telegraf/v1/'); + cy.get('.warn.block.old-version').should('not.exist'); + + cy.wrap(Cypress.failures).should( + 'be.empty', + 'The following JavaScript errors were detected:\n' + + (Cypress.failures || []).join('\n') + ); + }); +}); diff --git a/cypress/plugins/index.js b/cypress/plugins/index.js new file mode 100644 index 000000000..904971cc0 --- /dev/null +++ b/cypress/plugins/index.js @@ -0,0 +1,26 @@ +/// +// *********************************************************** +// This example plugins/index.js can be used to load plugins +// +// You can change the location of this file or turn off loading +// the plugins file with the 'pluginsFile' configuration option. 
+// +// You can read more here: +// https://on.cypress.io/plugins-guide +// *********************************************************** + +// This function is called when a project is opened or re-opened (e.g. due to +// the project's config changing) + +/** + * @type {Cypress.PluginConfig} + */ +export default (on, config) => { + // `on` is used to hook into various events Cypress emits + // `config` is the resolved Cypress config + + // NOTE: The log task is now defined in cypress.config.js + // We don't need to register it here to avoid duplication + + return config; +}; diff --git a/cypress/support/e2e.js b/cypress/support/e2e.js index 3eaffffa6..b0265634d 100644 --- a/cypress/support/e2e.js +++ b/cypress/support/e2e.js @@ -14,4 +14,4 @@ // *********************************************************** // Import commands.js using ES2015 syntax: -import './commands' \ No newline at end of file +import './commands'; diff --git a/cypress/support/hugo-server.js b/cypress/support/hugo-server.js new file mode 100644 index 000000000..0e4e6a646 --- /dev/null +++ b/cypress/support/hugo-server.js @@ -0,0 +1,174 @@ +import { spawn } from 'child_process'; +import fs from 'fs'; +import http from 'http'; +import net from 'net'; + +// Hugo server constants +export const HUGO_PORT = 1315; +export const HUGO_LOG_FILE = '/tmp/hugo_server.log'; + +/** + * Check if a port is already in use + * @param {number} port - The port to check + * @returns {Promise} True if port is in use, false otherwise + */ +export async function isPortInUse(port) { + return new Promise((resolve) => { + const tester = net + .createServer() + .once('error', () => resolve(true)) + .once('listening', () => { + tester.close(); + resolve(false); + }) + .listen(port, '127.0.0.1'); + }); +} + +/** + * Start the Hugo server with the specified options + * @param {Object} options - Configuration options for Hugo + * @param {string} options.configFile - Path to Hugo config file (e.g., 'config/testing/config.yml') + * 
@param {number} options.port - Port number for Hugo server + * @param {boolean} options.buildDrafts - Whether to build draft content + * @param {boolean} options.noHTTPCache - Whether to disable HTTP caching + * @param {string} options.logFile - Path to write Hugo logs + * @returns {Promise} Child process object + */ +export async function startHugoServer({ + configFile = 'config/testing/config.yml', + port = HUGO_PORT, + buildDrafts = true, + noHTTPCache = true, + logFile = HUGO_LOG_FILE, +} = {}) { + console.log(`Starting Hugo server on port ${port}...`); + + // Prepare command arguments + const hugoArgs = [ + 'hugo', + 'server', + '--config', + configFile, + '--port', + String(port), + ]; + + if (buildDrafts) { + hugoArgs.push('--buildDrafts'); + } + + if (noHTTPCache) { + hugoArgs.push('--noHTTPCache'); + } + + return new Promise((resolve, reject) => { + try { + // Use npx to find and execute Hugo, which will work regardless of installation method + console.log(`Running Hugo with npx: npx ${hugoArgs.join(' ')}`); + const hugoProc = spawn('npx', hugoArgs, { + stdio: ['ignore', 'pipe', 'pipe'], + shell: true, + }); + + // Check if the process started successfully + if (!hugoProc || !hugoProc.pid) { + return reject(new Error('Failed to start Hugo server via npx')); + } + + // Set up logging + if (logFile) { + hugoProc.stdout.on('data', (data) => { + const output = data.toString(); + fs.appendFileSync(logFile, output); + process.stdout.write(`Hugo: ${output}`); + }); + + hugoProc.stderr.on('data', (data) => { + const output = data.toString(); + fs.appendFileSync(logFile, output); + process.stderr.write(`Hugo ERROR: ${output}`); + }); + } + + // Handle process errors + hugoProc.on('error', (err) => { + console.error(`Error in Hugo server process: ${err}`); + reject(err); + }); + + // Check for early exit + hugoProc.on('close', (code) => { + if (code !== null && code !== 0) { + reject(new Error(`Hugo process exited early with code ${code}`)); + } + }); + + // Resolve 
with the process object after a short delay to ensure it's running + setTimeout(() => { + if (hugoProc.killed) { + reject(new Error('Hugo process was killed during startup')); + } else { + resolve(hugoProc); + } + }, 500); + } catch (err) { + console.error(`Error starting Hugo server: ${err.message}`); + reject(err); + } + }); +} + +/** + * Wait for the Hugo server to be ready + * @param {number} timeoutMs - Timeout in milliseconds + * @returns {Promise} + */ +export async function waitForHugoReady(timeoutMs = 30000) { + console.log( + `Waiting for Hugo server to be ready on http://localhost:${HUGO_PORT}...` + ); + + const startTime = Date.now(); + + return new Promise((resolve, reject) => { + // Poll the server + function checkServer() { + const req = http.get(`http://localhost:${HUGO_PORT}`, (res) => { + if (res.statusCode === 200) { + resolve(); + } else { + // If we get a response but not 200, try again after delay + const elapsed = Date.now() - startTime; + if (elapsed > timeoutMs) { + reject( + new Error( + `Hugo server responded with status ${res.statusCode} after timeout` + ) + ); + } else { + setTimeout(checkServer, 1000); + } + } + }); + + req.on('error', (err) => { + // Connection errors are expected while server is starting + const elapsed = Date.now() - startTime; + if (elapsed > timeoutMs) { + reject( + new Error(`Timed out waiting for Hugo server: ${err.message}`) + ); + } else { + // Try again after a delay + setTimeout(checkServer, 1000); + } + }); + + req.end(); + } + + // Start polling + checkServer(); + }); +} diff --git a/cypress/support/link-reporter.js b/cypress/support/link-reporter.js new file mode 100644 index 000000000..39097cefe --- /dev/null +++ b/cypress/support/link-reporter.js @@ -0,0 +1,215 @@ +/** + * Broken Links Reporter + * Handles collecting, storing, and reporting broken links found during tests + */ +import fs from 'fs'; + +export const BROKEN_LINKS_FILE = '/tmp/broken_links_report.json'; +export const FIRST_BROKEN_LINK_FILE 
= '/tmp/first_broken_link.json'; +const SOURCES_FILE = '/tmp/test_subjects_sources.json'; + +/** + * Reads the broken links report from the file system + * @returns {Array} Parsed report data or empty array if file doesn't exist + */ +export function readBrokenLinksReport() { + if (!fs.existsSync(BROKEN_LINKS_FILE)) { + return []; + } + + try { + const fileContent = fs.readFileSync(BROKEN_LINKS_FILE, 'utf8'); + + // Check if the file is empty or contains only an empty array + if (!fileContent || fileContent.trim() === '' || fileContent === '[]') { + return []; + } + + // Try to parse the JSON content + try { + const parsedContent = JSON.parse(fileContent); + + // Ensure the parsed content is an array + if (!Array.isArray(parsedContent)) { + console.error('Broken links report is not an array'); + return []; + } + + return parsedContent; + } catch (parseErr) { + console.error( + `Error parsing broken links report JSON: ${parseErr.message}` + ); + return []; + } + } catch (err) { + console.error(`Error reading broken links report: ${err.message}`); + return []; + } +} + +/** + * Reads the sources mapping file + * @returns {Object} A mapping from URLs to their source files + */ +function readSourcesMapping() { + try { + if (fs.existsSync(SOURCES_FILE)) { + const sourcesData = JSON.parse(fs.readFileSync(SOURCES_FILE, 'utf8')); + return sourcesData.reduce((acc, item) => { + if (item.url && item.source) { + acc[item.url] = item.source; + } + return acc; + }, {}); + } + } catch (err) { + console.warn(`Warning: Could not read sources mapping: ${err.message}`); + } + return {}; +} + +/** + * Formats and displays the broken links report to the console + * @param {Array} brokenLinksReport - The report data to display + * @returns {number} The total number of broken links found + */ +export function displayBrokenLinksReport(brokenLinksReport = null) { + // If no report provided, read from file + if (!brokenLinksReport) { + brokenLinksReport = readBrokenLinksReport(); + } + + // 
Check both the report and first broken link file to determine if we have broken links + const firstBrokenLink = readFirstBrokenLink(); + + // Only report "no broken links" if both checks pass + if ( + (!brokenLinksReport || brokenLinksReport.length === 0) && + !firstBrokenLink + ) { + console.log('✅ No broken links detected in the validation report'); + return 0; + } + + // Special case: check if the single broken link file could be missing from the report + if ( + firstBrokenLink && + (!brokenLinksReport || brokenLinksReport.length === 0) + ) { + console.error( + '\n⚠️ Warning: First broken link record exists but no links in the report.' + ); + console.error('This could indicate a reporting issue.'); + } + + // Load sources mapping + const sourcesMapping = readSourcesMapping(); + + // Print a prominent header + console.error('\n\n' + '='.repeat(80)); + console.error(' 🚨 BROKEN LINKS DETECTED 🚨 '); + console.error('='.repeat(80)); + + // Show first failing link if available + if (firstBrokenLink) { + console.error('\n🔴 FIRST FAILING LINK:'); + console.error(` URL: ${firstBrokenLink.url}`); + console.error(` Status: ${firstBrokenLink.status}`); + console.error(` Type: ${firstBrokenLink.type}`); + console.error(` Page: ${firstBrokenLink.page}`); + if (firstBrokenLink.linkText) { + console.error( + ` Link text: "${firstBrokenLink.linkText.substring(0, 50)}${firstBrokenLink.linkText.length > 50 ? '...' 
: ''}"` + ); + } + console.error('-'.repeat(40)); + } + + let totalBrokenLinks = 0; + + brokenLinksReport.forEach((report) => { + console.error(`\n📄 PAGE: ${report.page}`); + + // Add source information if available + const source = sourcesMapping[report.page]; + if (source) { + console.error(` PAGE CONTENT SOURCE: ${source}`); + } + + console.error('-'.repeat(40)); + + report.links.forEach((link) => { + console.error(`• ${link.url}`); + console.error(` - Status: ${link.status}`); + console.error(` - Type: ${link.type}`); + if (link.linkText) { + console.error( + ` - Link text: "${link.linkText.substring(0, 50)}${link.linkText.length > 50 ? '...' : ''}"` + ); + } + console.error(''); + totalBrokenLinks++; + }); + }); + + // Print a prominent summary footer + console.error('='.repeat(80)); + console.error(`📊 TOTAL BROKEN LINKS FOUND: ${totalBrokenLinks}`); + console.error('='.repeat(80) + '\n'); + + return totalBrokenLinks; +} + +/** + * Reads the first broken link info from the file system + * @returns {Object|null} First broken link data or null if not found + */ +export function readFirstBrokenLink() { + if (!fs.existsSync(FIRST_BROKEN_LINK_FILE)) { + return null; + } + + try { + const fileContent = fs.readFileSync(FIRST_BROKEN_LINK_FILE, 'utf8'); + + // Check if the file is empty or contains whitespace only + if (!fileContent || fileContent.trim() === '') { + return null; + } + + // Try to parse the JSON content + try { + return JSON.parse(fileContent); + } catch (parseErr) { + console.error( + `Error parsing first broken link JSON: ${parseErr.message}` + ); + return null; + } + } catch (err) { + console.error(`Error reading first broken link: ${err.message}`); + return null; + } +} + +/** + * Initialize the broken links report files + * @returns {boolean} True if initialization was successful + */ +export function initializeReport() { + try { + // Create an empty array for the broken links report + fs.writeFileSync(BROKEN_LINKS_FILE, '[]', 'utf8'); + + // Reset 
the first broken link file by creating an empty file + // Using empty string as a clear indicator that no broken link has been recorded yet + fs.writeFileSync(FIRST_BROKEN_LINK_FILE, '', 'utf8'); + + console.debug('🔄 Initialized broken links reporting system'); + return true; + } catch (err) { + console.error(`Error initializing broken links report: ${err.message}`); + return false; + } +} diff --git a/cypress/support/map-files-to-urls.js b/cypress/support/map-files-to-urls.js index d41d9a8c5..bde313fc8 100644 --- a/cypress/support/map-files-to-urls.js +++ b/cypress/support/map-files-to-urls.js @@ -1,79 +1,139 @@ #!/usr/bin/env node -import { execSync } from 'child_process'; import process from 'process'; +import fs from 'fs'; +import { execSync } from 'child_process'; +import matter from 'gray-matter'; // Get file paths from command line arguments -const filePaths = process.argv.slice(2); +const filePaths = process.argv.slice(2).filter((arg) => !arg.startsWith('--')); // Parse options -const debugMode = process.argv.includes('--debug'); +const debugMode = process.argv.includes('--debug'); // deprecated, no longer used +const jsonMode = process.argv.includes('--json'); -// Filter for content files -const contentFiles = filePaths.filter(file => - file.startsWith('content/') && (file.endsWith('.md') || file.endsWith('.html')) +// Separate shared content files and regular content files +const sharedContentFiles = filePaths.filter( + (file) => + file.startsWith('content/shared/') && + (file.endsWith('.md') || file.endsWith('.html')) ); -if (contentFiles.length === 0) { - console.log('No content files to check.'); +const regularContentFiles = filePaths.filter( + (file) => + file.startsWith('content/') && + !file.startsWith('content/shared/') && + (file.endsWith('.md') || file.endsWith('.html')) +); + +// Find pages that reference shared content files in their frontmatter +function findPagesReferencingSharedContent(sharedFilePath) { + try { + // Remove the leading 
"content/" to match how it would appear in frontmatter + const relativePath = sharedFilePath.replace(/^content\//, ''); + + // Use grep to find files that reference this shared content in frontmatter + // Look for source: pattern in YAML frontmatter + const grepCmd = `grep -l "source: .*${relativePath}" --include="*.md" --include="*.html" -r content/`; + + // Execute grep command and parse results + const result = execSync(grepCmd, { encoding: 'utf8' }).trim(); + + if (!result) { + return []; + } + + return result.split('\n').filter(Boolean); + } catch (error) { + // grep returns non-zero exit code when no matches are found + if (error.status === 1) { + return []; + } + console.error( + `Error finding references to ${sharedFilePath}: ${error.message}` + ); + return []; + } +} + +/** + * Extract source from frontmatter or use the file path as source + * @param {string} filePath - Path to the file + * @returns {string} Source path + */ +function extractSourceFromFile(filePath) { + try { + if (fs.existsSync(filePath)) { + const fileContent = fs.readFileSync(filePath, 'utf8'); + const { data } = matter(fileContent); + + // If source is specified in frontmatter, return it + if (data.source) { + if (data.source.startsWith('/shared')) { + return 'content' + data.source; + } + return data.source; + } + } + + // If no source in frontmatter or can't read file, use the file path itself + return filePath; + } catch (error) { + console.error(`Error extracting source from ${filePath}: ${error.message}`); + return filePath; + } +} + +// Process shared content files to find pages that reference them +let pagesToTest = [...regularContentFiles]; + +if (sharedContentFiles.length > 0) { + console.log( + `Processing ${sharedContentFiles.length} shared content files...` + ); + + for (const sharedFile of sharedContentFiles) { + const referencingPages = findPagesReferencingSharedContent(sharedFile); + + if (referencingPages.length > 0) { + console.log( + `Found ${referencingPages.length} 
pages referencing ${sharedFile}` + ); + // Add referencing pages to the list of pages to test (avoid duplicates) + pagesToTest = [...new Set([...pagesToTest, ...referencingPages])]; + } else { + console.log(`No pages found referencing ${sharedFile}`); + } + } +} + +if (pagesToTest.length === 0) { + console.log('No content files to map.'); process.exit(0); } -// Map file paths to URL paths -function mapFilePathToUrl(filePath) { - // Remove content/ prefix +// Map file paths to URL paths and source information +function mapFilePathToUrlAndSource(filePath) { + // Map to URL let url = filePath.replace(/^content/, ''); - - // Handle _index files (both .html and .md) url = url.replace(/\/_index\.(html|md)$/, '/'); - - // Handle regular .md files url = url.replace(/\.md$/, '/'); - - // Handle regular .html files url = url.replace(/\.html$/, '/'); - - // Ensure URL starts with a slash if (!url.startsWith('/')) { url = '/' + url; } - - return url; + + // Extract source + const source = extractSourceFromFile(filePath); + + return { url, source }; } -const urls = contentFiles.map(mapFilePathToUrl); -const urlList = urls.join(','); +const mappedFiles = pagesToTest.map(mapFilePathToUrlAndSource); -console.log(`Testing links in URLs: ${urlList}`); - -// Create environment object with the cypress_test_subjects variable -const envVars = { - ...process.env, - cypress_test_subjects: urlList, - NODE_OPTIONS: '--max-http-header-size=80000 --max-old-space-size=4096' -}; - -// Run Cypress tests with the mapped URLs -try { - // Choose run mode based on debug flag - if (debugMode) { - // For debug mode, set the environment variable and open Cypress - // The user will need to manually select the test file - console.log('Opening Cypress in debug mode.'); - console.log('Please select the "article-links.cy.js" test file when Cypress opens.'); - - execSync('npx cypress open --e2e', { - stdio: 'inherit', - env: envVars - }); - } else { - // For normal mode, run the test automatically - 
execSync(`npx cypress run --spec "cypress/e2e/content/article-links.cy.js"`, { - stdio: 'inherit', - env: envVars - }); - } -} catch (error) { - console.error('Link check failed:', error); - process.exit(1); -} \ No newline at end of file +if (jsonMode) { + console.log(JSON.stringify(mappedFiles, null, 2)); +} else { + // Print URL and source info in a format that's easy to parse + mappedFiles.forEach((item) => console.log(`${item.url}|${item.source}`)); +} diff --git a/cypress/support/map-files-to-urls.mjs b/cypress/support/map-files-to-urls.mjs deleted file mode 100644 index 8ded4157f..000000000 --- a/cypress/support/map-files-to-urls.mjs +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env node - -import { execSync } from 'child_process'; -import process from 'process'; - -// Get file paths from command line arguments -const filePaths = process.argv.slice(2); - -// Parse options -const debugMode = process.argv.includes('--debug'); - -// Filter for content files -const contentFiles = filePaths.filter(file => - file.startsWith('content/') && (file.endsWith('.md') || file.endsWith('.html')) -); - -if (contentFiles.length === 0) { - console.log('No content files to check.'); - process.exit(0); -} - -// Map file paths to URL paths -function mapFilePathToUrl(filePath) { - // Remove content/ prefix - let url = filePath.replace(/^content/, ''); - - // Handle _index files (both .html and .md) - url = url.replace(/\/_index\.(html|md)$/, '/'); - - // Handle regular .md files - url = url.replace(/\.md$/, '/'); - - // Handle regular .html files - url = url.replace(/\.html$/, '/'); - - // Ensure URL starts with a slash - if (!url.startsWith('/')) { - url = '/' + url; - } - - return url; -} - -const urls = contentFiles.map(mapFilePathToUrl); -const urlList = urls.join(','); - -console.log(`Testing links in URLs: ${urlList}`); - -// Create environment object with the cypress_test_subjects variable -const envVars = { - ...process.env, - cypress_test_subjects: urlList, - NODE_OPTIONS: 
'--max-http-header-size=80000 --max-old-space-size=4096' -}; - -// Run Cypress tests with the mapped URLs -try { - // Choose run mode based on debug flag - if (debugMode) { - // For debug mode, set the environment variable and open Cypress - // The user will need to manually select the test file - console.log('Opening Cypress in debug mode.'); - console.log('Please select the "article-links.cy.js" test file when Cypress opens.'); - - execSync('npx cypress open --e2e', { - stdio: 'inherit', - env: envVars - }); - } else { - // For normal mode, run the test automatically - execSync(`npx cypress run --spec "cypress/e2e/content/article-links.cy.js"`, { - stdio: 'inherit', - env: envVars - }); - } -} catch (error) { - console.error('Link check failed'); - process.exit(1); -} \ No newline at end of file diff --git a/cypress/support/run-e2e-specs.js b/cypress/support/run-e2e-specs.js new file mode 100644 index 000000000..9ff3c5f31 --- /dev/null +++ b/cypress/support/run-e2e-specs.js @@ -0,0 +1,481 @@ +/** + * InfluxData Documentation E2E Test Runner + * + * This script automates running Cypress end-to-end tests for the InfluxData documentation site. + * It handles starting a local Hugo server, mapping content files to their URLs, running Cypress tests, + * and reporting broken links. + * + * Usage: node run-e2e-specs.js [file paths...] [--spec test // Display broken links report + const brokenLinksCount = displayBrokenLinksReport(); + + // Check if we might have special case failures + const hasSpecialCaseFailures = + results && + results.totalFailed > 0 && + brokenLinksCount === 0; + + if (hasSpecialCaseFailures) { + console.warn( + `ℹ️ Note: Tests failed (${results.totalFailed}) but no broken links were reported. 
This may be due to special case URLs (like Reddit) that return expected status codes.` + ); + } + + if ( + (results && results.totalFailed && results.totalFailed > 0 && !hasSpecialCaseFailures) || + brokenLinksCount > 0 + ) { + console.error( + `⚠️ Tests failed: ${results.totalFailed || 0} test(s) failed, ${brokenLinksCount || 0} broken links found` + ); + cypressFailed = true; + exitCode = 1; * + * Example: node run-e2e-specs.js content/influxdb/v2/write-data.md --spec cypress/e2e/content/article-links.cy.js + */ + +import { spawn } from 'child_process'; +import process from 'process'; +import fs from 'fs'; +import path from 'path'; +import cypress from 'cypress'; +import net from 'net'; +import matter from 'gray-matter'; +import { displayBrokenLinksReport, initializeReport } from './link-reporter.js'; +import { + HUGO_PORT, + HUGO_LOG_FILE, + startHugoServer, + waitForHugoReady, +} from './hugo-server.js'; + +const MAP_SCRIPT = path.resolve('cypress/support/map-files-to-urls.js'); +const URLS_FILE = '/tmp/test_subjects.txt'; + +/** + * Parses command line arguments into file and spec arguments + * @param {string[]} argv - Command line arguments (process.argv) + * @returns {Object} Object containing fileArgs and specArgs arrays + */ +function parseArgs(argv) { + const fileArgs = []; + const specArgs = []; + let i = 2; // Start at index 2 to skip 'node' and script name + + while (i < argv.length) { + if (argv[i] === '--spec') { + i++; + if (i < argv.length) { + specArgs.push(argv[i]); + i++; + } + } else { + fileArgs.push(argv[i]); + i++; + } + } + + return { fileArgs, specArgs }; +} + +// Check if port is already in use +async function isPortInUse(port) { + return new Promise((resolve) => { + const tester = net + .createServer() + .once('error', () => resolve(true)) + .once('listening', () => { + tester.close(); + resolve(false); + }) + .listen(port, '127.0.0.1'); + }); +} + +/** + * Extract source information from frontmatter + * @param {string} filePath - Path 
to the markdown file + * @returns {string|null} Source information if present + */ +function getSourceFromFrontmatter(filePath) { + if (!fs.existsSync(filePath)) { + return null; + } + + try { + const fileContent = fs.readFileSync(filePath, 'utf8'); + const { data } = matter(fileContent); + return data.source || null; + } catch (err) { + console.warn( + `Warning: Could not extract frontmatter from ${filePath}: ${err.message}` + ); + return null; + } +} + +/** + * Ensures a directory exists, creating it if necessary + * Also creates an empty file to ensure the directory is not empty + * @param {string} dirPath - The directory path to ensure exists + */ +function ensureDirectoryExists(dirPath) { + if (!fs.existsSync(dirPath)) { + try { + fs.mkdirSync(dirPath, { recursive: true }); + console.log(`Created directory: ${dirPath}`); + + // Create an empty .gitkeep file to ensure the directory exists and isn't empty + fs.writeFileSync(path.join(dirPath, '.gitkeep'), ''); + } catch (err) { + console.warn( + `Warning: Could not create directory ${dirPath}: ${err.message}` + ); + } + } +} + +async function main() { + // Keep track of processes to cleanly shut down + let hugoProc = null; + let exitCode = 0; + let hugoStarted = false; + + // Add this signal handler to ensure cleanup on unexpected termination + const cleanupAndExit = (code = 1) => { + console.log(`Performing cleanup before exit with code ${code}...`); + if (hugoProc && hugoStarted) { + try { + hugoProc.kill('SIGKILL'); // Use SIGKILL to ensure immediate termination + } catch (err) { + console.error(`Error killing Hugo process: ${err.message}`); + } + } + process.exit(code); + }; + + // Handle various termination signals + process.on('SIGINT', () => cleanupAndExit(1)); + process.on('SIGTERM', () => cleanupAndExit(1)); + process.on('uncaughtException', (err) => { + console.error(`Uncaught exception: ${err.message}`); + cleanupAndExit(1); + }); + + const { fileArgs, specArgs } = parseArgs(process.argv); + if 
(fileArgs.length === 0) { + console.error('No file paths provided.'); + process.exit(1); + } + + // Separate content files from non-content files + const contentFiles = fileArgs.filter((file) => file.startsWith('content/')); + const nonContentFiles = fileArgs.filter( + (file) => !file.startsWith('content/') + ); + + // Log what we're processing + if (contentFiles.length > 0) { + console.log( + `Processing ${contentFiles.length} content files for URL mapping...` + ); + } + + if (nonContentFiles.length > 0) { + console.log( + `Found ${nonContentFiles.length} non-content files that will be passed directly to tests...` + ); + } + + let urlList = []; + + // Only run the mapper if we have content files + if (contentFiles.length > 0) { + // 1. Map file paths to URLs and write to file + const mapProc = spawn('node', [MAP_SCRIPT, ...contentFiles], { + stdio: ['ignore', 'pipe', 'inherit'], + }); + + const mappingOutput = []; + mapProc.stdout.on('data', (chunk) => { + mappingOutput.push(chunk.toString()); + }); + + await new Promise((res) => mapProc.on('close', res)); + + // Process the mapping output + urlList = mappingOutput + .join('') + .split('\n') + .map((line) => line.trim()) + .filter(Boolean) + .map((line) => { + // Parse the URL|SOURCE format + if (line.includes('|')) { + const [url, source] = line.split('|'); + return { url, source }; + } else if (line.startsWith('/')) { + // Handle URLs without source (should not happen with our new code) + return { url: line, source: null }; + } else { + // Skip log messages + return null; + } + }) + .filter(Boolean); // Remove null entries + } + + // Add non-content files directly to be tested, using their path as both URL and source + nonContentFiles.forEach((file) => { + urlList.push({ url: file, source: file }); + }); + + // Log the URLs and sources we'll be testing + console.log(`Found ${urlList.length} items to test:`); + urlList.forEach(({ url, source }) => { + console.log(` URL/FILE: ${url}`); + console.log(` SOURCE: 
${source}`); + console.log('---'); + }); + + if (urlList.length === 0) { + console.log('No URLs or files to test.'); + process.exit(0); + } + + // Write just the URLs/files to the test_subjects file for Cypress + fs.writeFileSync(URLS_FILE, urlList.map((item) => item.url).join(',')); + + // Add source information to a separate file for reference during reporting + fs.writeFileSync( + '/tmp/test_subjects_sources.json', + JSON.stringify(urlList, null, 2) + ); + + // 2. Check if port is in use before starting Hugo + const portInUse = await isPortInUse(HUGO_PORT); + + if (portInUse) { + console.log( + `Port ${HUGO_PORT} is already in use. Checking if Hugo is running...` + ); + try { + // Try to connect to verify it's a working server + await waitForHugoReady(5000); // Short timeout - if it's running, it should respond quickly + console.log( + `Hugo server already running on port ${HUGO_PORT}, will use existing instance` + ); + } catch (err) { + console.error( + `Port ${HUGO_PORT} is in use but not responding as expected: ${err.message}` + ); + console.error('Please stop any processes using this port and try again.'); + process.exit(1); + } + } else { + // Start Hugo server using the imported function + try { + console.log( + `Starting Hugo server (logs will be written to ${HUGO_LOG_FILE})...` + ); + + // Create or clear the log file + fs.writeFileSync(HUGO_LOG_FILE, ''); + + // First, check if Hugo is installed and available + try { + // Try running a simple Hugo version command to check if Hugo is available + const hugoCheck = spawn('hugo', ['version'], { shell: true }); + await new Promise((resolve, reject) => { + hugoCheck.on('close', (code) => { + if (code === 0) { + resolve(); + } else { + reject(new Error(`Hugo check failed with code ${code}`)); + } + }); + hugoCheck.on('error', (err) => reject(err)); + }); + + console.log('Hugo is available on the system'); + } catch (checkErr) { + console.log( + 'Hugo not found on PATH, will use project-local Hugo via yarn' + 
); + } + + // Use the startHugoServer function from hugo-server.js + hugoProc = await startHugoServer({ + configFile: 'config/testing/config.yml', + port: HUGO_PORT, + buildDrafts: true, + noHTTPCache: true, + logFile: HUGO_LOG_FILE, + }); + + // Ensure hugoProc is a valid process object with kill method + if (!hugoProc || typeof hugoProc.kill !== 'function') { + throw new Error('Failed to get a valid Hugo process object'); + } + + hugoStarted = true; + console.log(`Started Hugo process with PID: ${hugoProc.pid}`); + + // Wait for Hugo to be ready + await waitForHugoReady(); + console.log(`Hugo server ready on port ${HUGO_PORT}`); + } catch (err) { + console.error(`Error starting or waiting for Hugo: ${err.message}`); + if (hugoProc && typeof hugoProc.kill === 'function') { + hugoProc.kill('SIGTERM'); + } + process.exit(1); + } + } + + // 3. Prepare Cypress directories + try { + const screenshotsDir = path.resolve('cypress/screenshots'); + const videosDir = path.resolve('cypress/videos'); + const specScreenshotDir = path.join(screenshotsDir, 'article-links.cy.js'); + + // Ensure base directories exist + ensureDirectoryExists(screenshotsDir); + ensureDirectoryExists(videosDir); + + // Create spec-specific screenshot directory with a placeholder file + ensureDirectoryExists(specScreenshotDir); + + // Create a dummy screenshot file to prevent trash errors + const dummyScreenshotPath = path.join(specScreenshotDir, 'dummy.png'); + if (!fs.existsSync(dummyScreenshotPath)) { + // Create a minimal valid PNG file (1x1 transparent pixel) + const minimalPng = Buffer.from([ + 0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, + 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, + 0x08, 0x06, 0x00, 0x00, 0x00, 0x1f, 0x15, 0xc4, 0x89, 0x00, 0x00, 0x00, + 0x0a, 0x49, 0x44, 0x41, 0x54, 0x78, 0x9c, 0x63, 0x00, 0x01, 0x00, 0x00, + 0x05, 0x00, 0x01, 0x0d, 0x0a, 0x2d, 0xb4, 0x00, 0x00, 0x00, 0x00, 0x49, + 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 
0x82, + ]); + fs.writeFileSync(dummyScreenshotPath, minimalPng); + console.log(`Created dummy screenshot file: ${dummyScreenshotPath}`); + } + + console.log('Cypress directories prepared successfully'); + } catch (err) { + console.warn( + `Warning: Error preparing Cypress directories: ${err.message}` + ); + // Continue execution - this is not a fatal error + } + + // 4. Run Cypress tests + let cypressFailed = false; + try { + // Initialize/clear broken links report before running tests + console.log('Initializing broken links report...'); + initializeReport(); + + console.log(`Running Cypress tests for ${urlList.length} URLs...`); + const cypressOptions = { + reporter: 'junit', + browser: 'chrome', + config: { + baseUrl: `http://localhost:${HUGO_PORT}`, + video: true, + trashAssetsBeforeRuns: false, // Prevent trash errors + }, + env: { + // Pass URLs as a comma-separated string for backward compatibility + test_subjects: urlList.map((item) => item.url).join(','), + // Add new structured data with source information + test_subjects_data: JSON.stringify(urlList), + // Skip testing external links (non-influxdata.com URLs) + skipExternalLinks: true, + }, + }; + + if (specArgs.length > 0) { + console.log(`Using specified test specs: ${specArgs.join(', ')}`); + cypressOptions.spec = specArgs.join(','); + } + + const results = await cypress.run(cypressOptions); + + // Process broken links report + const brokenLinksCount = displayBrokenLinksReport(); + + // Determine why tests failed + const testFailureCount = results?.totalFailed || 0; + + if (testFailureCount > 0 && brokenLinksCount === 0) { + console.warn( + `ℹ️ Note: ${testFailureCount} test(s) failed but no broken links were detected in the report.` + ); + console.warn( + ` This usually indicates test errors unrelated to link validation.` + ); + + // We should not consider special case domains (those with expected errors) as failures + // but we'll still report other test failures + cypressFailed = true; + exitCode = 
1; + } else if (brokenLinksCount > 0) { + console.error( + `⚠️ Tests failed: ${brokenLinksCount} broken link(s) detected` + ); + cypressFailed = true; + exitCode = 1; + } else if (results) { + console.log('✅ Tests completed successfully'); + } + } catch (err) { + console.error(`❌ Cypress execution error: ${err.message}`); + console.error( + `Check Hugo server logs at ${HUGO_LOG_FILE} for any server issues` + ); + + // Still try to display broken links report if available + displayBrokenLinksReport(); + + cypressFailed = true; + exitCode = 1; + } finally { + // Stop Hugo server only if we started it + if (hugoProc && hugoStarted && typeof hugoProc.kill === 'function') { + console.log(`Stopping Hugo server (fast shutdown: ${cypressFailed})...`); + + if (cypressFailed) { + hugoProc.kill('SIGKILL'); + console.log('Hugo server forcibly terminated'); + } else { + const shutdownTimeout = setTimeout(() => { + console.error( + 'Hugo server did not shut down gracefully, forcing termination' + ); + hugoProc.kill('SIGKILL'); + process.exit(exitCode); + }, 2000); + + hugoProc.kill('SIGTERM'); + + hugoProc.on('close', () => { + clearTimeout(shutdownTimeout); + console.log('Hugo server shut down successfully'); + process.exit(exitCode); + }); + + // Return to prevent immediate exit + return; + } + } else if (hugoStarted) { + console.log('Hugo process was started but is not available for cleanup'); + } + + process.exit(exitCode); + } +} + +main().catch((err) => { + console.error(`Fatal error: ${err}`); + process.exit(1); +}); diff --git a/data/influxd_flags.yml b/data/influxd_flags.yml index e3e1998de..29f57a2e7 100644 --- a/data/influxd_flags.yml +++ b/data/influxd_flags.yml @@ -161,6 +161,9 @@ - flag: "--storage-validate-keys" added: 2.0 +- flag: "--storage-wal-flush-on-shutdown" + added: 2.7 + - flag: "--storage-wal-fsync-delay" added: 2.0 diff --git a/data/products.yml b/data/products.yml index 831b4b70f..a0d613cb8 100644 --- a/data/products.yml +++ b/data/products.yml @@ 
-1,12 +1,12 @@ influxdb3_core: name: InfluxDB 3 Core altname: InfluxDB 3 Core - namespace: influxdb + namespace: influxdb3 menu_category: self-managed versions: [core] list_order: 2 latest: core - latest_patch: 3.0.1 + latest_patch: 3.1.0 placeholder_host: localhost:8181 ai_sample_questions: - How do I install and run InfluxDB 3 Core? @@ -16,17 +16,31 @@ influxdb3_core: influxdb3_enterprise: name: InfluxDB 3 Enterprise altname: InfluxDB 3 Enterprise - namespace: influxdb + namespace: influxdb3 menu_category: self-managed versions: [enterprise] list_order: 2 latest: enterprise - latest_patch: 3.0.1 + latest_patch: 3.1.0 placeholder_host: localhost:8181 ai_sample_questions: - How do I install and run InfluxDB 3 Enterprise? - Help me write a plugin for the Python Processing engine? - How do I start a read replica node with InfluxDB 3 Enterprise? + +influxdb3_explorer: + name: InfluxDB 3 Explorer + altname: Explorer + namespace: influxdb3_explorer + menu_category: tools + list_order: 1 + latest: explorer + latest_patch: 1.0.0 + placeholder_host: localhost:8888 + ai_sample_questions: + - How do I query data using InfluxDB 3 Explorer? + - How do I use InfluxDB 3 Explorer to visualize data? + - How do I install InfluxDB 3 Explorer? influxdb3_cloud_serverless: name: InfluxDB Cloud Serverless @@ -49,8 +63,8 @@ influxdb3_cloud_dedicated: versions: [cloud-dedicated] list_order: 3 latest: cloud-dedicated - link: "https://www.influxdata.com/contact-sales-form/" - latest_cli: 2.10.0 + link: "https://www.influxdata.com/contact-sales-cloud-dedicated/" + latest_cli: 2.10.1 placeholder_host: cluster-id.a.influxdb.io ai_sample_questions: - How do I migrate from InfluxDB v1 to InfluxDB Cloud Dedicated? 
@@ -65,7 +79,7 @@ influxdb3_clustered: versions: [clustered] list_order: 3 latest: clustered - link: "https://www.influxdata.com/contact-sales-influxdb-clustered" + link: "https://www.influxdata.com/contact-sales-influxdb-clustered/" placeholder_host: cluster-host.com ai_sample_questions: - How do I use a Helm chart to configure Clustered? @@ -76,6 +90,7 @@ influxdb: name: InfluxDB altname: InfluxDB OSS namespace: influxdb + succeeded_by: influxdb3_core menu_category: self-managed list_order: 1 placeholder_host: localhost:8086 @@ -84,7 +99,7 @@ influxdb: - v1 latest: v2.7 latest_patches: - v2: 2.7.11 + v2: 2.7.12 v1: 1.11.8 latest_cli: v2: 2.7.5 @@ -107,6 +122,20 @@ influxdb_cloud: - How is Cloud 2 different from Cloud Serverless? - How do I manage auth tokens in InfluxDB Cloud 2? +explorer: + name: InfluxDB 3 Explorer + namespace: explorer + menu_category: other + list_order: 4 + versions: [v1] + latest: v1.0 + latest_patches: + v1: 1.0.0 + ai_sample_questions: + - How do I use InfluxDB 3 Explorer to visualize data? + - How do I create a dashboard in InfluxDB 3 Explorer? + - How do I query data using InfluxDB 3 Explorer? + telegraf: name: Telegraf namespace: telegraf @@ -143,7 +172,7 @@ kapacitor: versions: [v1] latest: v1.7 latest_patches: - v1: 1.7.6 + v1: 1.7.7 ai_sample_questions: - How do I configure Kapacitor for InfluxDB v1? - How do I write a custom Kapacitor task? 
diff --git a/eslint.config.js b/eslint.config.js new file mode 100644 index 000000000..0f7cd7e65 --- /dev/null +++ b/eslint.config.js @@ -0,0 +1,126 @@ +import globals from 'globals'; +import jsdocPlugin from 'eslint-plugin-jsdoc'; +import pluginJs from '@eslint/js'; +import tseslint from 'typescript-eslint'; +import importPlugin from 'eslint-plugin-import'; +import a11yPlugin from 'eslint-plugin-jsx-a11y'; +import prettierConfig from 'eslint-config-prettier'; + +/** @type {import('eslint').Linter.Config[]} */ +export default [ + // Base configurations + { + languageOptions: { + globals: { + ...globals.browser, + // Hugo-specific globals + hugo: 'readonly', + params: 'readonly', + // Common libraries used in docs + Alpine: 'readonly', + CodeMirror: 'readonly', + d3: 'readonly', + }, + ecmaVersion: 2022, + sourceType: 'module', + }, + }, + + // JavaScript config (extract rules only) + { + rules: { ...pluginJs.configs.recommended.rules }, + }, + + // TypeScript configurations with proper plugin format + { + plugins: { + '@typescript-eslint': tseslint.plugin, + }, + rules: { ...tseslint.configs.recommended.rules }, + }, + + // Import plugin with proper plugin format + { + plugins: { + import: importPlugin, + }, + rules: { ...importPlugin.configs.recommended.rules }, + }, + + // Accessibility rules with proper plugin format + { + plugins: { + 'jsx-a11y': a11yPlugin, + }, + rules: { ...a11yPlugin.configs.recommended.rules }, + }, + + // Add to your config array: + { + plugins: { + jsdoc: jsdocPlugin, + }, + rules: { + 'jsdoc/require-description': 'warn', + 'jsdoc/require-param-description': 'warn', + 'jsdoc/require-returns-description': 'warn', + // Add more JSDoc rules as needed + }, + }, + + // Prettier compatibility (extract rules only) + { + rules: { ...prettierConfig.rules }, + }, + + // Custom rules for documentation project + { + rules: { + // Documentation projects often need to use console for examples + 'no-console': 'off', + + // Module imports + 
'import/extensions': ['error', 'ignorePackages'], + 'import/no-unresolved': 'off', // Hugo handles module resolution differently + + // Code formatting + 'max-len': ['warn', { code: 80, ignoreUrls: true, ignoreStrings: true }], + quotes: ['error', 'single', { avoidEscape: true }], + + // Hugo template string linting (custom rule) + 'no-template-curly-in-string': 'off', // Allow ${} in strings for Hugo templates + + // Accessibility + 'jsx-a11y/anchor-is-valid': 'warn', + }, + }, + + // Configuration for specific file patterns + { + files: ['**/*.js'], + rules: { + // Rules specific to JavaScript files + }, + }, + { + files: ['assets/js/**/*.js'], + rules: { + // Rules specific to JavaScript in Hugo assets + }, + }, + { + files: ['**/*.ts'], + rules: { + // Rules specific to TypeScript files + }, + }, + { + // Ignore rules for build files and external dependencies + ignores: [ + '**/node_modules/**', + '**/public/**', + '**/resources/**', + '**/.hugo_build.lock', + ], + }, +]; diff --git a/eslint.config.mjs b/eslint.config.mjs deleted file mode 100644 index 6d1eeba04..000000000 --- a/eslint.config.mjs +++ /dev/null @@ -1,9 +0,0 @@ -import globals from "globals"; -import pluginJs from "@eslint/js"; - - -/** @type {import('eslint').Linter.Config[]} */ -export default [ - {languageOptions: { globals: globals.browser }}, - pluginJs.configs.recommended, -]; \ No newline at end of file diff --git a/flux-build-scripts/inject-flux-stdlib-frontmatter.js b/flux-build-scripts/inject-flux-stdlib-frontmatter.cjs similarity index 100% rename from flux-build-scripts/inject-flux-stdlib-frontmatter.js rename to flux-build-scripts/inject-flux-stdlib-frontmatter.cjs diff --git a/flux-build-scripts/update-flux-versions.js b/flux-build-scripts/update-flux-versions.cjs similarity index 100% rename from flux-build-scripts/update-flux-versions.js rename to flux-build-scripts/update-flux-versions.cjs diff --git a/hugo.yml b/hugo.yml index cb4775438..909917486 100644 --- a/hugo.yml +++ 
b/hugo.yml @@ -54,3 +54,16 @@ outputFormats: mediaType: application/json baseName: pages isPlainText: true + +build: + # Ensure Hugo correctly processes JavaScript modules + jsConfig: + nodeEnv: "development" + +module: + mounts: + - source: assets + target: assets + + - source: node_modules + target: assets/node_modules \ No newline at end of file diff --git a/layouts/_default/page-list.json b/layouts/_default/page-list.json index 507009897..e40823ffa 100644 --- a/layouts/_default/page-list.json +++ b/layouts/_default/page-list.json @@ -8,49 +8,49 @@ {{- if $index -}},{{- end }} { "name": {{ .Name | jsonify }}, - "url": {{ absURL (cond (isset .Params "url") .Params.url .URL) | jsonify }}, + "url": {{ absURL (default .URL .Params.url) | jsonify }}, "children": [ {{- range $index, $entry := .Children -}} {{- if $index -}},{{- end }} { "name": {{ .Name | jsonify }}, - "url": {{ absURL (cond (isset .Params "url") .Params.url .URL) | jsonify }}, + "url": {{ absURL (default .URL .Params.url) | jsonify }}, "children": [ {{- range $index, $entry := .Children -}} {{- if $index -}},{{- end }} { "name": {{ .Name | jsonify }}, - "url": {{ absURL (cond (isset .Params "url") .Params.url .URL) | jsonify }}, + "url": {{ absURL (default .URL .Params.url) | jsonify }}, "children": [ {{- range $index, $entry := .Children -}} {{- if $index -}},{{- end }} { "name": {{ .Name | jsonify }}, - "url": {{ absURL (cond (isset .Params "url") .Params.url .URL) | jsonify }}, + "url": {{ absURL (default .URL .Params.url) | jsonify }}, "children": [ {{- range $index, $entry := .Children -}} {{- if $index -}},{{- end }} { "name": {{ .Name | jsonify }}, - "url": {{ absURL (cond (isset .Params "url") .Params.url .URL) | jsonify }}, + "url": {{ absURL (default .URL .Params.url) | jsonify }}, "children": [ {{- range $index, $entry := .Children -}} {{- if $index -}},{{- end }} { "name": {{ .Name | jsonify }}, - "url": {{ absURL (cond (isset .Params "url") .Params.url .URL) | jsonify }}, + "url": {{ 
absURL (default .URL .Params.url) | jsonify }}, "children": [ {{- range $index, $entry := .Children -}} {{- if $index -}},{{- end }} { "name": {{ .Name | jsonify }}, - "url": {{ absURL (cond (isset .Params "url") .Params.url .URL) | jsonify }}, + "url": {{ absURL (default .URL .Params.url) | jsonify }}, "children": [ {{- range $index, $entry := .Children -}} {{- if $index -}},{{- end }} { "name": {{ .Name | jsonify }}, - "url": {{ absURL (cond (isset .Params "url") .Params.url .URL) | jsonify }}, + "url": {{ absURL (default .URL .Params.url) | jsonify }}, "children": [ {{ range .Children }} {{ end }} diff --git a/layouts/index.html b/layouts/index.html index 62d82ec82..b04b66e5c 100644 --- a/layouts/index.html +++ b/layouts/index.html @@ -6,6 +6,13 @@ {{ $kapacitorVersion := replaceRE "v" "" .Site.Data.products.kapacitor.latest }} {{ $fluxVersion := replaceRE "v" "" .Site.Data.products.flux.latest }} + +{{ if or (not .Params.test_only) (and .Params.test_only (in site.Params.environment (slice "testing" "development"))) }} + {{ partial "header.html" . }} {{ partial "topnav.html" . }} @@ -32,64 +39,87 @@

InfluxDB 3

The modern time series data engine built for high-speed, high-cardinality data, from the edge to the cloud.

-
-

Self-managed

-
-
-
-
-

InfluxDB 3 Core

-

The open source recent data engine optimized for time series and event data.

+
+
+
+

Self-managed

+
+
+
+
+

InfluxDB 3 Core

+

The open source recent data engine optimized for time series and event data.

+
+ +
+
+
+

InfluxDB 3 Enterprise

+

The scalable data engine built for recent and historical time series and event data.

+
+ +
+
+
+

InfluxDB Clustered

+

The Kubernetes-enabled, highly-available InfluxDB 3 cluster built for high write and query workloads on your own infrastructure.

+
+ +
-
-
-
-

InfluxDB 3 Enterprise

-

The scalable data engine built for recent and historical time series and event data.

+
+
+

Fully-Managed

+
+
+
+
+

InfluxDB Cloud Serverless

+

The fully-managed, multi-tenant InfluxDB 3 service deployed in the cloud.

+
+ +
+
+
+

InfluxDB Cloud Dedicated

+

The fully-managed InfluxDB 3 cluster dedicated to your workload and deployed in the cloud.

+
+ +
-
-
-
-

InfluxDB Clustered

-

The Kubernetes-enabled, highly-available InfluxDB 3 cluster built for high write and query workloads on your own infrastructure.

+ -
-
-

Fully-Managed

-
-
-
-
-

InfluxDB Cloud Serverless

-

The fully-managed, multi-tenant InfluxDB 3 service deployed in the cloud.

+
+
+
+

InfluxDB 3 Explorer

+

A standalone UI designed for visualizing, querying, and managing data in InfluxDB 3 Core and Enterprise.

+
+ +
- -
-
-
-

InfluxDB Cloud Dedicated

-

The fully-managed InfluxDB 3 cluster dedicated to your workload and deployed in the cloud.

-
-
@@ -264,3 +294,9 @@
{{ partial "footer.html" . }} +{{ else }} + + {{ if eq .Params.test_only true }} + {{ template "404.html" . }} + {{ end }} +{{ end }} \ No newline at end of file diff --git a/layouts/partials/article.html b/layouts/partials/article.html index 986e59f30..bbe22ed84 100644 --- a/layouts/partials/article.html +++ b/layouts/partials/article.html @@ -5,6 +5,7 @@ {{ partial "article/supported-versions.html" . }} {{ partial "article/page-meta.html" . }}
+ {{ partial "article/beta.html" . }} {{ partial "article/stable-version.html" . }} {{ partial "article/flux-experimental.html" . }} {{ partial "article/flux-contrib.html" . }} diff --git a/layouts/partials/article/beta.html b/layouts/partials/article/beta.html new file mode 100644 index 000000000..d2288dd53 --- /dev/null +++ b/layouts/partials/article/beta.html @@ -0,0 +1,39 @@ + +{{ $productPathData := split .RelPermalink "/" }} +{{ $product := index $productPathData 1 }} +{{ $version := index $productPathData 2 }} +{{ $productKey := cond (eq $product "influxdb3") (print "influxdb3_" (replaceRE "-" "_" $version)) $product }} +{{ $productData := index $.Site.Data.products $productKey }} +{{ $displayName := $productData.name }} +{{ $earlyAccessList := slice "influxdb3/explorer" }} + +{{ if in $earlyAccessList (print $product "/" $version )}} +
+
+

{{ $displayName }} is in Public Beta

+

+ {{ $displayName }} is in public beta and available for testing and feedback, + but is not meant for production use yet. + Both the product and this documentation are works in progress. + We welcome and encourage your input about your experience with the beta and + invite you to join our public channels for updates and to + share feedback. +

+ +
+
+{{ end }} \ No newline at end of file diff --git a/layouts/partials/article/feedback.html b/layouts/partials/article/feedback.html index 20519be3f..c1609c205 100644 --- a/layouts/partials/article/feedback.html +++ b/layouts/partials/article/feedback.html @@ -8,9 +8,7 @@ {{ if .File }} {{ .Scratch.Set "pageGithubLink" (print "https://github.com/influxdata/docs-v2/edit/master/content/" .File.Path) }} - {{ if .Page.HasShortcode "duplicate-oss" }} - {{ .Scratch.Set "pageGithubLink" (replaceRE "/cloud/" "/v2/" (.Scratch.Get "pageGithubLink")) }} - {{ else if .Params.Source }} + {{ if .Params.Source }} {{ .Scratch.Set "pageGithubLink" (print "https://github.com/influxdata/docs-v2/edit/master/content" .Params.source) }} {{ end }} {{ else }} @@ -53,9 +51,14 @@ To find support, use the following resources:

{{ if not (in $supportBlacklist $product) }} diff --git a/layouts/partials/article/flux-contrib.html b/layouts/partials/article/flux-contrib.html index dbd317e1d..6a0d1c749 100644 --- a/layouts/partials/article/flux-contrib.html +++ b/layouts/partials/article/flux-contrib.html @@ -13,7 +13,7 @@
{{ else if eq .Kind "section" }}
- {{ $packageTitle := cond (isset .Params "list_title") $.Params.list_title .Title }} + {{ $packageTitle := default .Title .Params.list_title }} {{ $packageName := replaceRE `^(.*)( package)` "$1$2" $packageTitle }}

The {{ $packageName | safeHTML }} is a user-contributed package diff --git a/layouts/partials/article/flux-experimental.html b/layouts/partials/article/flux-experimental.html index 3f951abff..9c3b75c50 100644 --- a/layouts/partials/article/flux-experimental.html +++ b/layouts/partials/article/flux-experimental.html @@ -13,7 +13,7 @@

{{ else if eq .Kind "section" }}
- {{ $packageTitle := cond (isset .Params "list_title") $.Params.list_title .Title }} + {{ $packageTitle := default .Title .Params.list_title }} {{ $packageName := replaceRE `^(.*)( package)` "$1$2" $packageTitle }}

The {{ $packageName | safeHTML }} is experimental and subject to change at any time. diff --git a/layouts/partials/article/stable-version.html b/layouts/partials/article/stable-version.html index cc53ed736..eb0b13781 100644 --- a/layouts/partials/article/stable-version.html +++ b/layouts/partials/article/stable-version.html @@ -2,11 +2,47 @@ {{ $product := index $productPathData 0 }} {{ $version := index $productPathData 1 | default "0"}} {{ $productKey := cond (eq $product "influxdb3") (print "influxdb3_" (replaceRE "-" "_" $version)) $product }} -{{ $productName := cond (isset (index .Site.Data.products $productKey) "altname") (index .Site.Data.products $productKey).altname (index .Site.Data.products $productKey).name }} -{{ $stableVersion := (replaceRE `\.[0-9x]+$` "" (index .Site.Data.products $product).latest) }} -{{ $stableVersionURL := replaceRE `v[1-3]` $stableVersion .RelPermalink }} -{{ $stableDefaultURL := print "/" $product "/" $stableVersion "/" }} + +{{ $successorInfo := dict "exists" false }} +{{ $productName := $product | humanize }} +{{ $stableVersion := "" }} +{{ $stableVersionURL := "" }} +{{ $stableDefaultURL := "" }} + + +{{ if isset .Site.Data.products $productKey }} + {{ $productName = cond (isset (index .Site.Data.products $productKey) "altname") (index .Site.Data.products $productKey).altname (index .Site.Data.products $productKey).name }} +{{ end }} + + +{{ if and (isset .Site.Data.products $productKey) (isset (index .Site.Data.products $productKey) "succeeded_by") }} + {{ $successorKey := (index .Site.Data.products $productKey).succeeded_by }} + + {{ if and $successorKey (isset .Site.Data.products $successorKey) }} + + {{ $successorInfo = dict + "exists" true + "key" $successorKey + "name" (cond (isset (index .Site.Data.products $successorKey) "altname") + (index .Site.Data.products $successorKey).altname + (index .Site.Data.products $successorKey).name) + "version" (replaceRE `\.[0-9x]+$` "" (index .Site.Data.products 
$successorKey).latest) + "namespace" (index .Site.Data.products $successorKey).namespace + }} + + + {{ $stableVersion = $successorInfo.version }} + {{ $stableVersionURL = print "/" $successorInfo.namespace "/" $stableVersion "/" }} + {{ $stableDefaultURL = $stableVersionURL }} + {{ end }} +{{ else if isset .Site.Data.products $product }} + + {{ $stableVersion = (replaceRE `\.[0-9x]+$` "" (index .Site.Data.products $product).latest) }} + {{ $stableVersionURL = replaceRE `v[1-3]` $stableVersion .RelPermalink }} + {{ $stableDefaultURL = print "/" $product "/" $stableVersion "/" }} +{{ end }} + {{ $stableEquivalentURL := index .Page.Params.alt_links $stableVersion | default "does-not-exist" }} {{ $stableEquivalentPage := .GetPage (replaceRE `\/$` "" $stableEquivalentURL) }} {{ $stablePageExists := gt (len $stableEquivalentPage.Title) 0 }} @@ -14,34 +50,32 @@ {{ $isMultiVersion := in (print "/" $version) "/v" }} {{ if and (in $productWhiteList $product) $isMultiVersion }} - - {{ if lt (int (replaceRE `[a-z]` "" $version)) (int (replaceRE `[a-z]` "" $stableVersion)) }} + {{ if $successorInfo.exists }} +

This page documents an earlier version of {{ $productName }}. - {{ $productName }} {{ $stableVersion }} is the latest stable version. - - {{ if gt (len (.GetPage ((replaceRE `v[1-3]` $stableVersion .RelPermalink) | replaceRE `\/$` "")).Title) 0 }} - View this page in the {{ $stableVersion }} documentation. - - {{ else if $stablePageExists }} - See the equivalent InfluxDB {{ $stableVersion }} documentation: {{ $stableEquivalentPage.Title | .RenderString }}. - {{ else }} - See the InfluxDB {{ $stableVersion }} documentation. - {{ end }} + {{ $successorInfo.name }} is the latest stable version.

+ {{ else if $stableVersion }} + + {{ if lt (int (replaceRE `[a-z]` "" $version)) (int (replaceRE `[a-z]` "" $stableVersion)) }} +
+

+ This page documents an earlier version of {{ $productName }}. + {{ $productName }} {{ $stableVersion }} is the latest stable version. + + + {{ if gt (len (.GetPage ((replaceRE `v[1-3]` $stableVersion .RelPermalink) | replaceRE `\/$` "")).Title) 0 }} + View this page in the {{ $stableVersion }} documentation. + {{ else if $stablePageExists }} + See the equivalent {{ $productName }} {{ $stableVersion }} documentation: {{ $stableEquivalentPage.Title | .RenderString }}. + {{ else }} + See the {{ $productName }} {{ $stableVersion }} documentation. + {{ end }} +

+
+ {{ end }} {{ end }} -{{ end }} - -{{ if and .Page.Params.v2 (eq (findRE `v[1-3]` $version) (findRE `v[1-3]` $stableVersion)) }} -
-

- {{ if $stablePageExists }} - See the equivalent InfluxDB {{ $stableVersion }} documentation: {{ $stableEquivalentPage.Title | .RenderString }}. - {{ else }} - See the equivalent InfluxDB {{ $stableVersion }} documentation. - {{ end }} -

-
-{{ end }} +{{ end }} \ No newline at end of file diff --git a/layouts/partials/article/tags.html b/layouts/partials/article/tags.html index 60b2f48ca..4c297c861 100644 --- a/layouts/partials/article/tags.html +++ b/layouts/partials/article/tags.html @@ -2,9 +2,9 @@ {{ $product := index $productPathData 0 }} {{ $version := index $productPathData 1 }} {{ $tagSet := print $product "/" $version "/tags" }} -{{ if isset .Params $tagSet }} +{{ with .Param $tagSet }}
- {{ range .Param $tagSet }} + {{ range . }} {{ $name := . }} {{ with $.Site.GetPage (printf "/%s/%s" $tagSet ($name | urlize)) }} {{ $name }} diff --git a/layouts/partials/footer/javascript.html b/layouts/partials/footer/javascript.html index eb64ae5ad..2e0f85494 100644 --- a/layouts/partials/footer/javascript.html +++ b/layouts/partials/footer/javascript.html @@ -8,7 +8,7 @@ {{ $dateTime := resources.Get "js/datetime.js" }} {{ $homepageInteractions := resources.Get "js/home-interactions.js" }} {{ $releaseTOC := resources.Get "/js/release-toc.js" }} -{{ $footerjs := slice $versionSelector $searchInteractions $listFilters $featureCallouts $keybindings $homepageInteractions | resources.Concat "js/footer.bundle.js" | resources.Fingerprint }} +{{ $footerjs := slice $jquery $versionSelector $searchInteractions $listFilters $featureCallouts $keybindings $homepageInteractions | resources.Concat "js/footer.bundle.js" | resources.Fingerprint }} {{ $fluxGroupKeyjs := $fluxGroupKeys | resources.Fingerprint }} {{ $dateTimejs := $dateTime | resources.Fingerprint }} {{ $releaseTOCjs := $releaseTOC | resources.Fingerprint }} diff --git a/layouts/partials/footer/search.html b/layouts/partials/footer/search.html index 6df87ec39..c0405957f 100644 --- a/layouts/partials/footer/search.html +++ b/layouts/partials/footer/search.html @@ -5,7 +5,7 @@ {{ $fluxSupported := slice "influxdb" "enterprise_influxdb" }} {{ $influxdbFluxSupport := slice "v1" "v2" "cloud" }} {{ $includeFlux := and (in $fluxSupported $product) (in $influxdbFluxSupport $version) }} -{{ $includeResources := not (in (slice "cloud-serverless" "cloud-dedicated" "clustered" "core" "enterprise") $version) }} +{{ $includeResources := not (in (slice "cloud-serverless" "cloud-dedicated" "clustered" "core" "enterprise" "explorer") $version) }} - {{ $productPathData := findRE "[^/]+.*?" 
.RelPermalink }} {{ $product := index $productPathData 0 }} @@ -23,7 +13,7 @@ {{ $products := .Site.Data.products }} {{ $influxdb_urls := .Site.Data.influxdb_urls }} -{{ with resources.Get "js/main.js" }} +{{ with resources.Get "js/index.js" }} {{ $opts := dict "minify" hugo.IsProduction "sourceMap" (cond hugo.IsProduction "" "external") diff --git a/layouts/partials/header/title.html b/layouts/partials/header/title.html index e6b1c8910..8383be7e5 100644 --- a/layouts/partials/header/title.html +++ b/layouts/partials/header/title.html @@ -22,6 +22,8 @@ {{ $scratch.Set "siteTitle" "InfluxDB 3 Core Documentation" }} {{ else if eq $currentVersion "enterprise"}} {{ $scratch.Set "siteTitle" "InfluxDB 3 Enterprise Documentation" }} +{{ else if eq $currentVersion "explorer"}} + {{ $scratch.Set "siteTitle" "InfluxDB 3 Explorer Documentation" }} {{ else if eq $currentVersion "cloud-serverless"}} {{ $scratch.Set "siteTitle" "InfluxDB Cloud Serverless Documentation" }} {{ else if eq $currentVersion "cloud-dedicated"}} @@ -35,10 +37,10 @@ {{ else if eq $currentVersion nil}} {{ $scratch.Set "siteTitle" (print (index .Site.Data.products $product).name " Documentation") }} {{ else }} - {{ if (isset (index .Site.Data.products $product) "altname" ) }} - {{ $scratch.Set "siteTitle" (print (index .Site.Data.products $product).altname " Documentation") }} + {{ with (index .Site.Data.products $product).altname }} + {{ $scratch.Set "siteTitle" (print . " Documentation") }} {{ else }} - {{ $scratch.Set "siteTitle" (print (index .Site.Data.products $product).name " Documentation") }} + {{ $scratch.Set "siteTitle" (print (index $.Site.Data.products $product).name " Documentation") }} {{ end }} {{ end }} diff --git a/layouts/partials/sidebar/nested-menu.html b/layouts/partials/sidebar/nested-menu.html index feceb9ea8..0083b2a3e 100644 --- a/layouts/partials/sidebar/nested-menu.html +++ b/layouts/partials/sidebar/nested-menu.html @@ -4,37 +4,37 @@ {{ range $menu }}