Merge branch 'master' into patch-1

pull/6052/head
Jason Stirnaman 2025-05-23 09:10:30 -05:00 committed by GitHub
commit 2dbe5f77bc
135 changed files with 5074 additions and 1768 deletions


@@ -31,10 +31,10 @@ jobs:
          command: cd api-docs && bash generate-api-docs.sh
      - run:
          name: Inject Flux stdlib frontmatter
-         command: node ./flux-build-scripts/inject-flux-stdlib-frontmatter.js
+         command: node ./flux-build-scripts/inject-flux-stdlib-frontmatter.cjs
      - run:
          name: Update Flux/InfluxDB versions
-         command: node ./flux-build-scripts/update-flux-versions.js
+         command: node ./flux-build-scripts/update-flux-versions.cjs
      - save_cache:
          key: install-{{ .Environment.CACHE_VERSION }}-{{ checksum ".circleci/config.yml" }}
          paths:

.gitignore

@@ -16,6 +16,8 @@ node_modules
!telegraf-build/scripts
!telegraf-build/README.md
/cypress/screenshots/*
+/cypress/videos/*
+test-results.xml
/influxdb3cli-build-scripts/content
.vscode/*
.idea

.husky/_/serve (new executable file)

@@ -0,0 +1,57 @@
#!/bin/sh

if [ "$LEFTHOOK_VERBOSE" = "1" -o "$LEFTHOOK_VERBOSE" = "true" ]; then
  set -x
fi

if [ "$LEFTHOOK" = "0" ]; then
  exit 0
fi

call_lefthook()
{
  if test -n "$LEFTHOOK_BIN"
  then
    "$LEFTHOOK_BIN" "$@"
  elif lefthook -h >/dev/null 2>&1
  then
    lefthook "$@"
  else
    dir="$(git rev-parse --show-toplevel)"
    osArch=$(uname | tr '[:upper:]' '[:lower:]')
    cpuArch=$(uname -m | sed 's/aarch64/arm64/;s/x86_64/x64/')
    if test -f "$dir/node_modules/lefthook-${osArch}-${cpuArch}/bin/lefthook"
    then
      "$dir/node_modules/lefthook-${osArch}-${cpuArch}/bin/lefthook" "$@"
    elif test -f "$dir/node_modules/@evilmartians/lefthook/bin/lefthook-${osArch}-${cpuArch}/lefthook"
    then
      "$dir/node_modules/@evilmartians/lefthook/bin/lefthook-${osArch}-${cpuArch}/lefthook" "$@"
    elif test -f "$dir/node_modules/@evilmartians/lefthook-installer/bin/lefthook"
    then
      "$dir/node_modules/@evilmartians/lefthook-installer/bin/lefthook" "$@"
    elif test -f "$dir/node_modules/lefthook/bin/index.js"
    then
      "$dir/node_modules/lefthook/bin/index.js" "$@"
    elif bundle exec lefthook -h >/dev/null 2>&1
    then
      bundle exec lefthook "$@"
    elif yarn lefthook -h >/dev/null 2>&1
    then
      yarn lefthook "$@"
    elif pnpm lefthook -h >/dev/null 2>&1
    then
      pnpm lefthook "$@"
    elif swift package plugin lefthook >/dev/null 2>&1
    then
      swift package --disable-sandbox plugin lefthook "$@"
    elif command -v mint >/dev/null 2>&1
    then
      mint run csjones/lefthook-plugin "$@"
    else
      echo "Can't find lefthook in PATH"
    fi
  fi
}

call_lefthook run "serve" "$@"

.nvmrc (new file)

@@ -0,0 +1 @@
v23.10.0


@@ -28,8 +28,10 @@ For the linting and tests to run, you need to install Docker and Node.js
dependencies.

_**Note:**
-We strongly recommend running linting and tests, but you can skip them
-(and avoid installing dependencies)
+The git pre-commit and pre-push hooks are configured to run linting and tests automatically
+when you commit or push changes.
+We strongly recommend letting them run, but you can skip them
+(and avoid installing related dependencies)
by including the `--no-verify` flag with your commit--for example, enter the following command in your terminal:

```sh
@@ -51,7 +53,7 @@ dev dependencies used in pre-commit hooks for linting, syntax-checking, and test
Dev dependencies include:

- [Lefthook](https://github.com/evilmartians/lefthook): configures and
-  manages pre-commit hooks for linting and testing Markdown content.
+  manages git pre-commit and pre-push hooks for linting and testing Markdown content.
- [prettier](https://prettier.io/docs/en/): formats code, including Markdown, according to style rules for consistency
- [Cypress]: e2e testing for UI elements and URLs in content

@@ -93,9 +95,11 @@ Make your suggested changes being sure to follow the [style and formatting guide
## Lint and test your changes

+`package.json` contains scripts for running tests and linting.

### Automatic pre-commit checks

-docs-v2 uses Lefthook to manage Git hooks, such as pre-commit hooks that lint Markdown and test code blocks.
+docs-v2 uses Lefthook to manage Git hooks that run during pre-commit and pre-push. The hooks run the scripts defined in `package.json` to lint Markdown and test code blocks.
When you try to commit changes (`git commit`), Git runs
the commands configured in `lefthook.yml` which pass your **staged** files to Vale,
Prettier, Cypress (for UI tests and link-checking), and Pytest (for testing Python and shell code in code blocks).
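For example, a quick sketch of skipping the hooks for a one-off change (the commit message is a placeholder):

```sh
# Commit without running the pre-commit hooks
git commit -m "docs: fix typo" --no-verify

# Push without running the pre-push hooks
git push --no-verify
```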


@@ -1,5 +1,5 @@
plugins:
-  - './../openapi/plugins/docs-plugin.js'
+  - './../openapi/plugins/docs-plugin.cjs'
extends:
  - recommended
  - docs/all


@@ -1,5 +1,5 @@
plugins:
-  - '../../openapi/plugins/docs-plugin.js'
+  - '../../openapi/plugins/docs-plugin.cjs'
extends:
  - recommended
  - docs/all


@@ -1,5 +1,5 @@
plugins:
-  - '../../openapi/plugins/docs-plugin.js'
+  - '../../openapi/plugins/docs-plugin.cjs'
extends:
  - recommended
  - docs/all


@@ -218,11 +218,11 @@ tags:
|:-----------:|:------------------------ |:--------------------- |
| `200` | Success | |
| `204` | Success. No content | InfluxDB doesn't return data for the request. |
-| `400` | Bad request | May indicate one of the following: <ul><li>Line protocol is malformed. The response body contains the first malformed line in the data and indicates what was expected. For partial writes, the number of points written and the number of points rejected are also included. For more information, check the `rejected_points` measurement in your `_monitoring` bucket.</li><li>`Authorization` header is missing or malformed or the API token doesn't have permission for the operation.</li></ul> |
+| `400` | Bad request | May indicate one of the following:<ul><li>the request body is malformed</li><li>`Authorization` header is missing or malformed</li><li>the API token doesn't have permission for the operation.</li></ul> |
| `401` | Unauthorized | May indicate one of the following: <ul><li>`Authorization: Token` header is missing or malformed</li><li>API token value is missing from the header</li><li>API token doesn't have permission. For more information about token types and permissions, see [Manage API tokens](/influxdb/v2/security/tokens/)</li></ul> |
| `404` | Not found | Requested resource was not found. `message` in the response body provides details about the requested resource. |
| `413` | Request entity too large | Request payload exceeds the size limit. |
-| `422` | Unprocessable entity | Request data is invalid. `code` and `message` in the response body provide details about the problem. |
+| `422` | Unprocessable entity | Request data is invalid. The request was well-formed, but couldn't complete due to semantic errors--for example, some or all points in a write request were rejected due to a schema or retention policy violation. The response body provides details about the problem. For more information about rejected points, see how to [Troubleshoot issues writing data](/influxdb/v2/write-data/troubleshoot/)|
| `429` | Too many requests | API token is temporarily over the request quota. The `Retry-After` header describes when to try the request again. |
| `500` | Internal server error | |
| `503` | Service unavailable | Server is temporarily unavailable to process the request. The `Retry-After` header describes when to try the request again. |
@@ -12752,6 +12752,12 @@ paths:
            - Returns this error only if the [Go (golang) `ioutil.ReadAll()`](https://pkg.go.dev/io/ioutil#ReadAll) function raises an error.
            - Returns `Content-Type: application/json` for this error.
+        '422':
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/Error'
+          description: The request was well-formed, but some or all the points were rejected due to semantic errors--for example, schema conflicts or retention policy violations. Error message contains details for one or more rejected points.
        '429':
          description: |
            Too many requests.

@@ -13190,6 +13196,14 @@ paths:
        - Legacy Query
  /write:
    post:
+      description: |-
+        Writes line protocol to the specified bucket.
+
+        This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools.
+
+        Use this endpoint to send data in [line protocol](https://docs.influxdata.com/influxdb/v2/reference/syntax/line-protocol/) format to InfluxDB.
+        Use query parameters to specify options for writing data.
      operationId: PostLegacyWrite
      parameters:
        - $ref: '#/components/parameters/TraceSpan'

@@ -13263,6 +13277,12 @@ paths:
              schema:
                $ref: '#/components/schemas/LineProtocolLengthError'
          description: Write has been rejected because the payload is too large. Error message returns max size supported. All data in body was rejected and not written.
+        '422':
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/Error'
+          description: The request was well-formed, but some or all the points were rejected due to semantic errors--for example, schema conflicts or retention policy violations. Error message contains details for one or more rejected points.
        '429':
          description: Token is temporarily over quota. The Retry-After header describes when to try the write again.
          headers:
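The `/write` description added above pairs naturally with a request example. A minimal sketch of a 1.x-compatible write, assuming a `mydb/autogen` database/retention-policy mapping and `API_TOKEN` as a placeholder:

```sh
curl -X POST "http://localhost:8086/write?db=mydb&rp=autogen&precision=ns" \
  --header "Authorization: Token API_TOKEN" \
  --data-binary 'home,room=kitchen temp=21.5 1718035200000000000'
```

A `204` status means the data passed validation; a `400` or `422` response body carries the details described in the status-code table above.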


@@ -1,5 +1,5 @@
plugins:
-  - '../../openapi/plugins/docs-plugin.js'
+  - '../../openapi/plugins/docs-plugin.cjs'
extends:
  - recommended
  - docs/all


@@ -40,6 +40,7 @@ tags:
      See how to [create a management token](/influxdb3/cloud-dedicated/admin/tokens/management/).

      By default, management tokens in InfluxDB 3 are short-lived tokens issued by an OAuth2 identity provider that grant a specific user administrative access to your InfluxDB cluster. However, for automation purposes, you can manually create management tokens that authenticate directly with your InfluxDB cluster and do not require human interaction with your identity provider.
+      <!-- ReDoc-Inject: <security-definitions> -->
  - name: Database tokens
    description: Manage database read/write tokens for a cluster
  - name: Databases


@@ -1,5 +1,5 @@
plugins:
-  - '../../openapi/plugins/docs-plugin.js'
+  - '../../openapi/plugins/docs-plugin.cjs'
extends:
  - recommended
  - docs/all


@@ -1,5 +1,5 @@
plugins:
-  - '../../openapi/plugins/docs-plugin.js'
+  - '../../openapi/plugins/docs-plugin.cjs'
extends:
  - recommended
  - docs/all


@@ -1,5 +1,5 @@
plugins:
-  - '../../openapi/plugins/docs-plugin.js'
+  - '../../openapi/plugins/docs-plugin.cjs'
extends:
  - recommended
  - docs/all


@@ -52,7 +52,7 @@ tags:
      #### Related guides

      - [Manage tokens](/influxdb3/core/admin/tokens/)
-      - [Authentication and authorization](/influxdb3/core/reference/authentication/)
+      - [Authentication and authorization](/influxdb3/core/reference/internals/authentication/)
    x-traitTag: true
  - name: Cache data
    description: |


@@ -1,5 +1,5 @@
plugins:
-  - '../../openapi/plugins/docs-plugin.js'
+  - '../../openapi/plugins/docs-plugin.cjs'
extends:
  - recommended
  - docs/all


@@ -52,7 +52,7 @@ tags:
      #### Related guides

      - [Manage tokens](/influxdb3/enterprise/admin/tokens/)
-      - [Authentication and authorization](/influxdb3/enterprise/reference/authentication/)
+      - [Authentication and authorization](/influxdb3/enterprise/reference/internals/authentication/)
    x-traitTag: true
  - name: Cache data
    description: |

@@ -157,7 +157,7 @@ tags:
      1. [Create an admin token](#section/Authentication) for the InfluxDB 3 Enterprise API.

         ```bash
-         curl -X POST "http://localhost:8181/api/v3/enterprise/configure/token/admin"
+         curl -X POST "http://localhost:8181/api/v3/configure/token/admin"
         ```

      2. [Check the status](#section/Server-information) of the InfluxDB server.

@@ -1351,15 +1351,13 @@ paths:
      tags:
        - Authentication
        - Token
-  /api/v3/configure/enterprise/token/admin:
+  /api/v3/configure/token/admin:
    post:
      operationId: PostCreateAdminToken
      summary: Create admin token
      description: |
        Creates an admin token.
        An admin token is a special type of token that has full access to all resources in the system.
-        This endpoint is only available in InfluxDB 3 Enterprise.
      responses:
        '201':
          description: |

@@ -1374,14 +1372,12 @@ paths:
      tags:
        - Authentication
        - Token
-  /api/v3/configure/enterprise/token/admin/regenerate:
+  /api/v3/configure/token/admin/regenerate:
    post:
      operationId: PostRegenerateAdminToken
      summary: Regenerate admin token
      description: |
        Regenerates an admin token and revokes the previous token with the same name.
-        This endpoint is only available in InfluxDB 3 Enterprise.
      parameters: []
      responses:
        '201':

@@ -1940,8 +1936,6 @@ components:
      scheme: bearer
      bearerFormat: JWT
      description: |
-        _During Alpha release, an API token is not required._
        A Bearer token for authentication.
        Provide the scheme and the API token in the `Authorization` header--for example:
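Putting the corrected path and the Bearer scheme together, a minimal sketch against a local InfluxDB 3 Enterprise server (port 8181 as in the quick-start steps above; `ADMIN_TOKEN` is a placeholder for the returned token string):

```sh
# Create the admin token (the response body contains the token string)
curl -X POST "http://localhost:8181/api/v3/configure/token/admin"

# Use the token as a Bearer credential on subsequent requests
curl "http://localhost:8181/health" \
  --header "Authorization: Bearer ADMIN_TOKEN"
```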


@@ -1,6 +1,6 @@
module.exports = SetTagGroups;

-const { collect, getName, sortName, isPresent } = require('../../helpers/content-helper.js')
+const { collect, getName, sortName, isPresent } = require('../../helpers/content-helper.cjs')
/**
 * Returns an object that defines handler functions for:
 * - Operation nodes


@ -1,25 +0,0 @@
module.exports = SetTags;
const { tags } = require('../../../content/content')
/**
* Returns an object that defines handler functions for:
* - DefinitionRoot (the root openapi) node
* The DefinitionRoot handler, executed when
* the parser is leaving the root node,
* sets the root `tags` list to the provided `data`.
*/
/** @type {import('@redocly/openapi-cli').OasDecorator} */
function SetTags() {
const data = tags();
return {
DefinitionRoot: {
/** Set tags from custom tags when visitor enters root. */
enter(root) {
if(data) {
root.tags = data;
}
}
}
}
};


@@ -1,5 +1,5 @@
const path = require('path');
-const { toJSON } = require('./helpers/content-helper');
+const { toJSON } = require('./helpers/content-helper.cjs');

function getVersioned(filename) {
  const apiDocsRoot=path.resolve(process.env.API_DOCS_ROOT_PATH || process.cwd());


@@ -1,14 +1,14 @@
-const {info, servers, tagGroups} = require('./docs-content');
-const ReportTags = require('./rules/report-tags');
-const ValidateServersUrl = require('./rules/validate-servers-url');
-const RemovePrivatePaths = require('./decorators/paths/remove-private-paths');
-const ReplaceShortcodes = require('./decorators/replace-shortcodes');
-const SetInfo = require('./decorators/set-info');
-const DeleteServers = require('./decorators/servers/delete-servers');
-const SetServers = require('./decorators/servers/set-servers');
-const SetTagGroups = require('./decorators/tags/set-tag-groups');
-const StripVersionPrefix = require('./decorators/paths/strip-version-prefix');
-const StripTrailingSlash = require('./decorators/paths/strip-trailing-slash');
+const {info, servers, tagGroups} = require('./docs-content.cjs');
+const ReportTags = require('./rules/report-tags.cjs');
+const ValidateServersUrl = require('./rules/validate-servers-url.cjs');
+const RemovePrivatePaths = require('./decorators/paths/remove-private-paths.cjs');
+const ReplaceShortcodes = require('./decorators/replace-shortcodes.cjs');
+const SetInfo = require('./decorators/set-info.cjs');
+const DeleteServers = require('./decorators/servers/delete-servers.cjs');
+const SetServers = require('./decorators/servers/set-servers.cjs');
+const SetTagGroups = require('./decorators/tags/set-tag-groups.cjs');
+const StripVersionPrefix = require('./decorators/paths/strip-version-prefix.cjs');
+const StripTrailingSlash = require('./decorators/paths/strip-trailing-slash.cjs');

const id = 'docs';

assets/js/index.js (new file)

@@ -0,0 +1 @@
export * from './main.js';


@@ -6,9 +6,6 @@
/** Import modules that are not components.
 * TODO: Refactor these into single-purpose component modules.
 */
-// import * as codeblocksPreferences from './api-libs.js';
-// import * as datetime from './datetime.js';
-// import * as featureCallouts from './feature-callouts.js';
import * as apiLibs from './api-libs.js';
import * as codeControls from './code-controls.js';
import * as contentInteractions from './content-interactions.js';
@@ -21,15 +18,6 @@ import * as pageContext from './page-context.js';
import * as pageFeedback from './page-feedback.js';
import * as tabbedContent from './tabbed-content.js';
import * as v3Wayfinding from './v3-wayfinding.js';
-// import * as homeInteractions from './home-interactions.js';
-// import { getUrls, getReferrerHost, InfluxDBUrl } from './influxdb-url.js';
-// import * as keybindings from './keybindings.js';
-// import * as listFilters from './list-filters.js';
-// import { Modal } from './modal.js';
-// import { showNotifications } from './notifications.js';
-// import ReleaseTOC from './release-toc.js';
-// import * as scroll from './scroll.js';
-// import { TabbedContent } from './tabbed-content.js';
/** Import component modules
 * The component pattern organizes JavaScript, CSS, and HTML for a specific UI element or interaction:

@@ -41,40 +29,95 @@ import * as v3Wayfinding from './v3-wayfinding.js';
import AskAITrigger from './ask-ai-trigger.js';
import CodePlaceholder from './code-placeholders.js';
import { CustomTimeTrigger } from './custom-timestamps.js';
+import FluxInfluxDBVersionsTrigger from './flux-influxdb-versions.js';
import { SearchButton } from './search-button.js';
import { SidebarToggle } from './sidebar-toggle.js';
import Theme from './theme.js';
import ThemeSwitch from './theme-switch.js';
-// import CodeControls from './code-controls.js';
-// import ContentInteractions from './content-interactions.js';
-// import CustomTimestamps from './custom-timestamps.js';
-// import Diagram from './Diagram.js';
-// import FluxGroupKeysExample from './FluxGroupKeysExample.js';
-import FluxInfluxDBVersionsTrigger from './flux-influxdb-versions.js';
-// import PageFeedback from './page-feedback.js';
-// import SearchInput from './SearchInput.js';
-// import Sidebar from './Sidebar.js';
-// import V3Wayfinding from './v3-wayfinding.js';
-// import VersionSelector from './VersionSelector.js';
-// Expose libraries and components within a namespaced object (for backwards compatibility or testing)
-// Expose libraries and components within a namespaced object (for backwards compatibility or testing)
-document.addEventListener('DOMContentLoaded', function () {
+/**
+ * Component Registry
+ * A central registry that maps component names to their constructor functions.
+ * Add new components to this registry as they are created or migrated from non-component modules.
+ * This allows for:
+ * 1. Automatic component initialization based on data-component attributes
+ * 2. Centralized component management
+ * 3. Easy addition/removal of components
+ * 4. Simplified testing and debugging
+ */
+const componentRegistry = {
+  'ask-ai-trigger': AskAITrigger,
+  'code-placeholder': CodePlaceholder,
+  'custom-time-trigger': CustomTimeTrigger,
+  'flux-influxdb-versions-trigger': FluxInfluxDBVersionsTrigger,
+  'search-button': SearchButton,
+  'sidebar-toggle': SidebarToggle,
+  'theme': Theme,
+  'theme-switch': ThemeSwitch
+};
+
+/**
+ * Initialize global namespace for documentation JavaScript
+ * Exposes core modules for debugging, testing, and backwards compatibility
+ */
+function initGlobals() {
  if (typeof window.influxdatadocs === 'undefined') {
    window.influxdatadocs = {};
  }
-  // Expose modules to the global object for debugging, testing, and backwards compatibility for non-ES6 modules.
+  // Expose modules to the global object for debugging, testing, and backwards compatibility
  window.influxdatadocs.delay = delay;
  window.influxdatadocs.localStorage = window.LocalStorageAPI = localStorage;
  window.influxdatadocs.pageContext = pageContext;
  window.influxdatadocs.toggleModal = modals.toggleModal;
+  window.influxdatadocs.componentRegistry = componentRegistry;
+
+  return window.influxdatadocs;
+}

-// On content loaded, initialize (not-component-ready) UI interaction modules
-// To differentiate these from component-ready modules, these modules typically export an initialize function that wraps UI interactions and event listeners.
+/**
+ * Initialize components based on data-component attributes
+ * @param {Object} globals - The global influxdatadocs namespace
+ */
+function initComponents(globals) {
+  const components = document.querySelectorAll('[data-component]');
+  components.forEach((component) => {
+    const componentName = component.getAttribute('data-component');
+    const ComponentConstructor = componentRegistry[componentName];
+
+    if (ComponentConstructor) {
+      // Initialize the component and store its instance in the global namespace
+      try {
+        const instance = ComponentConstructor({ component });
+        globals[componentName] = ComponentConstructor;
+
+        // Optionally store component instances for future reference
+        if (!globals.instances) {
+          globals.instances = {};
+        }
+        if (!globals.instances[componentName]) {
+          globals.instances[componentName] = [];
+        }
+        globals.instances[componentName].push({
+          element: component,
+          instance
+        });
+      } catch (error) {
+        console.error(`Error initializing component "${componentName}":`, error);
+      }
+    } else {
+      console.warn(`Unknown component: "${componentName}"`);
+    }
+  });
+}
+
+/**
+ * Initialize all non-component modules
+ */
+function initModules() {
  modals.initialize();
  apiLibs.initialize();
  codeControls.initialize();
@@ -84,67 +127,24 @@ document.addEventListener('DOMContentLoaded', function () {
  pageFeedback.initialize();
  tabbedContent.initialize();
  v3Wayfinding.initialize();
+}

-  /** Initialize components
-  Component Structure: Each component is structured as a jQuery anonymous function that listens for the document ready state.
-  Initialization in main.js: Each component is called in main.js inside a jQuery document ready function to ensure they are initialized when the document is ready.
-  Note: These components should *not* be called directly in the HTML.
-  */
-  const components = document.querySelectorAll('[data-component]');
-  components.forEach((component) => {
-    const componentName = component.getAttribute('data-component');
-    switch (componentName) {
-      case 'ask-ai-trigger':
-        AskAITrigger({ component });
-        window.influxdatadocs[componentName] = AskAITrigger;
-        break;
-      case 'code-placeholder':
-        CodePlaceholder({ component });
-        window.influxdatadocs[componentName] = CodePlaceholder;
-        break;
-      case 'custom-time-trigger':
-        CustomTimeTrigger({ component });
-        window.influxdatadocs[componentName] = CustomTimeTrigger;
-        break;
-      case 'flux-influxdb-versions-trigger':
-        FluxInfluxDBVersionsTrigger({ component });
-        window.influxdatadocs[componentName] = FluxInfluxDBVersionsTrigger;
-        break;
-      case 'search-button':
-        SearchButton({ component });
-        window.influxdatadocs[componentName] = SearchButton;
-        break;
-      case 'sidebar-toggle':
-        SidebarToggle({ component });
-        window.influxdatadocs[componentName] = SidebarToggle;
-        break;
-      case 'theme':
-        Theme({ component });
-        window.influxdatadocs[componentName] = Theme;
-        break;
-      // CodeControls();
-      // ContentInteractions();
-      // CustomTimestamps();
-      // Diagram();
-      // FluxGroupKeysExample();
-      // FluxInfluxDBVersionsModal();
-      // InfluxDBUrl();
-      // Modal();
-      // PageFeedback();
-      // ReleaseTOC();
-      // SearchInput();
-      // showNotifications();
-      // Sidebar();
-      // TabbedContent();
-      // ThemeSwitch({});
-      // V3Wayfinding();
-      // VersionSelector();
-      case 'theme-switch':
-        ThemeSwitch({ component });
-        window.influxdatadocs[componentName] = ThemeSwitch;
-        break;
-      default:
-        console.warn(`Unknown component: ${componentName}`);
-    }
-  });
-});
+
+/**
+ * Main initialization function
+ */
+function init() {
+  // Initialize global namespace and expose core modules
+  const globals = initGlobals();
+
+  // Initialize non-component UI modules
+  initModules();
+
+  // Initialize components from registry
+  initComponents(globals);
+}
+
+// Initialize everything when the DOM is ready
+document.addEventListener('DOMContentLoaded', init);
+
+// Export public API
+export { initGlobals, componentRegistry };
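For orientation, a hypothetical sketch of how a page opts into one of these components: the registry maps an element's `data-component` value to a constructor, and `initComponents()` calls it on DOMContentLoaded (the markup below is illustrative, not copied from a template):

```html
<!-- initComponents() finds this element via the [data-component] selector
     and calls componentRegistry['theme-switch']({ component }) -->
<button data-component="theme-switch">Toggle theme</button>
```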


@@ -3,7 +3,8 @@
  "baseUrl": ".",
  "paths": {
    "*": [
-      "*"
+      "*",
+      "../node_modules/*"
    ]
  }
}


@@ -0,0 +1,2 @@
import:
- hugo.yml

config/testing/config.yml (new file)

@@ -0,0 +1,20 @@
baseURL: 'http://localhost:1315/'
server:
port: 1315
# Override settings for testing
buildFuture: true
# Configure what content is built in testing env
params:
environment: testing
buildTestContent: true
# Keep your shared content exclusions
ignoreFiles:
- "content/shared/.*"
# Ignore specific warning logs
ignoreLogs:
- warning-goldmark-raw-html
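Assuming this file follows Hugo's environment-specific config layout (a `config/testing/` directory selected with the `--environment` flag), a sketch of serving the site with these overrides on port 1315:

```sh
# Hypothetical invocation: load config/testing/ on top of the defaults
hugo server --environment testing
```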


@@ -6,14 +6,14 @@ related:
  - /influxdb/v2/write-data/
  - /influxdb/v2/write-data/quick-start
  - https://influxdata.com, This is an external link
-draft: true
+test_only: true # Custom parameter to indicate test-only content
---

This is a paragraph. Lorem ipsum dolor ({{< icon "trash" "v2" >}}) sit amet, consectetur adipiscing elit. Nunc rutrum, metus id scelerisque euismod, erat ante suscipit nibh, ac congue enim risus id est. Etiam tristique nisi et tristique auctor. Morbi eu bibendum erat. Sed ullamcorper, dui id lobortis efficitur, mauris odio pharetra neque, vel tempor odio dolor blandit justo.

[Ref link][foo]

-[foo]: https://docs.influxadata.com
+[foo]: https://docs.influxdata.com

This is **bold** text. This is _italic_ text. This is _**bold and italic**_.


@@ -19,4 +19,5 @@ source: /shared/influxdb-v2/write-data/troubleshoot.md
---

<!-- The content of this file is at
-// SOURCE content/shared/influxdb-v2/write-data/troubleshoot.md-->
+// SOURCE content/shared/influxdb-v2/write-data/troubleshoot.md
+-->


@@ -164,13 +164,13 @@ gpg: key 7C3D57159FC2F927: public key "InfluxData Package Signing Key <support@i
<!--test:setup
```sh
curl --silent --location --output-dir ~/Downloads -O \
-  "https://download.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}_darwin_amd64.tar.gz" \
+  "https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_darwin_amd64.tar.gz" \
```
-->

```sh
curl --silent --location \
-  https://download.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}_darwin_amd64.tar.gz.asc \
+  https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_darwin_amd64.tar.gz.asc \
  | gpg --verify - ~/Downloads/influxdb2-{{< latest-patch >}}_darwin_amd64.tar.gz \
  2>&1 | grep 'InfluxData Package Signing Key <support@influxdata.com>'
```

@@ -239,12 +239,12 @@ brew install influxdb
1. In your browser or your terminal, download the InfluxDB package.

-   <a class="btn download" href="https://download.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}_darwin_amd64.tar.gz" download>InfluxDB v2 (macOS)</a>
+   <a class="btn download" href="https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_darwin_amd64.tar.gz" download>InfluxDB v2 (macOS)</a>

   ```sh
   # Download using cURL
   curl --location -O \
-     "https://download.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}_darwin_amd64.tar.gz"
+     "https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_darwin_amd64.tar.gz"
   ```

2. {{< req text="Recommended:" color="magenta" >}}: Verify the integrity of the download--for example, enter the

@@ -443,18 +443,18 @@ _If necessary, adjust the example file paths and utilities for your system._
1. In your browser or your terminal, download the InfluxDB binary for your
   system architecture (AMD64 or ARM).

-   <a class="btn download" href="https://download.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}_linux_amd64.tar.gz" download >InfluxDB v2 (amd64)</a>
-   <a class="btn download" href="https://download.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}_linux_arm64.tar.gz" download >InfluxDB v2 (arm)</a>
+   <a class="btn download" href="https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_linux_amd64.tar.gz" download >InfluxDB v2 (amd64)</a>
+   <a class="btn download" href="https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_linux_arm64.tar.gz" download >InfluxDB v2 (arm)</a>

   <!--test:actual
   ```sh
   curl -s --location -O \
-     "https://download.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}_linux_amd64.tar.gz"
+     "https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_linux_amd64.tar.gz"
   ```

   ```sh
   curl -s --location -O \
-     "https://download.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}_linux_arm64.tar.gz"
+     "https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_linux_arm64.tar.gz"
   ```
   -->

@@ -463,7 +463,7 @@ _If necessary, adjust the example file paths and utilities for your system._
   ```sh
   # Use curl to download the amd64 binary.
   curl --location -O \
-     https://download.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}_linux_amd64.tar.gz
+     https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_linux_amd64.tar.gz
   ```

   <!--pytest.mark.skip-->

@@ -471,7 +471,7 @@ _If necessary, adjust the example file paths and utilities for your system._
   ```sh
   # Use curl to download the arm64 binary.
   curl --location -O \
-     https://download.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}_linux_arm64.tar.gz
+     https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_linux_arm64.tar.gz
   ```

2. [Choose the InfluxData key-pair for your OS version](#choose-the-influxdata-key-pair-for-your-system).

@@ -505,7 +505,7 @@ _If necessary, adjust the example file paths and utilities for your system._
   | grep 'InfluxData Package Signing Key <support@influxdata.com>' \
   &&
   # Download and verify the binary's signature file
-   curl --silent --location "https://download.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}_linux_amd64.tar.gz.asc" \
+   curl --silent --location "https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_linux_amd64.tar.gz.asc" \
   | gpg --verify - influxdb2-{{< latest-patch >}}_linux_amd64.tar.gz \
   2>&1 | grep 'InfluxData Package Signing Key <support@influxdata.com>'
   ```

@@ -519,7 +519,7 @@ _If necessary, adjust the example file paths and utilities for your system._
   | grep 'InfluxData Package Signing Key <support@influxdata.com>' \
   &&
   # Download and verify the binary's signature file
-   curl --silent --location "https://download.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}_linux_arm64.tar.gz.asc" \
+   curl --silent --location "https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_linux_arm64.tar.gz.asc" \
   | gpg --verify - influxdb2-{{< latest-patch >}}_linux_arm64.tar.gz \
   2>&1 | grep 'InfluxData Package Signing Key <support@influxdata.com>'
   ```

@@ -618,7 +618,7 @@ chmod 0750 ~/.influxdbv2
>
> _You'll install the `influx CLI` in a [later step](#download-install-and-configure-the-influx-cli)._

-<a class="btn download" href="https://download.influxdata.com/influxdb/releases/influxdb2-{{< latest-patch >}}-windows.zip" download >InfluxDB v2 (Windows)</a>
+<a class="btn download" href="https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}-windows.zip" download >InfluxDB v2 (Windows)</a>

Expand the downloaded archive into `C:\Program Files\InfluxData\` and rename the
files if desired.


@@ -2744,6 +2744,61 @@ storage-validate-keys = true
---
### storage-wal-flush-on-shutdown
Flush the WAL on shutdown.
**Default:** `false`
| influxd flag | Environment variable | Configuration key |
| :------------------------------ | :------------------------------------ | :---------------------------- |
| `--storage-wal-flush-on-shutdown` | `INFLUXD_STORAGE_WAL_FLUSH_ON_SHUTDOWN` | `storage-wal-flush-on-shutdown` |
If set, `influxd` flushes or snapshots all WALs prior to completing shutdown--`influxd` performs cache snapshots on shutdown, which
results in the WAL files being written to TSM files and then deleted.
This is useful in upgrade and downgrade scenarios to prevent WAL format
compatibility issues.
###### influxd flag
<!--pytest.mark.skip-->
```sh
influxd --storage-wal-flush-on-shutdown
```
###### Environment variable
```sh
export INFLUXD_STORAGE_WAL_FLUSH_ON_SHUTDOWN=true
```
###### Configuration file
{{< code-tabs-wrapper >}}
{{% code-tabs %}}
[YAML](#)
[TOML](#)
[JSON](#)
{{% /code-tabs %}}
{{% code-tab-content %}}
```yml
storage-wal-flush-on-shutdown: true
```
{{% /code-tab-content %}}
{{% code-tab-content %}}
```toml
storage-wal-flush-on-shutdown = true
```
{{% /code-tab-content %}}
{{% code-tab-content %}}
```json
{
"storage-wal-flush-on-shutdown": true
}
```
{{% /code-tab-content %}}
{{< /code-tabs-wrapper >}}
---
### storage-wal-fsync-delay

Duration a write will wait before fsyncing.
A duration greater than `0` batches multiple fsync calls.


@@ -8,6 +8,27 @@ menu:
weight: 101
---
## v2.7.12 {date="2025-05-20"}
### Features
- Add a `--pid-file` option to write a PID file to the specified location on startup. InfluxDB removes the PID file on shutdown.
- Add a `--storage-wal-flush-on-shutdown` option to flush the WAL on database shutdown to ensure all data is written to disk.
- Improve response error messages for dropped points, adding details including database, retention policy, and which bound was violated for partial writes.
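The two new flags can be combined at startup. A quick sketch (the PID file path is a placeholder, not a default):

```sh
# Hypothetical invocation using both new v2.7.12 options
influxd --pid-file /var/run/influxd.pid --storage-wal-flush-on-shutdown
```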
### Bug Fixes
- Fix a locking issue in `TagValueIterator` that could cause reads and writes in buckets to block. [PR #26414](https://github.com/influxdata/influxdb/pull/26414)
### Maintenance
- Improved startup logging with an "are we there yet" counter for the number and percentage of shards opened.
- Update Go to 1.23.9.
- Update Flux to v0.196.1.
- Refresh dependencies to address security vulnerabilities and improve stability.
---
## v2.7.11 {date="2024-12-02"}

### Features
@@ -606,7 +627,7 @@ to migrate InfluxDB key-value metadata schemas to earlier 2.x versions when nece
#### Flux

- Update to [Flux v0.139.0](/flux/v0/release-notes/#v01390).
-- Enable writing to remote hosts using the Flux [`to()`](/flux/v0/stdlib/influxdata/influxdb/to/) and [`experimental.to()`](/flux/v0/v0.x/stdlib/experimental/to/) functions.
+- Enable writing to remote hosts using the Flux [`to()`](/flux/v0/stdlib/influxdata/influxdb/to/) and [`experimental.to()`](/flux/v0/stdlib/experimental/to/) functions.
- Flux now supports locations that dynamically modify time offsets based on your specified timezone. You can also specify fixed time offsets relative to UTC.
- Perform [bitwise operations](/flux/v0/stdlib/experimental/bitwise/)
  on integers and unsigned integers.
@@ -673,24 +694,24 @@ New features include:
- Add a new route `/api/v2/resources` that returns a list of known resources to the platform, including the following resource types. Makes it easier to update All Access tokens with current resources:
  - `AuthorizationsResourceType`
  - `BucketsResourceType`
  - `ChecksResourceType`
  - `DashboardsResourceType`
  - `DBRPResourceType`
  - `DocumentsResourceType`
  - `LabelsResourceType`
  - `NotificationEndpointResourceType`
  - `NotificationRuleResourceType`
  - `OrgsResourceType`
  - `ScraperResourceType`
  - `SecretsResourceType`
  - `SourcesResourceType`
  - `TasksResourceType`
  - `TelegrafsResourceType`
  - `UsersResourceType`
  - `VariablesResourceType`
  - `ViewsResourceType`

#### Flux updates
@@ -992,10 +1013,10 @@ The startup process automatically generates replacement `tsi1` indexes for shard
- Fix timeout setup for `influxd` graceful shutdown.
- Require user to set password during initial user onboarding.
- Error message improvements:
  - Remove duplication from task error messages.
  - Improve error message shown when influx CLI can't find an `org` by name.
  - Improve error message when opening BoltDB with unsupported file system options.
  - Improve messages in DBRP API validation errors.
- `influxd upgrade` improvements:
  - Add confirmation step with file sizes before copying data files.
  - Prevent panic in `influxd upgrade` when v1 users exist but v1 config is missing.
@@ -1072,8 +1093,8 @@ Previously, the database retention policy (DBRP) mapping API did not match the s
### Features

- Improvements to upgrade from 1.x to 2.x:
  - Warning appears if auth is not enabled in 1.x (`auth-enabled = false`), which is not an option in 2.x. For details, see [Upgrade from InfluxDB 1.x to InfluxDB 2.0](/influxdb/v2/upgrade/v1-to-v2/).
  - `upgrade` command now checks to see if continuous queries are running and automatically exports them to a local file.
- Upgrade to [Flux v0.95.0](/flux/v0/release-notes/#v0-95-0).
- Upgrade `flux-lsp-browser` to v.0.5.23.
- Manage database retention policy (DBRP) mappings via CLI. See [`influx v1 dbrp`](/influxdb/v2/reference/cli/influx/v1/dbrp/).
@@ -1117,8 +1138,8 @@ When there are multiple [DBRP mappings](/influxdb/v2/reference/api/influxdb-1x/d
Highlights include:

- Support for **upgrading to InfluxDB 2.0**:
  - To upgrade **from InfluxDB 1.x**, see [Upgrade from InfluxDB 1.x to InfluxDB 2.0](/influxdb/v2/upgrade/v1-to-v2).
-  - To upgrade **from InfluxDB 2.0 beta 16 or earlier**, see [Upgrade from InfluxDB 2.0 beta to InfluxDB 2.0](/influxdb/v2/upgrade/v2-beta-to-v2).
+  - To upgrade **from InfluxDB 2.0 beta 16 or earlier**, see [Upgrade from InfluxDB 2.0 beta to InfluxDB 2.0](/influxdb/v2/install/upgrade/v2-beta-to-v2/).
- **Flux**, our powerful new functional data scripting language designed for querying, analyzing, and acting on data. This release includes [Flux v0.94.0](/flux/v0/release-notes/#v0940). If you're new to Flux, [check out how to get started with Flux](/influxdb/v2/query-data/get-started/). Next, delve deeper into the [Flux standard library](/flux/v0/stdlib//) reference docs and see how to [query with Flux](/influxdb/v2/query-data/flux/).
- Support for [InfluxDB 1.x API compatibility](/influxdb/v2/reference/api/influxdb-1x/).
- **Templates** and **stacks**. Discover how to [use community templates](/influxdb/v2/tools/influxdb-templates/use/) and how to [manage templates with stacks](/influxdb/v2/tools/influxdb-templates/stacks/).
@@ -1241,14 +1262,14 @@ If you're new to InfluxDB 2.0, we recommend checking out [how to get started](/i
{{% warn %}}
#### Manual upgrade required

-To simplify the migration for existing users of InfluxDB 1.x, this release includes significant breaking changes that require a manual upgrade from all alpha and beta versions. For more information, see [Upgrade to InfluxDB OSS 2.0rc](/influxdb/v2/upgrade/v2-beta-to-v2/),
+To simplify the migration for existing users of InfluxDB 1.x, this release includes significant breaking changes that require a manual upgrade from all alpha and beta versions. For more information, see [Upgrade to InfluxDB OSS 2.0rc](/influxdb/v2/install/upgrade/v2-beta-to-v2/),
{{% /warn %}}

### Breaking changes

#### Manual upgrade

-- To continue using data from InfluxDB 2.0 beta 16 or earlier, you must move all existing data out of the `~/.influxdbv2` (or equivalent) path, including `influxd.bolt`. All existing dashboards, tasks, integrations, alerts, users, and tokens must be recreated. For information on how to migrate your data, see [Upgrade to InfluxDB OSS 2.0rc](/influxdb/v2/upgrade/v2-beta-to-v2/).
+- To continue using data from InfluxDB 2.0 beta 16 or earlier, you must move all existing data out of the `~/.influxdbv2` (or equivalent) path, including `influxd.bolt`. All existing dashboards, tasks, integrations, alerts, users, and tokens must be recreated. For information on how to migrate your data, see [Upgrade to InfluxDB OSS 2.0rc](/influxdb/v2/install/upgrade/v2-beta-to-v2/).

#### Port update to 8086
@@ -2045,7 +2066,7 @@ _**This will remove all data from your InfluxDB v2.0 instance including time ser
###### Linux and macOS

```sh
-rm ~/.influxdbv2/influxd.bolt
+rm -f ~/.influxdbv2/influxd.bolt
```

Once completed, `v2.0.0-alpha.6` can be started.
@@ -2079,7 +2100,7 @@ run the following command.
###### Linux and macOS

```sh
-rm -r ~/.influxdbv2/engine
+rm -rf ~/.influxdbv2/engine
```

Once completed, InfluxDB v2.0.0-alpha.5 can be started.


@@ -13,312 +13,9 @@ related:
- /influxdb/v2/api/#tag/Write, InfluxDB API /write endpoint
- /influxdb/v2/reference/internals
- /influxdb/v2/reference/cli/influx/write
+source: /shared/influxdb-v2/write-data/troubleshoot.md
---
+<!-- The content of this file is at
+// SOURCE content/shared/influxdb-v2/write-data/troubleshoot.md
+-->
-Learn how to avoid unexpected results and recover from errors when writing to InfluxDB.
-{{% show-in "v2" %}}
-- [Handle `write` and `delete` responses](#handle-write-and-delete-responses)
-- [Troubleshoot failures](#troubleshoot-failures)
{{% show-in "cloud,cloud-serverless" %}}
- [Handle `write` and `delete` responses](#handle-write-and-delete-responses)
- [Troubleshoot failures](#troubleshoot-failures)
- [Troubleshoot rejected points](#troubleshoot-rejected-points)
{{% /show-in %}}
## Handle `write` and `delete` responses
{{% show-in "cloud,cloud-serverless" %}}
In InfluxDB Cloud, writes and deletes are asynchronous and eventually consistent.
Once InfluxDB validates your request and [queues](/influxdb/cloud/reference/internals/durability/#backup-on-write) the write or delete, it sends a _success_ response (HTTP `204` status code) as an acknowledgement.
To ensure that InfluxDB handles writes and deletes in the order you request them, wait for the acknowledgement before you send the next request.
Because writes are asynchronous, keep the following in mind:
- Data might not yet be queryable when you receive _success_ (HTTP `204` status code).
- InfluxDB may still reject points after you receive _success_ (HTTP `204` status code).
{{% /show-in %}}
{{% show-in "v2" %}}
If InfluxDB OSS successfully writes all the request data to the bucket, InfluxDB returns _success_ (HTTP `204` status code).
The first rejected point in a batch causes InfluxDB to reject the entire batch and respond with an [HTTP error status](#review-http-status-codes).
{{% /show-in %}}
### Review HTTP status codes
InfluxDB uses conventional HTTP status codes to indicate the success or failure of a request.
Write requests return the following status codes:
{{% show-in "cloud,cloud-serverless" %}}
| HTTP response code | Message | Description |
| :-------------------------------| :--------------------------------------------------------------- | :------------- |
| `204 "Success"` | | If InfluxDB validated the request data format and queued the data for writing to the bucket |
| `400 "Bad request"` | `message` contains the first malformed line | If data is malformed |
| `401 "Unauthorized"` | | If the [`Authorization: Token` header](/influxdb/cloud/api-guide/api_intro/#authentication) is missing or malformed or if the [API token](/influxdb/cloud/api-guide/api_intro/#authentication) doesn't have [permission](/influxdb/cloud/admin/tokens/) to write to the bucket |
| `404 "Not found"` | requested **resource type**, e.g. "organization", and **resource name** | If a requested resource (e.g. organization or bucket) wasn't found |
| `413 “Request too large”` | cannot read data: points in batch is too large | If a **write** request exceeds the maximum [global limit](/influxdb/cloud/account-management/limits/#global-limits) |
| `429 “Too many requests”` | `Retry-After` header: xxx (seconds to wait before retrying the request) | If a **read** or **write** request exceeds your plan's [adjustable service quotas](/influxdb/cloud/account-management/limits/#adjustable-service-quotas) or if a **delete** request exceeds the maximum [global limit](/influxdb/cloud/account-management/limits/#global-limits) |
| `500 "Internal server error"` | | Default status for an error |
| `503 “Service unavailable“` | Series cardinality exceeds your plan's service quota | If **series cardinality** exceeds your plan's [adjustable service quotas](/influxdb/cloud/account-management/limits/#adjustable-service-quotas) |
{{% /show-in %}}
{{% show-in "v2" %}}
- `204` **Success**: All request data was written to the bucket.
- `400` **Bad request**: The [line protocol](/influxdb/v2/reference/syntax/line-protocol/) data in the request was malformed.
The response body contains the first malformed line in the data. All request data was rejected and not written.
- `401` **Unauthorized**: May indicate one of the following:
- [`Authorization: Token` header](/influxdb/v2/api-guide/api_intro/#authentication) is missing or malformed.
- [API token](/influxdb/v2/api-guide/api_intro/#authentication) value is missing from the header.
- API token does not have sufficient permissions to write to the organization and the bucket. For more information about token types and permissions, see [Manage API tokens](/influxdb/v2/admin/tokens/).
- `404` **Not found**: A requested resource (e.g. an organization or bucket) was not found. The response body contains the requested resource type, e.g. "organization", and resource name.
- `413` **Request entity too large**: All request data was rejected and not written. InfluxDB OSS only returns this error if the [Go (golang) `ioutil.ReadAll()`](https://pkg.go.dev/io/ioutil#ReadAll) function raises an error.
- `500` **Internal server error**: Default HTTP status for an error.
- `503` **Service unavailable**: Server is temporarily unavailable to accept writes. The `Retry-After` header describes when to try the write again.
{{% /show-in %}}
The `message` property of the response body may contain additional details about the error.
If some of your data did not write to the bucket, see how to [troubleshoot rejected points](#troubleshoot-rejected-points).
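The following minimal sketch shows one way to handle write responses in application code, assuming the Python `requests` package and placeholder organization, bucket, token, and line protocol values. It checks for _success_ (HTTP `204`), waits the number of seconds in the `Retry-After` header on HTTP `429`, and surfaces the `message` property for other errors:

```python
import time
import requests

url = "https://{{< influxdb/host >}}/api/v2/write"
params = {"org": "ORG_NAME", "bucket": "BUCKET_NAME", "precision": "s"}
headers = {"Authorization": "Token API_TOKEN"}
data = "home,room=kitchen temp=22.5 1637014074"  # placeholder line protocol

for attempt in range(3):
    response = requests.post(url, params=params, headers=headers, data=data)
    if response.status_code == 204:
        # Success: InfluxDB validated and accepted (or queued) the data.
        break
    if response.status_code == 429:
        # Rate limited: wait the suggested number of seconds, then retry.
        time.sleep(int(response.headers.get("Retry-After", "1")))
        continue
    # Other errors: the message property describes the problem.
    raise RuntimeError(f"write failed ({response.status_code}): {response.json().get('message')}")
else:
    raise RuntimeError("write failed: retry limit reached")
```

InfluxDB client libraries implement similar response handling for you; this sketch only illustrates the status codes described above.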
{{% show-in "cloud,cloud-serverless" %}}
### Troubleshoot partial writes
Because writes are asynchronous, they may fail partially or completely even though InfluxDB returns an HTTP `2xx` status code for a valid request.
For example, a partial write may occur when InfluxDB writes all points that conform to the bucket schema, but rejects points that have the wrong data type in a field.
To check for writes that fail asynchronously, create a [task](/influxdb/cloud/process-data/manage-tasks/) to [check the `_monitoring` bucket for rejected points](#review-rejected-points).
To resolve partial writes and rejected points, see [troubleshoot failures](#troubleshoot-failures).
{{% /show-in %}}
## Troubleshoot failures
{{% show-in "v2" %}}
If you notice data is missing in your bucket, do the following:
- Check the `message` property in the response body for details about the error.
- If the `message` describes a field error, [troubleshoot rejected points](#troubleshoot-rejected-points).
- Verify all lines contain valid syntax ([line protocol](/influxdb/v2/reference/syntax/line-protocol/) or [CSV](/influxdb/v2/reference/syntax/annotated-csv/)).
- Verify the timestamps match the [precision parameter](/influxdb/v2/write-data/#timestamp-precision).
- Minimize payload size and network errors by [optimizing writes](/influxdb/v2/write-data/best-practices/optimize-writes/).
{{% /show-in %}}
{{% show-in "cloud,cloud-serverless" %}}
If you notice data is missing in your bucket, do the following:
- Check the `message` property in the response body for details about the error--for example, `partial write error` indicates [rejected points](#troubleshoot-rejected-points).
- Check for [rejected points](#troubleshoot-rejected-points) in your organization's `_monitoring` bucket.
- Verify all lines contain valid syntax ([line protocol](/influxdb/cloud/reference/syntax/line-protocol/) or [CSV](/influxdb/cloud/reference/syntax/annotated-csv/)). See how to [find parsing errors](#find-parsing-errors).
- Verify the data types match the [series](/influxdb/cloud/reference/key-concepts/data-elements/#series) or [bucket schema](/influxdb/cloud/admin/buckets/bucket-schema/). See how to resolve [explicit schema rejections](#resolve-explicit-schema-rejections).
- Verify the timestamps match the [precision parameter](/influxdb/cloud/write-data/#timestamp-precision).
- Minimize payload size and network errors by [optimizing writes](/influxdb/cloud/write-data/best-practices/optimize-writes/).
{{% /show-in %}}
## Troubleshoot rejected points
{{% show-in "v2" %}}
InfluxDB rejects points for the following reasons:
- The **batch** contains another point with the same series, but one of the fields has a different value type.
- The **bucket** contains another point with the same series, but one of the fields has a different value type.
Check for [field type](/influxdb/v2/reference/key-concepts/data-elements/#field-value) differences between the missing data point and other points that have the same [series](/influxdb/v2/reference/key-concepts/data-elements/#series)--for example, did you attempt to write `string` data to an `int` field?
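For example, the following hypothetical batch would be rejected because the second line writes the `temp` field as a string after the first line writes it as a float:
```
home,room=kitchen temp=22.5 1637014074
home,room=kitchen temp="22.5" 1637014075
```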
{{% /show-in %}}
{{% show-in "cloud,cloud-serverless" %}}
InfluxDB may have rejected points even if the HTTP request returned "Success".
InfluxDB logs rejected data points and associated errors to your organization's `_monitoring` bucket.
- [Review rejected points](#review-rejected-points)
- [Find parsing errors](#find-parsing-errors)
- [Find data type conflicts and schema rejections](#find-data-type-conflicts-and-schema-rejections)
- [Resolve data type conflicts](#resolve-data-type-conflicts)
- [Resolve explicit schema rejections](#resolve-explicit-schema-rejections)
### Review rejected points
To get a log of rejected points, query the [`rejected_points` measurement](/influxdb/cloud/reference/internals/system-buckets/#_monitoring-bucket-schema) in your organization's `_monitoring` bucket.
To more quickly locate `rejected_points`, keep the following in mind:
- If your line protocol batch contains single lines with multiple [fields](/influxdb/cloud/reference/syntax/line-protocol/#field-set), InfluxDB logs an entry for each point (each unique field) that is rejected.
- Each entry contains a `reason` tag that describes why the point was rejected.
- Entries for [data type conflicts and schema rejections](#find-data-type-conflicts-and-schema-rejections) have a `count` field value of `1`.
- Entries for [parsing errors](#find-parsing-errors) contain an `error` field (and don't contain a `count` field).
#### rejected_points schema
| Name | Value |
|:------ |:----- |
| `_measurement`| `rejected_points` |
| `_field` | [`count`](#find-data-type-conflicts-and-schema-rejections) or [`error`](#find-parsing-errors) |
| `_value` | [`1`](#find-data-type-conflicts-and-schema-rejections) or [error details](#find-parsing-errors) |
| `bucket` | ID of the bucket that rejected the point |
| `measurement` | Measurement name of the point |
| `field` | Name of the field that caused the rejection |
| `reason` | Brief description of the problem. See specific reasons in [data type conflicts and schema rejections](#find-data-type-conflicts-and-schema-rejections) |
| `gotType` | Received [field](/influxdb/cloud/reference/key-concepts/data-elements/#field-value) type: `Boolean`, `Float`, `Integer`, `String`, or `UnsignedInteger` |
| `wantType` | Expected [field](/influxdb/cloud/reference/key-concepts/data-elements/#field-value) type: `Boolean`, `Float`, `Integer`, `String`, or `UnsignedInteger` |
| `<timestamp>` | Time the rejected point was logged |
#### Find parsing errors
If InfluxDB can't parse a line (e.g. due to syntax problems), the response `message` might not provide details.
To find parsing error details, query `rejected_points` entries that contain the `error` field.
```js
from(bucket: "_monitoring")
|> range(start: -1h)
|> filter(fn: (r) => r._measurement == "rejected_points")
|> filter(fn: (r) => r._field == "error")
```
#### Find data type conflicts and schema rejections
To find `rejected_points` caused by [data type conflicts](#resolve-data-type-conflicts) or [schema rejections](#resolve-explicit-schema-rejections),
query for the `count` field.
```js
from(bucket: "_monitoring")
|> range(start: -1h)
|> filter(fn: (r) => r._measurement == "rejected_points")
|> filter(fn: (r) => r._field == "count")
```
### Resolve data type conflicts
When you write to a bucket that has the `implicit` schema type, InfluxDB compares new points to points that have the same [series](/influxdb/cloud/reference/key-concepts/data-elements/#series).
If a point has a field with a different data type than the series, InfluxDB rejects the point and logs a `rejected_points` entry.
The `rejected_points` entry contains one of the following reasons:
| Reason | Meaning |
|:------ |:------- |
| `type conflict in batch write` | The **batch** contains another point with the same series, but one of the fields has a different value type. |
| `type conflict with existing data` | The **bucket** contains another point with the same series, but one of the fields has a different value type. |
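For example, in the following hypothetical batch, the second line triggers a `type conflict in batch write` rejection because it writes the `temp` field as a string while the first line writes it as a float:
```
home,room=kitchen temp=22.5 1637014074
home,room=kitchen temp="22.5" 1637014075
```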
### Resolve explicit schema rejections
If you write to a bucket with an
[explicit schema](/influxdb/cloud/admin/buckets/bucket-schema/),
the data must conform to the schema. Otherwise, InfluxDB rejects the data.
Do the following to interpret explicit schema rejections:
- [Detect a measurement mismatch](#detect-a-measurement-mismatch)
- [Detect a field type mismatch](#detect-a-field-type-mismatch)
#### Detect a measurement mismatch
InfluxDB rejects a point if the [measurement](/influxdb/cloud/reference/key-concepts/data-elements/#measurement) doesn't match the **name** of a [bucket schema](/influxdb/cloud/admin/buckets/bucket-schema/).
The `rejected_points` entry contains the following `reason` tag value:
| Reason | Meaning |
|:------ |:------- |
| `measurement not allowed by schema` | The **bucket** is configured to use explicit schemas and none of the schemas matches the **measurement** of the point. |
Consider the following [line protocol](/influxdb/cloud/reference/syntax/line-protocol) data.
```
airSensors,sensorId=TLM0201 temperature=73.97,humidity=35.23,co=0.48 1637014074
```
The line has an `airSensors` measurement and three fields (`temperature`, `humidity`, and `co`).
If you try to write this data to a bucket that has the [`explicit` schema type](/influxdb/cloud/admin/buckets/bucket-schema/) and doesn't have an `airSensors` schema, the `/api/v2/write` InfluxDB API returns an error and the following data:
```json
{
"code": "invalid",
"message": "3 out of 3 points rejected (check rejected_points in your _monitoring bucket for further information)"
}
```
InfluxDB logs three `rejected_points` entries, one for each field.
| _measurement | _field | _value | field | measurement | reason |
|:----------------|:-------|:-------|:------------|:------------|:----------------------------------|
| rejected_points | count | 1 | humidity | airSensors | measurement not allowed by schema |
| rejected_points | count | 1 | co | airSensors | measurement not allowed by schema |
| rejected_points | count | 1 | temperature | airSensors | measurement not allowed by schema |
#### Detect a field type mismatch
InfluxDB rejects a point if the [measurement](/influxdb/cloud/reference/key-concepts/data-elements/#measurement) matches the **name** of a bucket schema and the field data types don't match.
The `rejected_points` entry contains the following reason:
| Reason | Meaning |
|:------------------------------------|:-----------------------------------------------------------------------------------------------------|
| `field type mismatch with schema` | The point has the same measurement as a configured schema and they have different field value types. |
Consider a bucket that has the following `airSensors` [`explicit bucket schema`](/influxdb/cloud/admin/buckets/bucket-schema/):
```json
{
"name": "airSensors",
"columns": [
{
"name": "time",
"type": "timestamp"
},
{
"name": "sensorId",
"type": "tag"
},
{
"name": "temperature",
"type": "field",
"dataType": "float"
},
{
"name": "humidity",
"type": "field",
"dataType": "float"
},
{
"name": "co",
"type": "field",
"dataType": "float"
}
]
}
```
The following [line protocol](/influxdb/cloud/reference/syntax/line-protocol/) data has an `airSensors` measurement, a `sensorId` tag, and three fields (`temperature`, `humidity`, and `co`).
```
airSensors,sensorId=L1 temperature=90.5,humidity=70.0,co=0.2 1637014074
airSensors,sensorId=L1 temperature="90.5",humidity=70.0,co=0.2 1637014074
```
In the example data above, the second point has a `temperature` field value with the _string_ data type.
Because the `airSensors` schema requires `temperature` to have the _float_ data type,
InfluxDB returns a `400` error and a message that describes the result:
```json
{
"code": "invalid",
"message": "partial write error (5 accepted): 1 out of 6 points rejected (check rejected_points in your _monitoring bucket for further information)"
}
```
InfluxDB logs the following `rejected_points` entry to the `_monitoring` bucket:
| _measurement | _field | _value | bucket | field | gotType | measurement | reason | wantType |
|:------------------|:-------|:-------|:-------------------|:--------------|:---------|:------------|:----------------------------------|:---------|
| rejected_points | count | 1 | a7d5558b880a93da | temperature | String | airSensors | field type mismatch with schema | Float |
{{% /show-in %}}


@ -18,7 +18,7 @@ The InfluxDB time series platform is designed to handle high write and query loa
Learn how to use and leverage InfluxDB Cloud Dedicated for your specific
time series use case.
<a class="btn" href="{{< cta-link >}}">Run an {{% product-name %}} proof of concept (PoC)</a>
<a class="btn" href="/influxdb3/cloud-dedicated/get-started/">Get started with InfluxDB Cloud Dedicated</a>
## InfluxDB 3


@ -71,13 +71,18 @@ and managing tables.
can sort on column headers or use the **Search** field to find a specific cluster.
4. In the database list, find and click the database you want to create a table in. You
can sort on column headers or use the **Search** field to find a specific database.
4. Click the **New Table** button above the table list. 5. Click the **New Table** button above the table list.
The **Create table** dialog displays.
5. In the **Create table** dialog, provide a **Table name**.
6. Toggle **Use default partitioning** to **On**
7. Click the **Create Table** button.
{{% /tab-content %}}
{{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-create-table-default.png" alt="Create table dialog" />}}
6. In the **Create table** dialog, provide a **Table name**.
7. Leave **Use custom partitioning** set to **Off**.
By default, the table inherits the database's partition template.
If no custom partition template is applied to the database, the table inherits the default partitioning of `%Y-%m-%d` (daily).
8. Click the **Create Table** button.
{{% /tab-content %}}
{{% tab-content %}}
<!------------------------------- BEGIN INFLUXCTL ----------------------------->
1. If you haven't already, [download and install the `influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/#download-and-install-influxctl).
@ -95,8 +100,8 @@ influxctl table create \
Replace:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the database to create the table in
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name for your new table
{{% /tab-content %}}
{{% /tab-content %}}
{{% tab-content %}}
<!------------------------------- BEGIN MANAGEMENT API ------------------------------>
_This example uses [cURL](https://curl.se/) to send a Management HTTP API request, but you can use any HTTP client._
@ -123,11 +128,12 @@ curl \
Replace the following:
- {{% code-placeholder-key %}}`ACCOUNT_ID`{{% /code-placeholder-key %}}: the account ID for the cluster - {{% code-placeholder-key %}}`ACCOUNT_ID`{{% /code-placeholder-key %}}: the [account](/influxdb3/cloud-dedicated/admin/account/) ID for the cluster _(list details via the [Admin UI](/influxdb3/cloud-dedicated/admin/clusters/list/) or [CLI](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json))_
- {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: the cluster ID - {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: the [cluster](/influxdb3/cloud-dedicated/admin/clusters/) ID _(list details via the [Admin UI](/influxdb3/cloud-dedicated/admin/clusters/list/) or [CLI](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json))_.
- {{% code-placeholder-key %}}`MANAGEMENT_TOKEN`{{% /code-placeholder-key %}}: a valid management token - {{% code-placeholder-key %}}`MANAGEMENT_TOKEN`{{% /code-placeholder-key %}}: a valid [management token](/influxdb3/cloud-dedicated/admin/tokens/management/) for your {{% product-name %}} cluster
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the database to create the table in - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the [database](/influxdb3/cloud-dedicated/admin/databases/) to create the table in
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name for your new table
{{% /tab-content %}}
{{< /tabs-wrapper >}}
@ -161,21 +167,26 @@ If a table doesn't have a custom partition template, it inherits the database's
can sort on column headers or use the **Search** field to find a specific cluster.
4. In the database list, find and click the database you want to create a table in. You
can sort on column headers or use the **Search** field to find a specific database.
4. Click the **New Table** button above the table list. 5. Click the **New Table** button above the table list.
The **Create table** dialog displays.
<img src="/img/influxdb3/cloud-dedicated-admin-ui-create-table.png" alt="Create table dialog" />
5. In the **Create table** dialog, provide a **Table name**.
6. Make sure the **Use default partitioning** toggle is set to **Off**
7. Provide the following:
- **Custom partition template time format**: The time part for partitioning data. {{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-create-table-default.png" alt="Create table dialog" />}}
- _Optional_: **Custom partition template tag parts**: The tag parts for partitioning data.
- _Optional_: **Custom partition template tag bucket parts**: The tag bucket parts for partitioning data. 6. In the **Create table** dialog, provide a **Table name**.
8. _Optional_: To add more parts to the partition template, click the **Add Tag** button. 7. Toggle **Use custom partitioning** to **On**.
9. Click the **Create Table** button to create the table. The **Custom partition template** section displays.
{{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-create-table-custom-partitioning.png" alt="Create table dialog with custom partitioning" />}}
8. Provide the following:
- **Custom partition template time format**: The time part for partitioning data (yearly, monthly, or daily).
- _Optional_: **Custom partition template tag parts**: The [tag parts](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#tag-part-templates) for partitioning data.
- _Optional_: **Custom partition template tag bucket parts**: The [tag bucket parts](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/#tag-bucket-part-templates) for partitioning data.
9. _Optional_: To add more parts to the partition template, click the **Add Tag** button. For more information, see [Partition template requirements and guidelines](#partition-template-requirements-and-guidelines).
10. Click the **Create Table** button to create the table.
The new table displays in the list of tables for the cluster.
{{% /tab-content %}}
{{% tab-content %}}
<!------------------------------- BEGIN INFLUXCTL CUSTOM ----------------------------->
1. If you haven't already, [download and install the `influxctl` CLI](/influxdb3/cloud-dedicated/get-started/setup/#download-install-and-configure-the-influxctl-cli).
@ -220,7 +231,6 @@ Replace the following:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the [database](/influxdb3/cloud-dedicated/admin/databases/) to create the table in
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name you want for the new table
{{% /tab-content %}} {{% /tab-content %}}
{{% tab-content %}}
<!------------------------------- BEGIN MANAGEMENT API CUSTOM ------------------------------>
_This example uses [cURL](https://curl.se/) to send a Management HTTP API request, but you can use any HTTP client._


@ -6,7 +6,7 @@ description: >
menu:
influxdb3_cloud_dedicated:
parent: Reference
weight: 6 weight: 105
---
The Administrative (Admin) UI for {{% product-name %}} is a browser-based, no-code way to manage your {{% product-name %}} environment and perform administrative tasks, such as creating and managing clusters, databases, and tokens.


@ -9,7 +9,7 @@ menu:
influxdb3_cloud_dedicated:
parent: Reference
name: InfluxDB HTTP API
weight: 104 weight: 105
influxdb3/cloud-dedicated/tags: [api]
---


@ -10,11 +10,9 @@ menu:
parent: Reference
name: CLIs
weight: 104
draft: true # draft: true
---
The following command line interfaces (CLIs) are available:
{{< children >}}


@ -4,7 +4,7 @@ description: >
InfluxDB client libraries are language-specific tools that integrate with InfluxDB APIs.
View the list of available client libraries.
list_title: API client libraries
weight: 105 weight: 106
aliases:
- /influxdb3/cloud-dedicated/reference/api/client-libraries/
- /influxdb3/cloud-dedicated/tools/client-libraries/


@ -5,7 +5,7 @@ description: >
InfluxDB uses an InfluxQL-like predicate syntax to determine what data points to delete.
menu:
influxdb3_cloud_serverless:
parent: Syntax parent: Other syntaxes
name: Delete predicate
weight: 104
influxdb3/cloud-serverless/tags: [syntax, delete]


@ -18,7 +18,7 @@ The InfluxDB time series platform is designed to handle high write and query loa
Learn how to use and leverage InfluxDB Clustered for your specific
time series use case.
<a class="btn" href="{{< cta-link >}}">Run an {{% product-name %}} proof of concept (PoC)</a>
<a class="btn" href="/influxdb3/clustered/get-started/">Get started with InfluxDB Clustered</a>
## InfluxDB 3


@ -55,7 +55,12 @@ snapshot. When a snapshot is restored to the Catalog store, the Compactor
A _soft delete_ refers to when, on compaction, the Compactor sets a `deleted_at`
timestamp on the Parquet file entry in the Catalog.
The Parquet file is no
longer queryable, but remains intact in the object store.
> [!Note]
> Soft deletes are a mechanism of the {{% product-name %}} Catalog, not of the
> underlying object storage provider. Soft deletes do not modify objects in the
> object store; only Catalog entries that reference objects in the object store.
## Hard delete
@ -219,6 +224,15 @@ written on or around the beginning of the next hour.
Use the following process to restore your InfluxDB cluster to a recovery point
using Catalog store snapshots:
> [!Warning]
>
> #### Use the same InfluxDB Clustered version used to generate the snapshot
>
> When restoring an InfluxDB cluster to a recovery point, use the same version
> of InfluxDB Clustered used to generate the Catalog store snapshot.
> You may need to [downgrade to a previous version](/influxdb3/clustered/admin/upgrade/)
> before restoring.
1. **Install prerequisites:**
- `kubectl` CLI for managing your Kubernetes deployment.
@ -273,7 +287,8 @@ using Catalog store snapshots:
metadata:
name: influxdb
namespace: influxdb
pause: true spec:
pause: true
# ...
```
@ -331,7 +346,8 @@ using Catalog store snapshots:
metadata:
name: influxdb
namespace: influxdb
pause: false spec:
pause: false
# ...
```
@ -349,8 +365,6 @@ Your InfluxDB cluster is now restored to the recovery point.
When the Garbage Collector runs, it identifies what Parquet files are not
associated with the recovery point and [soft deletes](#soft-delete) them.
## Resources
### prep\_pg\_dump.awk


@ -22,9 +22,9 @@ to delete a database from your InfluxDB cluster.
1. If you haven't already, [download and install the `influxctl` CLI](/influxdb3/clustered/reference/cli/influxctl/#download-and-install-influxctl).
2. Run the `influxctl database delete` command and provide the following:
- Name of the database to delete - The name of the database to delete
3. Confirm that you want to delete the database.
{{% code-placeholders "DATABASE_NAME" %}}
```sh
@ -37,9 +37,12 @@ influxctl database delete DATABASE_NAME
>
> Once a database is deleted, data stored in that database cannot be recovered.
>
> #### Cannot reuse database names > #### Wait before writing to a new database with the same name
>
> After a database is deleted, you cannot reuse the same name for a new database. > After deleting a database from your {{% product-name omit=" Clustered" %}}
> cluster, you can reuse the name to create a new database, but **wait two to
> three minutes** after deleting the previous database before writing to the new
> database to allow write caches to clear.
>
> #### Never directly modify the Catalog
>


@ -1,55 +0,0 @@
---
title: Delete a database token
description: >
Use the [`influxctl token delete` command](/influxdb3/clustered/reference/cli/influxctl/token/delete/)
to delete a token from your InfluxDB cluster and revoke all
permissions associated with the token.
Provide the ID of the token you want to delete.
menu:
influxdb3_clustered:
parent: Database tokens
weight: 203
list_code_example: |
```sh
influxctl token delete <TOKEN_ID>
```
aliases:
- /influxdb3/clustered/admin/tokens/delete/
---
Use the [`influxctl token delete` command](/influxdb3/clustered/reference/cli/influxctl/token/delete/)
to delete a database token from your InfluxDB cluster and revoke
all permissions associated with the token.
1. If you haven't already, [download and install the `influxctl` CLI](/influxdb3/clustered/reference/cli/influxctl/#download-and-install-influxctl).
2. Run the [`influxctl token list` command](/influxdb3/clustered/reference/cli/influxctl/token/list)
to output tokens with their IDs.
Copy the **token ID** of the token you want to delete.
```sh
influxctl token list
```
3. Run the `influxctl token delete` command and provide the following:
- Token ID to delete
4. Confirm that you want to delete the token.
{{% code-placeholders "TOKEN_ID" %}}
```sh
influxctl token delete TOKEN_ID
```
{{% /code-placeholders %}}
> [!Warning]
> #### Deleting a token is immediate and cannot be undone
>
> Deleting a database token is a destructive action that takes place immediately
> and cannot be undone.
>
> #### Rotate deleted tokens
>
> After deleting a database token, any clients using the deleted token need to be
> updated with a new database token to continue to interact with your InfluxDB
> cluster.


@ -0,0 +1,56 @@
---
title: Revoke a database token
description: >
Use the [`influxctl token revoke` command](/influxdb3/clustered/reference/cli/influxctl/token/revoke/)
to revoke a token from your InfluxDB cluster and disable all
permissions associated with the token.
Provide the ID of the token you want to revoke.
menu:
influxdb3_clustered:
parent: Database tokens
weight: 203
list_code_example: |
```sh
influxctl token revoke <TOKEN_ID>
```
aliases:
- /influxdb3/clustered/admin/tokens/delete/
- /influxdb3/clustered/admin/tokens/database/delete/
---
Use the [`influxctl token revoke` command](/influxdb3/clustered/reference/cli/influxctl/token/revoke/)
to revoke a database token from your {{< product-name omit=" Clustered" >}} cluster and disable
all permissions associated with the token.
1. If you haven't already, [download and install the `influxctl` CLI](/influxdb3/clustered/reference/cli/influxctl/#download-and-install-influxctl).
2. Run the [`influxctl token list` command](/influxdb3/clustered/reference/cli/influxctl/token/list)
to output tokens with their IDs.
Copy the **token ID** of the token you want to revoke.
```sh
influxctl token list
```
3. Run the `influxctl token revoke` command and provide the following:
- Token ID to revoke
4. Confirm that you want to revoke the token.
{{% code-placeholders "TOKEN_ID" %}}
```sh
influxctl token revoke TOKEN_ID
```
{{% /code-placeholders %}}
> [!Warning]
> #### Revoking a token is immediate and cannot be undone
>
> Revoking a database token is a destructive action that takes place immediately
> and cannot be undone.
>
> #### Rotate revoked tokens
>
> After revoking a database token, any clients using the revoked token need to
> be updated with a new database token to continue to interact with your
> {{% product-name omit=" Clustered" %}} cluster.


@ -68,17 +68,12 @@ Be sure to follow [partitioning best practices](/influxdb3/clustered/admin/custo
> Otherwise, InfluxDB omits time from the partition template and won't compact partitions.
> [!Warning]
> #### Cannot reuse deleted database names > #### Wait before writing to a new database with the same name as a deleted database
>
> You cannot reuse the name of a deleted database when creating a new database. > After deleting a database from your {{% product-name omit=" Clustered" %}}
> If you try to reuse the name, the API response status code > cluster, you can reuse the name to create a new database, but **wait two to
> is `400` and the `message` field contains the following: > three minutes** after deleting the previous database before writing to the new
> > database to allow write caches to clear.
> ```text
> 'iox_proxy.app.CreateDatabase failed to create database: \
> rpc error: code = AlreadyExists desc = A namespace with the
> name `<DATABASE_NAME>` already exists'
> ```
## Usage


@ -1,14 +1,16 @@
---
title: influxctl database delete
description: >
The `influxctl database delete` command deletes a database from an InfluxDB cluster. The `influxctl database delete` command deletes a database from an
{{% product-name omit=" Clustered" %}} cluster.
menu:
influxdb3_clustered:
parent: influxctl database
weight: 301
---
The `influxctl database delete` command deletes a database from an InfluxDB cluster. The `influxctl database delete` command deletes a database from an
{{< product-name omit=" Clustered" >}} cluster.
## Usage
@ -24,10 +26,12 @@ influxctl database delete [command options] [--force] <DATABASE_NAME> [<DATABASE
>
> Deleting a database is a destructive action that cannot be undone.
>
> #### Cannot reuse deleted database names > #### Wait before writing to a new database with the same name
>
> After deleting a database, you cannot reuse the name of the deleted database > After deleting a database from your {{% product-name omit=" Clustered" %}}
> when creating a new database. > cluster, you can reuse the name to create a new database, but **wait two to
> three minutes** after deleting the previous database before writing to the new
> database to allow write caches to clear.
## Arguments


@ -7,7 +7,7 @@ menu:
influxdb3_clustered:
name: Release notes
parent: Reference
weight: 190 weight: 101
---
View release notes and updates for products and tools related to


@ -25,6 +25,30 @@ weight: 201
---
## 20250508-1719206 {date="2025-05-08"}
### Quickstart
```yaml
spec:
package:
image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20250508-1719206
```
### Changes
#### Deployment
- Expose the v0 REST API for the management and authorization service (Granite).
#### Database Engine
- Reuse database names after deletion.
- Create database tokens with expiration dates.
- Revoke database tokens rather than deleting them.
---
## 20250212-1570743 {date="2025-02-12"}
### Quickstart


@ -0,0 +1,17 @@
---
title: Extend plugins with API features and state management
description: |
The Processing engine includes an API that allows your plugins to interact with your data, build and write line protocol, and maintain state between executions.
menu:
influxdb3_core:
name: Extend plugins
parent: Processing engine and Python plugins
weight: 4
influxdb3/core/tags: [processing engine, plugins, API, python]
source: /shared/extended-plugin-api.md
---
<!--
// SOURCE content/shared/extended-plugin-api.md
-->


@ -13,7 +13,7 @@ related:
- /influxdb3/core/reference/cli/influxdb3/query/
- /influxdb3/core/reference/sql/
- /influxdb3/core/reference/influxql/
# - /influxdb3/core/get-started/query/#execute-an-sql-query, Get started querying data # - /influxdb3/core/query-data/execute-queries/, Get started querying data
list_code_example: |
```sh
influxdb3 query \


@ -1,15 +0,0 @@
---
title: influxdb3 create plugin
description: >
The `influxdb3 create plugin` command creates a new processing engine plugin.
menu:
influxdb3_core:
parent: influxdb3 create
name: influxdb3 create plugin
weight: 400
source: /shared/influxdb3-cli/create/plugin.md
---
<!--
The content of this file is at content/shared/influxdb3-cli/create/plugin.md
-->


@ -1,15 +0,0 @@
---
title: influxdb3 delete plugin
description: >
The `influxdb3 delete plugin` command deletes a processing engine plugin.
menu:
influxdb3_core:
parent: influxdb3 delete
name: influxdb3 delete plugin
weight: 400
source: /shared/influxdb3-cli/delete/last_cache.md
---
<!--
The content of this file is at content/shared/influxdb3-cli/delete/plugin.md
-->


@ -16,7 +16,7 @@ list_code_example: |
```
#### HTTP API
```bash
curl -X POST "http://{{< influxdb/host >}}/api/v3/enterprise/configure/token/admin" \ curl -X POST "http://{{< influxdb/host >}}/api/v3/configure/token/admin" \
--header 'Accept: application/json' \
--header 'Content-Type: application/json'
```


@ -0,0 +1,16 @@
---
title: Extend plugins with API features and state management
description: |
The Processing engine includes an API that allows your plugins to interact with your data, build and write line protocol, and maintain state between executions.
menu:
influxdb3_enterprise:
name: Extend plugins
parent: Processing engine and Python plugins
weight: 4
influxdb3/enterprise/tags: [processing engine, plugins, API, python]
source: /shared/extended-plugin-api.md
---
<!--
// SOURCE content/shared/extended-plugin-api.md
-->


@ -5,7 +5,7 @@ description: |
code on different events in an {{< product-name >}} instance.
menu:
influxdb3_enterprise:
name: Processing Engine and Python plugins name: Processing engine and Python plugins
weight: 4
influxdb3/enterprise/tags: [processing engine, python]
related:


@ -13,7 +13,7 @@ related:
- /influxdb3/enterprise/reference/cli/influxdb3/query/
- /influxdb3/enterprise/reference/sql/
- /influxdb3/enterprise/reference/influxql/
# - /influxdb3/enterprise/get-started/query/#execute-an-sql-query, Get started querying data # - /influxdb3/enterprise/query-data/execute-queries/, Get started querying data
list_code_example: |
```sh
influxdb3 query \


@ -1,15 +0,0 @@
---
title: influxdb3 create plugin
description: >
The `influxdb3 create plugin` command creates a new processing engine plugin.
menu:
influxdb3_enterprise:
parent: influxdb3 create
name: influxdb3 create plugin
weight: 400
source: /shared/influxdb3-cli/create/plugin.md
---
<!--
The content of this file is at content/shared/influxdb3-cli/create/plugin.md
-->


@ -1,15 +0,0 @@
---
title: influxdb3 delete plugin
description: >
The `influxdb3 delete plugin` command deletes a processing engine plugin.
menu:
influxdb3_enterprise:
parent: influxdb3 delete
name: influxdb3 delete plugin
weight: 400
source: /shared/influxdb3-cli/delete/last_cache.md
---
<!--
The content of this file is at content/shared/influxdb3-cli/delete/plugin.md
-->


@ -0,0 +1,323 @@
The Processing Engine includes a shared API that your plugins can use to interact with data, write new records in line protocol format, and maintain state between executions. These capabilities let you build plugins that transform, analyze, and respond to time series data as it flows through your database.
The plugin API lets you:
- [Write data](#write-data)
- [Query data](#query-data)
- [Log messages for monitoring and debugging](#log-messages-for-monitoring-and-debugging)
- [Maintain state with the in-memory cache](#maintain-state-with-the-in-memory-cache)
- [Store and retrieve cached data](#store-and-retrieve-cached-data)
- [Use TTL appropriately](#use-ttl-appropriately)
- [Share data across plugins](#share-data-across-plugins)
- [Build a counter](#build-a-counter)
- [Guidelines for in-memory caching](#guidelines-for-in-memory-caching)
- [Consider cache limitations](#consider-cache-limitations)
## Get started with the shared API
Each plugin automatically has access to the shared API through the `influxdb3_local` object. You don't need to import any libraries; the API is available as soon as your plugin runs.
## Write data
To write data into your database, use the `LineBuilder` API to create line protocol data:
```python
# Create a line protocol entry
line = LineBuilder("weather")
line.tag("location", "us-midwest")
line.float64_field("temperature", 82.5)
line.time_ns(1627680000000000000)
# Write the data to the database
influxdb3_local.write(line)
```
InfluxDB 3 buffers your writes while the plugin runs and flushes them when the plugin completes.
{{% expand-wrapper %}}
{{% expand "View the `LineBuilder` Python implementation" %}}
```python
from typing import Optional
from collections import OrderedDict
class InfluxDBError(Exception):
"""Base exception for InfluxDB-related errors"""
pass
class InvalidMeasurementError(InfluxDBError):
"""Raised when measurement name is invalid"""
pass
class InvalidKeyError(InfluxDBError):
"""Raised when a tag or field key is invalid"""
pass
class InvalidLineError(InfluxDBError):
"""Raised when a line protocol string is invalid"""
pass
class LineBuilder:
def __init__(self, measurement: str):
if ' ' in measurement:
raise InvalidMeasurementError("Measurement name cannot contain spaces")
self.measurement = measurement
self.tags: OrderedDict[str, str] = OrderedDict()
self.fields: OrderedDict[str, str] = OrderedDict()
self._timestamp_ns: Optional[int] = None
def _validate_key(self, key: str, key_type: str) -> None:
"""Validate that a key does not contain spaces, commas, or equals signs."""
if not key:
raise InvalidKeyError(f"{key_type} key cannot be empty")
if ' ' in key:
raise InvalidKeyError(f"{key_type} key '{key}' cannot contain spaces")
if ',' in key:
raise InvalidKeyError(f"{key_type} key '{key}' cannot contain commas")
if '=' in key:
raise InvalidKeyError(f"{key_type} key '{key}' cannot contain equals signs")
def tag(self, key: str, value: str) -> 'LineBuilder':
"""Add a tag to the line protocol."""
self._validate_key(key, "tag")
self.tags[key] = str(value)
return self
def uint64_field(self, key: str, value: int) -> 'LineBuilder':
"""Add an unsigned integer field to the line protocol."""
self._validate_key(key, "field")
if value < 0:
raise ValueError(f"uint64 field '{key}' cannot be negative")
self.fields[key] = f"{value}u"
return self
def int64_field(self, key: str, value: int) -> 'LineBuilder':
"""Add an integer field to the line protocol."""
self._validate_key(key, "field")
self.fields[key] = f"{value}i"
return self
def float64_field(self, key: str, value: float) -> 'LineBuilder':
"""Add a float field to the line protocol."""
self._validate_key(key, "field")
# Check if value has no decimal component
self.fields[key] = f"{int(value)}.0" if value % 1 == 0 else str(value)
return self
def string_field(self, key: str, value: str) -> 'LineBuilder':
"""Add a string field to the line protocol."""
self._validate_key(key, "field")
        # Escape backslashes first, then double quotes, so the backslashes
        # added for quotes aren't escaped a second time
        escaped_value = value.replace('\\', '\\\\').replace('"', '\\"')
self.fields[key] = f'"{escaped_value}"'
return self
def bool_field(self, key: str, value: bool) -> 'LineBuilder':
"""Add a boolean field to the line protocol."""
self._validate_key(key, "field")
self.fields[key] = 't' if value else 'f'
return self
def time_ns(self, timestamp_ns: int) -> 'LineBuilder':
"""Set the timestamp in nanoseconds."""
self._timestamp_ns = timestamp_ns
return self
def build(self) -> str:
"""Build the line protocol string."""
# Start with measurement name (escape commas only)
line = self.measurement.replace(',', '\\,')
# Add tags if present
if self.tags:
tags_str = ','.join(
f"{k}={v}" for k, v in self.tags.items()
)
line += f",{tags_str}"
# Add fields (required)
if not self.fields:
raise InvalidLineError(f"At least one field is required: {line}")
fields_str = ','.join(
f"{k}={v}" for k, v in self.fields.items()
)
line += f" {fields_str}"
# Add timestamp if present
if self._timestamp_ns is not None:
line += f" {self._timestamp_ns}"
return line
```
{{% /expand %}}
{{% /expand-wrapper %}}
## Query data
Your plugins can execute SQL queries and process results directly:
```python
# Simple query
results = influxdb3_local.query("SELECT * FROM metrics WHERE time > now() - INTERVAL '1 hour'")
# Parameterized query for safer execution
params = {"table": "metrics", "threshold": 90}
results = influxdb3_local.query("SELECT * FROM $table WHERE value > $threshold", params)
```
Query results are a `List` of `Dict[str, Any]`, where each dictionary represents a row: column names are the keys and column values are the corresponding values.
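For example, the following sketch iterates over query results; the `metrics` table and its `host` and `value` columns are assumptions for illustration:

```python
results = influxdb3_local.query("SELECT host, value FROM metrics")

# Each row is a dictionary keyed by column name.
for row in results:
    if row["value"] > 90:
        influxdb3_local.warn("High value for host:", row["host"])
```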
## Log messages for monitoring and debugging
Use the shared API's `info`, `warn`, and `error` functions to log messages from your plugin. Each function accepts one or more arguments, converts them to strings, and logs them as a space-separated message.
Add logging to monitor plugin execution and assist with debugging:
```python
influxdb3_local.info("Starting data processing")
influxdb3_local.warn("Could not process some records")
influxdb3_local.error("Failed to connect to external API")
# Log structured data
obj_to_log = {"records": 157, "errors": 3}
influxdb3_local.info("Processing complete", obj_to_log)
```
The system writes all log messages to the server logs and stores them in [system tables](/influxdb3/version/reference/cli/influxdb3/show/system/summary/), where you can query them using SQL.
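For example, a plugin can query its own recent log entries using SQL. The system table name below is an assumption; verify it against the system tables your server exposes:

```python
# Assumed system table name--confirm it in your server's system tables.
recent_logs = influxdb3_local.query(
    "SELECT * FROM system.processing_engine_logs LIMIT 10"
)
influxdb3_local.info("Recent log entries:", recent_logs)
```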
## Maintain state with the in-memory cache
The Processing Engine provides an in-memory cache that enables your plugins to persist and retrieve data between executions.
Access the cache using the `cache` property of the shared API:
```python
# Basic usage pattern
influxdb3_local.cache.METHOD(PARAMETERS)
```
`cache` provides the following methods to retrieve and manage cached values:
| Method | Parameters | Returns | Description |
|--------|------------|---------|-------------|
| `put` | `key` (str): The key to store the value under<br>`value` (Any): Any Python object to cache<br>`ttl` (Optional[float], default=None): Time in seconds before expiration<br>`use_global` (bool, default=False): If True, uses global namespace | None | Stores a value in the cache with an optional time-to-live |
| `get` | `key` (str): The key to retrieve<br>`default` (Any, default=None): Value to return if key not found<br>`use_global` (bool, default=False): If True, uses global namespace | Any | Retrieves a value from the cache or returns default if not found |
| `delete` | `key` (str): The key to delete<br>`use_global` (bool, default=False): If True, uses global namespace | bool | Deletes a value from the cache. Returns True if deleted, False if not found |
### Understanding cache namespaces
The cache system offers two distinct namespaces:
| Namespace | Scope | Best For |
| --- | --- | --- |
| **Trigger-specific** (default) | Isolated to a single trigger | Plugin state, counters, timestamps specific to one plugin |
| **Global** | Shared across all triggers | Configuration, lookup tables, service states that should be available to all plugins |
### Common cache operations
- [Store and retrieve cached data](#store-and-retrieve-cached-data)
- [Store cached data with expiration](#store-cached-data-with-expiration)
- [Share data across plugins](#share-data-across-plugins)
- [Build a counter](#build-a-counter)
### Store and retrieve cached data
```python
# Store a value
influxdb3_local.cache.put("last_run_time", time.time())
# Retrieve a value with a default if not found
last_time = influxdb3_local.cache.get("last_run_time", default=0)
# Delete a cached value
influxdb3_local.cache.delete("temporary_data")
```
### Store cached data with expiration
```python
# Cache with a 5-minute TTL (time-to-live)
influxdb3_local.cache.put("api_response", response_data, ttl=300)
```
### Share data across plugins
```python
# Store in the global namespace
influxdb3_local.cache.put("config", {"version": "1.0"}, use_global=True)
# Retrieve from the global namespace
config = influxdb3_local.cache.get("config", use_global=True)
```
### Build a counter
You can track how many times a plugin has run:
```python
# Get current counter or default to 0
counter = influxdb3_local.cache.get("execution_count", default=0)
# Increment counter
counter += 1
# Store the updated value
influxdb3_local.cache.put("execution_count", counter)
influxdb3_local.info(f"This plugin has run {counter} times")
```
## Guidelines for in-memory caching
To get the most out of the in-memory cache, follow these guidelines:
- [Use the trigger-specific namespace](#use-the-trigger-specific-namespace)
- [Use TTL appropriately](#use-ttl-appropriately)
- [Cache computation results](#cache-computation-results)
- [Warm the cache](#warm-the-cache)
- [Consider cache limitations](#consider-cache-limitations)
### Use the trigger-specific namespace
The Processing Engine provides a cache that supports stateful operations while maintaining isolation between different triggers. For most use cases, use the trigger-specific namespace to keep plugin state isolated. Use the global namespace only when you need to share data across triggers.
### Use TTL appropriately
Set appropriate expiration times based on how frequently your data changes:
```python
# Cache external API responses for 5 minutes
influxdb3_local.cache.put("weather_data", api_response, ttl=300)
```
### Cache computation results
Store the results of expensive calculations that you use frequently:
```python
# Cache aggregated statistics
influxdb3_local.cache.put("daily_stats", calculate_statistics(data), ttl=3600)
```
### Warm the cache
For critical data, prime the cache at startup. This can be especially useful for global namespace data where multiple triggers need the data:
```python
# Check if cache needs to be initialized
if not influxdb3_local.cache.get("lookup_table"):
influxdb3_local.cache.put("lookup_table", load_lookup_data())
```
### Consider cache limitations
- **Memory usage**: Since the system stores cache contents in memory, monitor your memory usage when caching large datasets.
- **Server restarts**: Because the server clears the cache on restart, design your plugins to handle cache initialization (as noted above).
- **Concurrency**: If multiple trigger instances update the same cache key at the same time, reads can return stale or conflicting data, so design your plugins to tolerate out-of-date values.
## Next steps
With an understanding of the shared plugin API, you can start building data workflows that transform, analyze, and respond to your time series data.
To find example plugins you can extend, visit the [influxdb3_plugins repository](https://github.com/influxdata/influxdb3_plugins) on GitHub.


@ -8,5 +8,5 @@ For more information, see the [C# client example on GitHub](https://github.com/a
> We recommend using the [`influxdb3-csharp` C# client library](/influxdb3/version/reference/client-libraries/v3/csharp/) for integrating InfluxDB 3 with your C# application code.
>
> [InfluxDB 3 client libraries](/influxdb3/version/reference/client-libraries/v3/) wrap Apache Arrow Flight clients
> and provide convenient methods for [writing](/influxdb3/version/get-started/write/#write-line-protocol-to-influxdb), [querying](/influxdb3/version/get-started/query/#execute-an-sql-query), and processing data stored in {{% product-name %}}. > and provide convenient methods for [writing](/influxdb3/version/write-data/api-client-libraries/), [querying](/influxdb3/version/query-data/execute-queries/), and processing data stored in {{% product-name %}}.
> Client libraries can query using SQL or InfluxQL.


@ -6,7 +6,7 @@
> We recommend using the [`influxdb3-go` Go client library](/influxdb3/version/reference/client-libraries/v3/go/) for integrating InfluxDB 3 with your Go application code.
>
> [InfluxDB 3 client libraries](/influxdb3/version/reference/client-libraries/v3/) wrap Apache Arrow Flight clients
> and provide convenient methods for [writing](/influxdb3/version/get-started/write/#write-line-protocol-to-influxdb), [querying](/influxdb3/version/get-started/query/#execute-an-sql-query), and processing data stored in {{% product-name %}}. > and provide convenient methods for [writing](/influxdb3/version/write-data/api-client-libraries/), [querying](/influxdb3/version/query-data/execute-queries/), and processing data stored in {{% product-name %}}.
> Client libraries can query using SQL or InfluxQL.
## Flight SQL client


@ -6,7 +6,7 @@
> We recommend using the [`influxdb3-java` Java client library](/influxdb3/version/reference/client-libraries/v3/java/) for integrating InfluxDB 3 with your Java application code.
>
> [InfluxDB 3 client libraries](/influxdb3/version/reference/client-libraries/v3/) wrap Apache Arrow Flight clients
> and provide convenient methods for [writing](/influxdb3/version/get-started/write/#write-line-protocol-to-influxdb), [querying](/influxdb3/version/get-started/query/#execute-an-sql-query), and processing data stored in {{% product-name %}}. > and provide convenient methods for [writing](/influxdb3/version/write-data/api-client-libraries/), [querying](/influxdb3/version/query-data/execute-queries/), and processing data stored in {{% product-name %}}.
> Client libraries can query using SQL or InfluxQL.
<!-- TOC -->


@ -6,21 +6,20 @@
> We recommend using the [`influxdb3-python` Python client library](/influxdb3/version/reference/client-libraries/v3/python/) for integrating InfluxDB 3 with your Python application code.
>
> [InfluxDB 3 client libraries](/influxdb3/version/reference/client-libraries/v3/) wrap Apache Arrow Flight clients
> and provide convenient methods for [writing](/influxdb3/version/get-started/write/#write-line-protocol-to-influxdb), [querying](/influxdb3/version/get-started/query/#execute-an-sql-query), and processing data stored in {{% product-name %}}. > and provide convenient methods for [writing](/influxdb3/version/write-data/api-client-libraries/), [querying](/influxdb3/version/query-data/execute-queries/), and processing data stored in {{% product-name %}}.
> Client libraries can query using SQL or InfluxQL. > Client libraries can query using SQL or InfluxQL.
The following examples show how to use the `pyarrow.flight` and `pandas` Python modules to query and format data stored in an {{% product-name %}} database: The following examples show how to use the `pyarrow.flight` and `pandas` Python modules to query and format data stored in an {{% product-name %}} database:
{{% code-tabs-wrapper %}} {{< code-tabs-wrapper >}}
{{% code-tabs %}} {{% code-tabs %}}
[SQL](#sql-python) [SQL](#sql-python)
[InfluxQL](#influxql-python) [InfluxQL](#influxql-python)
{{% /code-tabs %}} {{% /code-tabs %}}
{{% code-tab-content %}} {{% code-tab-content %}}
<!-- BEGIN SQL --> <!-- BEGIN SQL -->
{{% code-placeholders "DATABASE_NAME|DATABASE_TOKEN" %}} {{% code-placeholders "DATABASE_NAME|DATABASE_TOKEN" %}}
```py ```python
# Using pyarrow>=12.0.0 FlightClient # Using pyarrow>=12.0.0 FlightClient
from pyarrow.flight import FlightClient, Ticket, FlightCallOptions from pyarrow.flight import FlightClient, Ticket, FlightCallOptions
import json import json
@ -62,7 +61,7 @@ print(data_frame.to_markdown())
{{% code-tab-content %}} {{% code-tab-content %}}
<!-- BEGIN INFLUXQL --> <!-- BEGIN INFLUXQL -->
{{% code-placeholders "DATABASE_NAME|DATABASE_TOKEN" %}} {{% code-placeholders "DATABASE_NAME|DATABASE_TOKEN" %}}
```py ```python
# Using pyarrow>=12.0.0 FlightClient # Using pyarrow>=12.0.0 FlightClient
from pyarrow.flight import FlightClient, Ticket, FlightCallOptions from pyarrow.flight import FlightClient, Ticket, FlightCallOptions
import json import json
@ -97,6 +96,7 @@ print(data_frame.to_markdown())
{{% /code-placeholders %}} {{% /code-placeholders %}}
<!-- END INFLUXQL --> <!-- END INFLUXQL -->
{{% /code-tab-content %}} {{% /code-tab-content %}}
{{< /code-tabs-wrapper >}}
Replace the following: Replace the following:
@ -104,5 +104,3 @@ Replace the following:
- {{% code-placeholder-key %}}`DATABASE_TOKEN`{{% /code-placeholder-key %}}: - {{% code-placeholder-key %}}`DATABASE_TOKEN`{{% /code-placeholder-key %}}:
a [database token](/influxdb3/version/admin/tokens/database/) a [database token](/influxdb3/version/admin/tokens/database/)
with sufficient permissions to the specified database with sufficient permissions to the specified database
{{% /code-tabs-wrapper %}}

View File

@ -6,7 +6,7 @@ The [Python `flightsql-dbapi` Flight SQL DBAPI library](https://github.com/influ
> We recommend using the [`influxdb3-python` Python client library](/influxdb3/version/reference/client-libraries/v3/python/) for integrating InfluxDB 3 with your Python application code. > We recommend using the [`influxdb3-python` Python client library](/influxdb3/version/reference/client-libraries/v3/python/) for integrating InfluxDB 3 with your Python application code.
> >
> [InfluxDB 3 client libraries](/influxdb3/version/reference/client-libraries/v3/) wrap Apache Arrow Flight clients > [InfluxDB 3 client libraries](/influxdb3/version/reference/client-libraries/v3/) wrap Apache Arrow Flight clients
> and provide convenient methods for [writing](/influxdb3/version/get-started/write/#write-line-protocol-to-influxdb), [querying](/influxdb3/version/get-started/query/#execute-an-sql-query), and processing data stored in {{% product-name %}}. > and provide convenient methods for [writing](/influxdb3/version/write-data/api-client-libraries/), [querying](/influxdb3/version/query-data/execute-queries/), and processing data stored in {{% product-name %}}.
> Client libraries can query using SQL or InfluxQL. > Client libraries can query using SQL or InfluxQL.
## Installation ## Installation
@ -32,7 +32,7 @@ from flightsql import FlightSQLClient
``` ```
- `flightsql.FlightSQLClient` class: an interface for [initializing - `flightsql.FlightSQLClient` class: an interface for [initializing
a client](#initialization) and interacting with a Flight SQL server. a client](#initialize-a-client) and interacting with a Flight SQL server.
## API reference ## API reference
@ -41,11 +41,11 @@ a client](#initialization) and interacting with a Flight SQL server.
- [Initialize a client](#initialize-a-client) - [Initialize a client](#initialize-a-client)
- [Instance methods](#instance-methods) - [Instance methods](#instance-methods)
- [FlightSQLClient.execute](#flightsqlclientexecute) - [FlightSQLClient.execute](#flightsqlclientexecute)
- [Syntax {#execute-query-syntax}](#syntax-execute-query-syntax) - [Syntax](#execute-query-syntax)
- [Example {#execute-query-example}](#example-execute-query-example) - [Example](#execute-query-example)
- [FlightSQLClient.do_get](#flightsqlclientdo_get) - [FlightSQLClient.do_get](#flightsqlclientdo_get)
- [Syntax {#retrieve-data-syntax}](#syntax-retrieve-data-syntax) - [Syntax](#retrieve-data-syntax)
- [Example {#retrieve-data-example}](#example-retrieve-data-example) - [Example](#retrieve-data-example)
## Class FlightSQLClient ## Class FlightSQLClient

View File

@ -0,0 +1,345 @@
Learn how to avoid unexpected results and recover from errors when writing to InfluxDB.
{{% show-in "v2,cloud" %}}
- [Handle `write` and `delete` responses](#handle-write-and-delete-responses)
- [Troubleshoot failures](#troubleshoot-failures)
- [Troubleshoot rejected points](#troubleshoot-rejected-points)
{{% /show-in %}}
## Handle `write` and `delete` responses
{{% show-in "cloud" %}}
In InfluxDB Cloud, writes and deletes are asynchronous and eventually consistent.
Once InfluxDB validates your request and [queues](/influxdb/cloud/reference/internals/durability/#backup-on-write) the write or delete, it sends a _success_ response (HTTP `204` status code) as an acknowledgement.
To ensure that InfluxDB handles writes and deletes in the order you request them, wait for the acknowledgement before you send the next request.
Because writes are asynchronous, keep the following in mind:
- Data might not yet be queryable when you receive _success_ (HTTP `204` status code).
- InfluxDB may still reject points after you receive _success_ (HTTP `204` status code).
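For example, after a `204` response you can confirm the data is queryable before relying on it--a minimal sketch using the `/api/v2/query` endpoint, where `ORG_NAME`, `BUCKET_NAME`, and `API_TOKEN` are placeholders for your own values:

```bash
# Query the last five minutes of the target bucket to confirm the write landed.
curl --request POST "https://{{< influxdb/host >}}/api/v2/query?org=ORG_NAME" \
  --header "Authorization: Token API_TOKEN" \
  --header "Content-Type: application/vnd.flux" \
  --header "Accept: application/csv" \
  --data 'from(bucket: "BUCKET_NAME") |> range(start: -5m) |> limit(n: 1)'
```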
{{% /show-in %}}
{{% show-in "v2" %}}
{{% product-name %}} does the following when you send a write request:
1. Validates the request.
2. If successful, attempts to [ingest data](/influxdb/v2/reference/internals/durability/#data-ingest) from the request body; otherwise, responds with an [error status](#review-http-status-codes).
3. Ingests or rejects data from the batch and returns one of the following HTTP status codes:
- `204 No Content`: All of the data is ingested and queryable.
- `422 Unprocessable Entity`: Some or all of the data has been rejected. Data that has not been rejected is ingested and queryable.
The response body contains error details about [rejected points](#troubleshoot-rejected-points).
Writes are synchronous--the response status indicates the final status of the write and all ingested data is queryable.
To ensure that InfluxDB handles writes in the order you request them,
wait for the response before you send the next request.
{{% /show-in %}}
### Review HTTP status codes
InfluxDB uses conventional HTTP status codes to indicate the success or failure of a request.
Write requests return the following status codes:
{{% show-in "cloud" %}}
| HTTP response code | Message | Description |
| :-------------------------------| :--------------------------------------------------------------- | :------------- |
| `204 "Success"` | | If InfluxDB validated the request data format and queued the data for writing to the bucket |
| `400 "Bad request"` | `message` contains the first malformed line | If data is malformed |
| `401 "Unauthorized"` | | If the [`Authorization: Token` header](/influxdb/cloud/api-guide/api_intro/#authentication) is missing or malformed or if the [API token](/influxdb/cloud/api-guide/api_intro/#authentication) doesn't have [permission](/influxdb/cloud/admin/tokens/) to write to the bucket |
| `404 "Not found"` | requested **resource type** (for example, "organization") and **resource name** | If a requested resource, such as an organization or bucket, wasn't found |
| `413 "Request too large"` | cannot read data: points in batch is too large | If a **write** request exceeds the maximum [global limit](/influxdb/cloud/account-management/limits/#global-limits) |
| `429 "Too many requests"` | `Retry-After` header: xxx (seconds to wait before retrying the request) | If a **read** or **write** request exceeds your plan's [adjustable service quotas](/influxdb/cloud/account-management/limits/#adjustable-service-quotas) or if a **delete** request exceeds the maximum [global limit](/influxdb/cloud/account-management/limits/#global-limits) |
| `500 "Internal server error"` | | Default status for an error |
| `503 "Service unavailable"` | Series cardinality exceeds your plan's service quota | If **series cardinality** exceeds your plan's [adjustable service quotas](/influxdb/cloud/account-management/limits/#adjustable-service-quotas) |
{{% /show-in %}}
{{% show-in "v2" %}}
- `204` **Success**: All request data was written to the bucket.
- `400` **Bad request**:
The response body contains the first malformed line in the data. All request data was rejected and not written.
- `401` **Unauthorized**: May indicate one of the following:
- [`Authorization: Token` header](/influxdb/v2/api-guide/api_intro/#authentication) is missing or malformed.
- [API token](/influxdb/v2/api-guide/api_intro/#authentication) value is missing from the header.
- API token does not have sufficient permissions to write to the organization and the bucket. For more information about token types and permissions, see [Manage API tokens](/influxdb/v2/admin/tokens/).
- `404` **Not found**: A requested resource, such as an organization or bucket, was not found. The response body contains the requested resource type (for example, "organization") and resource name.
- `413` **Request entity too large**: All request data was rejected and not written. InfluxDB OSS only returns this error if the [Go (golang) `ioutil.ReadAll()`](https://pkg.go.dev/io/ioutil#ReadAll) function raises an error.
- `422` **Unprocessable entity**: The request was well-formed, but some or all of the points were rejected due to semantic errors--for example, schema conflicts or retention policy violations.
- `500` **Internal server error**: Default HTTP status for an error.
- `503` **Service unavailable**: Server is temporarily unavailable to accept writes. The `Retry-After` header describes when to try the write again.
{{% /show-in %}}
The `message` property of the response body may contain additional details about the error.
If some of your data did not write to the bucket, see how to [troubleshoot rejected points](#troubleshoot-rejected-points).
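For example, a minimal shell sketch that separates the status code from the response body so you can branch on the result--`ORG_NAME`, `BUCKET_NAME`, and `API_TOKEN` are placeholders for your own values:

```bash
# Write a point and capture the HTTP status code and response body separately.
response=$(curl --silent --write-out "\n%{http_code}" \
  --request POST "https://{{< influxdb/host >}}/api/v2/write?org=ORG_NAME&bucket=BUCKET_NAME&precision=s" \
  --header "Authorization: Token API_TOKEN" \
  --data-binary 'home,room=Kitchen temp=23.1 1641124000')

status=$(printf '%s\n' "$response" | tail -n 1)
body=$(printf '%s\n' "$response" | sed '$d')

if [ "$status" != "204" ]; then
  echo "Write failed ($status): $body"
fi
```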
{{% show-in "cloud" %}}
### Troubleshoot partial writes
For example, a partial write may occur when InfluxDB writes all points that conform to the bucket schema, but rejects points that have the wrong data type in a field.
To check for writes that fail asynchronously, create a [task](/influxdb/cloud/process-data/manage-tasks/) to [check the `_monitoring` bucket for rejected points](#review-rejected-points).
To resolve partial writes and rejected points, see [troubleshoot failures](#troubleshoot-failures).
{{% /show-in %}}
## Troubleshoot failures
{{% show-in "v2" %}}
If you notice data is missing in your bucket, do the following:
- Check the [HTTP status code](#review-http-status-codes) in the response.
- Check the `message` property in the response body for details about the error--for example, `partial write` indicates [rejected points](#troubleshoot-rejected-points).
- Verify all lines contain valid syntax ([line protocol](/influxdb/v2/reference/syntax/line-protocol/) or [CSV](/influxdb/v2/reference/syntax/annotated-csv/)).
- Verify the timestamps match the [precision parameter](/influxdb/v2/write-data/#timestamp-precision) in your request.
- Minimize payload size and network errors by [optimizing writes](/influxdb/v2/write-data/best-practices/optimize-writes/).
{{% /show-in %}}
{{% show-in "cloud" %}}
If you notice data is missing in your bucket, do the following:
- Check the `message` property in the response body for details about the error--for example, `partial write error` indicates [rejected points](#troubleshoot-rejected-points).
- Check for [rejected points](#troubleshoot-rejected-points) in your organization's `_monitoring` bucket.
- Verify all lines contain valid syntax ([line protocol](/influxdb/cloud/reference/syntax/line-protocol/) or [CSV](/influxdb/cloud/reference/syntax/annotated-csv/)). See how to [find parsing errors](#find-parsing-errors).
- Verify the data types match the [series](/influxdb/cloud/reference/key-concepts/data-elements/#series) or [bucket schema](/influxdb/cloud/admin/buckets/bucket-schema/). See how to resolve [explicit schema rejections](#resolve-explicit-schema-rejections).
- Verify the timestamps match the [precision parameter](/influxdb/cloud/write-data/#timestamp-precision).
- Minimize payload size and network errors by [optimizing writes](/influxdb/cloud/write-data/best-practices/optimize-writes/).
{{% /show-in %}}
## Troubleshoot rejected points
{{% show-in "v2" %}}
When writing points from a batch, InfluxDB rejects points that have syntax errors or schema conflicts.
If InfluxDB processes the data in your batch and then rejects points, the [HTTP response](#handle-write-responses) body contains the following properties that describe rejected points:
- `code`: `"unprocessable entity"`
- `message`: a string that describes the reason points were rejected and may provide details, such as database, retention policy, and which bound was violated.
For example, the following `message` indicates that points were rejected because the timestamps fall outside the `1d` retention policy:
```text
failure writing points to database: partial write: dropped 4 points outside retention policy of duration 24h0m0s - oldest point home,room=Living\\ Room at 1970-01-01T00:00:01.541Z dropped because it violates a Retention Policy Lower Bound at 2025-05-20T19:06:17.612973Z, newest point home,room=Living\\ Room at 1970-01-01T00:00:01.5410006Z dropped because it violates a Retention Policy Lower Bound at 2025-05-20T19:06:17.612973Z dropped=4 for database: 9f282d63c7d3a5c0 for retention policy: autogen
```
InfluxDB rejects points for the following reasons:
- a line protocol parsing error
- an invalid timestamp
- a schema conflict
- a retention policy violation
Schema conflicts occur when you try to write data that contains any of the following:
- The **batch** contains another point with the same series, but one of the fields has a different value type.
- The **bucket** contains another point with the same series, but one of the fields has a different value type.
Check for [field type](/influxdb/v2/reference/key-concepts/data-elements/#field-value) differences between the missing data point and other points that have the same [series](/influxdb/v2/reference/key-concepts/data-elements/#series)--for example, did you attempt to write `string` data to an `int` field?
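For example, the following batch triggers a batch type conflict--a sketch assuming `temp` is written as a float in the first point and as a string in the second, with placeholder credentials:

```bash
# Both points belong to the series home,room=Kitchen, but temp changes type
# between them, so InfluxDB rejects the conflicting point with a 422 response.
curl --request POST "https://{{< influxdb/host >}}/api/v2/write?org=ORG_NAME&bucket=BUCKET_NAME&precision=s" \
  --header "Authorization: Token API_TOKEN" \
  --data-binary 'home,room=Kitchen temp=23.1 1641124000
home,room=Kitchen temp="warm" 1641124060'
```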
{{% /show-in %}}
{{% show-in "cloud" %}}
When you receive an HTTP `204` (Success) status code, InfluxDB has validated your request format and queued your data for writing.
However, {{% product-name %}} processes data asynchronously, which means points may still be rejected after you receive a success response.
InfluxDB may reject points for several reasons:
- Line protocol parsing errors
- Invalid timestamps
- Data type conflicts with existing schema
- Retention policy violations
- Series cardinality exceeding your plan's limits
To verify if your data was successfully written, query your data or check the `_monitoring` bucket for rejected points.
- [Review rejected points](#review-rejected-points)
- [Find parsing errors](#find-parsing-errors)
- [Find data type conflicts and schema rejections](#find-data-type-conflicts-and-schema-rejections)
- [Resolve data type conflicts](#resolve-data-type-conflicts)
- [Resolve explicit schema rejections](#resolve-explicit-schema-rejections)
### Review rejected points
To get a log of rejected points, query the [`rejected_points` measurement](/influxdb/cloud/reference/internals/system-buckets/#_monitoring-bucket-schema) in your organization's `_monitoring` bucket.
To more quickly locate `rejected_points`, keep the following in mind:
- If your line protocol batch contains single lines with multiple [fields](/influxdb/cloud/reference/syntax/line-protocol/#field-set), InfluxDB logs an entry for each point (each unique field) that is rejected.
- Each entry contains a `reason` tag that describes why the point was rejected.
- Entries for [data type conflicts and schema rejections](#find-data-type-conflicts-and-schema-rejections) have a `count` field value of `1`.
- Entries for [parsing errors](#find-parsing-errors) contain an `error` field (and don't contain a `count` field).
#### rejected_points schema
| Name | Value |
|:------ |:----- |
| `_measurement`| `rejected_points` |
| `_field` | [`count`](#find-data-type-conflicts-and-schema-rejections) or [`error`](#find-parsing-errors) |
| `_value` | [`1`](#find-data-type-conflicts-and-schema-rejections) or [error details](#find-parsing-errors) |
| `bucket` | ID of the bucket that rejected the point |
| `measurement` | Measurement name of the point |
| `field` | Name of the field that caused the rejection |
| `reason` | Brief description of the problem. See specific reasons in [data type conflicts and schema rejections](#find-data-type-conflicts-and-schema-rejections) |
| `gotType` | Received [field](/influxdb/cloud/reference/key-concepts/data-elements/#field-value) type: `Boolean`, `Float`, `Integer`, `String`, or `UnsignedInteger` |
| `wantType` | Expected [field](/influxdb/cloud/reference/key-concepts/data-elements/#field-value) type: `Boolean`, `Float`, `Integer`, `String`, or `UnsignedInteger` |
| `<timestamp>` | Time the rejected point was logged |
#### Find parsing errors
If InfluxDB can't parse a line (for example, due to syntax problems), the response `message` might not provide details.
To find parsing error details, query `rejected_points` entries that contain the `error` field.
```js
from(bucket: "_monitoring")
|> range(start: -1h)
|> filter(fn: (r) => r._measurement == "rejected_points")
|> filter(fn: (r) => r._field == "error")
```
#### Find data type conflicts and schema rejections
To find `rejected_points` caused by [data type conflicts](#resolve-data-type-conflicts) or [schema rejections](#resolve-explicit-schema-rejections),
query for the `count` field.
```js
from(bucket: "_monitoring")
|> range(start: -1h)
|> filter(fn: (r) => r._measurement == "rejected_points")
|> filter(fn: (r) => r._field == "count")
```
### Resolve data type conflicts
When you write to a bucket that has the `implicit` schema type, InfluxDB compares new points to points that have the same [series](/influxdb/cloud/reference/key-concepts/data-elements/#series).
If a point has a field with a different data type than the series, InfluxDB rejects the point and logs a `rejected_points` entry.
The `rejected_points` entry contains one of the following reasons:
| Reason | Meaning |
|:------ |:------- |
| `type conflict in batch write` | The **batch** contains another point with the same series, but one of the fields has a different value type. |
| `type conflict with existing data` | The **bucket** contains another point with the same series, but one of the fields has a different value type. |
### Resolve explicit schema rejections
If you write to a bucket with an
[explicit schema](/influxdb/cloud/admin/buckets/bucket-schema/),
the data must conform to the schema. Otherwise, InfluxDB rejects the data.
Do the following to interpret explicit schema rejections:
- [Detect a measurement mismatch](#detect-a-measurement-mismatch)
- [Detect a field type mismatch](#detect-a-field-type-mismatch)
#### Detect a measurement mismatch
InfluxDB rejects a point if the [measurement](/influxdb/cloud/reference/key-concepts/data-elements/#measurement) doesn't match the **name** of a [bucket schema](/influxdb/cloud/admin/buckets/bucket-schema/).
The `rejected_points` entry contains the following `reason` tag value:
| Reason | Meaning |
|:------ |:------- |
| `measurement not allowed by schema` | The **bucket** is configured to use explicit schemas and none of the schemas matches the **measurement** of the point. |
Consider the following [line protocol](/influxdb/cloud/reference/syntax/line-protocol) data.
```
airSensors,sensorId=TLM0201 temperature=73.97,humidity=35.23,co=0.48 1637014074
```
The line has an `airSensors` measurement and three fields (`temperature`, `humidity`, and `co`).
If you try to write this data to a bucket that has the [`explicit` schema type](/influxdb/cloud/admin/buckets/bucket-schema/) and doesn't have an `airSensors` schema, the `/api/v2/write` InfluxDB API returns an error and the following data:
```json
{
"code": "invalid",
"message": "3 out of 3 points rejected (check rejected_points in your _monitoring bucket for further information)"
}
```
InfluxDB logs three `rejected_points` entries, one for each field.
| _measurement | _field | _value | field | measurement | reason |
|:----------------|:-------|:-------|:------------|:------------|:----------------------------------|
| rejected_points | count | 1 | humidity | airSensors | measurement not allowed by schema |
| rejected_points | count | 1 | co | airSensors | measurement not allowed by schema |
| rejected_points | count | 1 | temperature | airSensors | measurement not allowed by schema |
#### Detect a field type mismatch
InfluxDB rejects a point if the [measurement](/influxdb/cloud/reference/key-concepts/data-elements/#measurement) matches the **name** of a bucket schema and the field data types don't match.
The `rejected_points` entry contains the following reason:
| Reason | Meaning |
|:------------------------------------|:-----------------------------------------------------------------------------------------------------|
| `field type mismatch with schema` | The point has the same measurement as a configured schema and they have different field value types. |
Consider a bucket that has the following `airSensors` [`explicit bucket schema`](/influxdb/cloud/admin/buckets/bucket-schema/):
```json
{
"name": "airSensors",
"columns": [
{
"name": "time",
"type": "timestamp"
},
{
"name": "sensorId",
"type": "tag"
},
{
"name": "temperature",
"type": "field",
"dataType": "float"
},
{
"name": "humidity",
"type": "field",
"dataType": "float"
},
{
"name": "co",
"type": "field",
"dataType": "float"
}
]
}
```
The following [line protocol](/influxdb/cloud/reference/syntax/line-protocol/) data has an `airSensors` measurement, a `sensorId` tag, and three fields (`temperature`, `humidity`, and `co`).
```
airSensors,sensorId=L1 temperature=90.5,humidity=70.0,co=0.2 1637014074
airSensors,sensorId=L1 temperature="90.5",humidity=70.0,co=0.2 1637014074
```
In the example data above, the second point has a `temperature` field value with the _string_ data type.
Because the `airSensors` schema requires `temperature` to have the _float_ data type,
InfluxDB returns a `400` error and a message that describes the result:
```json
{
"code": "invalid",
"message": "partial write error (5 accepted): 1 out of 6 points rejected (check rejected_points in your _monitoring bucket for further information)"
}
```
InfluxDB logs the following `rejected_points` entry to the `_monitoring` bucket:
| _measurement | _field | _value | bucket | field | gotType | measurement | reason | wantType |
|:------------------|:-------|:-------|:-------------------|:--------------|:---------|:------------|:----------------------------------|:---------|
| rejected_points | count | 1 | a7d5558b880a93da | temperature | String | airSensors | field type mismatch with schema | Float |
{{% /show-in %}}

View File

@ -15,9 +15,6 @@ have multiple DVCs.
{{% show-in "core" %}} {{% show-in "core" %}}
- [Distinct Value Caches are flushed when the server stops](#distinct-value-caches-are-flushed-when-the-server-stops) - [Distinct Value Caches are flushed when the server stops](#distinct-value-caches-are-flushed-when-the-server-stops)
{{% /show-in %}} {{% /show-in %}}
{{% show-in "enterprise" %}}
- [Distinct Value Caches are rebuilt on restart](#distinct-value-caches-are-rebuilt-on-restart)
{{% /show-in %}}
Consider a dataset with the following schema: Consider a dataset with the following schema:
@ -75,9 +72,6 @@ node requires to maintain it. Consider the following:
{{% show-in "core" %}} {{% show-in "core" %}}
- [Distinct Value Caches are flushed when the server stops](#distinct-value-caches-are-flushed-when-the-server-stops) - [Distinct Value Caches are flushed when the server stops](#distinct-value-caches-are-flushed-when-the-server-stops)
{{% /show-in %}} {{% /show-in %}}
{{% show-in "enterprise" %}}
- [Distinct Value Caches are rebuilt on restart](#distinct-value-caches-are-rebuilt-on-restart)
{{% /show-in %}}
### High cardinality limits ### High cardinality limits
@ -96,11 +90,3 @@ stops. After a server restart, {{% product-name %}} only writes new values to
the DVC when you write data, so there may be a period of time when some values are the DVC when you write data, so there may be a period of time when some values are
unavailable in the DVC. unavailable in the DVC.
{{% /show-in %}} {{% /show-in %}}
{{% show-in "enterprise" %}}
### Distinct Value Caches are rebuilt on restart
Because the DVC is an in-memory cache, the cache is flushed any time the server
stops. After a server restarts, {{< product-name >}} uses persisted data to
rebuild the DVC.
{{% /show-in %}}

View File

@ -17,9 +17,6 @@ An LVC is associated with a table, which can have multiple LVCs.
{{% show-in "core" %}} {{% show-in "core" %}}
- [Last Value Caches are flushed when the server stops](#last-value-caches-are-flushed-when-the-server-stops) - [Last Value Caches are flushed when the server stops](#last-value-caches-are-flushed-when-the-server-stops)
{{% /show-in %}} {{% /show-in %}}
{{% show-in "enterprise" %}}
- [Last Value Caches are rebuilt on restart](#last-value-caches-are-rebuilt-on-restart)
{{% /show-in %}}
- [Defining value columns](#defining-value-columns) - [Defining value columns](#defining-value-columns)
Consider a dataset with the following schema (similar to the Consider a dataset with the following schema (similar to the
@ -88,11 +85,7 @@ maintain it. Consider the following:
- [Value count](#value-count) - [Value count](#value-count)
{{% show-in "core" %}} {{% show-in "core" %}}
- [Last Value Caches are flushed when the server stops](#last-value-caches-are-flushed-when-the-server-stops) - [Last Value Caches are flushed when the server stops](#last-value-caches-are-flushed-when-the-server-stops)
{{% /show-in %}} {{% /show-in %}}
{{% show-in "enterprise" %}}
- [Last Value Caches are rebuilt on restart](#last-value-caches-are-rebuilt-on-restart)
{{% /show-in %}}
- [Defining value columns](#defining-value-columns)
### High cardinality key columns ### High cardinality key columns
@ -141,14 +134,6 @@ you write data, so there may be a period of time when some values are
unavailable in the LVC. unavailable in the LVC.
{{% /show-in %}} {{% /show-in %}}
{{% show-in "enterprise" %}}
### Last Value Caches are rebuilt on restart
Because the LVC is an in-memory cache, the cache is flushed any time the server
stops. After a server restarts, {{< product-name >}} uses persisted data to
rebuild the LVC.
{{% /show-in %}}
### Defining value columns ### Defining value columns
When creating an LVC, if you include the `--value-columns` options to specify When creating an LVC, if you include the `--value-columns` options to specify

View File

@ -1,4 +1,66 @@
Manage tokens to authenticate and authorize access to resources and data in your Manage tokens to authenticate and authorize access to resources and data in your {{< product-name >}} instance.
{{< product-name >}} instance.
## Provide your token
Before running CLI commands or making HTTP API requests, you must provide a valid token to authenticate.
The mechanism for providing your token depends on the client you use to interact with {{% product-name %}}--for example:
{{< tabs-wrapper >}}
{{% tabs %}}
[influxdb3 CLI](#influxdb3-cli-auth)
[cURL](#curl-auth)
{{% /tabs %}}
{{% tab-content %}}
When using the `influxdb3` CLI, you can use the `--token` option to provide your authorization token.
{{% code-placeholders "YOUR_TOKEN" %}}
```bash
# Include the --token option in your influxdb3 command
influxdb3 query \
--token YOUR_TOKEN \
--database example-db \
"SELECT * FROM 'example-table' WHERE time > now() - INTERVAL '10 minutes'"
```
{{% /code-placeholders %}}
You can also set the `INFLUXDB3_AUTH_TOKEN` environment variable to automatically provide your
authorization token to all `influxdb3` commands.
{{% code-placeholders "YOUR_TOKEN" %}}
```bash
# Export your token as an environment variable
export INFLUXDB3_AUTH_TOKEN=YOUR_TOKEN
# Run an influxdb3 command without the --token option
influxdb3 query \
--database example-db \
"SELECT * FROM 'example-table' WHERE time > now() - INTERVAL '10 minutes'"
```
{{% /code-placeholders %}}
Replace `YOUR_TOKEN` with your authorization token.
{{% /tab-content %}}
{{% tab-content %}}
{{% code-placeholders "AUTH_TOKEN" %}}
```bash
# Add your token to the HTTP Authorization header
curl "http://{{< influxdb/host >}}/api/v3/query_sql" \
--header "Authorization: Bearer AUTH_TOKEN" \
--data-urlencode "db=example-db" \
--data-urlencode "q=SELECT * FROM 'example-table' WHERE time > now() - INTERVAL '10 minutes'"
```
{{% /code-placeholders %}}
Replace `AUTH_TOKEN` with your actual InfluxDB 3 token.
{{% /tab-content %}}
{{< /tabs-wrapper >}}
{{< children hlevel="h2" readmore=true hr=true >}} {{< children hlevel="h2" readmore=true hr=true >}}

View File

@ -35,7 +35,6 @@ across sessions, assign the token string to the `INFLUXDB3_AUTH_TOKEN` environme
{{% tab-content %}} {{% tab-content %}}
Use the following endpoint to create an admin token: Use the following endpoint to create an admin token:
{{% show-in "core" %}}
{{% api-endpoint method="POST" endpoint="/api/v3/configure/token/admin" api-ref="/influxdb3/version/api/v3/#operation/PostCreateAdminToken" %}} {{% api-endpoint method="POST" endpoint="/api/v3/configure/token/admin" api-ref="/influxdb3/version/api/v3/#operation/PostCreateAdminToken" %}}
```bash ```bash
@ -43,15 +42,5 @@ curl -X POST "http://{{< influxdb/host >}}/api/v3/configure/token/admin" \
--header 'Accept: application/json' \ --header 'Accept: application/json' \
--header 'Content-Type: application/json' --header 'Content-Type: application/json'
``` ```
{{% /show-in %}}
{{% show-in "enterprise" %}}
{{% api-endpoint method="POST" endpoint="/api/v3/enterprise/configure/token/admin" api-ref="/influxdb3/version/api/v3/#operation/PostCreateAdminToken" %}}
```bash
curl -X POST "http://{{< influxdb/host >}}/api/v3/{{< product-key >}}/configure/token/admin" \
--header 'Accept: application/json' \
--header 'Content-Type: application/json'
```
{{% /show-in %}}
{{% /tab-content %}} {{% /tab-content %}}
{{< /tabs-wrapper >}} {{< /tabs-wrapper >}}

View File

@ -8,8 +8,12 @@ data and resources in your InfluxDB 3 instance.
> Token metadata includes the hashed token string. > Token metadata includes the hashed token string.
> InfluxDB 3 does not store the raw token string. > InfluxDB 3 does not store the raw token string.
In the following examples, replace {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}} with your InfluxDB {{% token-link "admin" %}} > [!Important]
{{% show-in "enterprise" %}} or a token with read permission on the `_internal` system database`{{% /show-in %}}. > #### Required permissions
>
> Listing admin tokens requires a valid InfluxDB {{% token-link "admin" %}}{{% show-in "enterprise" %}} or a token with read access to the `_internal` system database{{% /show-in %}}.
> For more information about providing a token, see [provide your token](/influxdb3/version/admin/tokens/#provide-your-token).
## List all tokens ## List all tokens

View File

@ -56,36 +56,18 @@ The output contains the new token string and InfluxDB deactivates the previous t
<!----------------------------BEGIN HTTP API-----------------------------------> <!----------------------------BEGIN HTTP API----------------------------------->
Use the following HTTP API endpoint: Use the following HTTP API endpoint:
{{% show-in "core" %}}
{{% api-endpoint method="POST" endpoint="/api/v3/configure/token/admin/regenerate" api-ref="/influxdb3/version/api/v3/configure/token/admin/regenerate" %}} {{% api-endpoint method="POST" endpoint="/api/v3/configure/token/admin/regenerate" api-ref="/influxdb3/version/api/v3/configure/token/admin/regenerate" %}}
{{% /show-in %}}
{{% show-in "enterprise" %}}
{{% api-endpoint method="POST" endpoint="/api/v3/enterprise/configure/token/admin/regenerate" api-ref="/influxdb3/version/api/v3/enterprise/configure/token/admin" %}}
{{% /show-in %}}
In your request, send an `Authorization` header with your current admin token string In your request, send an `Authorization` header with your current admin token string
--for example: --for example:
{{% show-in "core" %}}
{{% code-placeholders "ADMIN_TOKEN" %}} {{% code-placeholders "ADMIN_TOKEN" %}}
```bash ```bash
curl -X POST "http://{{< influxdb/host >}}/api/v3/configure/token/admin" \ curl -X POST "http://{{< influxdb/host >}}/api/v3/configure/token/admin/regenerate" \
--header "Authorization: Bearer ADMIN_TOKEN" \ --header "Authorization: Bearer ADMIN_TOKEN" \
--header "Accept: application/json" --header "Accept: application/json"
``` ```
{{% /code-placeholders %}} {{% /code-placeholders %}}
{{% /show-in %}}
{{% show-in "enterprise" %}}
{{% code-placeholders "ADMIN_TOKEN" %}}
```bash
curl -X POST "http://{{< influxdb/host >}}/api/v3/enterprise/configure/token/admin" \
--header "Authorization: Bearer ADMIN_TOKEN" \
--header "Accept: application/json"
```
{{% /code-placeholders %}}
{{% /show-in %}}
In your command, replace {{% code-placeholder-key %}}`ADMIN_TOKEN`{{% /code-placeholder-key %}} with the current token string. In your command, replace {{% code-placeholder-key %}}`ADMIN_TOKEN`{{% /code-placeholder-key %}} with the current token string.

View File

@ -18,7 +18,6 @@ influxdb3 create <SUBCOMMAND>
| [file_index](/influxdb3/version/reference/cli/influxdb3/create/file_index/) | Create a new file index for a database or table | | [file_index](/influxdb3/version/reference/cli/influxdb3/create/file_index/) | Create a new file index for a database or table |
| [last_cache](/influxdb3/version/reference/cli/influxdb3/create/last_cache/) | Create a new last value cache | | [last_cache](/influxdb3/version/reference/cli/influxdb3/create/last_cache/) | Create a new last value cache |
| [distinct_cache](/influxdb3/version/reference/cli/influxdb3/create/distinct_cache/) | Create a new distinct value cache | | [distinct_cache](/influxdb3/version/reference/cli/influxdb3/create/distinct_cache/) | Create a new distinct value cache |
| [plugin](/influxdb3/version/reference/cli/influxdb3/create/plugin/) | Create a new processing engine plugin |
| [table](/influxdb3/version/reference/cli/influxdb3/create/table/) | Create a new table in a database | | [table](/influxdb3/version/reference/cli/influxdb3/create/table/) | Create a new table in a database |
| [token](/influxdb3/version/reference/cli/influxdb3/create/token/) | Create a new authentication token | | [token](/influxdb3/version/reference/cli/influxdb3/create/token/) | Create a new authentication token |
| [trigger](/influxdb3/version/reference/cli/influxdb3/create/trigger/) | Create a new trigger for the processing engine | | [trigger](/influxdb3/version/reference/cli/influxdb3/create/trigger/) | Create a new trigger for the processing engine |

View File

@ -1,5 +1,6 @@
The `influxdb3 create database` command creates a new database in your {{< product-name >}} instance.
The `influxdb3 create database` command creates a new database. Provide a database name and, optionally, specify connection settings and authentication credentials using flags or environment variables.
## Usage ## Usage
@ -11,11 +12,10 @@ influxdb3 create database [OPTIONS] <DATABASE_NAME>
## Arguments ## Arguments
- **DATABASE_NAME**: The name of the database to create.
Valid database names are alphanumeric and start with a letter or number. - **`DATABASE_NAME`**: The name of the database to create. Valid database names are alphanumeric and start with a letter or number. Dashes (`-`) and underscores (`_`) are allowed.
Dashes (`-`) and underscores (`_`) are allowed.
Environment variable: `INFLUXDB3_DATABASE_NAME` You can also set the database name using the `INFLUXDB3_DATABASE_NAME` environment variable.
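For example, a minimal sketch of setting the variable for the current shell session and omitting the argument:

```bash
# Set the database name once, then create the database without the
# <DATABASE_NAME> argument.
export INFLUXDB3_DATABASE_NAME=example-db
influxdb3 create database
```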
## Options ## Options
@ -29,7 +29,7 @@ influxdb3 create database [OPTIONS] <DATABASE_NAME>
### Option environment variables ### Option environment variables
You can use the following environment variables to set command options: You can use the following environment variables instead of providing CLI options directly:
| Environment Variable | Option | | Environment Variable | Option |
| :------------------------ | :----------- | | :------------------------ | :----------- |
@ -38,11 +38,9 @@ You can use the following environment variables to set command options:
## Examples ## Examples
- [Create a new database](#create-a-new-database) The following examples show how to create a database.
- [Create a new database while specifying the token inline](#create-a-new-database-while-specifying-the-token-inline)
In the examples below, replace the following:
In your commands, replace the following:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
Database name Database name
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
@ -50,7 +48,9 @@ In the examples below, replace the following:
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} {{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
### Create a new database ### Create a database (default)
Creates a database using settings from environment variables and defaults.
<!--pytest.mark.skip--> <!--pytest.mark.skip-->
@ -58,7 +58,10 @@ In the examples below, replace the following:
influxdb3 create database DATABASE_NAME influxdb3 create database DATABASE_NAME
``` ```
### Create a new database while specifying the token inline ### Create a database with an authentication token
Creates a database using the specified arguments.
Flags override their associated environment variables.
<!--pytest.mark.skip--> <!--pytest.mark.skip-->

View File

@ -1,5 +1,6 @@
The `influxdb3 create distinct_cache` command creates a new distinct value cache for a specific table and column set in your {{< product-name >}} instance.
The `influxdb3 create distinct_cache` command creates a new distinct value cache. Use this command to configure a cache that tracks unique values in specified columns. You must provide the database, token, table, and columns. Optionally, you can specify a name for the cache.
## Usage ## Usage
@ -16,10 +17,9 @@ influxdb3 create distinct_cache [OPTIONS] \
## Arguments ## Arguments
- **CACHE_NAME**: _(Optional)_ Name for the cache. - **`CACHE_NAME`**: _(Optional)_ A name to assign to the cache. If omitted, the CLI generates a name automatically.
If not provided, the command automatically generates a name.
## Options ## Options
| Option | | Description | | Option | | Description |
| :----- | :------------------ | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | :----- | :------------------ | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@ -52,4 +52,69 @@ You can use the following environment variables to set command options:
| `INFLUXDB3_DATABASE_NAME` | `--database` | | `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` | | `INFLUXDB3_AUTH_TOKEN` | `--token` |
<!-- TODO: GET EXAMPLES -->
## Prerequisites
Before creating a distinct value cache, make sure you:
1. [Create a database](/influxdb3/version/reference/cli/influxdb3/create/database/)
2. [Create a table](/influxdb3/version/reference/cli/influxdb3/create/table/) that includes the columns you want to cache
3. Have a valid authentication token
## Examples
Before running the following commands, replace the placeholder values with your own:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
The database name
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}:
The name of the table to cache values from
- {{% code-placeholder-key %}}`CACHE_NAME`{{% /code-placeholder-key %}}:
The name of the distinct value cache to create
- {{% code-placeholder-key %}}`COLUMN_NAME`{{% /code-placeholder-key %}}: The column to
cache distinct values from
You can also set environment variables (such as `INFLUXDB3_AUTH_TOKEN`) instead of passing options inline.
{{% code-placeholders "(DATABASE|TABLE|COLUMN|CACHE)_NAME" %}}
### Create a distinct cache for one column
Track unique values from a single column. This setup is useful for testing or simple use cases.
<!--pytest.mark.skip-->
```bash
influxdb3 create distinct_cache \
--database DATABASE_NAME \
--table TABLE_NAME \
--column COLUMN_NAME \
CACHE_NAME
```
### Create a hierarchical cache with constraints
Create a distinct value cache for multiple columns. The following example tracks unique combinations of `room` and `sensor_id`, and sets limits on the number of entries and their maximum age.
<!--pytest.mark.skip-->
```bash
influxdb3 create distinct_cache \
--database my_test_db \
--table my_sensor_table \
--columns room,sensor_id \
--max-cardinality 1000 \
--max-age 30d \
my_sensor_distinct_cache
```
{{% /code-placeholders %}}
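After creating the cache, you can read from it in SQL--a sketch assuming the `distinct_cache()` table function and the example names used above:

```bash
# Query the distinct value cache directly with the distinct_cache() table function.
influxdb3 query \
  --database my_test_db \
  --token AUTH_TOKEN \
  "SELECT * FROM distinct_cache('my_sensor_table', 'my_sensor_distinct_cache')"
```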
## Common pitfalls
- `--column` is not valid. Use `--columns`.
- Tokens must be included explicitly unless set via `INFLUXDB3_AUTH_TOKEN`.
- Table and column names must already exist or be recognized by the engine.

View File

@ -1,18 +1,23 @@
The `influxdb3 create last_cache` command creates a last value cache, which stores the most recent values for specified columns in a table. Use this to efficiently retrieve the latest values based on key column combinations.
The `influxdb3 create last_cache` command creates a new last value cache.
## Usage ## Usage
{{% code-placeholders "DATABASE_NAME|TABLE_NAME|AUTH_TOKEN|CACHE_NAME" %}}
<!--pytest.mark.skip--> <!--pytest.mark.skip-->
```bash ```bash
influxdb3 create last_cache [OPTIONS] --database <DATABASE_NAME> --table <TABLE> [CACHE_NAME] influxdb3 create last_cache [OPTIONS] \
--database DATABASE_NAME \
--table TABLE_NAME \
--token AUTH_TOKEN \
CACHE_NAME
``` ```
{{% /code-placeholders %}}
## Arguments ## Arguments
- **CACHE_NAME**: _(Optional)_ Name for the cache. - **CACHE_NAME**: _(Optional)_ Name for the cache. If omitted, InfluxDB automatically generates one.
If not provided, the command automatically generates a name.
## Options ## Options
@ -32,7 +37,7 @@ influxdb3 create last_cache [OPTIONS] --database <DATABASE_NAME> --table <TABLE>
### Option environment variables ### Option environment variables
You can use the following environment variables to set command options: You can use the following environment variables as substitutes for CLI options:
| Environment Variable | Option | | Environment Variable | Option |
| :------------------------ | :----------- | | :------------------------ | :----------- |
@ -40,4 +45,59 @@ You can use the following environment variables to set command options:
| `INFLUXDB3_DATABASE_NAME` | `--database` | | `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` | | `INFLUXDB3_AUTH_TOKEN` | `--token` |
<!-- TODO: GET EXAMPLES --> ## Prerequisites
Before creating a last value cache, ensure you've done the following:
- Create a [database](/influxdb3/version/reference/cli/influxdb3/create/database/).
- Create a [table](/influxdb3/version/reference/cli/influxdb3/create/table/) with the columns you want to cache.
- Have a valid authentication token.
## Examples
A last value cache stores the most recent values from specified columns in a table.
### Create a basic last value cache for one column
The following example shows how to track the most recent value for a single key (the last temperature for each room):
<!--pytest.mark.skip-->
```bash
influxdb3 create last_cache \
--database DATABASE_NAME \
--table my_sensor_table \
--token AUTH_TOKEN \
--key-columns room \
--value-columns temp \
my_temp_cache
```
### Create a last value cache with multiple keys and values
The following example shows how to:
- Use multiple columns as a composite key
- Track several values per key combination
- Set a cache entry limit with `--count`
- Configure automatic expiry with `--ttl`
<!--pytest.mark.skip-->
```bash
influxdb3 create last_cache \
--database DATABASE_NAME \
--table my_sensor_table \
--token AUTH_TOKEN \
--key-columns room,sensor_id \
--value-columns temp,hum \
--count 10 \
--ttl 1h \
my_sensor_cache
```
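To read from the cache, a sketch assuming the `last_cache()` SQL table function and the cache created above:

```bash
# Retrieve the most recent cached values for each room and sensor_id combination.
influxdb3 query \
  --database DATABASE_NAME \
  --token AUTH_TOKEN \
  "SELECT * FROM last_cache('my_sensor_table', 'my_sensor_cache')"
```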
## Usage notes
- Define the table schema to include all specified key and value columns.
- Pass tokens using `--token`, unless you've set one through an environment variable.
- Specify `--count` and `--ttl` to override the defaults; otherwise, the system uses default values.

View File

@ -1,45 +0,0 @@
The `influxdb3 create plugin` command creates a new processing engine plugin.
## Usage
<!--pytest.mark.skip-->
```bash
influxdb3 create plugin [OPTIONS] \
--database <DATABASE_NAME> \
--token <AUTH_TOKEN> \
--filename <PLUGIN_FILENAME> \
--entry-point <FUNCTION_NAME> \
<PLUGIN_NAME>
```
## Arguments
- **PLUGIN_NAME**: The name of the plugin to create.
## Options
| Option | | Description |
| :----- | :-------------- | :--------------------------------------------------------------------------------------- |
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
| | `--token` | _({{< req >}})_ Authentication token |
| | `--filename` | _({{< req >}})_ Name of the plugin Python file in the plugin directory |
| | `--entry-point` | _({{< req >}})_ Entry point function name for the plugin |
| | `--plugin-type` | Type of trigger the plugin processes (default is `wal_rows`) |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
### Option environment variables
You can use the following environment variables to set command options:
| Environment Variable | Option |
| :------------------------ | :----------- |
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
<!-- TODO: GET EXAMPLES -->

View File

@ -1,5 +1,10 @@
The `influxdb3 create table` command creates a table in a database. The `influxdb3 create table` command creates a new table in a specified database. Tables must include at least one tag column and can optionally include field columns with defined data types.
> [!Note]
> InfluxDB automatically creates tables when you write line protocol data. Use this command
> only if you need to define a custom schema or apply a custom partition template before
> writing data.
## Usage ## Usage
@ -39,7 +44,7 @@ influxdb3 create table [OPTIONS] \
### Option environment variables ### Option environment variables
You can use the following environment variables to set command options: You can use the following environment variables to set options instead of passing them via CLI flags:
| Environment Variable | Option | | Environment Variable | Option |
| :------------------------ | :----------- | | :------------------------ | :----------- |
@ -49,21 +54,20 @@ You can use the following environment variables to set command options:
## Examples ## Examples
- [Create a table](#create-a-table) In the following examples, replace each placeholder with your actual values:
- [Create a table with tag and field columns](#create-a-table-with-tag-and-field-columns)
In the examples below, replace the following:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
Database name The database name
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
Authentication token Authentication token
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: - {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}:
Table name A name for the new table
{{% code-placeholders "(DATABASE|TABLE)_NAME" %}} {{% code-placeholders "DATABASE_NAME|TABLE_NAME|AUTH_TOKEN" %}}
### Create a table ### Create an empty table
<!--pytest.mark.skip-->
```bash ```bash
influxdb3 create table \ influxdb3 create table \
@ -86,4 +90,31 @@ influxdb3 create table \
TABLE_NAME TABLE_NAME
``` ```
### Verification
Use the `SHOW TABLES` query to verify that the table was created successfully:
<!--pytest.mark.skip-->
```bash
influxdb3 query \
  --database my_test_db \
  --token AUTH_TOKEN \
  "SHOW TABLES"
```

Example output:

```
+---------------+--------------------+----------------------------+------------+
| table_catalog | table_schema       | table_name                 | table_type |
+---------------+--------------------+----------------------------+------------+
| public        | iox                | my_sensor_table            | BASE TABLE |
| public        | system             | distinct_caches            | BASE TABLE |
| public        | system             | last_caches                | BASE TABLE |
| public        | system             | parquet_files              | BASE TABLE |
+---------------+--------------------+----------------------------+------------+
```
> [!Note]
> `SHOW TABLES` is an SQL query. It isn't supported in InfluxQL.
{{% /code-placeholders %}} {{% /code-placeholders %}}

View File

@ -1,12 +1,14 @@
The `influxdb3 create token` command creates a new authentication token. This returns the raw token string. Use it to authenticate future CLI commands and API requests.
The `influxdb3 create token` command creates a new authentication token. > [!Important]
> InfluxDB displays the raw token string only once. Be sure to copy and securely store it.
## Usage ## Usage
<!--pytest.mark.skip--> <!--pytest.mark.skip-->
```bash ```bash
influxdb3 create token <COMMAND> [OPTIONS] influxdb3 create token <SUBCOMMAND>
``` ```
## Commands ## Commands
@ -18,15 +20,46 @@ influxdb3 create token <COMMAND> [OPTIONS]
## Options ## Options
| Option | | Description | | Option | | Description |
| :----- | :----------- | :------------------------------ | | :----- | :------- | :--------------------- |
| `-h` | `--help` | Print help information | | |`--admin`| Create an admin token |
| | `--help-all` | Print detailed help information | | `-h` | `--help` | Print help information |
## Examples ## Examples
### Create an admin token ### Create an admin token
<!--pytest.mark.skip-->
```bash ```bash
influxdb3 create token --admin influxdb3 create token --admin
``` ```
The output is the raw token string you can use to authenticate future CLI commands and API requests.
For CLI commands, use the `--token` option or the `INFLUXDB3_AUTH_TOKEN` environment variable to pass the token string.
### Use the token to create a database
{{% code-placeholders "YOUR_ADMIN_TOKEN|DATABASE_NAME" %}}
<!--pytest.mark.skip-->
```bash
influxdb3 create database \
--token ADMIN_TOKEN \
DATABASE_NAME
```
{{% /code-placeholders %}}
Replace the following:
- {{% code-placeholder-key %}}`ADMIN_TOKEN`{{% /code-placeholder-key %}}: Your InfluxDB admin token
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Name for your new database
> [!Note]
> Set the token as an environment variable to simplify repeated CLI commands:
>
> ```bash
> export INFLUXDB3_AUTH_TOKEN=ADMIN_TOKEN
> ```

View File

@ -10,7 +10,7 @@ processing engine.
influxdb3 create trigger [OPTIONS] \ influxdb3 create trigger [OPTIONS] \
--database <DATABASE_NAME> \ --database <DATABASE_NAME> \
--token <AUTH_TOKEN> \ --token <AUTH_TOKEN> \
--plugin <PLUGIN_NAME> \ --plugin-filename <PLUGIN_FILENAME> \
--trigger-spec <TRIGGER_SPECIFICATION> \ --trigger-spec <TRIGGER_SPECIFICATION> \
<TRIGGER_NAME> <TRIGGER_NAME>
``` ```
@ -21,17 +21,21 @@ influxdb3 create trigger [OPTIONS] \
## Options ## Options
| Option | | Description | | Option | | Description |
| :----- | :--------------- | :--------------------------------------------------------------------------------------- | | :----- | :------------------ | :------------------------------------------------------------------------------------------------------- |
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) | | `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on | | `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
| | `--token` | _({{< req >}})_ Authentication token | | | `--token` | _({{< req >}})_ Authentication token |
| | `--plugin` | Plugin to execute when the trigger fires | | | `--plugin-filename` | _({{< req >}})_ Name of the file, stored in the server's `plugin-dir`, that contains the Python plugin code to run |
| | `--trigger-spec` | Trigger specification--for example `table:<TABLE_NAME>` or `all_tables` | | | `--trigger-spec` | Trigger specification--for example `table:<TABLE_NAME>` or `all_tables` |
| | `--disabled` | Create the trigger in disabled state | | | `--disabled` | Create the trigger in disabled state |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) | | | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| `-h` | `--help` | Print help information | | `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information | | | `--help-all` | Print detailed help information |
If you want to use a plugin from the [Plugin Library](https://github.com/influxdata/influxdb3_plugins) repository, prefix the plugin path with `gh:`.
For example, to use the [System Metrics](https://github.com/influxdata/influxdb3_plugins/blob/main/examples/schedule/system_metrics/system_metrics.py) plugin, the plugin filename is `gh:examples/schedule/system_metrics/system_metrics.py`.
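For example, a sketch that runs the System Metrics plugin from the Plugin Library--assuming a schedule-style trigger spec such as `every:10s` (not shown in the options table above), which this scheduled plugin expects:

```bash
# Create a scheduled trigger that runs the library plugin every 10 seconds.
influxdb3 create trigger \
  --database DATABASE_NAME \
  --token AUTH_TOKEN \
  --plugin-filename gh:examples/schedule/system_metrics/system_metrics.py \
  --trigger-spec every:10s \
  system_metrics_trigger
```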
### Option environment variables ### Option environment variables
@ -43,4 +47,69 @@ You can use the following environment variables to set command options:
| `INFLUXDB3_DATABASE_NAME` | `--database` | | `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` | | `INFLUXDB3_AUTH_TOKEN` | `--token` |
<!-- TODO: GET EXAMPLES --> ## Examples
The following examples show how to use the `influxdb3 create trigger` command to create triggers in different scenarios.

In the examples below, replace the following:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Database name
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: Authentication token
- {{% code-placeholder-key %}}`PLUGIN_FILENAME`{{% /code-placeholder-key %}}: Python plugin filename
- {{% code-placeholder-key %}}`TRIGGER_NAME`{{% /code-placeholder-key %}}:
Name of the trigger to create
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}:
Name of the table to trigger on
{{% code-placeholders "(DATABASE|TRIGGER)_NAME|AUTH_TOKEN|TABLE_NAME" %}}
### Create a trigger for a specific table
Create a trigger that processes data from a specific table.
<!--pytest.mark.skip-->
```bash
influxdb3 create trigger \
--database DATABASE_NAME \
--token AUTH_TOKEN \
--plugin-filename PLUGIN_FILENAME \
--trigger-spec table:TABLE_NAME \
TRIGGER_NAME
```
### Create a trigger for all tables
Create a trigger that applies to all tables in the specified database.
<!--pytest.mark.skip-->
```bash
influxdb3 create trigger \
--database DATABASE_NAME \
--token AUTH_TOKEN \
--plugin-filename PLUGIN_FILENAME \
--trigger-spec all_tables \
TRIGGER_NAME
```
This is useful when you want a trigger to apply to any table in the database, regardless of name.
### Create a disabled trigger
Create a trigger in a disabled state.
<!--pytest.mark.skip-->
```bash
influxdb3 create trigger \
--disabled \
--database DATABASE_NAME \
--token AUTH_TOKEN \
--plugin-filename PLUGIN_FILENAME \
--trigger-spec table:TABLE_NAME \
TRIGGER_NAME
```
Creating a trigger in a disabled state prevents it from running immediately. You can enable it later when you're ready to activate it.
{{% /code-placeholders %}}
View File
@ -11,7 +11,7 @@ influxdb3 delete database [OPTIONS] <DATABASE_NAME>
## Arguments
- **DATABASE_NAME**: The name of the database to delete. Valid database names are alphanumeric and start with a letter or number. Dashes (`-`) and underscores (`_`) are allowed.

  Environment variable: `INFLUXDB3_DATABASE_NAME`
View File
@ -1,61 +0,0 @@
The `influxdb3 delete plugin` command deletes a processing engine plugin.
## Usage
<!--pytest.mark.skip-->
```bash
influxdb3 delete plugin [OPTIONS] --database <DATABASE_NAME> <PLUGIN_NAME>
```
## Arguments
- **PLUGIN_NAME**: The name of the plugin to delete.
## Options
| Option | | Description |
| :----- | :----------- | :--------------------------------------------------------------------------------------- |
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
| | `--token` | _({{< req >}})_ Authentication token |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
### Option environment variables
You can use the following environment variables to set command options:
| Environment Variable | Option |
| :------------------------ | :----------- |
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
## Examples
### Delete a plugin
{{% code-placeholders "(DATABASE|PLUGIN)_NAME|AUTH_TOKEN" %}}
<!--pytest.mark.skip-->
```bash
influxdb3 delete plugin \
--database DATABASE_NAME \
--token AUTH_TOKEN \
PLUGIN_NAME
```
{{% /code-placeholders %}}
In the example above, replace the following:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
Database name
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
Authentication token
- {{% code-placeholder-key %}}`PLUGIN_NAME`{{% /code-placeholder-key %}}:
Name of the plugin to delete

View File

@ -41,7 +41,7 @@ Write requests return the following status codes:
| :------------------------------ | :---------------------------------------------------------------- | :------------- |
| `204 "Success"` | | If InfluxDB ingested the data |
| `400 "Bad request"` | error details about rejected points, up to 100 points: `line` contains the first rejected line, `message` describes rejections | If some or all request data isn't allowed (for example, if it is malformed or falls outside of the bucket's retention period)--the response body indicates whether a partial write has occurred or if all data has been rejected |
| `401 "Unauthorized"` | | If the `Authorization` header is missing or malformed or if the [token](/influxdb3/version/admin/tokens/) doesn't have [permission](/influxdb3/version/reference/cli/influxctl/token/create/#examples) to write to the database. See [examples using credentials](/influxdb3/version/write-data/api-client-libraries/) in write requests. |
| `404 "Not found"` | requested **resource type** (for example, "organization" or "database"), and **resource name** | If a requested resource (for example, organization or database) wasn't found |
| `500 "Internal server error"` | | Default status for an error |
| `503 "Service unavailable"` | | If the server is temporarily unavailable to accept writes. The `Retry-After` header describes when to try the write again. |
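
For example, a minimal write request might look like the following sketch; the bucket name and token are placeholders, and the query parameters follow the v2-compatible write API:

```bash
curl --include "https://{{< influxdb/host >}}/api/v2/write?bucket=BUCKET_NAME&precision=ns" \
  --header "Authorization: Bearer AUTH_TOKEN" \
  --data-binary "home,room=kitchen temp=21.5"
```

A `204` response with an empty body means InfluxDB ingested the data.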
View File
@ -5,6 +5,28 @@
> All updates to Core are automatically included in Enterprise.
> The Enterprise sections below only list updates exclusive to Enterprise.
## v3.0.3 {date="2025-05-16"}
**Core**: revision 384c457ef5f0d5ca4981b22855e411d8cac2688e
**Enterprise**: revision 34f4d28295132b9efafebf654e9f6decd1a13caf
### Core
#### Fixes
- Prevent operator token, `_admin`, from being deleted.
### Enterprise
#### Fixes
- Fix object store info digest that is output during onboarding.
- Fix issues with false positive catalog error on shutdown.
- Fix licensing validation issues.
- Other fixes and performance improvements.
## v3.0.2 {date="2025-05-01"}

**Core**: revision d80d6cd60049c7b266794a48c97b1b6438ac5da9
View File
@ -252,14 +252,34 @@ To have the `influxdb3` CLI use your admin token automatically, assign it to the
To create an admin token, use the `influxdb3 create token --admin` subcommand--for example:
{{< code-tabs-wrapper >}}
{{% code-tabs %}}
[CLI](#)
[Docker](#)
{{% /code-tabs %}}
{{% code-tab-content %}}
```bash
influxdb3 create token --admin \
--host http://INFLUXDB_HOST
```
{{% /code-tab-content %}}
{{% code-tab-content %}}
{{% code-placeholders "CONTAINER_NAME" %}}
```bash
# With Docker -- in a new terminal:
docker exec -it CONTAINER_NAME influxdb3 create token --admin
```
{{% /code-placeholders %}}
Replace {{% code-placeholder-key %}}`CONTAINER_NAME`{{% /code-placeholder-key %}} with the name of your running Docker container.
{{% /code-tab-content %}}
{{< /code-tabs-wrapper >}}
The command returns a token string that you can use to authenticate CLI commands and API requests.
View File
@ -1,54 +1,72 @@
Use the Processing Engine in {{% product-name %}} to extend your database with custom Python code. Trigger your code on write, on a schedule, or on demand to automate workflows, transform data, and create API endpoints.

## What is the Processing Engine?

The Processing Engine is an embedded Python virtual machine that runs inside your {{% product-name %}} database. You configure _triggers_ to run your Python _plugin_ code in response to:

- **Data writes** - Process and transform data as it enters the database
- **Scheduled events** - Run code at defined intervals or specific times
- **HTTP requests** - Expose custom API endpoints that execute your code
You can use the Processing Engine's in-memory cache to manage state between executions and build stateful applications directly in your database.

This guide walks you through setting up the Processing Engine, creating your first plugin, and configuring triggers that execute your code on specific events.

## Before you begin

Ensure you have:

- A working {{% product-name %}} instance
- Access to the command line
- Python installed if you're writing your own plugin
- Basic knowledge of the InfluxDB CLI

Once you have all the prerequisites in place, follow these steps to implement the Processing Engine for your data automation needs:

1. [Set up the Processing Engine](#set-up-the-processing-engine)
2. [Add a Processing Engine plugin](#add-a-processing-engine-plugin)
   - [Use example plugins](#use-example-plugins)
   - [Create a custom plugin](#create-a-custom-plugin)
3. [Set up a trigger](#set-up-a-trigger)
   - [Understand trigger types](#understand-trigger-types)
   - [Use the create trigger command](#use-the-create-trigger-command)
   - [Trigger specification examples](#trigger-specification-examples)
4. [Advanced trigger configuration](#advanced-trigger-configuration)
   - [Access community plugins from GitHub](#access-community-plugins-from-github)
   - [Pass arguments to plugins](#pass-arguments-to-plugins)
   - [Control trigger execution](#control-trigger-execution)
   - [Configure error handling for a trigger](#configure-error-handling-for-a-trigger)
   - [Install Python dependencies](#install-python-dependencies)
## Set up the Processing Engine

To activate the Processing Engine, start your {{% product-name %}} server with the `--plugin-dir` flag. This flag tells InfluxDB where to find your plugin files.

{{% code-placeholders "NODE_ID|OBJECT_STORE_TYPE|PLUGIN_DIR" %}}

```bash
influxdb3 serve \
--node-id NODE_ID \
--object-store OBJECT_STORE_TYPE \
--plugin-dir PLUGIN_DIR
```

{{% /code-placeholders %}}

In the example above, replace the following:

- {{% code-placeholder-key %}}`NODE_ID`{{% /code-placeholder-key %}}: Unique identifier for your instance
- {{% code-placeholder-key %}}`OBJECT_STORE_TYPE`{{% /code-placeholder-key %}}: Type of object store (for example, `file` or `s3`)
- {{% code-placeholder-key %}}`PLUGIN_DIR`{{% /code-placeholder-key %}}: Absolute path to the directory where plugin files are stored. Store all plugin files in this directory or its subdirectories.
### Configure distributed environments

When running {{% product-name %}} in a distributed setup, follow these steps to configure the Processing Engine:

1. Decide where each plugin should run
   - Data processing plugins, such as WAL plugins, run on ingester nodes
   - HTTP-triggered plugins run on nodes handling API requests
   - Scheduled plugins can run on any configured node
2. Enable plugins on the correct instances
3. Maintain identical plugin files across all instances where plugins run
   - Use shared storage or file synchronization tools to keep plugins consistent
@ -57,43 +75,58 @@ If you're running multiple {{% product-name %}} instances (distributed deploymen
>
> Configure your plugin directory on the same system as the nodes that run the triggers and plugins.
## Add a Processing Engine plugin

A plugin is a Python script that defines a specific function signature for a trigger (_trigger spec_). When the specified event occurs, InfluxDB runs the plugin.

### Choose a plugin strategy

You have two main options for adding plugins to your InfluxDB instance:

- [Use example plugins](#use-example-plugins) - Quickly get started with prebuilt plugins
- [Create a custom plugin](#create-a-custom-plugin) - Build your own for specialized use cases

### Use example plugins

InfluxData provides a public repository of example plugins that you can use immediately.

#### Browse plugin examples

Visit the [influxdb3_plugins repository](https://github.com/influxdata/influxdb3_plugins) to find examples for:

- **Data transformation**: Process and transform incoming data
- **Alerting**: Send notifications based on data thresholds
- **Aggregation**: Calculate statistics on time series data
- **Integration**: Connect to external services and APIs
- **System monitoring**: Track resource usage and health metrics
#### Add example plugins
You can either copy a plugin or retrieve it directly from the repository:
{{< code-tabs-wrapper >}}
{{% code-tabs %}}
[Copy locally](#)
[Fetch via gh:](#)
{{% /code-tabs %}}
{{% code-tab-content %}}
```bash
# Clone the repository
git clone https://github.com/influxdata/influxdb3_plugins.git

# Copy a plugin to your configured plugin directory
cp influxdb3_plugins/examples/schedule/system_metrics/system_metrics.py /path/to/plugins/
```
{{% /code-tab-content %}}
{{% code-tab-content %}}
```bash
# To retrieve and use a plugin directly from GitHub,
# use the `gh:` prefix in the plugin filename:
influxdb3 create trigger \
--trigger-spec "every:1m" \
--plugin-filename "gh:examples/schedule/system_metrics/system_metrics.py" \
@ -101,26 +134,61 @@ influxdb3 create trigger \
system_metrics
```
{{% /code-tab-content %}}

{{< /code-tabs-wrapper >}}

#### Find and contribute plugins

Visit [influxdata/influxdb3_plugins](https://github.com/influxdata/influxdb3_plugins)
to browse available plugins or contribute your own.
Plugins can:

- Receive plugin-specific arguments (such as written data, call time, or an HTTP request)
- Access keyword arguments (as `args`) passed from _trigger arguments_ configurations
- Access the `influxdb3_local` shared API to write data, query data, and manage state between executions
For more information about available functions, arguments, and how plugins interact with InfluxDB, see how to [Extend plugins](/influxdb3/version/extend-plugin/).
### Create a custom plugin
To build custom functionality, you can create your own Processing Engine plugin.
#### Prerequisites
Before you begin, make sure:
- The Processing Engine is enabled on your {{% product-name %}} instance.
- You've configured the `--plugin-dir` where plugin files are stored.
- You have access to that plugin directory.
#### Steps to create a plugin
- [Choose your plugin type](#choose-your-plugin-type)
- [Create your plugin file](#create-your-plugin-file)
- [Next Steps](#next-steps)
#### Choose your plugin type
Choose a plugin type based on your automation goals:
| Plugin Type | Best For | Trigger Type |
|-------------|----------|-------------|
| **Data write** | Processing data as it arrives | `table:` or `all_tables` |
| **Scheduled** | Running code at specific times | `every:` or `cron:` |
| **HTTP request** | Creating API endpoints | `path:` |
#### Create your plugin file
1. Create a `.py` file in your plugins directory
2. Add the appropriate function signature based on your chosen plugin type
3. Write your processing logic inside the function
After writing your plugin, [create a trigger](#use-the-create-trigger-command) to connect it to a database event and define when it runs.
#### Create a data write plugin
Use a data write plugin to process data as it's written to the database. Ideal use cases include:
- Data transformation and enrichment
- Alerting on incoming values
- Creating derived metrics
```python
def process_writes(influxdb3_local, table_batches, args=None):
@ -139,7 +207,13 @@ def process_writes(influxdb3_local, table_batches, args=None):
        influxdb3_local.write(line)
```
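
For reference, the following is a minimal sketch of a complete data write plugin. It assumes each entry in `table_batches` exposes `table_name` and `rows` keys (as in the examples repository) and that `LineBuilder` is available in the plugin scope:

```python
def process_writes(influxdb3_local, table_batches, args=None):
    for table_batch in table_batches:
        # Assumed batch shape: {"table_name": str, "rows": list}
        table_name = table_batch["table_name"]
        rows = table_batch["rows"]
        influxdb3_local.info(f"Processing {len(rows)} rows from {table_name}")

        # Write a derived metric back to the database
        line = LineBuilder("write_counts")
        line.tag("table", table_name)
        line.int64_field("row_count", len(rows))
        influxdb3_local.write(line)
```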
#### Create a scheduled plugin
Scheduled plugins run at defined intervals. Use them for:
- Periodic data aggregation
- Report generation
- System health checks
```python
def process_scheduled_call(influxdb3_local, call_time, args=None):
@ -155,7 +229,13 @@ def process_scheduled_call(influxdb3_local, call_time, args=None):
        influxdb3_local.warn("No recent metrics found")
```
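
As a minimal sketch, the following scheduled plugin checks for recent data and logs the result; the `metrics` table name is illustrative:

```python
def process_scheduled_call(influxdb3_local, call_time, args=None):
    # Query rows written in the last 5 minutes (table name is illustrative)
    results = influxdb3_local.query(
        "SELECT COUNT(*) AS row_count FROM metrics "
        "WHERE time > now() - INTERVAL '5 minutes'"
    )
    if results and results[0]["row_count"] > 0:
        influxdb3_local.info(f"Found {results[0]['row_count']} recent rows at {call_time}")
    else:
        influxdb3_local.warn("No recent metrics found")
```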
#### Create an HTTP request plugin
HTTP request plugins respond to API calls. Use them for:
- Creating custom API endpoints
- Webhooks for external integrations
- User interfaces for data interaction
```python
def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None):
@ -174,25 +254,55 @@ def process_request(influxdb3_local, query_parameters, request_headers, request_
return {"status": "success", "message": "Request processed"} return {"status": "success", "message": "Request processed"}
``` ```
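
As a minimal sketch, the following HTTP request plugin records each call as a data point and returns a JSON response; it assumes `query_parameters` behaves like a dictionary and that the returned object is serialized as the response body:

```python
def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None):
    # Read a query parameter (assumed dict-like access)
    source = query_parameters.get("source", "unknown")

    # Record the request as a data point
    line = LineBuilder("webhook_requests")
    line.tag("source", source)
    line.int64_field("count", 1)
    influxdb3_local.write(line)

    # Returned object becomes the HTTP response body
    return {"status": "success", "message": f"Request processed from {source}"}
```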
#### Next steps

After writing your plugin:

- [Create a trigger](#use-the-create-trigger-command) to connect your plugin to database events
- [Install any Python dependencies](#install-python-dependencies) your plugin requires
- Learn how to [extend plugins with the API](/influxdb3/version/extend-plugin/)

## Set up a trigger
### Understand trigger types
| Plugin Type | Trigger Specification | When Plugin Runs |
|------------|----------------------|-----------------|
| Data write | `table:<TABLE_NAME>` or `all_tables` | When data is written to tables |
| Scheduled | `every:<DURATION>` or `cron:<EXPRESSION>` | At specified time intervals |
| HTTP request | `path:<ENDPOINT_PATH>` | When HTTP requests are received |
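
For example, the following sketch creates a scheduled trigger using a cron specification; the plugin filename and trigger name are placeholders, and the exact cron field format can vary by version, so check the CLI reference for your release:

```bash
# Run a plugin on a cron schedule (fields shown: minute hour day month weekday)
influxdb3 create trigger \
  --trigger-spec "cron:0 8 * * 1" \
  --plugin-filename "weekly_report.py" \
  --database my_database \
  weekly_report
```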
### Use the create trigger command
Use the `influxdb3 create trigger` command with the appropriate trigger specification:
{{% code-placeholders "SPECIFICATION|PLUGIN_FILE|DATABASE_NAME|TRIGGER_NAME" %}}
```bash
influxdb3 create trigger \
--trigger-spec SPECIFICATION \
--plugin-filename PLUGIN_FILE \
--database DATABASE_NAME \
TRIGGER_NAME
```
{{% /code-placeholders %}}
In the example above, replace the following:
- {{% code-placeholder-key %}}`SPECIFICATION`{{% /code-placeholder-key %}}: Trigger specification
- {{% code-placeholder-key %}}`PLUGIN_FILE`{{% /code-placeholder-key %}}: Plugin filename relative to your configured plugin directory
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Name of the database
- {{% code-placeholder-key %}}`TRIGGER_NAME`{{% /code-placeholder-key %}}: Name of the new trigger
> [!Note]
> When specifying a local plugin file, the `--plugin-filename` parameter
> _is relative to_ the `--plugin-dir` configured for the server.
> You don't need to provide an absolute path.
### Trigger specification examples
#### Data write example
```bash
# Trigger on writes to a specific table
@ -211,15 +321,11 @@ influxdb3 create trigger \
all_data_processor
```
The trigger runs when the database flushes ingested data for the specified tables to the Write-Ahead Log (WAL) in the Object store (default is every second).

The plugin receives the written data and table information.
#### Scheduled events example
```bash
# Run every 5 minutes
@ -239,9 +345,7 @@ influxdb3 create trigger \
The plugin receives the scheduled call time.
#### HTTP requests example
```bash
# Create an endpoint at /api/v3/engine/webhook
@ -252,7 +356,7 @@ influxdb3 create trigger \
webhook_processor
```
Your endpoint is available at `/api/v3/engine/<ENDPOINT_PATH>`.
To run the plugin, send a `GET` or `POST` request to the endpoint--for example:
```bash ```bash
@ -261,22 +365,10 @@ curl http://{{% influxdb/host %}}/api/v3/engine/webhook
The plugin receives the HTTP request object with methods, headers, and body.
### Pass arguments to plugins

Use trigger arguments to pass configuration from a trigger to the plugin it runs. You can use this for:

- Threshold values for monitoring
- Connection properties for external services
- Configuration settings for plugin behavior
@ -344,300 +436,91 @@ influxdb3 create trigger \
auto_disable_processor
```
## Advanced trigger configuration

After creating basic triggers, you can enhance your plugins with these advanced features:

### Access community plugins from GitHub

Skip downloading plugins by referencing them directly from GitHub:

```bash
# Create a trigger using a plugin from GitHub
influxdb3 create trigger \
--trigger-spec "every:1m" \
--plugin-filename "gh:examples/schedule/system_metrics/system_metrics.py" \
--database my_database \
system_metrics
```

This approach:

- Ensures you're using the latest version
- Simplifies updates and maintenance
- Reduces local storage requirements
### Configure your triggers

#### Pass configuration arguments

Provide runtime configuration to your plugins:

```bash
# Pass threshold and email settings to a plugin
influxdb3 create trigger \
--trigger-spec "every:1h" \
--plugin-filename "threshold_check.py" \
--trigger-arguments threshold=90,notify_email=admin@example.com \
--database my_database \
threshold_monitor
```
Your plugin accesses these values through the `args` parameter:
```python
def process_scheduled_call(influxdb3_local, call_time, args=None):
    if args and "threshold" in args:
        threshold = float(args["threshold"])
        email = args.get("notify_email", "default@example.com")

        # Use the arguments in your logic
        influxdb3_local.info(f"Checking threshold {threshold}, will notify {email}")
```
#### Set execution mode

Choose between synchronous (default) or asynchronous execution:

```bash
# Allow multiple trigger instances to run simultaneously
influxdb3 create trigger \
--trigger-spec "table:metrics" \
--plugin-filename "heavy_process.py" \
--run-asynchronous \
--database my_database \
async_processor
```

Use asynchronous execution when:

- Processing might take longer than the trigger interval
- Multiple events need to be handled simultaneously
- Performance is more important than sequential execution

#### Configure error handling

Control how your trigger responds to errors:

```bash
# Automatically retry on error
influxdb3 create trigger \
--trigger-spec "table:important_data" \
--plugin-filename "critical_process.py" \
--error-behavior retry \
--database my_database \
critical_processor
```

### Install Python dependencies
If your plugin needs additional Python packages, use the `influxdb3 install` command:
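
For example, a minimal sketch (the package name is illustrative):

```bash
# Install a package into the Processing Engine's plugin environment
influxdb3 install package requests
```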
@ -654,6 +537,7 @@ docker exec -it CONTAINER_NAME influxdb3 install package pandas
This creates a Python virtual environment in your plugins directory with the specified packages installed.

{{% show-in "enterprise" %}}

### Connect Grafana to your InfluxDB instance

When configuring Grafana to connect to an InfluxDB 3 Enterprise instance:
View File
@ -282,14 +282,34 @@ To have the `influxdb3` CLI use your admin token automatically, assign it to the
To create an admin token, use the `influxdb3 create token --admin` subcommand--for example:
{{< code-tabs-wrapper >}}
{{% code-tabs %}}
[CLI](#)
[Docker](#)
{{% /code-tabs %}}
{{% code-tab-content %}}
```bash
influxdb3 create token --admin \
--host http://INFLUXDB_HOST
```
{{% /code-tab-content %}}
{{% code-tab-content %}}
{{% code-placeholders "CONTAINER_NAME" %}}
```bash
# With Docker -- in a new terminal:
docker exec -it CONTAINER_NAME influxdb3 create token --admin
```
{{% /code-placeholders %}}
Replace {{% code-placeholder-key %}}`CONTAINER_NAME`{{% /code-placeholder-key %}} with the name of your running Docker container.
{{% /code-tab-content %}}
{{< /code-tabs-wrapper >}}
The command returns a token string that you can use to authenticate CLI commands and API requests.
@ -316,6 +336,7 @@ To create a database token, use the `influxdb3 create token` subcommand and pass
The following example shows how to create a database token that expires in 90 days and has read and write permissions for all databases on the server:
{{% code-placeholders "ADMIN_TOKEN" %}} {{% code-placeholders "ADMIN_TOKEN" %}}
```bash ```bash
influxdb3 create token \ influxdb3 create token \
--permission \ --permission \
@ -327,8 +348,7 @@ influxdb3 create token \
```

{{% /code-placeholders %}}
In your command, replace {{% code-placeholder-key %}}`ADMIN_TOKEN`{{% /code-placeholder-key %}} with the admin token you created earlier.
#### Create a system token
@ -355,6 +375,8 @@ To create a system token, use the `influxdb3 create token` subcommand and pass t
The following example shows how to create a system token that expires in 1 year and has read permissions for all system endpoints on the server:
{{% code-placeholders "ADMIN_TOKEN" %}}
```bash
influxdb3 create token \
--permission \
@ -364,6 +386,9 @@ influxdb3 create token \
--name "all system endpoints" \ --name "all system endpoints" \
"system:*:read" "system:*:read"
``` ```
{{% /code-placeholders %}}
In your command, replace {{% code-placeholder-key %}}`ADMIN_TOKEN`{{% /code-placeholder-key %}} with the admin token you created earlier.
For more information, see how to [Manage resource tokens](/influxdb3/version/admin/tokens/resource/).
@ -372,14 +397,18 @@ For more information, see how to [Manage resource tokens](/influxdb3/version/adm
- To authenticate `influxdb3` CLI commands, use the `--token` option or assign your
  token to the `INFLUXDB3_AUTH_TOKEN` environment variable for `influxdb3` to use it automatically.
- To authenticate HTTP API requests, include `Bearer <TOKEN>` in the `Authorization` header value--for example:
{{% code-placeholders "SYSTEM_TOKEN" %}}
```bash
curl "http://{{< influxdb/host >}}/health" \
--header "Authorization: Bearer SYSTEM_TOKEN"
```
{{% /code-placeholders %}}
Replace the following:

- {{% code-placeholder-key %}}`SYSTEM_TOKEN`{{% /code-placeholder-key %}}: System token that grants access to system endpoints (`/health`, `/metrics`, etc.)
### Data model
View File
@ -1,10 +1,17 @@
import { defineConfig } from 'cypress';
import { cwd as _cwd } from 'process';
import * as fs from 'fs';
import * as yaml from 'js-yaml';
import {
  BROKEN_LINKS_FILE,
  FIRST_BROKEN_LINK_FILE,
  initializeReport,
  readBrokenLinksReport,
} from './cypress/support/link-reporter.js';

export default defineConfig({
  e2e: {
    baseUrl: 'http://localhost:1315',
    defaultCommandTimeout: 10000,
    pageLoadTimeout: 30000,
    responseTimeout: 30000,
@ -12,34 +19,177 @@ module.exports = defineConfig({
    numTestsKeptInMemory: 5,
    projectId: 'influxdata-docs',
    setupNodeEvents(on, config) {
      // Browser setup
      on('before:browser:launch', (browser, launchOptions) => {
        if (browser.name === 'chrome' && browser.isHeadless) {
          launchOptions.args.push('--disable-dev-shm-usage');
          launchOptions.args.push('--disable-gpu');
          launchOptions.args.push('--disable-extensions');
          return launchOptions;
        }
      });
      on('task', {
        // Fetch the product list configured in /data/products.yml
        getData(filename) {
          return new Promise((resolve, reject) => {
            const cwd = _cwd();
            try {
              resolve(
                yaml.load(
                  fs.readFileSync(`${cwd}/data/${filename}.yml`, 'utf8')
                )
              );
            } catch (e) {
              reject(e);
            }
          });
        },
        // Log task for reporting
        log(message) {
          if (typeof message === 'object') {
            if (message.type === 'error') {
              console.error(`\x1b[31m${message.message}\x1b[0m`); // Red
            } else if (message.type === 'warning') {
              console.warn(`\x1b[33m${message.message}\x1b[0m`); // Yellow
            } else if (message.type === 'success') {
              console.log(`\x1b[32m${message.message}\x1b[0m`); // Green
            } else if (message.type === 'divider') {
              console.log(`\x1b[90m${message.message}\x1b[0m`); // Gray
            } else {
              console.log(message.message || message);
            }
          } else {
            console.log(message);
          }
          return null;
        },

        // File tasks
        writeFile({ path, content }) {
          try {
            fs.writeFileSync(path, content);
            return true;
          } catch (error) {
            console.error(`Error writing to file ${path}: ${error.message}`);
            return { error: error.message };
          }
        },
        readFile(path) {
          try {
            return fs.existsSync(path) ? fs.readFileSync(path, 'utf8') : null;
          } catch (error) {
            console.error(`Error reading file ${path}: ${error.message}`);
            return { error: error.message };
          }
        },

        // Broken links reporting tasks
        initializeBrokenLinksReport() {
          return initializeReport();
        },
        // Special case domains are now handled directly in the test without additional reporting
        // This task is kept for backward compatibility but doesn't do anything special
        reportSpecialCaseLink(linkData) {
          console.log(
            `✅ Expected status code: ${linkData.url} (status: ${linkData.status}) is valid for this domain`
          );
          return true;
        },
        reportBrokenLink(linkData) {
          try {
            // Validate link data
            if (!linkData || !linkData.url || !linkData.page) {
              console.error('Invalid link data provided');
              return false;
            }

            // Read current report
            const report = readBrokenLinksReport();

            // Find or create entry for this page
            let pageReport = report.find((r) => r.page === linkData.page);
            if (!pageReport) {
              pageReport = { page: linkData.page, links: [] };
              report.push(pageReport);
            }

            // Check if link is already in the report to avoid duplicates
            const isDuplicate = pageReport.links.some(
              (link) => link.url === linkData.url && link.type === linkData.type
            );

            if (!isDuplicate) {
              // Add the broken link to the page's report
              pageReport.links.push({
                url: linkData.url,
                status: linkData.status,
                type: linkData.type,
                linkText: linkData.linkText,
              });

              // Write updated report back to file
              fs.writeFileSync(
                BROKEN_LINKS_FILE,
                JSON.stringify(report, null, 2)
              );

              // Store first broken link if not already recorded
              const firstBrokenLinkExists =
                fs.existsSync(FIRST_BROKEN_LINK_FILE) &&
                fs.readFileSync(FIRST_BROKEN_LINK_FILE, 'utf8').trim() !== '';

              if (!firstBrokenLinkExists) {
                // Store first broken link with complete information
                const firstBrokenLink = {
                  url: linkData.url,
                  status: linkData.status,
                  type: linkData.type,
                  linkText: linkData.linkText,
                  page: linkData.page,
                  time: new Date().toISOString(),
                };

                fs.writeFileSync(
                  FIRST_BROKEN_LINK_FILE,
                  JSON.stringify(firstBrokenLink, null, 2)
                );

                console.error(
                  `🔴 FIRST BROKEN LINK: ${linkData.url} (${linkData.status}) - ${linkData.type} on page ${linkData.page}`
                );
              }

              // Log the broken link immediately to console
              console.error(
                `❌ BROKEN LINK: ${linkData.url} (${linkData.status}) - ${linkData.type} on page ${linkData.page}`
              );
            }

            return true;
          } catch (error) {
            console.error(`Error reporting broken link: ${error.message}`);
            // Even if there's an error, we want to ensure the test knows there was a broken link
            return true;
          }
        },
      });

      // Load plugins file using dynamic import for ESM compatibility
      return import('./cypress/plugins/index.js').then((module) => {
        return module.default(on, config);
      });
    },
    specPattern: 'cypress/e2e/**/*.cy.{js,jsx,ts,tsx}',
    supportFile: 'cypress/support/e2e.js',
    viewportWidth: 1280,
    viewportHeight: 720,
  },
  env: {
    test_subjects: '',
  },
});
View File
@ -1,11 +1,17 @@
/// <reference types="cypress" />

describe('Article', () => {
  const subjects = Cypress.env('test_subjects').split(',');
  // Always use HEAD for downloads to avoid timeouts
  const useHeadForDownloads = true;

  // Set up initialization for tests
  before(() => {
    // Initialize the broken links report
    cy.task('initializeBrokenLinksReport');
  });

  // Helper function to identify download links
  function isDownloadLink(href) {
    // Check for common download file extensions
    const downloadExtensions = [
@ -45,130 +51,192 @@ describe('Article links', () => {
  }
  // Helper function to make appropriate request based on link type
  function testLink(href, linkText = '', pageUrl) {
    // Common request options for both methods
    const requestOptions = {
      failOnStatusCode: true,
      timeout: 15000, // Increased timeout for reliability
      followRedirect: true, // Explicitly follow redirects
      retryOnNetworkFailure: true, // Retry on network issues
      retryOnStatusCodeFailure: true, // Retry on 5xx errors
    };

    function handleFailedLink(url, status, type, redirectChain = '') {
      // Report the broken link
      cy.task('reportBrokenLink', {
        url: url + redirectChain,
        status,
        type,
        linkText,
        page: pageUrl,
      });
      // Throw error for broken links
      throw new Error(
        `BROKEN ${type.toUpperCase()} LINK: ${url} (status: ${status})${redirectChain} on ${pageUrl}`
      );
    }

    if (useHeadForDownloads && isDownloadLink(href)) {
      cy.log(`** Testing download link with HEAD: ${href} **`);
      cy.request({
        method: 'HEAD',
        url: href,
        ...requestOptions,
      }).then((response) => {
        // Check final status after following any redirects
        if (response.status >= 400) {
          // Build redirect info string if available
          const redirectInfo =
            response.redirects && response.redirects.length > 0
              ? ` (redirected to: ${response.redirects.join(' -> ')})`
              : '';
          handleFailedLink(href, response.status, 'download', redirectInfo);
        }
      });
    } else {
      cy.log(`** Testing link: ${href} **`);
      cy.request({
        url: href,
        ...requestOptions,
      }).then((response) => {
        // Check final status after following any redirects
        if (response.status >= 400) {
          // Build redirect info string if available
          const redirectInfo =
            response.redirects && response.redirects.length > 0
              ? ` (redirected to: ${response.redirects.join(' -> ')})`
              : '';
          handleFailedLink(href, response.status, 'regular', redirectInfo);
        }
      });
    }
  }
  // Test implementation for subjects
  subjects.forEach((subject) => {
    it(`${subject} has valid internal links`, function () {
      cy.visit(`${subject}`, { timeout: 20000 });

      // Test internal links
      cy.get('article, .api-content').then(($article) => {
        // Find links without failing the test if none are found
        const $links = $article.find('a[href^="/"]');
        if ($links.length === 0) {
          cy.log('No internal links found on this page');
          return;
        }
        // Now test each link
        cy.wrap($links).each(($a) => {
          const href = $a.attr('href');
          const linkText = $a.text().trim();
          testLink(href, linkText, subject);
        });
      });
    });
-  it(`checks anchor links on ${subject} (with warnings for missing targets)`, function () {
+  it(`${subject} has valid anchor links`, function () {
     cy.visit(`${subject}`);
-    // Track missing anchors for summary
-    const missingAnchors = [];
-    // Process anchor links individually
-    cy.get('article').then(($article) => {
-      const $anchorLinks = $article.find('a[href^="#"]');
+    // Define selectors for anchor links to ignore, such as behavior triggers
+    const ignoreLinks = ['.tabs a[href^="#"]', '.code-tabs a[href^="#"]'];
+    const anchorSelector =
+      'a[href^="#"]:not(' + ignoreLinks.join('):not(') + ')';
+    cy.get('article, .api-content').then(($article) => {
+      const $anchorLinks = $article.find(anchorSelector);
       if ($anchorLinks.length === 0) {
         cy.log('No anchor links found on this page');
         return;
       }
       cy.wrap($anchorLinks).each(($a) => {
         const href = $a.prop('href');
+        const linkText = $a.text().trim();
         if (href && href.length > 1) {
-          // Skip empty anchors (#)
           // Get just the fragment part
           const url = new URL(href);
           const anchorId = url.hash.substring(1); // Remove the # character
           if (!anchorId) {
             cy.log(`Skipping empty anchor in ${href}`);
             return;
           }
-          // Use DOM to check if the element exists, but don't fail if missing
+          // Use DOM to check if the element exists
           cy.window().then((win) => {
             const element = win.document.getElementById(anchorId);
-            if (element) {
-              cy.log(`✅ Anchor target exists: #${anchorId}`);
-            } else {
-              // Just warn about the missing anchor
-              cy.log(`⚠️ WARNING: Missing anchor target: #${anchorId}`);
-              missingAnchors.push(anchorId);
+            if (!element) {
+              cy.task('reportBrokenLink', {
+                url: `#${anchorId}`,
+                status: 404,
+                type: 'anchor',
+                linkText,
+                page: subject,
+              });
+              cy.log(`⚠️ Missing anchor target: #${anchorId}`);
             }
           });
         }
-      })
-        .then(() => {
-          // After checking all anchors, log a summary
-          if (missingAnchors.length > 0) {
-            cy.log(
-              `⚠️ Found ${missingAnchors.length} missing anchor targets: ${missingAnchors.join(', ')}`
-            );
-          } else {
-            cy.log('✅ All anchor targets are valid');
-          }
-        });
+      });
     });
   });
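Instead of failing the test, the rewritten anchor check records each miss through the reportBrokenLink task. Cypress tasks execute in the Node process, so a registration along the following lines would be needed in the project's Cypress config; this is a sketch under that assumption, not the repository's actual configuration:

    // cypress.config.js (hypothetical sketch)
    const { defineConfig } = require('cypress');

    const brokenLinks = [];

    module.exports = defineConfig({
      e2e: {
        setupNodeEvents(on) {
          on('task', {
            // Collect each report; a task must return a value or null.
            reportBrokenLink(link) {
              brokenLinks.push(link);
              console.error(
                `Broken ${link.type} link on ${link.page}: ${link.url} (status ${link.status})`
              );
              return null;
            },
          });
        },
      },
    });

The external-link test, rewritten below, feeds the same reporting path.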
-  it(`contains valid external links on ${subject}`, function () {
+  it(`${subject} has valid external links`, function () {
+    // Check if we should skip external links entirely
+    if (Cypress.env('skipExternalLinks') === true) {
+      cy.log(
+        'Skipping all external links as configured by skipExternalLinks'
+      );
+      return;
+    }
     cy.visit(`${subject}`);
+    // Define allowed external domains to test
+    const allowedExternalDomains = ['github.com', 'kapa.ai'];
     // Test external links
-    // 1. Timeout and fail the test if article is not found
-    // 2. Check each link.
-    // 3. If no links are found, continue without failing
-    cy.get('article').then(($article) => {
+    cy.get('article, .api-content').then(($article) => {
       // Find links without failing the test if none are found
       const $links = $article.find('a[href^="http"]');
       if ($links.length === 0) {
         cy.log('No external links found on this page');
         return;
       }
-      cy.wrap($links).each(($a) => {
-        const href = $a.attr('href');
-        testLink(href);
+      // Filter links to only include allowed domains
+      const $allowedLinks = $links.filter((_, el) => {
+        const href = el.getAttribute('href');
+        try {
+          const url = new URL(href);
+          return allowedExternalDomains.some(
+            (domain) =>
+              url.hostname === domain || url.hostname.endsWith(`.${domain}`)
+          );
+        } catch (e) {
+          return false;
+        }
+      });
+      if ($allowedLinks.length === 0) {
+        cy.log('No links to allowed external domains found on this page');
+        return;
+      }
+      cy.log(
+        `Found ${$allowedLinks.length} links to allowed external domains to test`
+      );
+      cy.wrap($allowedLinks).each(($a) => {
+        const href = $a.attr('href');
+        const linkText = $a.text().trim();
+        testLink(href, linkText, subject);
       });
     });
   });
 });
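Because the rewritten external-link test consults Cypress.env('skipExternalLinks'), outbound requests can be disabled per run. Cypress's --env CLI flag coerces the string true to a boolean, so an invocation such as `npx cypress run --env skipExternalLinks=true` would take this early-return path; the exact script or spec path used in this repository is not shown in the hunk.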