Merge branch 'master' into sql-difference-fn

sql-difference-fn
Jason Stirnaman 2026-03-02 11:37:41 -06:00 committed by GitHub
commit c6016796a0
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
498 changed files with 3887 additions and 1589 deletions

50
.ci/shellcheck/shellcheck.sh Executable file
View File

@ -0,0 +1,50 @@
#!/bin/bash
set -euo pipefail
# Run ShellCheck to lint shell scripts for common issues.
# Uses a local shellcheck binary if available, otherwise falls back to Docker.
#
# Example usage:
#
#   Lint a single script:
#     .ci/shellcheck/shellcheck.sh test/scripts/init-influxdb3.sh
#
#   Lint all staged shell scripts (used by Lefthook):
#     .ci/shellcheck/shellcheck.sh scripts/deploy-staging.sh test/scripts/*.sh

# Pinned version: used for the Docker fallback image tag and as the minimum
# acceptable local version.
SHELLCHECK_VERSION="0.10.0"
# Derive minimum major.minor from the pinned version so there's one source of truth.
SHELLCHECK_MAJOR_MIN=${SHELLCHECK_VERSION%%.*}
_rest=${SHELLCHECK_VERSION#*.}
SHELLCHECK_MINOR_MIN=${_rest%%.*}
readonly SHELLCHECK_VERSION SHELLCHECK_MAJOR_MIN SHELLCHECK_MINOR_MIN

if command -v shellcheck >/dev/null 2>&1; then
  # Extract "X.Y.Z" from `shellcheck --version` output; empty if parsing fails.
  local_version=$(shellcheck --version 2>/dev/null \
    | grep -oE 'version: [0-9.]+' \
    | grep -oE '[0-9.]+' || true)
  local_major=${local_version%%.*}
  local_rest=${local_version#*.}
  local_minor=${local_rest%%.*}
  # Sanitize: a malformed version string (e.g. "0.x") would make the numeric
  # -lt comparison below error out and abort the script under `set -e`.
  [[ "$local_minor" =~ ^[0-9]+$ ]] || local_minor=0
  if [[ ! "$local_major" =~ ^[0-9]+$ ]] ||
    [[ "$local_major" -lt "$SHELLCHECK_MAJOR_MIN" ]] ||
    [[ "$local_major" -eq "$SHELLCHECK_MAJOR_MIN" && "$local_minor" -lt "$SHELLCHECK_MINOR_MIN" ]]; then
    echo "WARNING: local ShellCheck version ($local_version) is older than pinned v${SHELLCHECK_VERSION}." >&2
    echo "  Upgrade: brew install shellcheck (or see https://www.shellcheck.net/)" >&2
    echo "  Falling back to Docker (koalaman/shellcheck:v${SHELLCHECK_VERSION})..." >&2
  else
    # exec replaces this shell so shellcheck's exit status becomes the
    # script's exit status (no separate `exit $?` needed).
    exec shellcheck "$@"
  fi
fi

# Docker fallback — mount repo read-only, run from workdir
exec docker run \
  --rm \
  --label tag=influxdata-docs \
  --label stage=lint \
  --mount type=bind,src="$(pwd)",dst=/workdir,readonly \
  -w /workdir \
  "koalaman/shellcheck:v${SHELLCHECK_VERSION}" \
  "$@"

View File

@ -1,27 +1,58 @@
extends: spelling
message: "Did you really mean '%s'?"
level: warning
# Exclude from spell checking:
# - code: fenced code blocks (```...```)
# - raw: inline code (`...`)
# - table.*: table headers and cells
# Scope configuration for spell checking
# NOTE: Code blocks are intentionally INCLUDED to catch spelling errors in:
# - Comments (e.g., // NOTE: this is importent)
# - Documentation strings
# - Inline code comments (e.g., #!/bin/bash #comment typo)
# This enables detection of spelling mistakes in code block comments that
# users may copy into their own code.
scope:
- ~code
- ~raw
- ~table.header
- ~table.cell
- ~raw # Exclude inline code (`...`) - already has IDE spell check
- ~table.header # Exclude table headers and cells
- ~table.cell # (often contain abbreviations, constants, etc.)
ignore:
# Ignore the following words. All words are case-insensitive.
# To use case-sensitive matching, use the filters section or vocabulary Terms.
- InfluxDataDocs/Terms/ignore.txt
- InfluxDataDocs/Terms/query-functions.txt
filters:
# Allow product-specific Branding.yml configurations to handle [Ss]erverless
# while also allowing serverless as a valid dictionary word.
# === BRANDING TERMS ===
# Allow product-specific configurations to handle [Ss]erverless
# while also allowing "serverless" as a valid dictionary word
- '[Ss]erverless'
# === URL AND PATH PATTERNS ===
# Ignore URL paths (e.g., /api/v2/write, /kapacitor/v1/api/v2/tasks)
# Matches: /path/to/endpoint, /v2.0/api/write, /influxdb/v1.8/cli
- '/[a-zA-Z0-9/_\-\.\{\}]+'
# Ignore full URLs
- 'https?://[^\s\)\]>"]+'
# Ignore shortcode attribute values (e.g., endpoint="..." method="...")
# Ignore full URLs (http, https, ftp, ftps, ssh, file, etc.)
# Matches: https://docs.example.com, http://localhost:8086, ftp://server.com, ssh://host
- '(?:https?|ftp|ftps|ssh|file)://[^\s\)\]>"]+'
# === CODE IDENTIFIERS & VARIABLES ===
# camelCase and snake_case identifiers (requires >=1 uppercase OR >=1 underscore)
# camelCase: _*[a-z]+(?:[A-Z][a-z0-9]*)+(?:[A-Z][a-zA-Z0-9]*)* - requires >=1 uppercase
# snake_case: [a-z_][a-z0-9]*_[a-z0-9_]* - requires >=1 underscore
# Matches: myVariable, targetField, _privateVar, my_variable, terminationGracePeriodSeconds
# Does NOT match prose: provide, database, variable (normal words)
- '(?:_*[a-z]+(?:[A-Z][a-z0-9]*)+(?:[A-Z][a-zA-Z0-9]*)*|[a-z_][a-z0-9]*_[a-z0-9_]*)'
# UPPER_CASE constants and environment variables
# Matches: API_KEY, MY_CONSTANT, AWS_REGION, INFLUXDB_TOKEN
- '[A-Z_][A-Z0-9_]+'
# === CODE LITERALS ===
# Hexadecimal values (0xFF, 0xDEADBEEF, etc.)
- '0[xX][0-9a-fA-F]+'
# Version numbers and semantic versioning (1.0, 2.3.1, 0.101.0, etc.)
- '\d+\.\d+(?:\.\d+)*'
# === HUGO SHORTCODES ===
# Ignore shortcode attribute values (endpoint="...", method="...", etc.)
# Matches: {{<endpoint="...">}}, [method="GET"]
- '(?:endpoint|method|url|href|src|path)="[^"]+"'
# === CODE PUNCTUATION & SYMBOLS ===
# Common programming symbols that may appear in patterns
- '[@#$%^&*()_+=\[\]{};:,.<>?/\\|-]+'

View File

@ -1,20 +1,41 @@
#!/bin/bash
set -euo pipefail
# Run Vale to lint files for writing style and consistency.
# Uses a local vale binary if available, otherwise falls back to Docker.
#
# Example usage:
#
# Lint all added and modified files in the cloud-dedicated directory and report suggestions, warnings, and errors:
# git diff --name-only --diff-filter=d HEAD | grep "content/influxdb/cloud-dedicated" | xargs .ci/vale/vale.sh --minAlertLevel=suggestion --config=content/influxdb/cloud-dedicated/.vale.ini

# Pinned version: used for the Docker fallback image tag. The major version is
# the minimum acceptable version for a local vale binary.
VALE_VERSION="3.13.1"
VALE_MAJOR_MIN=3
readonly VALE_VERSION VALE_MAJOR_MIN

if command -v vale >/dev/null 2>&1; then
  # Extract "X.Y.Z" from `vale --version` output; empty if parsing fails.
  local_version=$(vale --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1 || true)
  local_major=${local_version%%.*}
  if [[ -z "$local_major" || "$local_major" -lt "$VALE_MAJOR_MIN" ]]; then
    echo "WARNING: local Vale version ($local_version) may be incompatible (expected v${VALE_MAJOR_MIN}.x+)." >&2
    echo "  Upgrade or install Vale: see https://vale.sh/docs/install/ (for Homebrew: brew upgrade vale)" >&2
    echo "  Falling back to Docker (jdkato/vale:v${VALE_VERSION})..." >&2
  else
    # exec replaces this shell so vale's exit status becomes the script's
    # exit status (no separate `exit $?` needed).
    exec vale "$@"
  fi
fi

# Docker fallback: lint the files provided as arguments
exec docker run \
  --rm \
  --label tag=influxdata-docs \
  --label stage=lint \
  --mount type=bind,src="$(pwd)",dst=/workdir \
  -w /workdir \
  --entrypoint /bin/vale \
  "jdkato/vale:v${VALE_VERSION}" \
  "$@"

View File

@ -17,7 +17,9 @@
"Bash(curl:*)",
"Bash(gh:*)",
"Bash(hugo:*)",
"Bash(htmlq:*)",
"Bash(jq:*)",
"Bash(yq:*)",
"Bash(mkdir:*)",
"Bash(cat:*)",
"Bash(ls:*)",

View File

@ -30,7 +30,7 @@ Made content changes?
└─ Run tests (See Part 2: Testing)
Need to verify technical accuracy?
└─ Use Kapa MCP server (See Part 4: Fact-Checking)
└─ Use documentation MCP server (See Part 4: Fact-Checking)
Need to write/debug Vale rules?
└─ See vale-rule-config skill (for CI/Quality Engineers)
@ -165,20 +165,12 @@ See [DOCS-FRONTMATTER.md](../../../DOCS-FRONTMATTER.md#alternative-links-alt_lin
### Check product resource terms are cross-referenced
When creating or editing content, check that product resource terms link to `admin/` or `reference/` pages that help the user understand and set up the resource.
Product resource terms often appear inside `code-placeholder-key` shortcode text and bullet item text.
Example product resource terms:
- "database token"
- "database name"
**TODOs for CI/config:**
- Add automated check to validate `alt_links` are present when shared content paths differ across products
- Add check for product-specific URL patterns in shared content (e.g., Cloud Serverless uses `/reference/regions` for URLs, Cloud Dedicated/Clustered do not have this page - cluster URLs come from account setup)
- Add check/helper to ensure resource references (tokens, databases, buckets) link to proper admin pages using `/influxdb3/version/admin/` pattern
- Rethink `code-placeholder-key` workflow: `docs placeholders` adds `placeholders` attributes to code blocks but doesn't generate the "Replace the following:" lists with `{{% code-placeholder-key %}}` shortcodes. Either improve automation to generate these lists, or simplify by removing `code-placeholder-key` if the attribute alone is sufficient
## Part 2: Testing Workflow
After making content changes, run tests to validate:
@ -187,7 +179,7 @@ After making content changes, run tests to validate:
```bash
# Verify Hugo can build the site
hugo --quiet
yarn hugo --quiet
# Look for errors like:
# - Template errors
@ -237,7 +229,7 @@ node cypress/support/run-e2e-specs.js \
**Important prerequisites:**
- API tests: Run `yarn build:api-docs` first
- Markdown validation: Run `hugo --quiet && yarn build:md` first
- Markdown validation: Run `yarn hugo --quiet && yarn build:md` first
See **cypress-e2e-testing** skill for detailed test workflow.
@ -268,7 +260,7 @@ See **vale-linting** skill for comprehensive Vale workflow.
```bash
# Start Hugo development server
hugo server
yarn hugo server
# Visit http://localhost:1313
# Preview your changes in browser
@ -284,13 +276,13 @@ Vale checks documentation for style guide violations, spelling errors, and brand
```bash
# Basic linting (all markdown files)
docker compose run -T vale content/**/*.md
.ci/vale/vale.sh content/**/*.md
# Lint specific product
docker compose run -T vale content/influxdb3/core/**/*.md
.ci/vale/vale.sh content/influxdb3/core/**/*.md
# With specific config and alert level
docker compose run -T vale \
.ci/vale/vale.sh \
--config=content/influxdb/cloud-dedicated/.vale.ini \
--minAlertLevel=error \
content/influxdb/cloud-dedicated/write-data/**/*.md
@ -335,26 +327,20 @@ This paragraph contains technical terms that Vale might flag.
<!-- vale InfluxDataDocs.TechnicalTerms = YES -->
```
### VS Code Integration (Optional)
For real-time linting while editing:
1. Install the [Vale VSCode extension](https://marketplace.visualstudio.com/items?itemName=ChrisChinchilla.vale-vscode)
2. Configure the extension to use the workspace Vale:
- Set `Vale:Vale CLI:Path` to `${workspaceFolder}/node_modules/.bin/vale`
### When to Run Vale
- **During editing**: If you have VS Code extension enabled
- **Before committing**: Pre-commit hooks run Vale automatically
- **After content changes**: Run manually to catch issues early
- **In CI/CD**: Automated on pull requests
## Part 4: Fact-Checking with MCP Server
## Part 4: Fact-Checking with the Documentation MCP Server
The InfluxData documentation MCP server (`influxdata`) provides access to **Ask AI (Kapa.ai)** for fact-checking and answering questions about InfluxData products.
The **InfluxDB documentation MCP server** lets you search InfluxDB documentation (the rendered `content` managed in this repository) and related InfluxData references (source code READMEs, community forums, and some third-party tool documentation) directly from your AI assistant.
### When to Use MCP Server
### When to Use the Documentation MCP Server
The primary source of content in the Documentation MCP Server is the fully rendered `public` HTML from this repository.
Use the Documentation MCP Server when the information here is inconclusive, when you need to deepen your understanding of InfluxData products and integrations, or when identifying content gaps in the documentation.
**Use for:**
@ -362,67 +348,64 @@ The InfluxData documentation MCP server (`influxdata`) provides access to **Ask
- Checking current API syntax
- Confirming feature availability across products
- Understanding complex product behavior
- Finding related documentation
- Finding related documentation and code examples
- Identifying and analyzing content gaps in the documentation
**Don't use for:**
- Basic style/grammar checks
- Basic style/grammar checks (use Vale)
- Link validation (use `yarn test:links`)
- Testing code examples (use `yarn test:codeblocks`)
### Available MCP Tools
### Setup
The `influxdata` MCP server provides:
The documentation MCP server is hosted—no local installation required. Add the server URL to your AI assistant's MCP configuration.
```typescript
// Query documentation knowledge base
kapa_query({
query: string, // Your question
stream: boolean // Stream response (optional)
})
**MCP server URL:**
// Examples:
kapa_query({
query: "How do I create a database in InfluxDB 3 Core?",
stream: false
})
kapa_query({
query: "What's the difference between InfluxDB 3 Core and Enterprise clustering?",
stream: false
})
kapa_query({
query: "Show me InfluxQL SELECT syntax for filtering by time range",
stream: false
})
```text
https://influxdb-docs.mcp.kapa.ai
```
### Setup Requirements
The MCP server requires configuration in `.mcp.json`:
**Claude Desktop configuration** (Settings > Developer):
```json
{
"mcpServers": {
"influxdata": {
"type": "stdio",
"command": "node",
"args": ["${DOCS_MCP_SERVER_PATH}/dist/index.js"],
"env": {
"DOCS_API_KEY_FILE": "${DOCS_API_KEY_FILE:-$HOME/.env.docs-kapa-api-key}",
"DOCS_MODE": "external-only",
"MCP_LOG_LEVEL": "${MCP_LOG_LEVEL:-info}"
}
"influxdb-docs": {
"url": "https://influxdb-docs.mcp.kapa.ai"
}
}
}
```
**Required:**
For other AI assistants, see the [InfluxDB documentation MCP server guide](/influxdb3/core/admin/mcp-server/)
and verify the MCP configuration options and syntax for your specific AI assistant.
- `DOCS_MCP_SERVER_PATH`: Path to docs-mcp-server installation
- `DOCS_API_KEY_FILE`: Path to file containing Kapa.ai API key
**Rate limits** (per Google OAuth user):
- 40 requests per hour
- 200 requests per day
### Available Tool
The MCP server exposes a semantic search tool:
```text
search_influxdb_knowledge_sources
```
**What it does:**
- Searches all InfluxDB documentation for a given query
- Returns relevant chunks in descending order of relevance
- Each chunk includes `source_url` and Markdown `content`
**Example queries:**
- "How do I create a database in InfluxDB 3 Core?"
- "What's the difference between InfluxDB 3 Core and Enterprise clustering?"
- "Show me InfluxQL SELECT syntax for filtering by time range"
### Example Workflow: Fact-Checking During Editing
@ -431,17 +414,14 @@ The MCP server requires configuration in `.mcp.json`:
1. Draft claims: "InfluxDB 3 supports up to 10,000 databases per instance"
2. Verify with MCP:
kapa_query({
query: "What are the database limits in InfluxDB 3 Core and Enterprise?",
stream: false
})
2. Ask your AI assistant to verify using the MCP server:
"What are the database limits in InfluxDB 3 Core and Enterprise?"
3. MCP response clarifies actual limits differ by product
3. MCP response returns documentation chunks with actual limits
4. Update draft with accurate information
5. Cite source in documentation if needed
5. Cite the source_url in documentation if needed
```
### Best Practices
@ -450,14 +430,14 @@ The MCP server requires configuration in `.mcp.json`:
- Ask specific, focused questions
- Verify claims about features, limits, syntax
- Cross-check answers with official docs links provided
- Cross-check answers with source URLs provided
- Use for understanding complex interactions
**DON'T:**
- Rely solely on MCP without reviewing source docs
- Use for subjective style decisions
- Expect real-time product behavior (it knows documentation, not live systems)
- Expect real-time product behavior (it searches documentation, not live systems)
- Use as a replacement for testing (always test code examples)
## Part 5: Complete Example Workflows
@ -474,14 +454,11 @@ docs create database-tutorial.md --products influxdb3-core,influxdb3-enterprise
# - content/influxdb3/enterprise/guides/database-tutorial.md (frontmatter)
# Step 2: Verify technical accuracy
# Use MCP to check claims in the tutorial
kapa_query({
query: "Verify database creation syntax for InfluxDB 3",
stream: false
})
# Ask your AI assistant (with MCP configured) to verify claims:
# "Verify database creation syntax for InfluxDB 3"
# Step 3: Test Hugo build
hugo --quiet
yarn hugo --quiet
# Step 4: Run E2E tests
node cypress/support/run-e2e-specs.js \
@ -511,13 +488,10 @@ docs edit https://docs.influxdata.com/influxdb3/core/reference/sql/
# Step 2: Make edits to the shared source file
# Step 3: Fact-check changes with MCP
kapa_query({
query: "Verify SQL WHERE clause syntax in InfluxDB 3",
stream: false
})
# Ask your AI assistant: "Verify SQL WHERE clause syntax in InfluxDB 3"
# Step 4: Test the build
hugo --quiet
yarn hugo --quiet
# Step 5: Test affected pages
node cypress/support/run-e2e-specs.js \
@ -535,10 +509,10 @@ yarn test:links
# Edit content/influxdb3/core/get-started/_index.md
# Step 2: Test Hugo build
hugo --quiet
yarn hugo --quiet
# Step 3: Quick visual check
hugo server
yarn hugo server
# Visit http://localhost:1313/influxdb3/core/get-started/
# Done! (No need for comprehensive testing on typo fixes)
@ -550,7 +524,7 @@ hugo server
```bash
# Check for detailed errors
hugo
yarn hugo
# Common issues:
# - Invalid frontmatter YAML
@ -578,19 +552,17 @@ touch content/influxdb3/enterprise/path/to/file.md
### MCP Server Not Responding
```bash
# Check configuration
cat .mcp.json
The hosted MCP server (`https://influxdb-docs.mcp.kapa.ai`) requires:
# Verify environment variables
echo $DOCS_MCP_SERVER_PATH
echo $DOCS_API_KEY_FILE
1. **Google OAuth authentication** - On first use, sign in with Google
2. **Rate limits** - 40 requests/hour, 200 requests/day per user
# Check API key file exists and has content
cat $HOME/.env.docs-kapa-api-key
**Troubleshooting steps:**
# Check MCP server logs (if available)
```
- Verify your AI assistant has the MCP server URL configured correctly
- Check if you've exceeded rate limits (wait an hour or until the next day)
- Try re-authenticating by clearing your OAuth session
- Ensure your network allows connections to `*.kapa.ai`
### Cypress Tests Fail
@ -618,15 +590,14 @@ ls content/influxdb3/core/api/
| Add placeholders to code | `docs placeholders file.md` or `docs placeholders file.md --dry` |
| Audit documentation | `docs audit --products influxdb3_core` or `docs audit --products /influxdb3/core` |
| Generate release notes | `docs release-notes v3.1.0 v3.2.0 --products influxdb3_core` |
| Build Hugo site | `hugo --quiet` |
| Run Vale linting | `docker compose run -T vale content/**/*.md` |
| Build Hugo site | `yarn hugo --quiet` |
| Run Vale linting | `.ci/vale/vale.sh --config=.vale.ini content/path/` |
| Test links | `yarn test:links` |
| Test code blocks | `yarn test:codeblocks:all` |
| Test specific page | `yarn test:e2e content/path/file.md` |
| Fact-check with MCP | `kapa_query({ query: "...", stream: false })` |
| Preview locally | `hugo server` (visit localhost:1313) |
| Fact-check with MCP | Ask your AI assistant (with the `search_influxdb_knowledge_sources` tool configured) |
| Preview locally | `yarn hugo server` (visit localhost:1313) |
| Generate API docs | `yarn build:api-docs` (before API reference tests) |
| Style linting | `.ci/vale/vale.sh --config=.vale.ini content/path/` |
**Note:** `--products` accepts both product keys (`influxdb3_core`) and content paths (`/influxdb3/core`).
@ -644,8 +615,8 @@ ls content/influxdb3/core/api/
- [ ] If shared content: Sourcing files touched (or used `docs edit`)
- [ ] If shared content: Check for path differences and add `alt_links` if paths vary
- [ ] Technical accuracy verified (MCP fact-check if needed)
- [ ] Hugo builds without errors (`hugo --quiet`)
- [ ] Vale style linting passes (`docker compose run -T vale content/**/*.md`)
- [ ] Hugo builds without errors (`yarn hugo --quiet`)
- [ ] Vale style linting passes (`.ci/vale/vale.sh --config=.vale.ini content/path/`)
- [ ] Links validated (`yarn test:links`)
- [ ] Code examples tested (if applicable)
- [ ] E2E tests pass for affected pages

View File

@ -276,6 +276,52 @@ describe('Component Name', () => {
});
```
### Using Real Configuration Data
Import real configuration data (from `data/*.yml`) via `cy.task('getData')` instead of hardcoding expected values. This keeps tests in sync with the source of truth.
```javascript
describe('Product shortcodes', function () {
let products;
before(function () {
// Load products.yml via the getData task defined in cypress.config.js
cy.task('getData', 'products').then((data) => {
products = data;
});
});
it('renders the correct product name', function () {
cy.visit('/influxdb3/core/_test/shortcodes/');
// Assert against YAML data, not a hardcoded string
cy.get('[data-testid="product-name"]').should(
'contain.text',
products.influxdb3_core.name
);
});
it('renders current-version from YAML', function () {
cy.visit('/influxdb/v2/_test/shortcodes/');
// Derive expected value the same way the Hugo shortcode does
const patch = products.influxdb.latest_patches?.v2;
const expected = patch ? patch.replace(/\.\d+$/, '') : '';
cy.get('[data-testid="current-version"] .current-version').should(
'have.text',
expected
);
});
});
```
**Key principles:**
- Load YAML data in `before()` — available to all tests in the suite
- Derive expected values from the data, mirroring shortcode logic
- Only hardcode what you must: content paths and test page URLs
- Derive boolean flags from data fields (e.g., `product.distributed_architecture`, `product.limits`)
See `cypress/e2e/content/shortcodes.cy.js` and `cypress/e2e/content/latest-patch-shortcode.cy.js` for full examples.
### Testing Links
```javascript

View File

@ -298,13 +298,13 @@ Individual rules are YAML files in style directories:
```bash
# Test specific rule on one file
docker compose run -T vale \
.ci/vale/vale.sh \
--config=.vale.ini \
--minAlertLevel=suggestion \
content/influxdb3/core/get-started/_index.md
# Test only error-level issues
docker compose run -T vale \
.ci/vale/vale.sh \
--config=content/influxdb/cloud-dedicated/.vale.ini \
--minAlertLevel=error \
content/influxdb/cloud-dedicated/**/*.md
@ -455,7 +455,7 @@ EOF
```bash
# Test on one file first
docker compose run -T vale content/influxdb3/core/get-started/_index.md
.ci/vale/vale.sh content/influxdb3/core/get-started/_index.md
```
### Step 3: Refine if needed
@ -475,7 +475,7 @@ tokens:
```bash
# Test on entire product
docker compose run -T vale content/influxdb3/**/*.md
.ci/vale/vale.sh content/influxdb3/**/*.md
```
## Related Skills

6
.codespellignore Normal file
View File

@ -0,0 +1,6 @@
# Product and technical terms to ignore
# - Azure Kubernetes Service (AKS)
aks
AKS
# - InfluxData product feature for scriptable tasks/queries
invokable

11
.codespellrc Normal file
View File

@ -0,0 +1,11 @@
[codespell]
# Use only 'clear' dictionary to minimize false positives on documentation
# 'rare', 'code' are too aggressive for reference documentation
builtin = clear
# Skip directories with generated or configuration content
skip = public,node_modules,dist,.git,.vale,api-docs
# Use external ignore file for product branding terms and technical acronyms
# (See .codespellignore for the complete list)
ignore-words = .codespellignore

93
.github/workflows/check-pinned-deps.yml vendored Normal file
View File

@ -0,0 +1,93 @@
name: Check pinned dependency updates

on:
  schedule:
    # Run every Monday at 09:00 UTC
    - cron: '0 9 * * 1'
  workflow_dispatch: # Allow manual trigger

# Each entry in the matrix defines a pinned dependency:
#   name: Human-readable name
#   repo: GitHub owner/repo to check for releases
#   file: Local file containing the pinned version
#   pattern: grep -oP pattern to extract the current version (must capture bare semver)
#   sed_pattern: sed expression to replace the old version with the new one.
#     Use CURRENT and LATEST as placeholders.
jobs:
  check-update:
    runs-on: ubuntu-latest
    permissions:
      contents: write
      pull-requests: write
    strategy:
      fail-fast: false
      matrix:
        dep:
          - name: Vale
            repo: errata-ai/vale
            file: .ci/vale/vale.sh
            pattern: '^VALE_VERSION="\K[^"]+'
            sed_pattern: 's/^VALE_VERSION="CURRENT"/VALE_VERSION="LATEST"/'
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Check for update
        id: check
        run: |
          set -euo pipefail
          # '|| true' so the explicit empty-value checks below can report a
          # useful error instead of set -e/pipefail aborting silently first.
          CURRENT=$(grep -oP '${{ matrix.dep.pattern }}' '${{ matrix.dep.file }}' || true)
          if [ -z "$CURRENT" ]; then
            echo "Failed to determine current version from ${{ matrix.dep.file }}" >&2
            exit 1
          fi
          echo "current=$CURRENT" >> "$GITHUB_OUTPUT"

          LATEST=$(curl -sSfL \
            -H "Accept: application/vnd.github+json" \
            -H "Authorization: Bearer ${{ github.token }}" \
            "https://api.github.com/repos/${{ matrix.dep.repo }}/releases/latest" \
            | jq -r '.tag_name' | sed 's/^v//' || true)
          if [ -z "$LATEST" ] || [ "$LATEST" = "null" ]; then
            echo "Failed to determine latest release for ${{ matrix.dep.repo }}" >&2
            exit 1
          fi
          echo "latest=$LATEST" >> "$GITHUB_OUTPUT"

          if [ "$CURRENT" = "$LATEST" ]; then
            echo "up-to-date=true" >> "$GITHUB_OUTPUT"
            echo "${{ matrix.dep.name }} is up to date ($CURRENT)"
          else
            echo "up-to-date=false" >> "$GITHUB_OUTPUT"
            echo "${{ matrix.dep.name }} update available: $CURRENT → $LATEST"
          fi

      - name: Update pinned version
        if: steps.check.outputs.up-to-date == 'false'
        run: |
          set -euo pipefail
          # Substitute the CURRENT/LATEST placeholders in the per-dependency
          # sed expression, then apply it to the pinned file.
          SED_EXPR='${{ matrix.dep.sed_pattern }}'
          SED_EXPR="${SED_EXPR//CURRENT/${{ steps.check.outputs.current }}}"
          SED_EXPR="${SED_EXPR//LATEST/${{ steps.check.outputs.latest }}}"
          sed -i "$SED_EXPR" '${{ matrix.dep.file }}'
          echo "Updated ${{ matrix.dep.file }}:"
          grep -n '${{ steps.check.outputs.latest }}' '${{ matrix.dep.file }}'

      - name: Create pull request
        if: steps.check.outputs.up-to-date == 'false'
        uses: peter-evans/create-pull-request@v7
        with:
          commit-message: "chore(deps): update ${{ matrix.dep.name }} to v${{ steps.check.outputs.latest }}"
          branch: "chore/update-${{ matrix.dep.name }}-${{ steps.check.outputs.latest }}"
          title: "chore(deps): update ${{ matrix.dep.name }} to v${{ steps.check.outputs.latest }}"
          body: |
            Updates pinned **${{ matrix.dep.name }}** version in `${{ matrix.dep.file }}`
            from v${{ steps.check.outputs.current }} to v${{ steps.check.outputs.latest }}.

            **Release notes**: https://github.com/${{ matrix.dep.repo }}/releases/tag/v${{ steps.check.outputs.latest }}
          labels: dependencies

1
.gitignore vendored
View File

@ -61,3 +61,4 @@ deploy/llm-markdown/lambda-edge/markdown-generator/config.json
*.d.ts.map
*.js.map
.eslintcache
.worktrees/

7
.shellcheckrc Normal file
View File

@ -0,0 +1,7 @@
# ShellCheck configuration for docs-v2
# https://www.shellcheck.net/wiki/
# Disable rules that are noisy in a docs repo context:
# SC1091 - Can't follow non-constant source (common with relative paths)
# SC2154 - Variable referenced but not assigned (often set by sourcing .env files)
disable=SC1091,SC2154

View File

@ -102,7 +102,7 @@ curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/influxdb3/core/
yarn test:links content/influxdb3/core/**/*.md
# Run style linting
docker compose run -T vale content/**/*.md
.ci/vale/vale.sh content/**/*.md
```
**📖 Complete Reference**: [DOCS-TESTING.md](DOCS-TESTING.md)

View File

@ -7,9 +7,9 @@ Ready to contribute?
1. [Sign the InfluxData CLA](#sign-the-influxdata-cla) (for substantial changes)
2. [Fork and clone](#fork-and-clone-influxdata-documentation-repository) this repository
3. [Install dependencies](#development-environment-setup) (Node.js, Yarn, Docker)
3. [Install dependencies](#development-environment-setup) (Node.js, Yarn, Vale; Docker for code block tests / optional Vale fallback)
4. Make your changes following [style guidelines](#making-changes)
5. [Test your changes](TESTING.md) (pre-commit and pre-push hooks run automatically)
5. [Test your changes](DOCS-TESTING.md) (pre-commit and pre-push hooks run automatically)
6. [Submit a pull request](#submission-process)
For detailed setup and reference information, see the sections below.
@ -80,18 +80,20 @@ manages git pre-commit and pre-push hooks for linting and testing Markdown conte
- [prettier](https://prettier.io/docs/en/): formats code, including Markdown, according to style rules for consistency
- [Cypress]: e2e testing for UI elements and URLs in content
### Install Docker
### Install Vale (style linting)
docs-v2 includes Docker configurations (`compose.yaml` and Dockerfiles) for running the Vale style linter and tests for code blocks (Shell, Bash, and Python) in Markdown files.
The `.ci/vale/vale.sh` wrapper runs Vale for style linting.
It uses a local `vale` binary if available, otherwise falls back to Docker.
Install [Docker](https://docs.docker.com/get-docker/) for your system.
1. **Option A — Install locally (recommended):** `brew install vale` (or see [Vale installation guide](https://vale.sh/docs/install/))
2. **Option B — Use Docker:** Install [Docker](https://docs.docker.com/get-docker/). The wrapper pulls a pinned Vale image automatically.
#### Build the test dependency image
### Install Docker (code block testing)
After you have installed Docker, run the following command to build the test
dependency image, `influxdata:docs-pytest`.
The tests defined in `compose.yaml` use the dependencies and execution
environment from this image.
Docker is required for code block tests (`compose.yaml` and `Dockerfile.pytest`).
1. Install [Docker](https://docs.docker.com/get-docker/) for your system.
2. Build the test dependency image:
```bash
docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest .
@ -357,10 +359,10 @@ yarn test:codeblocks:all
yarn test:links content/influxdb3/core/**/*.md
# Run style linting
docker compose run -T vale content/**/*.md
.ci/vale/vale.sh content/**/*.md
```
For comprehensive testing information, including code block testing, link validation, style linting, and advanced testing procedures, see **[TESTING.md](TESTING.md)**.
For comprehensive testing information, including code block testing, link validation, style linting, and advanced testing procedures, see **[DOCS-TESTING.md](DOCS-TESTING.md)**.
---
@ -406,18 +408,13 @@ For detailed reference documentation, see:
#### Vale style linting configuration
docs-v2 includes Vale writing style linter configurations to enforce documentation writing style rules, guidelines, branding, and vocabulary terms.
Run Vale with `.ci/vale/vale.sh`:
**Advanced Vale usage:**
1. Lint specific files: `.ci/vale/vale.sh content/influxdb3/core/**/*.md`
2. Use a product config: `.ci/vale/vale.sh --config=content/influxdb/cloud-dedicated/.vale.ini content/path/`
3. Set alert level: `.ci/vale/vale.sh --minAlertLevel=error content/path/`
```sh
docker compose run -T vale --config=content/influxdb/cloud-dedicated/.vale.ini --minAlertLevel=error content/influxdb/cloud-dedicated/write-data/**/*.md
```
The output contains error-level style alerts for the Markdown content.
If a file contains style, spelling, or punctuation problems,
the Vale linter can raise one of the following alert levels:
Vale raises the following alert levels:
- **Error**:
- Problems that can cause content to render incorrectly

View File

@ -15,7 +15,7 @@ This guide covers all testing procedures for the InfluxData documentation, inclu
| ----------------------- | ----------------------------------- | ---------------------------- |
| **Code blocks** | Validate shell/Python code examples | `yarn test:codeblocks:all` |
| **Link validation** | Check internal/external links | `yarn test:links` |
| **Style linting** | Enforce writing standards | `docker compose run -T vale` |
| **Style linting** | Enforce writing standards | `.ci/vale/vale.sh` |
| **Markdown generation** | Generate LLM-friendly Markdown | `yarn build:md` |
| **E2E tests** | UI and functionality testing | `yarn test:e2e` |
@ -586,17 +586,25 @@ jobs:
Style linting uses [Vale](https://vale.sh/) to enforce documentation writing standards, branding guidelines, and vocabulary consistency.
### Setup
1. **Install Vale locally (recommended):** `brew install vale` (or see [Vale installation guide](https://vale.sh/docs/install/))
2. **Or use Docker:** The `.ci/vale/vale.sh` wrapper falls back to a pinned Docker image if `vale` isn't installed locally.
### Basic Usage
```bash
# Basic linting with Docker
docker compose run -T vale --config=content/influxdb/cloud-dedicated/.vale.ini --minAlertLevel=error content/influxdb/cloud-dedicated/write-data/**/*.md
# Lint specific files
.ci/vale/vale.sh content/influxdb3/core/**/*.md
# With product config and alert level
.ci/vale/vale.sh --config=content/influxdb/cloud-dedicated/.vale.ini --minAlertLevel=error content/influxdb/cloud-dedicated/write-data/**/*.md
```
### VS Code Integration
### VS Code IDE Integration
1. Install the [Vale VSCode](https://marketplace.visualstudio.com/items?itemName=ChrisChinchilla.vale-vscode) extension
2. Set the `Vale:Vale CLI:Path` setting to `${workspaceFolder}/node_modules/.bin/vale`
1. Install the [Vale VSCode](https://marketplace.visualstudio.com/items?itemName=ChrisChinchilla.vale-vscode) extension.
2. Set `Vale:Vale CLI:Path` to `vale` (or the full path to the binary).
### Alert Levels

View File

@ -55,7 +55,7 @@ Telegraf:
- Documentation: https://docs.influxdata.com/telegraf/v1.37/
Chronograf:
- Documentation: https://docs.influxdata.com/chronograf/v1.10/
- Documentation: https://docs.influxdata.com/chronograf/v1.11/
Kapacitor:
- Documentation: https://docs.influxdata.com/kapacitor/v1.8/

281
SPELL-CHECK.md Normal file
View File

@ -0,0 +1,281 @@
# Spell Checking Configuration Guide
This document explains the spell-checking rules and tools used in the InfluxData documentation repository.
## Overview
The docs-v2 repository uses **two complementary spell-checking tools**:
1. **Vale** - Integrated documentation spell checker (runs in pre-commit hooks)
2. **Codespell** - Lightweight code comment spell checker (recommended for CI/CD)
## Tool Comparison
| Feature | Vale | Codespell |
|---------|------|-----------|
| **Purpose** | Document spell checking | Code comment spell checking |
| **Integration** | Pre-commit hooks (Docker) | CI/CD pipeline |
| **False Positives** | Low (comprehensive filters) | Low (clear dictionary only) |
| **Customization** | YAML rules | INI config + dictionary lists |
| **Performance** | Moderate | Fast |
| **True Positive Detection** | Document-level | Code-level |
## Vale Configuration
### File: `.ci/vale/styles/InfluxDataDocs/Spelling.yml`
#### Why Code Blocks Are Included
Unlike other documentation style checkers, this configuration **intentionally includes code blocks** (`~code` is NOT excluded). This is critical because:
1. **Comments in examples** - Users copy code blocks with comments:
```bash
# Download and verify the GPG key
curl https://repos.influxdata.com/influxdata-archive.key
```
Typos in such comments become part of user documentation/scripts.
2. **Documentation strings** - Code examples may include documentation:
```python
def create_database(name):
"""This funtion creates a new database.""" # ← typo caught
pass
```
3. **Inline comments** - Shell script comments are checked:
```sh
#!/bin/bash
# Retrive configuration from server
influxctl config get
```
### Filter Patterns Explained
#### 1. camelCase and snake_case Identifiers
```regex
(?:_*[a-z]+(?:[A-Z][a-z0-9]*)+(?:[A-Z][a-zA-Z0-9]*)*|[a-z_][a-z0-9]*_[a-z0-9_]*)
```
**Why**: Prevents false positives on variable/method names while NOT matching normal prose
**Breakdown**:
- **camelCase**: `_*[a-z]+(?:[A-Z][a-z0-9]*)+(?:[A-Z][a-zA-Z0-9]*)*`
- Requires at least one uppercase letter (distinguishes `myVariable` from `provide`)
- Allows leading underscores for private variables (`_privateVar`, `__dunder__`)
- **snake_case**: `[a-z_][a-z0-9]*_[a-z0-9_]*`
- Requires at least one underscore
- Distinguishes `my_variable` from normal words
**Examples Ignored**: `myVariable`, `targetField`, `getCwd`, `_privateVar`, `my_variable`, `terminationGracePeriodSeconds`
**Examples NOT Ignored** (still checked by the spell-checker): `provide`, `database`, `variable` (normal prose)
#### 2. UPPER_CASE Constants
```regex
[A-Z_][A-Z0-9_]+
```
**Why**: Prevents false positives on environment variables and constants
**Examples Ignored**: `API_KEY`, `AWS_REGION`, `INFLUXDB_TOKEN`
**Note**: Also matches plain acronyms such as `AWS` and `API` (no underscore required) - acceptable in docs
#### 3. Version Numbers
```regex
\d+\.\d+(?:\.\d+)*
```
**Why**: Version numbers aren't words
**Examples Ignored**: `1.0`, `2.3.1`, `0.101.0`, `1.2.3.4`, `v1.2.3`
**Note**: Handles any number of version parts (2-part, 3-part, 4-part, etc.)
#### 4. Hexadecimal Values
```regex
0[xX][0-9a-fA-F]+
```
**Why**: Hex values appear in code and aren't dictionary words
**Examples Ignored**: `0xFF`, `0xDEADBEEF`, `0x1A`
#### 5. URLs and Paths
```regex
/[a-zA-Z0-9/_\-\.\{\}]+ # Paths: /api/v2/write
https?://[^\s\)\]>"]+ # Full URLs: https://docs.example.com
```
**Why**: URLs contain hyphens, slashes, and special chars
**Examples Ignored**: `/api/v2/write`, `/kapacitor/v1/`, `https://docs.influxdata.com`
#### 6. Shortcode Attributes
```regex
(?:endpoint|method|url|href|src|path)="[^"]+"
```
**Why**: Hugo shortcode attribute values often contain hyphens and special chars
**Examples Ignored**: `endpoint="https://..."`, `method="POST"`
**Future Enhancement**: Add more attributes as needed (name, value, data, etc.)
#### 7. Code Punctuation
```regex
[@#$%^&*()_+=\[\]{};:,.<>?/\\|-]+
```
**Why**: Symbols and special characters aren't words
**Examples Ignored**: `()`, `{}`, `[]`, `->`, `=>`, `|`, etc.
### Ignored Words
The configuration references two word lists:
- **`InfluxDataDocs/Terms/ignore.txt`** - Product and technical terms (non-English)
- **`InfluxDataDocs/Terms/query-functions.txt`** - InfluxQL/Flux function names
To add a word that should be ignored, edit the appropriate file.
## Codespell Configuration
### File: `.codespellrc`
#### Dictionary Choice: "clear"
**Why "clear" (not "rare" or "code")**:
- `clear` - Unambiguous spelling errors only
- Examples: "recieve" → "receive", "occured" → "occurred"
- False positive rate: ~1%
- `rare` - Includes uncommon but valid English words
- Would flag legitimate technical terms
- False positive rate: ~15-20%
- `code` - Includes code-specific words
- Too aggressive for documentation
- False positive rate: ~25-30%
#### Skip Directories
```ini
skip = public,node_modules,dist,.git,.vale,api-docs
```
- `public` - Generated HTML (not source)
- `node_modules` - npm dependencies (not our code)
- `dist` - Compiled TypeScript output (not source)
- `.git` - Repository metadata
- `.vale` - Vale configuration and cache
- `api-docs` - Generated OpenAPI specifications (many false positives)
#### Ignored Words
```ini
ignore-words-list = aks,invokable
```
- **`aks`** - Azure Kubernetes Service (acronym)
- **`invokable`** - InfluxData product branding term (scriptable tasks/queries)
**To add more**:
1. Edit `.codespellrc`
2. Add word to `ignore-words-list` (comma-separated)
3. Add inline comment explaining why
## Running Spell Checkers
### Vale (Pre-commit)
Vale automatically runs on files you commit via Lefthook.
**Manual check**:
```bash
# Check all content
docker compose run -T vale content/**/*.md
# Check specific file
docker compose run -T vale content/influxdb/cloud/reference/cli.md
```
### Codespell (Manual/CI)
```bash
# Check entire content directory
codespell content/ --builtin clear
# Check specific directory
codespell content/influxdb3/core/
# Interactive mode (prompts for fixes)
codespell content/ --builtin clear -i 3
# Auto-fix (USE WITH CAUTION)
codespell content/ --builtin clear -w
```
## Rule Validation
The spell-checking rules are designed to:
✅ Catch real spelling errors (true positives)
✅ Ignore code patterns, identifiers, and paths (false positive prevention)
✅ Respect product branding terms (invokable, Flux, InfluxQL)
✅ Work seamlessly in existing workflows
### Manual Validation
Create a test file with various patterns:
```bash
# Test camelCase handling
echo "variable myVariable is defined" | codespell
# Test version numbers
echo "InfluxDB version 2.3.1 is released" | codespell
# Test real typos (should be caught)
echo "recieve the data" | codespell
```
## Troubleshooting
### Vale: False Positives
**Problem**: Vale flags a word that should be valid
**Solutions**:
1. Check if it's a code identifier (camelCase, UPPER_CASE, hex, version)
2. Add to `InfluxDataDocs/Terms/ignore.txt` if it's a technical term
3. Add filter pattern to `.ci/vale/styles/InfluxDataDocs/Spelling.yml` if it's a pattern
### Codespell: False Positives
**Problem**: Codespell flags a legitimate term
**Solutions**:
1. Add to `ignore-words-list` in `.codespellrc`
2. Add skip directory if entire directory should be excluded
3. Use `-i 3` (interactive mode) to review before accepting
### Both Tools: Missing Real Errors
**Problem**: A real typo isn't caught
**Solutions**:
1. Verify it's actually a typo (not a branding term or intentional)
2. Check if it's in excluded scope (tables, URLs, code identifiers)
3. Report as GitHub issue for tool improvement
## Contributing
When adding content:
1. **Use semantic line feeds** (one sentence per line)
2. **Run Vale pre-commit** checks before committing
3. **Test code block comments** for typos
4. **Avoid adding to ignore lists** when possible
5. **Document why** you excluded a term (if necessary)
## Related Files
- `.ci/vale/styles/InfluxDataDocs/` - Vale rule configuration
- `.codespellrc` - Codespell configuration
- `.codespellignore` - Codespell ignore word list
- `DOCS-CONTRIBUTING.md` - General contribution guidelines
- `DOCS-TESTING.md` - Testing and validation guide
## Future Improvements
1. Create comprehensive test suite for spell-checking rules
2. Document how to add product-specific branding terms
3. Consider adding codespell to CI/CD pipeline
4. Monitor and update ignore lists quarterly

View File

@ -1145,8 +1145,29 @@ paths:
Soft deletes a database.
The database is scheduled for deletion and unavailable for querying.
Use the `hard_delete_at` parameter to schedule a hard deletion.
Use the `data_only` parameter to delete data while preserving the database schema and resources.
parameters:
- $ref: '#/components/parameters/db'
- name: data_only
in: query
required: false
schema:
type: boolean
default: false
description: |
Delete only data while preserving the database schema and all associated resources
(tokens, triggers, last value caches, distinct value caches, processing engine configurations).
When `false` (default), the entire database is deleted.
- name: remove_tables
in: query
required: false
schema:
type: boolean
default: false
description: |
Used with `data_only=true` to remove table resources (caches) while preserving
database-level resources (tokens, triggers, processing engine configurations).
Has no effect when `data_only=false`.
- name: hard_delete_at
in: query
required: false
@ -1217,6 +1238,7 @@ paths:
Soft deletes a table.
The table is scheduled for deletion and unavailable for querying.
Use the `hard_delete_at` parameter to schedule a hard deletion.
Use the `data_only` parameter to delete data while preserving the table schema and resources.
#### Deleting a table cannot be undone
@ -1229,6 +1251,16 @@ paths:
required: true
schema:
type: string
- name: data_only
in: query
required: false
schema:
type: boolean
default: false
description: |
Delete only data while preserving the table schema and all associated resources
(last value caches, distinct value caches).
When `false` (default), the entire table is deleted.
- name: hard_delete_at
in: query
required: false

View File

@ -5,7 +5,7 @@
border-radius: .6rem;
font-weight: bold;
vertical-align: top;
&.dvc {
color: #2e7d2e;
background-color: #e8f5e8;
@ -14,4 +14,8 @@
color: #1976d2;
background-color: #e3f2fd;
}
&.experimental {
color: $badge-experimental-text;
background-color: $badge-experimental-bg;
}
}

View File

@ -266,3 +266,7 @@ $influxdb-logo: url('/svgs/influxdb-logo-white.svg') !default;
// Code placeholder colors
$code-placeholder: #e659a2;
$code-placeholder-hover: $br-teal;
// Badge colors
$badge-experimental-text: $article-caution-text;
$badge-experimental-bg: $article-caution-bg;

View File

@ -265,3 +265,7 @@ $diagram-arrow: $g14-chromium !default;
// Code placeholder colors
$code-placeholder: $br-new-magenta !default;
$code-placeholder-hover: $br-new-purple !default;
// Badge colors
$badge-experimental-text: $article-caution-text !default;
$badge-experimental-bg: $article-caution-bg !default;

View File

@ -0,0 +1,17 @@
---
title: Shortcode test page
noindex: true
test_only: true
---
<!-- vale off -->
<span data-testid="product-name">{{% product-name %}}</span>
<span data-testid="product-name-short">{{% product-name "short" %}}</span>
<span data-testid="product-key">{{< product-key >}}</span>
<span data-testid="current-version">{{< current-version >}}</span>
<span data-testid="host">{{< influxdb/host >}}</span>
<span data-testid="latest-patch">{{< latest-patch >}}</span>
<span data-testid="icon-check">{{< icon "check" >}}</span>
<div data-testid="api-endpoint">{{< api-endpoint method="get" endpoint="https://{{< influxdb/host >}}/api/v2/query" >}}</div>
<span data-testid="show-in-core">{{% show-in "core" %}}VISIBLE_IN_CORE{{% /show-in %}}</span>
<span data-testid="hide-in-core">{{% hide-in "core" %}}HIDDEN_IN_CORE{{% /hide-in %}}</span>

View File

@ -8,6 +8,9 @@ menu:
chronograf_v1:
name: Chronograf
weight: 1
cascade:
product: chronograf
version: v1
---
Chronograf is InfluxData's open source web application.

View File

@ -10,6 +10,22 @@ aliases:
- /chronograf/v1/about_the_project/release-notes-changelog/
---
## v1.11.0 {date="2026-02-19"}
> [!Warning]
> Chronograf 1.11.0 removes support for Linux i386, armhf, armel, and static builds.
> It also removes support for Darwin arm64.
### Maintenance updates
- Upgrade Go to 1.24.13.
- Upgrade TypeScript to 4.9.5.
- Upgrade Node.js to v24.13.0.
### Other
- Update Flux help in the UI to align with stdlib 0.199.
## v1.10.9 {date="2026-01-07"}
### Features
@ -325,7 +341,7 @@ USE "db_name"; DROP SERIES FROM "measurement_name" WHERE "tag" = 'value'
USE "db_name"; DELETE FROM "measurement_name" WHERE "tag" = 'value' AND time < '2020-01-01'
```
- Add support for Bitbucket `emails` endpoint with generic OAuth. For more information, see [Bitbucket documentation](https://developer.atlassian.com/bitbucket/api/2/reference/resource/user/emails) and how to [configure Chronograf to authenticate with OAuth 2.0](/chronograf/v1/administration/managing-security/#configure-chronograf-to-authenticate-with-oauth-2-0).
- Add support for Bitbucket `emails` endpoint with generic OAuth. For more information, see [Bitbucket documentation](https://developer.atlassian.com/bitbucket/api/2/reference/resource/user/emails) and how to [configure Chronograf to authenticate with OAuth 2.0](/chronograf/v1/administration/managing-security/#configure-chronograf-to-authenticate-with-oauth-20).
### Bug Fixes
@ -493,9 +509,9 @@ features and bug fixes below.
If you're installing Chronograf for the first time, learn how to [create a new Chronograf HA configuration](/chronograf/v1/administration/create-high-availability/).
If you're upgrading Chronograf, learn how to [migrate your existing Chronograf configuration to HA](/chronograf/v1/administration/migrate-to-high-availability/).
- Add configuration option to [disable the Host List page](/chronograf/v1/administration/config-options/#host-page-disabled-h).
- Add configuration option to [disable the Host List page](/chronograf/v1/administration/config-options/#--host-page-disabled---h).
- Add ability to select a data source when [creating a template variable](/chronograf/v1/guides/dashboard-template-variables/#create-custom-template-variables).
- Add the `refresh` query parameter to set the dashboard auto-refresh interval (by default, 10000 milliseconds). Discover ways to [configure your dashboard](/chronograf/v1/guides/create-a-dashboard/#step-6-configure-your-dashboard).
- Add the `refresh` query parameter to set the dashboard auto-refresh interval (by default, 10000 milliseconds). Discover ways to [configure your dashboard](/chronograf/v1/guides/create-a-dashboard/#configure-dashboard-wide-settings).
### Bug Fixes
@ -1339,7 +1355,7 @@ features and bug fixes below.
* When dashboard time range is changed, reset graphs that are zoomed in
* [Bar graph](/chronograf/v1/guides/visualization-types/#bar-graph) option added to dashboard
* Redesign source management table to be more intuitive
* Redesign [Line + Single Stat](/chronograf/v1/guides/visualization-types/#line-graph-single-stat) cells to appear more like a sparkline, and improve legibility
* Redesign [Line + Single Stat](/chronograf/v1/guides/visualization-types/#line-graph--single-stat) cells to appear more like a sparkline, and improve legibility
## v1.3.2.0 {date="2017-06-05"}
@ -1375,7 +1391,7 @@ In versions 1.3.1+, installing a new version of Chronograf automatically clears
### Bug fixes
* Fix infinite spinner when `/chronograf` is a [basepath](/chronograf/v1/administration/config-options/#basepath-p)
* Fix infinite spinner when `/chronograf` is a [basepath](/chronograf/v1/administration/config-options/#--basepath---p)
* Remove the query templates dropdown from dashboard cell editor mode
* Fix the backwards sort arrows in table column headers
* Make the logout button consistent with design
@ -1404,25 +1420,25 @@ In versions 1.3.1+, installing a new version of Chronograf automatically clears
### Bug fixes
* Fix the link to home when using the [`--basepath` option](/chronograf/v1/administration/config-options/#basepath-p)
* Fix the link to home when using the [`--basepath` option](/chronograf/v1/administration/config-options/#--basepath---p)
* Remove the notification to login on the login page
* Support queries that perform math on functions
* Prevent the creation of blank template variables
* Ensure thresholds for Kapacitor Rule Alerts appear on page load
* Update the Kapacitor configuration page when the configuration changes
* Fix Authentication when using Chronograf with a set [basepath](/chronograf/v1/administration/config-options/#basepath-p)
* Fix Authentication when using Chronograf with a set [basepath](/chronograf/v1/administration/config-options/#--basepath---p)
* Show red indicator on Hosts Page for an offline host
* Support escaping from presentation mode in Safari
* Re-implement level colors on the alerts page
* Fix router bug introduced by upgrading to react-router v3.0
* Show legend on [Line+Stat](/chronograf/v1/guides/visualization-types/#line-graph-single-stat) visualization type
* Show legend on [Line+Stat](/chronograf/v1/guides/visualization-types/#line-graph--single-stat) visualization type
* Prevent queries with `:dashboardTime:` from breaking the query builder
### Features
* Add line-protocol proxy for InfluxDB/InfluxDB Enterprise Cluster data sources
* Add `:dashboardTime:` to support cell-specific time ranges on dashboards
* Add support for enabling and disabling [TICKscripts that were created outside Chronograf](/chronograf/v1/guides/advanced-kapacitor/#tickscript-management)
* Add support for enabling and disabling [TICKscripts that were created outside Chronograf](/chronograf/v1/guides/advanced-kapacitor/#manage-kapacitor-tickscripts)
* Allow users to delete Kapacitor configurations
### UI improvements

View File

@ -0,0 +1,17 @@
---
title: Shortcode test page
noindex: true
test_only: true
---
<!-- vale off -->
<span data-testid="product-name">{{% product-name %}}</span>
<span data-testid="product-name-short">{{% product-name "short" %}}</span>
<span data-testid="product-key">{{< product-key >}}</span>
<span data-testid="current-version">{{< current-version >}}</span>
<span data-testid="host">{{< influxdb/host >}}</span>
<span data-testid="latest-patch">{{< latest-patch >}}</span>
<span data-testid="icon-check">{{< icon "check" >}}</span>
<div data-testid="api-endpoint">{{< api-endpoint method="get" endpoint="https://{{< influxdb/host >}}/api/v2/query" >}}</div>
<span data-testid="show-in-core">{{% show-in "core" %}}VISIBLE_IN_CORE{{% /show-in %}}</span>
<span data-testid="hide-in-core">{{% hide-in "core" %}}HIDDEN_IN_CORE{{% /hide-in %}}</span>

View File

@ -8,6 +8,9 @@ menu:
enterprise_influxdb_v1:
name: InfluxDB Enterprise v1
weight: 1
cascade:
product: enterprise_influxdb
version: v1
---
InfluxDB Enterprise provides a time series database designed to handle high write and query loads and offers highly scalable clusters on your infrastructure with a management UI. Use for DevOps monitoring, IoT sensor data, and real-time analytics.

View File

@ -0,0 +1,17 @@
---
title: Shortcode test page
noindex: true
test_only: true
---
<!-- vale off -->
<span data-testid="product-name">{{% product-name %}}</span>
<span data-testid="product-name-short">{{% product-name "short" %}}</span>
<span data-testid="product-key">{{< product-key >}}</span>
<span data-testid="current-version">{{< current-version >}}</span>
<span data-testid="host">{{< influxdb/host >}}</span>
<span data-testid="latest-patch">{{< latest-patch >}}</span>
<span data-testid="icon-check">{{< icon "check" >}}</span>
<div data-testid="api-endpoint">{{< api-endpoint method="get" endpoint="https://{{< influxdb/host >}}/api/v2/query" >}}</div>
<span data-testid="show-in-core">{{% show-in "core" %}}VISIBLE_IN_CORE{{% /show-in %}}</span>
<span data-testid="hide-in-core">{{% hide-in "core" %}}HIDDEN_IN_CORE{{% /hide-in %}}</span>

View File

@ -11,6 +11,9 @@ aliases:
- /influxdb/v2/reference/flux/
- /influxdb/v2/reference/flux/
- /influxdb/cloud/reference/flux/
cascade:
product: flux
version: v0
---
Flux is an open source functional data scripting language designed for querying,

View File

@ -0,0 +1,21 @@
---
title: Shortcode test page
noindex: true
test_only: true
---
<!-- vale off -->
<span data-testid="product-name">{{% product-name %}}</span>
<span data-testid="product-name-short">{{% product-name "short" %}}</span>
<span data-testid="product-key">{{< product-key >}}</span>
<span data-testid="current-version">{{< current-version >}}</span>
<span data-testid="host">{{< influxdb/host >}}</span>
<span data-testid="latest-patch">{{< latest-patch >}}</span>
<span data-testid="latest-patch-cli">{{< latest-patch cli=true >}}</span>
<span data-testid="icon-check">{{< icon "check" >}}</span>
<div data-testid="api-endpoint">{{< api-endpoint method="get" endpoint="https://{{< influxdb/host >}}/api/v2/query" >}}</div>
<span data-testid="show-in-core">{{% show-in "core" %}}VISIBLE_IN_CORE{{% /show-in %}}</span>
<span data-testid="hide-in-core">{{% hide-in "core" %}}HIDDEN_IN_CORE{{% /hide-in %}}</span>
<div data-testid="influx-creds-note">{{< cli/influx-creds-note >}}</div>
<div data-testid="release-toc">{{< release-toc >}}</div>
<div data-testid="points-series-flux">{{< influxdb/points-series-flux >}}</div>

View File

@ -8,10 +8,13 @@ menu:
influxdb_cloud:
name: InfluxDB Cloud
weight: 1
cascade:
product: influxdb_cloud
version: cloud
---
#### Welcome
Welcome to the InfluxDB v2.0 documentation!
Welcome to the InfluxDB v2.0 documentation.
InfluxDB is an open source time series database designed to handle high write and query workloads.
This documentation is meant to help you learn how to use and leverage InfluxDB to meet your needs.

View File

@ -106,7 +106,7 @@ downsamples it, and then sends it to an output topic which is later written back
```
2. Configure the Quix Streams built-in windowing function to create a tumbling
window that continously downsamples the data into 1-minute buckets.
window that continuously downsamples the data into 1-minute buckets.
```py
# ...

View File

@ -0,0 +1,17 @@
---
title: Shortcode test page
noindex: true
test_only: true
---
<!-- vale off -->
<span data-testid="product-name">{{% product-name %}}</span>
<span data-testid="product-name-short">{{% product-name "short" %}}</span>
<span data-testid="product-key">{{< product-key >}}</span>
<span data-testid="current-version">{{< current-version >}}</span>
<span data-testid="host">{{< influxdb/host >}}</span>
<span data-testid="latest-patch">{{< latest-patch >}}</span>
<span data-testid="icon-check">{{< icon "check" >}}</span>
<div data-testid="api-endpoint">{{< api-endpoint method="get" endpoint="https://{{< influxdb/host >}}/api/v2/query" >}}</div>
<span data-testid="show-in-core">{{% show-in "core" %}}VISIBLE_IN_CORE{{% /show-in %}}</span>
<span data-testid="hide-in-core">{{% hide-in "core" %}}HIDDEN_IN_CORE{{% /hide-in %}}</span>

View File

@ -5,6 +5,9 @@ menu:
influxdb_v1:
name: InfluxDB OSS v1
weight: 1
cascade:
product: influxdb
version: v1
---
InfluxDB is a [time series database](https://www.influxdata.com/time-series-database/) designed to handle high write and query loads.

View File

@ -0,0 +1,21 @@
---
title: Shortcode test page
noindex: true
test_only: true
---
<!-- vale off -->
<span data-testid="product-name">{{% product-name %}}</span>
<span data-testid="product-name-short">{{% product-name "short" %}}</span>
<span data-testid="product-key">{{< product-key >}}</span>
<span data-testid="current-version">{{< current-version >}}</span>
<span data-testid="host">{{< influxdb/host >}}</span>
<span data-testid="latest-patch">{{< latest-patch >}}</span>
<span data-testid="latest-patch-cli">{{< latest-patch cli=true >}}</span>
<span data-testid="icon-check">{{< icon "check" >}}</span>
<div data-testid="api-endpoint">{{< api-endpoint method="get" endpoint="https://{{< influxdb/host >}}/api/v2/query" >}}</div>
<span data-testid="show-in-core">{{% show-in "core" %}}VISIBLE_IN_CORE{{% /show-in %}}</span>
<span data-testid="hide-in-core">{{% hide-in "core" %}}HIDDEN_IN_CORE{{% /hide-in %}}</span>
<div data-testid="influx-creds-note">{{< cli/influx-creds-note >}}</div>
<div data-testid="release-toc">{{< release-toc >}}</div>
<div data-testid="points-series-flux">{{< influxdb/points-series-flux >}}</div>

View File

@ -8,11 +8,14 @@ menu:
influxdb_v2:
name: InfluxDB OSS v2
weight: 1
cascade:
product: influxdb
version: v2
---
#### Welcome
Welcome to the InfluxDB OSS v2 documentation!
Welcome to the InfluxDB OSS v2 documentation.
InfluxDB is an open source time series database designed to handle high write and query workloads.
This documentation is meant to help you learn how to use and leverage InfluxDB to meet your needs.

View File

@ -3476,7 +3476,7 @@ Enable storing hashed API tokens on disk. Hashed tokens are disabled by default
Storing hashed tokens increases security by storing API tokens as hashes on disk. When enabled, all unhashed tokens are converted to hashed tokens on every startup leaving no unhashed tokens on disk. Newly created tokens are also stored as hashes. Lost tokens must be replaced when token hashing is enabled because the hashing prevents them from being recovered.
If token hashing is disabled after being enabled, any hashed tokens on disk remain as hashed tokens. Newly created tokens are stored unhashed when token hashing is disabled. Hashed tokens on disk remain valid and useable even with token hashing disabled.
If token hashing is disabled after being enabled, any hashed tokens on disk remain as hashed tokens. Newly created tokens are stored unhashed when token hashing is disabled. Hashed tokens on disk remain valid and usable even with token hashing disabled.
Hashed token support is available in versions 2.8.0 and newer. Downgrading to older versions is not recommended after enabling hashed tokens because the downgrade process deletes all stored hashed tokens. All hashed tokens must be replaced on a downgrade after hashed tokens are enabled.

View File

@ -98,7 +98,7 @@ downsamples it, and then sends it to an output topic which is later written back
```
2. Configure the Quix Streams built-in windowing function to create a tumbling
window that continously downsamples the data into 1-minute buckets.
window that continuously downsamples the data into 1-minute buckets.
```py
# ...

View File

@ -0,0 +1,31 @@
---
title: Shortcode test page
noindex: true
test_only: true
---
<!-- vale off -->
<span data-testid="product-name">{{% product-name %}}</span>
<span data-testid="product-name-short">{{% product-name "short" %}}</span>
<span data-testid="product-key">{{< product-key >}}</span>
<span data-testid="current-version">{{< current-version >}}</span>
<span data-testid="host">{{< influxdb/host >}}</span>
<span data-testid="home-sample-link">{{< influxdb3/home-sample-link >}}</span>
<span data-testid="latest-patch">{{< latest-patch >}}</span>
<span data-testid="icon-check">{{< icon "check" >}}</span>
<div data-testid="api-endpoint">{{< api-endpoint method="get" endpoint="https://{{< influxdb/host >}}/api/v2/query" >}}</div>
<span data-testid="show-in-core">{{% show-in "core" %}}VISIBLE_IN_CORE{{% /show-in %}}</span>
<span data-testid="hide-in-core">{{% hide-in "core" %}}HIDDEN_IN_CORE{{% /hide-in %}}</span>
<span data-testid="cta-link">{{< cta-link >}}</span>
<div data-testid="sql-schema-intro">
{{% sql/sql-schema-intro %}}
</div>
<div data-testid="v1-v3-data-model-note">
{{% influxql/v1-v3-data-model-note %}}
</div>

View File

@ -10,6 +10,9 @@ menu:
influxdb3_cloud_dedicated:
name: InfluxDB Cloud Dedicated
weight: 1
cascade:
product: influxdb3_cloud_dedicated
version: cloud-dedicated
---
InfluxDB Cloud Dedicated is a hosted and managed InfluxDB Cloud cluster

View File

@ -0,0 +1,93 @@
---
title: Enable autoscaling
seotitle: Configure autoscaling for InfluxDB Cloud Dedicated
description: >
Learn how autoscaling works in InfluxDB Cloud Dedicated and how to
enable and configure autoscaling limits for your clusters.
menu:
influxdb3_cloud_dedicated:
parent: Administer InfluxDB Cloud
weight: 106
influxdb3/cloud-dedicated/tags: [admin, autoscaling, performance]
---
Enable autoscaling to automatically adjust your {{% product-name %}} cluster capacity in response to workload demand.
Autoscaling helps protect performance during spikes while minimizing manual intervention and over-provisioning.
- [What is autoscaling](#what-is-autoscaling)
- [How autoscaling works](#how-autoscaling-works)
- [Scaling and billing](#scaling-and-billing)
- [Enable autoscaling for a cluster](#enable-autoscaling-for-a-cluster)
- [Update or disable autoscaling](#update-or-disable-autoscaling)
- [Monitor autoscaling behavior](#monitor-autoscaling-behavior)
## What is autoscaling
Autoscaling for {{% product-name %}} automatically scales cluster components based on workload demand.
Clusters scale up from a minimum committed size to upper limits that you define, and scale back toward the baseline when demand decreases.
With autoscaling, you can:
- **Improve performance**: Scale up automatically during peak loads to maintain ingest and query performance.
- **Increase cost efficiency**: Scale down to your baseline commitment during periods of low demand to reduce infrastructure costs.
- **Simplify operations**: Reduce manual interventions needed to resize clusters as workloads change.
Autoscaling is generally available for {{% product-name %}} clusters.
## How autoscaling works
Autoscaling for {{% product-name %}} uses Kubernetes autoscaling under the hood and supports independent scaling of cluster components.
In particular, ingest and query components can scale separately based on their respective workloads.
### At a high level
- You have a **baseline configuration** that defines your committed cluster size.
- You select **upper autoscaling limits** for key components (for example, querier and ingester CPU).
- When workload demand increases and resource utilization exceeds thresholds, autoscaling increases resources for the affected components, up to the configured limits.
- When demand drops and capacity is no longer required, autoscaling gradually scales components back toward the baseline.
- Scaling can be granular, adding as few CPUs and as little memory as needed; both CPU and memory are used to determine when and how to scale.
Autoscaling does not change other aspects of your contract, such as data retention or feature availability.
Your {{% product-name %}} representative or support team will confirm appropriate limits for each cluster.
### Scaling and billing
Scaling occurs only when your workload requires it.
While the cluster runs at or below the baseline configuration, usage is covered by your existing commitment.
> [!Important]
> If autoscaling increases resources above the baseline, you may incur **additional usage charges** beyond your committed spend, in accordance with your agreement.
> Work with your Account Executive to choose limits that balance performance goals and cost expectations.
## Enable autoscaling for a cluster
[Contact InfluxData support](https://support.influxdata.com) to enable autoscaling for your cluster.
Provide your autoscaling requirements so the support team can configure appropriate limits.
## Update or disable autoscaling
Autoscaling settings can be adjusted at any time after enablement.
For example, you might raise the upper limit for querier CPU, lower the limit for ingester CPU, or turn autoscaling off entirely.
To update or disable autoscaling for a cluster, [contact InfluxData support](https://support.influxdata.com) and provide:
- Cloud Dedicated [account ID](/influxdb3/cloud-dedicated/admin/account/) and [cluster ID](/influxdb3/cloud-dedicated/admin/clusters/).
- Whether you want to:
- Change autoscaling limits for querier and ingester components, or
- Disable autoscaling for the cluster.
- Any relevant workload or performance context (for example, new peak load patterns).
## Monitor autoscaling behavior
If autoscaling is enabled, you can view the configured limits in the cluster card on the Admin UI **Overview** page.
<!-- vale Vale.Terms = NO -->
{{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-autoscaling.png" alt="Autoscaling enabled for cluster in Admin UI Cluster Overview page" />}}
<!-- vale Vale.Terms = YES -->
After autoscaling is enabled, monitor cluster performance and capacity to understand how and when scaling occurs.
Use the Admin UI **Overview** page to monitor CPU allocation, component CPU distribution, and cluster metrics.
For more information, see [Monitor your cluster](/influxdb3/cloud-dedicated/admin/monitor-your-cluster/).
If you see sustained utilization near your autoscaling limits or frequent scaling events during normal workloads, contact your Account Executive or support team to review and adjust limits.

View File

@ -0,0 +1,154 @@
---
title: View the query log
description: >
View and analyze queries executed on your cluster using the Admin UI Query History
or by querying the _internal database with influxctl.
menu:
influxdb3_cloud_dedicated:
name: View the query log
parent: Troubleshoot and optimize queries
weight: 351
influxdb3/cloud-dedicated/tags: [query, observability, admin]
related:
- /influxdb3/cloud-dedicated/reference/cli/influxctl/query/
- /influxdb3/cloud-dedicated/admin/account/
- /influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/system-information/
---
The query log records queries executed on your {{% product-name %}} cluster.
Use it to monitor query performance, find slow-running queries, and troubleshoot failed executions.
> [!Note]
> #### Query logging is not enabled by default
>
> The query log is disabled by default on all clusters because it generates additional ingest and storage overhead and is intended primarily for troubleshooting, not continuous monitoring.
> To enable it for your cluster, [contact InfluxData support](https://support.influxdata.com).
Use the Admin UI or the [`influxctl query` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/query/) to view the query log.
{{< tabs-wrapper >}}
{{% tabs %}}
[Admin UI](#)
[influxctl](#)
{{% /tabs %}}
{{% tab-content %}}
{{< admin-ui-access >}}
3. Open the cluster you want to inspect and go to **Query History**.
If query logging is enabled for your cluster, any admin user can access the query log in the Admin UI automatically; no database token is required.
In Query History you can:
- **Search** by Database Token ID to see queries run with a specific token.
- **Filter** by:
- **Status** (for example, success, failure)
- **Database**
- **Query type** (for example, SQL, InfluxQL)
- **Source** (for example, User Queries, UI)
- **Time range** (for example, last 24 hours)
The table lists each query with its status, query text, database, query type, duration, and timestamp.
You can use the column headers to sort (for example by duration or time).
<!-- vale Vale.Terms = NO -->
{{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-query-log-list-view.png" alt="Query History list view in the Admin UI with search, filters, and table" />}}
<!-- vale Vale.Terms = YES -->
You can also expand a row to see more details about that execution.
<!-- vale Vale.Terms = NO -->
{{< img-hd src="/img/influxdb3/cloud-dedicated-admin-ui-query-log-detail-view.png" alt="Query History detail view in the Admin UI" />}}
<!-- vale Vale.Terms = YES -->
{{% /tab-content %}}
{{% tab-content %}}
<!------------------------------- BEGIN INFLUXCTL ----------------------------->
Use the [`influxctl query` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/query/)
to run SQL against the `_internal` database and `query_log` table.
Query log entries are stored in the `_internal` database.
1. If you haven't already, [download and install the `influxctl` CLI](/influxdb3/cloud-dedicated/reference/cli/influxctl/#download-and-install-influxctl), and then [configure an `influxctl` connection profile](/influxdb3/cloud-dedicated/reference/cli/influxctl/#configure-connection-profiles) for your cluster.
2. [Create a database token](/influxdb3/cloud-dedicated/admin/tokens/database/create/?t=influxctl) that has read access to the `_internal` database.
Replace {{% code-placeholder-key %}}`DATABASE_TOKEN`{{% /code-placeholder-key %}} in the examples below with your {{% token-link "database" %}}.
3. Run the **query** subcommand with `--database` and `--language` (and optionally `--config`).
Global flags such as `--config` must come before the command; query flags such as `--database`, `--language`, and `--token` must come after `query`.
#### Examples
**List recent successful queries with compute duration above a threshold (for example, 0.6 ms):**
```sh { placeholders="DATABASE_TOKEN" }
influxctl query \
--token DATABASE_TOKEN \
--database _internal \
--language sql \
'SELECT * FROM query_log WHERE success = '\''true'\'' AND compute_duration_ns > 600000 LIMIT 10'
```
**Filter by namespace (database) and time range:**
```sh { placeholders="DATABASE_TOKEN" }
influxctl query \
--token DATABASE_TOKEN \
--database _internal \
--language sql \
'SELECT * FROM query_log WHERE namespace_name = '\''my_database'\'' AND time >= now() - INTERVAL '\''1 day'\'' LIMIT 50'
```
**Example output:**
```
| auth_id | compute_duration_ns | phase | query_type | query_text | success | time |
|----------------|---------------------|---------|------------|---------------------------------------------------------|---------|--------------------------|
| token-id-xxxx | 2314333 | success | sql | SELECT * FROM query_log WHERE success = 'true' AND ... | true | 2026-02-25T00:30:30Z |
| token-id-yyyy | 3673637621 | success | sql | SELECT * FROM my_measurement WHERE time > now() - ... | true | 2026-02-25T00:28:57Z |
| token-id-yyyy | 1443145654 | success | sql | SELECT COUNT(*) FROM query_log WHERE ... | true | 2026-02-25T00:29:02Z |
+----------------+---------------------+---------+------------+---------------------------------------------------------+---------+--------------------------+
```
<!-------------------------------- END INFLUXCTL ------------------------------>
{{% /tab-content %}}
{{< /tabs-wrapper >}}
## Query log data and columns
The `query_log` table in `_internal` includes the following columns:
| Column | Data type | Description |
| :----- | :-------- | :---------- |
| **time** | timestamp | Timestamp when the query log entry was recorded |
| **id** | string | Unique identifier for the query |
| **namespace_id** | string | Internal identifier for the database |
| **namespace_name** | string | Name of the database where the query was executed |
| **query_type** | string | Type of query syntax used (`sql`, `influxql`) |
| **query_text** | string | The actual query statement text |
| **query_params** | string | Query parameters (if applicable) |
| **auth_id** | string | Database token ID used to authenticate the query |
| **trace_id** | string | Trace ID for debugging and monitoring |
| **success** | string | Query execution status (`'true'` or `'false'` as string) |
| **running** | string | Indicates if query is currently running (`'true'` or `'false'` as string) |
| **phase** | string | Current query phase (for example, `received`, `planned`, `permit`, `success`, `fail`, `cancel`) |
| **query_issue_time_ns** | int64 | Time when the query was issued (nanoseconds) |
| **permit_duration_ns** | int64 | Time spent waiting for query permit (nanoseconds) |
| **plan_duration_ns** | int64 | Time spent planning the query (nanoseconds) |
| **execute_duration_ns** | int64 | Time spent executing the query (nanoseconds) |
| **end_to_end_duration_ns** | int64 | Total end-to-end query duration (nanoseconds) |
| **compute_duration_ns** | int64 | Compute time for the query (nanoseconds) |
| **partition_count** | int64 | Number of partitions accessed |
| **parquet_file_count** | int64 | Number of Parquet files read |
| **max_memory_bytes** | int64 | Maximum memory used during query execution (bytes) |
> [!Note]
> #### Use string literals for status columns
>
> In the `query_log` table, `success` and `running` are stored as strings (`'true'` or `'false'`), not booleans.
> In SQL predicates, use string comparison—for example, `success = 'true'` or `running = 'false'`—to avoid type coercion errors.

View File

@ -11,30 +11,42 @@ menu:
weight: 202
---
## July 2025 Product Highlights
## January 2026 Product Highlights
### New Features
- **Cluster storage observability:** Improve visibility into storage with new live storage usage views.
- View total live database storage in the **Storage used** field on the **Cluster Details** card on the **Overview** page.
- Track storage usage over time in the storage usage dashboard on the **Overview** page.
- Sort live database sizes by size on the **Databases** page.
- **Query request rate dashboard:** Monitor query request success and error rates (grouped by error type) in the **Query request rate** dashboard.
- **Query log UI:** Now generally available. After you enable **query logging**, use the UI to monitor query performance, find slow-running queries, and troubleshoot failed executions. For details, see [View the query log](/influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-log/).
## 2025 Product Highlights
### New Features
- **`influxctl` Database Management:** You can now use `influxctl` to delete, undelete, and rename databases. For complete details, see [Manage databases](/influxdb3/cloud-dedicated/admin/databases/) and the [`influxctl database` command reference](/influxdb3/cloud-dedicated/reference/cli/influxctl/database/).
- **`influxctl` Table Deletion:** We've also added `influxctl` support for deleting tables. For more information, see [Delete a table](/influxdb3/cloud-dedicated/admin/tables/delete/) and the [`influxctl table delete` command reference](/influxdb3/cloud-dedicated/reference/cli/influxctl/table/delete/).
- **`influxctl` table deletion and management:** Delete, undelete, and rename tables with `influxctl`. For details, see [Delete a table](/influxdb3/cloud-dedicated/admin/tables/delete/) and the [`influxctl table delete` command reference](/influxdb3/cloud-dedicated/reference/cli/influxctl/table/delete/).
- **Query logs:** Access query logs as an InfluxDB table (`_internal.query_log`). For details, see [View the query log](/influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-log/).
### User Interface (UI) Enhancements
- **Simplified User Management:** The UI now includes a _users page_ lets you manage existing users and invite new users.
- **Simplified User Management:** Invite and manage users via **Admin UI > Users**. For details, see [Manage users in the Admin UI](/influxdb3/cloud-dedicated/admin/users/admin-ui/).
- **Component-Based Cluster Sizing:** Cluster sizing information has been revamped to better show cluster components and offer a clearer understanding of resource allocation and usage.
- **Schema browser:** View table schemas (including column names and data types) in the Admin UI. For details, see [List tables](/influxdb3/cloud-dedicated/admin/tables/list/).
- **Embedded observability dashboards:** Use embedded dashboards on the **Overview** page to monitor component-level and aggregated cluster metrics. For details, see [Monitor your cluster](/influxdb3/cloud-dedicated/admin/monitor-your-cluster/).
### Reliability
- **Deployment Pipeline Improvements:** We've enhanced our deployment pipeline to be more reliable and minimize downtime.
- **Autoscaling Private Preview:** _Autoscaling functionality_ is entering Private Preview this July.
- **Grafana Upgrade:** Grafana has been upgraded to address a recent [CVE](https://grafana.com/blog/2025/07/02/grafana-security-update-critical-severity-security-release-for-cve-2025-5959-cve-2025-6554-cve-2025-6191-and-cve-2025-6192-in-grafana-image-renderer-plugin-and-synthetic-monitoring-agent/).
### Connectors
- **`influxctl` Iceberg Integration:** For customers with Iceberg enabled, you can now use `influxctl` to enable the _Iceberg integration on specific tables_.
- **AWS Glue and Athena Iceberg Integration Private Preview:** _AWS Glue and Athena support for Iceberg integration_ is entering Private Preview this July.
- **Deployment pipeline improvements:** Increase deployment reliability and minimize downtime.
- **Autoscaling (generally available):** Enable autoscaling to maintain performance and reliability during traffic spikes.
### Performance Improvements
- **New Disk Caching:** Customers will experience improved performance thanks to new disk caching capabilities.
- **Storage API performance improvements:** Reduce latency for storage APIs, including faster responses for database size and table size queries.

View File

@ -0,0 +1,18 @@
---
title: Shortcode test page
noindex: true
test_only: true
---
<!-- vale off -->
<span data-testid="product-name">{{% product-name %}}</span>
<span data-testid="product-name-short">{{% product-name "short" %}}</span>
<span data-testid="product-key">{{< product-key >}}</span>
<span data-testid="current-version">{{< current-version >}}</span>
<span data-testid="host">{{< influxdb/host >}}</span>
<span data-testid="home-sample-link">{{< influxdb3/home-sample-link >}}</span>
<span data-testid="latest-patch">{{< latest-patch >}}</span>
<span data-testid="icon-check">{{< icon "check" >}}</span>
<div data-testid="api-endpoint">{{< api-endpoint method="get" endpoint="https://{{< influxdb/host >}}/api/v2/query" >}}</div>
<span data-testid="show-in-core">{{% show-in "core" %}}VISIBLE_IN_CORE{{% /show-in %}}</span>
<span data-testid="hide-in-core">{{% hide-in "core" %}}HIDDEN_IN_CORE{{% /hide-in %}}</span>

View File

@ -9,6 +9,9 @@ menu:
influxdb3_cloud_serverless:
name: InfluxDB Cloud Serverless
weight: 1
cascade:
product: influxdb3_cloud_serverless
version: cloud-serverless
---
> [!Note]

View File

@ -0,0 +1,19 @@
---
title: Shortcode test page
noindex: true
test_only: true
---
<!-- vale off -->
<span data-testid="product-name">{{% product-name %}}</span>
<span data-testid="product-name-short">{{% product-name "short" %}}</span>
<span data-testid="product-key">{{< product-key >}}</span>
<span data-testid="current-version">{{< current-version >}}</span>
<span data-testid="host">{{< influxdb/host >}}</span>
<span data-testid="home-sample-link">{{< influxdb3/home-sample-link >}}</span>
<span data-testid="latest-patch">{{< latest-patch >}}</span>
<span data-testid="icon-check">{{< icon "check" >}}</span>
<div data-testid="api-endpoint">{{< api-endpoint method="get" endpoint="https://{{< influxdb/host >}}/api/v2/query" >}}</div>
<span data-testid="show-in-core">{{% show-in "core" %}}VISIBLE_IN_CORE{{% /show-in %}}</span>
<span data-testid="hide-in-core">{{% hide-in "core" %}}HIDDEN_IN_CORE{{% /hide-in %}}</span>
<span data-testid="cta-link">{{< cta-link >}}</span>

View File

@ -10,6 +10,9 @@ menu:
influxdb3_clustered:
name: InfluxDB Clustered
weight: 1
cascade:
product: influxdb3_clustered
version: clustered
---
InfluxDB Clustered is a highly available InfluxDB 3 cluster hosted and

View File

@ -837,7 +837,7 @@ InfluxDB Clustered.
#### Deployment
- Ingesters now have a `terminationGracePeriodSeconds` value of `600` to provid
- Ingesters now have a `terminationGracePeriodSeconds` value of `600` to provide
enough time to persist all buffered data.
#### Database engine
@ -1534,7 +1534,7 @@ Support for custom certificates has been implemented since version
[20230912-619813](#20230912-619813).
Unfortunately, due to a bug, our Object store client didn't use the custom certificates.
This release fixes that so you can use the existing configuration for custom
certificates to also specify the certificate and certficate authority used by
certificates to also specify the certificate and certificate authority used by
your object store.
#### Resource limits
@ -1637,7 +1637,7 @@ Otherwise, no changes are necessary.
#### Database engine
- Catalog cache convergence improvements.
- Retry after out of memeory (OOM) errors.
- Retry after out of memory (OOM) errors.
---
@ -1682,7 +1682,7 @@ spec:
#### Updated Azure AD documentation
The `Appendix` / `Configuring Identity Provider` / `Azure` section of the
"Geting started" documentation has been updated:
"Getting started" documentation has been updated:
```diff
- https://login.microsoftonline.com/{AZURE_TENANT_ID}/.well-known/openid-configuration

View File

@ -0,0 +1,33 @@
---
title: Shortcode test page
noindex: true
test_only: true
---
<!-- vale off -->
<span data-testid="product-name">{{% product-name %}}</span>
<span data-testid="product-name-short">{{% product-name "short" %}}</span>
<span data-testid="product-key">{{< product-key >}}</span>
<span data-testid="current-version">{{< current-version >}}</span>
<span data-testid="host">{{< influxdb/host >}}</span>
<span data-testid="home-sample-link">{{< influxdb3/home-sample-link >}}</span>
<span data-testid="limit-database">{{% influxdb3/limit "database" %}}</span>
<span data-testid="latest-patch">{{< latest-patch >}}</span>
<span data-testid="icon-check">{{< icon "check" >}}</span>
<div data-testid="api-endpoint">{{< api-endpoint method="get" endpoint="https://{{< influxdb/host >}}/api/v2/query" >}}</div>
<span data-testid="show-in-core">{{% show-in "core" %}}VISIBLE_IN_CORE{{% /show-in %}}</span>
<span data-testid="hide-in-core">{{% hide-in "core" %}}HIDDEN_IN_CORE{{% /hide-in %}}</span>
<span data-testid="token-link">{{% token-link %}}</span>
<span data-testid="token-link-database">{{% token-link "database" %}}</span>
<div data-testid="sql-schema-intro">
{{% sql/sql-schema-intro %}}
</div>
<div data-testid="v1-v3-data-model-note">
{{% influxql/v1-v3-data-model-note %}}
</div>

View File

@ -1,5 +1,5 @@
---
title: InfluxDB 3 Core documentation
title: InfluxDB 3 Core documentation
description: >
InfluxDB 3 Core is an open source time series database designed and optimized
for real-time and recent data.
@ -10,6 +10,9 @@ menu:
name: InfluxDB 3 Core
weight: 1
source: /shared/influxdb3/_index.md
cascade:
product: influxdb3_core
version: core
---
<!--

View File

@ -0,0 +1,21 @@
---
title: Shortcode test page
noindex: true
test_only: true
---
<!-- vale off -->
<span data-testid="product-name">{{% product-name %}}</span>
<span data-testid="product-name-short">{{% product-name "short" %}}</span>
<span data-testid="product-key">{{< product-key >}}</span>
<span data-testid="current-version">{{< current-version >}}</span>
<span data-testid="host">{{< influxdb/host >}}</span>
<span data-testid="home-sample-link">{{< influxdb3/home-sample-link >}}</span>
<span data-testid="limit-database">{{% influxdb3/limit "database" %}}</span>
<span data-testid="latest-patch">{{< latest-patch >}}</span>
<span data-testid="icon-check">{{< icon "check" >}}</span>
<div data-testid="api-endpoint">{{< api-endpoint method="get" endpoint="https://{{< influxdb/host >}}/api/v2/query" >}}</div>
<span data-testid="show-in-core">{{% show-in "core" %}}VISIBLE_IN_CORE{{% /show-in %}}</span>
<span data-testid="hide-in-core">{{% hide-in "core" %}}HIDDEN_IN_CORE{{% /hide-in %}}</span>
<span data-testid="token-link">{{% token-link %}}</span>
<span data-testid="token-link-database">{{% token-link "database" %}}</span>

View File

@ -1,15 +1,18 @@
---
title: InfluxDB 3 Enterprise documentation
title: InfluxDB 3 Enterprise documentation
description: >
InfluxDB 3 Enterprise is a time series database built on InfluxDB 3 Core open source.
It is designed to handle high write and query loads using a diskless architecture
that scales horizontally. Learn how to use and leverage InfluxDB in use cases such as
It is designed to handle high write and query loads using a diskless architecture
that scales horizontally. Learn how to use and leverage InfluxDB in use cases such as
monitoring metrics, IoT data, and events.
menu:
influxdb3_enterprise:
name: InfluxDB 3 Enterprise
weight: 1
source: /shared/influxdb3/_index.md
cascade:
product: influxdb3_enterprise
version: enterprise
---
<!--

View File

@ -233,8 +233,25 @@ You can adjust compaction strategies to balance performance and resource usage:
## Configure process nodes
Process nodes handle data transformations and processing plugins.
Setting `--plugin-dir` automatically adds `process` mode to any node, so you don't need to explicitly set `--mode=process`.
If you do set `--mode=process`, you must also set `--plugin-dir`.
### Processing node (16 cores)
### Enable the Processing Engine on any node
```bash
influxdb3 \
--num-io-threads=4 \
serve \
--num-cores=16 \
--datafusion-num-threads=12 \
--plugin-dir=/path/to/plugins \
--node-id=hybrid-01 \
--cluster-id=prod-cluster
```
### Dedicated process-only node (16 cores)
To create a node that only handles processing (no ingest, query, or compaction), set `--mode=process`:
```bash
influxdb3 \

View File

@ -153,7 +153,6 @@ influxdb3 serve [OPTIONS]
| | `--traces-jaeger-debug-name` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-jaeger-debug-name)_ |
| | `--traces-jaeger-max-msgs-per-second` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-jaeger-max-msgs-per-second)_ |
| | `--traces-jaeger-tags` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-jaeger-tags)_ |
| | `--use-pacha-tree` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#use-pacha-tree)_ |
| | `--virtual-env-location` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#virtual-env-location)_ |
| | `--wait-for-running-ingestor` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wait-for-running-ingestor)_ |
| | `--wal-flush-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wal-flush-interval)_ |

View File

@ -0,0 +1,18 @@
---
title: Shortcode test page
noindex: true
test_only: true
---
<!-- vale off -->
<span data-testid="product-name">{{% product-name %}}</span>
<span data-testid="product-name-short">{{% product-name "short" %}}</span>
<span data-testid="product-key">{{< product-key >}}</span>
<span data-testid="current-version">{{< current-version >}}</span>
<span data-testid="host">{{< influxdb/host >}}</span>
<span data-testid="home-sample-link">{{< influxdb3/home-sample-link >}}</span>
<span data-testid="latest-patch">{{< latest-patch >}}</span>
<span data-testid="icon-check">{{< icon "check" >}}</span>
<div data-testid="api-endpoint">{{< api-endpoint method="get" endpoint="https://{{< influxdb/host >}}/api/v2/query" >}}</div>
<span data-testid="show-in-core">{{% show-in "core" %}}VISIBLE_IN_CORE{{% /show-in %}}</span>
<span data-testid="hide-in-core">{{% hide-in "core" %}}HIDDEN_IN_CORE{{% /hide-in %}}</span>

View File

@ -6,6 +6,9 @@ menu:
influxdb3_explorer:
name: InfluxDB 3 Explorer
weight: 1
cascade:
product: influxdb3_explorer
version: explorer
---
InfluxDB 3 Explorer is the standalone web application designed for visualizing, querying, and managing your data stored in InfluxDB 3 Core and Enterprise.

View File

@ -6,14 +6,217 @@ menu:
influxdb3_explorer:
name: Release notes
weight: 250
related:
- /influxdb3/explorer/
---
## v1.6.2 {date="2025-01-15"}
This release includes important fixes and improvements for Ask AI and more.
To upgrade, pull the latest Docker image:
```sh
docker pull influxdata/influxdb3-ui
```
## v1.6.3 {date="2026-02-19"}
#### Features
- **Show deleted databases**: Toggle visibility of deleted databases in the database list and data explorer.
- **Upgrade information for Core users**: View Enterprise upgrade details directly in Explorer.
- **AI model updates**: Updated AI model support for latest Anthropic models.
#### Bug fixes
- **SQL**: Fix handling of table names containing dashes and improve quoted identifier validation.
- **SQL**: Improve validation for forbidden SQL keywords in queries.
- **Charts**: Fix date display in the DataChart component.
- **Schema**: Fix schema columns mapping.
- **Security**: Update dependency versions (axios, qs, react-router, lodash-es).
## v1.6.2 {date="2026-01-14"}
#### Bug fixes
- **Ask AI**: Fix Ask AI service proxy routing over the InfluxData endpoint.
## v1.6.1 {date="2026-01-09"}
#### Bug fixes
- **Charts**: Fix date display in the chart component.
- **Forms**: Fix validation logic for form inputs.
## v1.6.0 {date="2025-12-18"}
_Released alongside [InfluxDB 3.8](/influxdb3/core/release-notes/#v380)._
#### Features
- **Ask AI custom instructions**: Teach Ask AI your naming conventions, specify which measurements or tags matter most, and define how you want results formatted.
Custom instructions persist across sessions, users, and shared environments.
- **Improved line protocol experience**: Clearer validation and more helpful feedback when writing data using line protocol.
#### Bug fixes
- **Plugins**: Fix plugin trigger error state not clearing after a successful run.
- **Charts**: Reduce unnecessary chart re-renders for improved performance.
- **Data import**: Fix error message for file limit to clarify upgrade options.
## v1.5.2 {date="2025-12-10"}
Maintenance release with internal improvements.
## v1.5.1 {date="2025-12-03"}
#### Features
- **Timestamp precision detection**: Automatically detect timestamp precision when writing data.
- **Updated charting library**: Replace Recharts with ECharts for improved chart rendering and feature parity.
#### Bug fixes
- **Dashboards**: Fix dashboard display issues.
- **Write API**: Fix timestamp precision handling in write requests.
## v1.5.0 {date="2025-11-20"}
_Released alongside [InfluxDB 3.7](/influxdb3/core/release-notes/#v370)._
#### Features
- **One-click system monitoring**: Enable monitoring with a single action to begin collecting host-level metrics.
A built-in dashboard tracks system metrics alongside database activity over time.
- **System overview dashboard**: View memory pressure, query performance, and write performance metrics in a single dashboard to understand how your system performs under load.
#### Bug fixes
- **Monitoring**: Fix error handling in instant monitoring setup.
- **Monitoring**: Fix monitoring SQL queries to use correct identifier quoting.
## v1.4.0 {date="2025-10-31"}
_Released alongside [InfluxDB 3.6](/influxdb3/core/release-notes/#v360)._
#### Features
- **Ask AI (beta)**: Query your data conversationally without writing SQL—for example,
"show CPU usage by region over the last hour."
Ask AI can also handle operational tasks such as database creation, token generation, and configuration adjustments.
- **Dashboard import and export**: Share dashboards between environments or move them between Explorer and Grafana using compatible JSON files.
- **TLS and CA certificate support**: Configure custom CA certificates and skip verification for self-signed certificates.
#### Bug fixes
- **SQL**: Fix handling of capitalized table and column names.
- **Caching**: Improve empty state handling in cache creation dialogs.
- **Grafana**: Fix Grafana refresh interval parsing.
- **Dashboards**: Fix dashboard cell rendering and data updates.
## v1.3.1 {date="2025-10-21"}
Update dependency versions.
## v1.3.0 {date="2025-09-30"}
_Released alongside [InfluxDB 3.5](/influxdb3/core/release-notes/#v350)._
#### Features
- **Dashboards**: Save and organize queries in dashboards with auto-refresh, custom time ranges, and resizable cells.
Navigate between the data explorer and dashboards to build queries and save results.
- **Last Value Cache and Distinct Value Cache querying**: Run ad hoc queries against built-in caches from the data explorer for instant results.
- **License information**: View license details for your InfluxDB instance.
- **Server configuration editing**: Edit server configuration settings directly from Explorer.
#### Bug fixes
- **Databases**: Fix database layout and request handling.
- **Caching**: Fix cache query formatting and error handling.
## v1.2.1 {date="2025-09-03"}
#### Bug fixes
- **Permissions**: Restrict access to integrations, caches, and plugins in query mode.
- **Performance**: Fix performance issues.
## v1.2.0 {date="2025-08-27"}
_Released alongside [InfluxDB 3.4](/influxdb3/core/release-notes/#v340)._
#### Features
- **Cache management UI**: Create and manage Last Value Caches and Distinct Value Caches through the Explorer UI under **Configure > Caches**.
- **Parquet export**: Export query results as Parquet files.
- **Grafana data source setup**: Configure InfluxDB 3 as a Grafana data source directly from Explorer.
- **System overview improvements**: Add a refetch button and full-length query tooltips to system overview query tables.
#### Bug fixes
- **SQL**: Fix SQL query ordering to sort results in ascending order by time.
- **Data types**: Improve data type serialization for InfluxDB field types.
- **Navigation**: Fix navigation logic.
## v1.1.1 {date="2025-08-07"}
#### Bug fixes
- **Plugins**: Fix plugin and trigger card layout and icon display.
- **Plugins**: Fix trigger error status display.
- **Plugins**: Fix trigger logs dialog title.
- **Permissions**: Fix permission table cell alignment.
- **Performance**: Improve request handling performance.
## v1.1.0 {date="2025-07-30"}
_Released alongside [InfluxDB 3.3](/influxdb3/core/release-notes/#v330)._
#### Features
- **Plugin management**: Discover plugins from the Plugin Library and install them in seconds.
Inspect output logs, edit plugin arguments, and manage triggers for both library plugins and custom plugins.
Requires InfluxDB 3 Core or Enterprise 3.3 or later.
- **System health overview**: View a high-level dashboard of your entire system covering memory pressure, query performance, and write performance metrics.
- **AI provider settings**: Configure multiple AI providers (such as OpenAI) with API key management.
#### Bug fixes
- **Charts**: Fix date range selector.
- **Schema**: Fix schema viewer display.
- **Line protocol**: Improve line protocol conversion.
- **Data export**: Fix system table data export.
## v1.0.3 {date="2025-07-03"}
#### Bug fixes
- **Schema**: Fix schema viewer display.
## v1.0.2 {date="2025-07-03"}
#### Bug fixes
- **Performance**: Fix browser caching issues with module federation assets.
## v1.0.1 {date="2025-07-02"}
#### Bug fixes
- **Dependencies**: Fix dependency compatibility issues.
## v1.0.0 {date="2025-06-30"}
_Released alongside [InfluxDB 3.2](/influxdb3/core/release-notes/#v320). This is the initial general availability (GA) release of InfluxDB 3 Explorer._
InfluxDB 3 Explorer is a web-based UI for working with InfluxDB 3 Core and Enterprise. It provides a single interface for querying, visualizing, and managing your time series data.
#### Features
- **SQL editor**: Write and run SQL queries with autocomplete, and view results as tables or charts.
- **Database management**: Create and delete databases with point-and-click controls.
- **Token management**: Create, view, and revoke API tokens including resource-scoped tokens.
- **Data visualization**: View query results as interactive line charts with number formatting and customizable axes.
- **Data import**: Import data from CSV and JSON files, or write line protocol directly.
- **Grafana integration**: Export connection strings and configure Grafana data sources.
- **OpenAI integration**: Use natural language to generate SQL queries based on your schema.
- **Adaptive onboarding**: Optional onboarding experience that adjusts based on your experience level, with built-in sample datasets.
- **Deployment flexibility**: Run as a standalone Docker container in admin mode (full functionality) or query mode (read-only access).

View File

@ -0,0 +1,17 @@
---
title: Shortcode test page
noindex: true
test_only: true
---
<!-- vale off -->
<span data-testid="product-name">{{% product-name %}}</span>
<span data-testid="product-name-short">{{% product-name "short" %}}</span>
<span data-testid="product-key">{{< product-key >}}</span>
<span data-testid="current-version">{{< current-version >}}</span>
<span data-testid="host">{{< influxdb/host >}}</span>
<span data-testid="latest-patch">{{< latest-patch >}}</span>
<span data-testid="icon-check">{{< icon "check" >}}</span>
<div data-testid="api-endpoint">{{< api-endpoint method="get" endpoint="https://{{< influxdb/host >}}/api/v2/query" >}}</div>
<span data-testid="show-in-core">{{% show-in "core" %}}VISIBLE_IN_CORE{{% /show-in %}}</span>
<span data-testid="hide-in-core">{{% hide-in "core" %}}HIDDEN_IN_CORE{{% /hide-in %}}</span>

View File

@ -7,6 +7,9 @@ menu:
kapacitor_v1:
name: Kapacitor
weight: 1
cascade:
product: kapacitor
version: v1
---
Kapacitor is an open source data processing framework that makes it easy to create

View File

@ -852,7 +852,7 @@ No changes to Kapacitor, only upgrading to GoLang 1.7.4 for security patches.
### Release Notes
New K8sAutoscale node that allows you to auotmatically scale Kubernetes deployments driven by any metrics Kapacitor consumes.
New K8sAutoscale node that allows you to automatically scale Kubernetes deployments driven by any metrics Kapacitor consumes.
For example, to scale a deployment `myapp` based off requests per second:
```

View File

@ -11,6 +11,7 @@ the [HTTP API](/influxdb3/version/api/v3/), or [InfluxDB 3 Explorer](/influxdb3/
- [Delete a database using the influxdb3 CLI](#delete-a-database-using-the-influxdb3-cli)
- [Delete a database using the HTTP API](#delete-a-database-using-the-http-api)
- [Delete a database using InfluxDB 3 Explorer](#delete-a-database-using-influxdb-3-explorer)
{{% show-in "enterprise" %}}- [Delete data only (preserve schema and resources)](#delete-data-only-preserve-schema-and-resources){{% /show-in %}}
## Delete a database using the influxdb3 CLI
@ -71,3 +72,69 @@ You can also delete databases using the [InfluxDB 3 Explorer](/influxdb3/explore
> This action cannot be undone. All data in the database will be permanently deleted.
For more information, see [Manage databases with InfluxDB 3 Explorer](/influxdb3/explorer/manage-databases/).
{{% show-in "enterprise" %}}
## Delete data only (preserve schema and resources)
{{< product-name >}} supports deleting only the data in a database while preserving the database schema and associated resources.
This is useful when you want to clear old data and re-write new data to the same structure without recreating resources.
### What is preserved
When using the data-only deletion option, the following are preserved:
- **Database schema**: Tables and column definitions
- **Authentication tokens**: Database-scoped access tokens
- **Processing engine configurations**: Triggers and plugin configurations
- **Caches**: Last value caches (LVC) and distinct value caches (DVC)
### Delete data only using the CLI
Use the [`--data-only`](/influxdb3/version/reference/cli/influxdb3/delete/database/#options) flag to delete data while preserving the database schema and resources--for example:
```sh{placeholders="DATABASE_NAME"}
influxdb3 delete database --data-only DATABASE_NAME
```
Replace {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}} with the name of your database.
#### Delete data and remove tables
To delete data and remove table schemas while preserving database-level resources (tokens, triggers, configurations), combine `--data-only` with [`--remove-tables`](/influxdb3/version/reference/cli/influxdb3/delete/database/#options):
```sh{placeholders="DATABASE_NAME"}
influxdb3 delete database --data-only --remove-tables DATABASE_NAME
```
This preserves:
- Authentication tokens
- Processing engine triggers and configurations
But removes:
- All data
- Table schemas
- Table-level caches (LVC and DVC)
### Delete data only using the HTTP API
To delete only data using the HTTP API, include the `data_only=true` query parameter:
```bash{placeholders="DATABASE_NAME|AUTH_TOKEN"}
curl --request DELETE "{{< influxdb/host >}}/api/v3/configure/database?db=DATABASE_NAME&data_only=true" \
--header "Authorization: Bearer AUTH_TOKEN"
```
Replace the following:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}}
#### Delete data and remove tables
To also remove table schemas, add the `remove_tables=true` parameter:
```bash{placeholders="DATABASE_NAME|AUTH_TOKEN"}
curl --request DELETE "{{< influxdb/host >}}/api/v3/configure/database?db=DATABASE_NAME&data_only=true&remove_tables=true" \
--header "Authorization: Bearer AUTH_TOKEN"
```
{{% /show-in %}}

View File

@ -27,7 +27,7 @@ The `influxdb3 show databases` command supports output formats:
- `json`
- `jsonl`
- `csv`
<!-- - `parquet` _(must [output to a file](#output-to-a-parquet-file))_ -->
- `parquet` _(must [output to a file](#output-to-a-parquet-file))_
Use the `--format` flag to specify the output format:
@ -79,12 +79,18 @@ noaa
{{% /expand %}}
{{< /expand-wrapper >}}
#### Output to Parquet
#### Output to a Parquet file
To output your list of databases to a Parquet file, use the `influxdb3 query` command
[Parquet](https://parquet.apache.org/) is a binary format.
Use the `--output` option to specify the file where you want to save the Parquet data.
- `--format`: `parquet`
- `-o`, `--output`: the filepath to the Parquet file to output to
```sh
influxdb3 show databases \
--format parquet \
--output databases.parquet
```
Alternatively, use the `influxdb3 query` command to query system tables:
```sh
influxdb3 query \

View File

@ -69,10 +69,10 @@ Set the following environment variables when you start the MCP server:
{{% show-in "cloud-dedicated,clustered" %}}
- **INFLUX_DB_PRODUCT_TYPE**: `{{% product-key %}}`
- **INFLUX_DB_ACCOUNT_ID**: Your {{% product-name %}} account ID
- **INFLUX_DB_CLUSTER_ID**: Your {{% product-name %}} cluster ID
- **INFLUX_DB_TOKEN**: An {{% product-name %}} [database token](/influxdb3/cloud-dedicated/admin/tokens/database/)
- **INFLUX_DB_MANAGEMENT_TOKEN**: An {{% product-name %}} [management token](/influxdb3/cloud-dedicated/admin/tokens/management/)
- **INFLUX_DB_ACCOUNT_ID**: Your {{% product-name %}} [account ID](/influxdb3/version/admin/account/)
- **INFLUX_DB_CLUSTER_ID**: Your {{% product-name %}} [cluster ID](/influxdb3/version/admin/clusters/)
- **INFLUX_DB_TOKEN**: An {{% product-name %}} [database token](/influxdb3/version/admin/tokens/database/)
- **INFLUX_DB_MANAGEMENT_TOKEN**: An {{% product-name %}} [management token](/influxdb3/version/admin/tokens/management/)
> [!Note]
> #### Optional tokens

View File

@ -13,10 +13,10 @@ You can also schedule a hard deletion to permanently remove the table and its da
- [Delete a table using the influxdb3 CLI](#delete-a-table-using-the-influxdb3-cli)
- [Delete a table using the HTTP API](#delete-a-table-using-the-http-api)
{{% show-in "enterprise" %}}- [Delete data only (preserve schema and resources)](#delete-data-only-preserve-schema-and-resources){{% /show-in %}}
## Delete a table using the influxdb3 CLI
Use the `influxdb3 delete table` command to delete a table:
```sh{placeholders="DATABASE_NAME|TABLE_NAME|AUTH_TOKEN"}
@ -105,4 +105,51 @@ If the table doesn't exist, the API returns HTTP status `404`:
{
"error": "Table not found"
}
```
```
{{% show-in "enterprise" %}}
## Delete data only (preserve schema and resources)
{{< product-name >}} supports deleting only the data in a table while preserving the table schema and associated resources.
This is useful when you want to clear old data and re-write new data to the same table structure without recreating resources.
### What is preserved
When using the data-only deletion option, the following are preserved:
- **Table schema**: Column definitions and data types
- **Caches**: Last value caches (LVC) and distinct value caches (DVC) associated with the table
### Delete data only using the CLI
Use the [`--data-only`](/influxdb3/version/reference/cli/influxdb3/delete/table/#options) flag to delete data while preserving the table schema and resources:
```sh{placeholders="DATABASE_NAME|TABLE_NAME|AUTH_TOKEN"}
influxdb3 delete table \
--database DATABASE_NAME \
--token AUTH_TOKEN \
--data-only \
TABLE_NAME
```
Replace the following:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database containing the table
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name of the table
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}}
### Delete data only using the HTTP API
To delete only data using the HTTP API, include the `data_only=true` query parameter:
```bash{placeholders="DATABASE_NAME|TABLE_NAME|AUTH_TOKEN"}
curl -X DELETE "{{< influxdb/host >}}/api/v3/configure/table?db=DATABASE_NAME&table=TABLE_NAME&data_only=true" \
--header "Authorization: Bearer AUTH_TOKEN"
```
Replace the following:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database containing the table
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the name of the table
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}}
{{% /show-in %}}

View File

@ -59,7 +59,7 @@ export INFLUXDB3_ENTERPRISE_CLUSTER_ID=cluster0
{{% /show-in %}}export INFLUXDB3_NODE_IDENTIFIER_PREFIX=my-node
export INFLUXDB3_OBJECT_STORE=file
export INFLUXDB3_DB_DIR=~/.influxdb3
export LOG_FILTER=info
export INFLUXDB3_LOG_FILTER=info
influxdb3 serve
```
@ -138,6 +138,9 @@ For detailed information about thread allocation, see the [Resource Limits](#res
{{% /show-in %}}
- [object-store](#object-store)
- [query-file-limit](#query-file-limit)
{{% show-in "enterprise" %}}
- [use-pacha-tree](#use-pacha-tree)
{{% /show-in %}}
{{% show-in "enterprise" %}}
@ -276,6 +279,26 @@ This option supports the following values:
{{% show-in "enterprise" %}}
#### use-pacha-tree <span class="badge experimental">Experimental</span> {#use-pacha-tree}
Enables the PachaTree storage engine.
> [!Caution]
> PachaTree is an experimental feature not for production use.
> It might not be compatible with other features and configuration options.
**Default:** `false`
| influxdb3 serve option | Environment variable |
| :--------------------- | :----------------------------- |
| `--use-pacha-tree` | `INFLUXDB3_USE_PACHA_TREE` |
***
{{% /show-in %}}
{{% show-in "enterprise" %}}
### Licensing
#### license-email
@ -766,9 +789,9 @@ this value.
**Default:** `16`
| influxdb3 serve option | Environment variable |
| influxdb3 serve option | Environment variables |
| :-------------------------------- | :------------------------------ |
| `--object-store-connection-limit` | `OBJECT_STORE_CONNECTION_LIMIT` |
| `--object-store-connection-limit` | `INFLUXDB3_OBJECT_STORE_CONNECTION_LIMIT` (preferred)<br>`OBJECT_STORE_CONNECTION_LIMIT` (deprecated; supported for backward compatibility) |
***
@ -776,9 +799,9 @@ this value.
Forces HTTP/2 connections to network-based object stores.
| influxdb3 serve option | Environment variable |
| influxdb3 serve option | Environment variables |
| :-------------------------- | :------------------------ |
| `--object-store-http2-only` | `OBJECT_STORE_HTTP2_ONLY` |
| `--object-store-http2-only` | `INFLUXDB3_OBJECT_STORE_HTTP2_ONLY` (preferred)<br>`OBJECT_STORE_HTTP2_ONLY` (deprecated; supported for backward compatibility) |
***
@ -786,9 +809,9 @@ Forces HTTP/2 connections to network-based object stores.
Sets the maximum frame size (in bytes/octets) for HTTP/2 connections.
| influxdb3 serve option | Environment variable |
| influxdb3 serve option | Environment variables |
| :------------------------------------ | :---------------------------------- |
| `--object-store-http2-max-frame-size` | `OBJECT_STORE_HTTP2_MAX_FRAME_SIZE` |
| `--object-store-http2-max-frame-size` | `INFLUXDB3_OBJECT_STORE_HTTP2_MAX_FRAME_SIZE` (preferred)<br>`OBJECT_STORE_HTTP2_MAX_FRAME_SIZE` (deprecated; supported for backward compatibility) |
***
@ -796,9 +819,9 @@ Sets the maximum frame size (in bytes/octets) for HTTP/2 connections.
Defines the maximum number of times to retry a request.
| influxdb3 serve option | Environment variable |
| influxdb3 serve option | Environment variables |
| :--------------------------- | :------------------------- |
| `--object-store-max-retries` | `OBJECT_STORE_MAX_RETRIES` |
| `--object-store-max-retries` | `INFLUXDB3_OBJECT_STORE_MAX_RETRIES` (preferred)<br>`OBJECT_STORE_MAX_RETRIES` (deprecated; supported for backward compatibility) |
***
@ -807,9 +830,9 @@ Defines the maximum number of times to retry a request.
Specifies the maximum length of time from the initial request after which no
further retries are attempted.
| influxdb3 serve option | Environment variable |
| influxdb3 serve option | Environment variables |
| :----------------------------- | :--------------------------- |
| `--object-store-retry-timeout` | `OBJECT_STORE_RETRY_TIMEOUT` |
| `--object-store-retry-timeout` | `INFLUXDB3_OBJECT_STORE_RETRY_TIMEOUT` (preferred)<br>`OBJECT_STORE_RETRY_TIMEOUT` (deprecated; supported for backward compatibility) |
***
@ -817,9 +840,9 @@ further retries are be attempted.
Sets the endpoint of an S3-compatible, HTTP/2-enabled object store cache.
| influxdb3 serve option | Environment variable |
| influxdb3 serve option | Environment variables |
| :------------------------------ | :---------------------------- |
| `--object-store-cache-endpoint` | `OBJECT_STORE_CACHE_ENDPOINT` |
| `--object-store-cache-endpoint` | `INFLUXDB3_OBJECT_STORE_CACHE_ENDPOINT` (preferred)<br>`OBJECT_STORE_CACHE_ENDPOINT` (deprecated; supported for backward compatibility) |
***
@ -894,7 +917,7 @@ influxdb3 serve --log-filter info,influxdb3_write_buffer=debug,influxdb3_wal=deb
<!--pytest.mark.skip-->
```sh
influxdb3 serve --log-filter info,influxdb3_pacha_tree=debug
influxdb3 serve --log-filter info,influxdb3_enterprise=debug
```
{{% /show-in %}}
@ -909,8 +932,7 @@ The following are common component names you can use for targeted filtering:
| `influxdb3_wal` | Write-ahead log operations |
| `influxdb3_catalog` | Catalog and schema operations |
| `influxdb3_cache` | Caching operations |
{{% show-in "enterprise" %}}`influxdb3_pacha_tree` | Enterprise storage engine operations |
`influxdb3_enterprise` | Enterprise-specific features |
{{% show-in "enterprise" %}}`influxdb3_enterprise` | Enterprise-specific features |
{{% /show-in %}}
> [!Note]
@ -919,9 +941,9 @@ The following are common component names you can use for targeted filtering:
> code. Use `debug` or `trace` sparingly on specific components to avoid
> excessive log output.
| influxdb3 serve option | Environment variable |
| influxdb3 serve option | Environment variables |
| :--------------------- | :------------------- |
| `--log-filter` | `LOG_FILTER` |
| `--log-filter` | `INFLUXDB3_LOG_FILTER` (preferred)<br>`LOG_FILTER` (deprecated; supported for backward compatibility) |
***
@ -936,9 +958,9 @@ This option supports the following values:
**Default:** `stdout`
| influxdb3 serve option | Environment variable |
| influxdb3 serve option | Environment variables |
| :--------------------- | :------------------- |
| `--log-destination` | `LOG_DESTINATION` |
| `--log-destination` | `INFLUXDB3_LOG_DESTINATION` (preferred)<br>`LOG_DESTINATION` (deprecated; supported for backward compatibility) |
***
@ -952,9 +974,9 @@ This option supports the following values:
**Default:** `full`
| influxdb3 serve option | Environment variable |
| influxdb3 serve option | Environment variables |
| :--------------------- | :------------------- |
| `--log-format` | `LOG_FORMAT` |
| `--log-format` | `INFLUXDB3_LOG_FORMAT` (preferred)<br>`LOG_FORMAT` (deprecated; supported for backward compatibility) |
***
@ -988,9 +1010,9 @@ Sets the type of tracing exporter.
**Default:** `none`
| influxdb3 serve option | Environment variable |
| influxdb3 serve option | Environment variables |
| :--------------------- | :------------------- |
| `--traces-exporter` | `TRACES_EXPORTER` |
| `--traces-exporter` | `INFLUXDB3_TRACES_EXPORTER` (preferred)<br>`TRACES_EXPORTER` (deprecated; supported for backward compatibility) |
***
@ -1000,9 +1022,9 @@ Specifies the Jaeger agent network hostname for tracing.
**Default:** `0.0.0.0`
| influxdb3 serve option | Environment variable |
| influxdb3 serve option | Environment variables |
| :------------------------------------ | :---------------------------------- |
| `--traces-exporter-jaeger-agent-host` | `TRACES_EXPORTER_JAEGER_AGENT_HOST` |
| `--traces-exporter-jaeger-agent-host` | `INFLUXDB3_TRACES_EXPORTER_JAEGER_AGENT_HOST` (preferred)<br>`TRACES_EXPORTER_JAEGER_AGENT_HOST` (deprecated; supported for backward compatibility) |
***
@ -1012,9 +1034,9 @@ Defines the Jaeger agent network port for tracing.
**Default:** `6831`
| influxdb3 serve option | Environment variable |
| influxdb3 serve option | Environment variables |
| :------------------------------------ | :---------------------------------- |
| `--traces-exporter-jaeger-agent-port` | `TRACES_EXPORTER_JAEGER_AGENT_PORT` |
| `--traces-exporter-jaeger-agent-port` | `INFLUXDB3_TRACES_EXPORTER_JAEGER_AGENT_PORT` (preferred)<br>`TRACES_EXPORTER_JAEGER_AGENT_PORT` (deprecated; supported for backward compatibility) |
***
@ -1024,9 +1046,9 @@ Sets the Jaeger service name for tracing.
**Default:** `iox-conductor`
| influxdb3 serve option | Environment variable |
| influxdb3 serve option | Environment variables |
| :-------------------------------------- | :------------------------------------ |
| `--traces-exporter-jaeger-service-name` | `TRACES_EXPORTER_JAEGER_SERVICE_NAME` |
| `--traces-exporter-jaeger-service-name` | `INFLUXDB3_TRACES_EXPORTER_JAEGER_SERVICE_NAME` (preferred)<br>`TRACES_EXPORTER_JAEGER_SERVICE_NAME` (deprecated; supported for backward compatibility) |
***
@ -1036,9 +1058,9 @@ Specifies the header name used for passing trace context.
**Default:** `uber-trace-id`
| influxdb3 serve option | Environment variable |
| influxdb3 serve option | Environment variables |
| :--------------------------------------------------- | :------------------------------------------------- |
| `--traces-exporter-jaeger-trace-context-header-name` | `TRACES_EXPORTER_JAEGER_TRACE_CONTEXT_HEADER_NAME` |
| `--traces-exporter-jaeger-trace-context-header-name` | `INFLUXDB3_TRACES_EXPORTER_JAEGER_TRACE_CONTEXT_HEADER_NAME` (preferred)<br>`TRACES_EXPORTER_JAEGER_TRACE_CONTEXT_HEADER_NAME` (deprecated; supported for backward compatibility) |
***
@ -1048,9 +1070,9 @@ Specifies the header name used for force sampling in tracing.
**Default:** `jaeger-debug-id`
| influxdb3 serve option | Environment variable |
| influxdb3 serve option | Environment variables |
| :--------------------------- | :---------------------------------- |
| `--traces-jaeger-debug-name` | `TRACES_EXPORTER_JAEGER_DEBUG_NAME` |
| `--traces-jaeger-debug-name` | `INFLUXDB3_TRACES_JAEGER_DEBUG_NAME` (preferred)<br>`TRACES_EXPORTER_JAEGER_DEBUG_NAME` (deprecated; supported for backward compatibility) |
***
@ -1058,9 +1080,9 @@ Specifies the header name used for force sampling in tracing.
Defines a set of `key=value` pairs to annotate tracing spans with.
| influxdb3 serve option | Environment variable |
| influxdb3 serve option | Environment variables |
| :--------------------- | :---------------------------- |
| `--traces-jaeger-tags` | `TRACES_EXPORTER_JAEGER_TAGS` |
| `--traces-jaeger-tags` | `INFLUXDB3_TRACES_JAEGER_TAGS` (preferred)<br>`TRACES_EXPORTER_JAEGER_TAGS` (deprecated; supported for backward compatibility) |
***
@ -1070,9 +1092,9 @@ Specifies the maximum number of messages sent to a Jaeger service per second.
**Default:** `1000`
| influxdb3 serve option | Environment variable |
| influxdb3 serve option | Environment variables |
| :------------------------------------ | :---------------------------------- |
| `--traces-jaeger-max-msgs-per-second` | `TRACES_JAEGER_MAX_MSGS_PER_SECOND` |
| `--traces-jaeger-max-msgs-per-second` | `INFLUXDB3_TRACES_JAEGER_MAX_MSGS_PER_SECOND` (preferred)<br>`TRACES_JAEGER_MAX_MSGS_PER_SECOND` (deprecated; supported for backward compatibility) |
***
@ -1649,6 +1671,64 @@ Specifies the local directory that contains Python plugins and their test files.
| :--------------------- | :--------------------- |
| `--plugin-dir` | `INFLUXDB3_PLUGIN_DIR` |
##### Default behavior by deployment type
| Deployment | Default state | Configuration |
|:-----------|:--------------|:--------------|
| Docker images | **Enabled** | `INFLUXDB3_PLUGIN_DIR=/plugins` |
| DEB/RPM packages | **Enabled** | `plugin-dir="/var/lib/influxdb3/plugins"` |
| Binary/source | Disabled | No `plugin-dir` configured |
##### Disable the Processing Engine
To disable the Processing Engine, ensure `plugin-dir` is not configured.
> [!Warning]
> Setting `plugin-dir=""` or `INFLUXDB3_PLUGIN_DIR=""` (empty string) does **not** disable the Processing Engine.
> You must comment out, remove, or unset the configuration — not set it to empty.
{{% show-in "enterprise" %}}
**Docker:** Use `INFLUXDB3_UNSET_VARS` to unset default environment variables that are preconfigured in the container image.
`INFLUXDB3_UNSET_VARS` accepts a comma-separated list of environment variable names to unset in the container entrypoint before {{< product-name >}} starts.
This lets you disable or override image defaults (for example, `INFLUXDB3_PLUGIN_DIR`, logging, or other configuration variables) without modifying the container image itself.
To disable the default plugin directory, unset `INFLUXDB3_PLUGIN_DIR`:
```bash
docker run -e INFLUXDB3_UNSET_VARS="INFLUXDB3_PLUGIN_DIR" influxdb:3-enterprise
```
{{% /show-in %}}
{{% show-in "core" %}}
**Docker:** Use a custom entrypoint:
```bash
docker run --entrypoint /bin/sh influxdb:3-core -c 'unset INFLUXDB3_PLUGIN_DIR && exec influxdb3 serve --object-store memory'
```
{{% /show-in %}}
**systemd (DEB/RPM):** Comment out or remove `plugin-dir` in the configuration file:
```bash
sudo nano /etc/influxdb3/influxdb3-{{< product-key >}}.conf
```
```toml
# plugin-dir="/var/lib/influxdb3/plugins"
```
Then restart the service:
```bash
sudo systemctl restart influxdb3-{{< product-key >}}
```
When the Processing Engine is disabled:
- The Python environment and PyO3 bindings are not initialized
- Plugin-related operations return a "No plugin directory configured" error
- The server runs with reduced resource usage
***
#### plugin-repo

View File

@ -7,6 +7,7 @@ Provide a database name and, optionally, specify connection settings and authent
<!--pytest.mark.skip-->
```bash
# Syntax
influxdb3 create database [OPTIONS] <DATABASE_NAME>
```
@ -28,7 +29,8 @@ You can also set the database name using the `INFLUXDB3_DATABASE_NAME` environme
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
| | `--retention-period` | Database [retention period](/influxdb3/version/reference/glossary/#retention-period) ([duration](/influxdb3/version/reference/glossary/#duration) value, for example: `30d`, `24h`, `1h`) |
| | `--token` | Authentication token |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-ca` | Path to a custom TLS certificate authority (for self-signed or internal certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification. **Not recommended in production.** Useful for testing with self-signed certificates |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
@ -36,30 +38,29 @@ You can also set the database name using the `INFLUXDB3_DATABASE_NAME` environme
You can use the following environment variables instead of providing CLI options directly:
| Environment Variable | Option |
| :------------------------ | :----------- |
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| Environment Variable | Option |
| :------------------------ | :---------------- |
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Examples
The following examples show how to create a database.
In your commands replace the following:
In the examples below, replace the following:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
Database name
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
Authentication token
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
### Create a database (default)
Creates a database using settings from environment variables and defaults.
<!--pytest.mark.skip-->
```bash
```bash { placeholders="DATABASE_NAME" }
influxdb3 create database DATABASE_NAME
```
@ -70,7 +71,7 @@ Flags override their associated environment variables.
<!--pytest.mark.skip-->
```bash
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME" }
influxdb3 create database --token AUTH_TOKEN DATABASE_NAME
```
@ -81,7 +82,7 @@ Data older than 30 days will not be queryable.
<!--pytest.mark.skip-->
```bash
```bash { placeholders="DATABASE_NAME" }
influxdb3 create database --retention-period 30d DATABASE_NAME
```
@ -91,7 +92,7 @@ Creates a database with no retention period (data never expires).
<!--pytest.mark.skip-->
```bash
```bash { placeholders="DATABASE_NAME" }
influxdb3 create database --retention-period none DATABASE_NAME
```
@ -101,7 +102,7 @@ Creates a database with a 90-day retention period using an authentication token.
<!--pytest.mark.skip-->
```bash
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME" }
influxdb3 create database \
--retention-period 90d \
--token AUTH_TOKEN \
@ -114,7 +115,7 @@ Creates a database with a 1-year retention period.
<!--pytest.mark.skip-->
```bash
```bash { placeholders="DATABASE_NAME" }
influxdb3 create database --retention-period 1y DATABASE_NAME
```
@ -124,12 +125,10 @@ Creates a database with a retention period of 30 days and 12 hours.
<!--pytest.mark.skip-->
```bash
```bash { placeholders="DATABASE_NAME" }
influxdb3 create database --retention-period 30d12h DATABASE_NAME
```
{{% /code-placeholders %}}
## Retention period duration formats
Retention periods are specified as [duration](/influxdb3/version/reference/glossary/#duration)

View File

@ -31,6 +31,7 @@ influxdb3 create distinct_cache [OPTIONS] \
| | `--max-cardinality` | Maximum number of distinct value combinations to hold in the cache |
| | `--max-age` | Maximum age of an entry in the cache entered as a human-readable duration--for example: `30d`, `24h` |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification. **Not recommended in production.** Useful for testing with self-signed certificates |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
@ -51,6 +52,7 @@ You can use the following environment variables to set command options:
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Prerequisites

View File

@ -26,6 +26,7 @@ influxdb3 create file_index [OPTIONS] \
| | `--token` | _({{< req >}})_ Authentication token |
| `-t` | `--table` | Table to apply the file index to |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification. **Not recommended in production.** Useful for testing with self-signed certificates |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
@ -38,6 +39,7 @@ You can use the following environment variables to set command options:
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Examples

View File

@ -32,6 +32,7 @@ influxdb3 create last_cache [OPTIONS] \
| | `--count` | Number of entries per unique key column combination to store in the cache |
| | `--ttl` | Cache entries' time-to-live (TTL) in [Humantime form](https://docs.rs/humantime/latest/humantime/fn.parse_duration.html)--for example: `10s`, `1min 30sec`, `3 hours` |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification. **Not recommended in production.** Useful for testing with self-signed certificates |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
@ -44,6 +45,7 @@ You can use the following environment variables as substitutes for CLI options:
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Prerequisites

View File

@ -11,6 +11,7 @@ The `influxdb3 create table` command creates a new table in a specified database
<!--pytest.mark.skip-->
```bash
# Syntax
influxdb3 create table [OPTIONS] \
--tags [<TAGS>...] \
--database <DATABASE_NAME> \
@ -28,16 +29,17 @@ influxdb3 create table [OPTIONS] \
-->
{{% hide-in "enterprise" %}}
| Option | | Description |
| :----- | :----------- | :--------------------------------------------------------------------------------------- |
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
| | `--token` | _({{< req >}})_ Authentication token |
| | `--tags` | _({{< req >}})_ Comma-separated list of tag columns to include in the table |
| | `--fields` | Comma-separated list of field columns and their types to include in the table |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
| Option | | Description |
| :----- | :---------------- | :--------------------------------------------------------------------------------------- |
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
| | `--token` | _({{< req >}})_ Authentication token |
| | `--tags` | _({{< req >}})_ Comma-separated list of tag columns to include in the table |
| | `--fields` | Comma-separated list of field columns and their types to include in the table |
| | `--tls-ca` | Path to a custom TLS certificate authority (for self-signed or internal certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
{{% /hide-in %}}
<!-- Using the show-in shortcode for only the retention-period option breaks the formatting in Core -->
@ -50,7 +52,8 @@ influxdb3 create table [OPTIONS] \
| | `--token` | _({{< req >}})_ Authentication token |
| | `--tags` | _({{< req >}})_ Comma-separated list of tag columns to include in the table |
| | `--fields` | Comma-separated list of field columns and their types to include in the table |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-ca` | Path to a custom TLS certificate authority (for self-signed or internal certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
{{% /show-in %}}
@ -66,30 +69,29 @@ influxdb3 create table [OPTIONS] \
You can use the following environment variables to set options instead of passing them via CLI flags:
| Environment Variable | Option |
| :------------------------ | :----------- |
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| Environment Variable | Option |
| :------------------------ | :---------------- |
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Examples
In the following examples, replace each placeholder with your actual values:
In the examples below, replace the following:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
The database name
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
Authentication token
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}:
The authentication token
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}:
A name for the new table
{{% code-placeholders "DATABASE_NAME|TABLE_NAME|AUTH_TOKEN" %}}
### Create an empty table
<!--pytest.mark.skip-->
```bash
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME|TABLE_NAME" }
influxdb3 create table \
--tags tag1,tag2,tag3 \
--database DATABASE_NAME \
@ -101,7 +103,7 @@ influxdb3 create table \
<!--pytest.mark.skip-->
```bash
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME|TABLE_NAME" }
influxdb3 create table \
--tags room,sensor_id \
--fields temp:float64,hum:float64,co:int64 \
@ -115,7 +117,7 @@ influxdb3 create table \
<!--pytest.mark.skip-->
```bash
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME|TABLE_NAME" }
influxdb3 create table \
--tags room,sensor_id \
--fields temp:float64,hum:float64 \
@ -132,7 +134,7 @@ Use the `SHOW TABLES` query to verify that the table was created successfully:
<!--pytest.mark.skip-->
```bash
```bash { placeholders="AUTH_TOKEN" }
influxdb3 query \
--database my_test_db \
--token AUTH_TOKEN \
@ -152,5 +154,3 @@ Example output:
> [!Note]
> `SHOW TABLES` is an SQL query. It isn't supported in InfluxQL.
{{% /code-placeholders %}}

View File

@ -7,6 +7,7 @@ processing engine.
<!--pytest.mark.skip-->
```bash
# Syntax
influxdb3 create trigger [OPTIONS] \
--database <DATABASE_NAME> \
--token <AUTH_TOKEN> \
@ -39,7 +40,8 @@ influxdb3 create trigger [OPTIONS] \
| | `--error-behavior` | Error handling behavior: `log`, `retry`, or `disable` |
| | `--run-asynchronous` | Run the trigger asynchronously, allowing multiple triggers to run simultaneously (default is synchronous) |
{{% show-in "enterprise" %}}| | `--node-spec` | Which node(s) the trigger should be configured on. Two value formats are supported: `all` (default) - applies to all nodes, or `nodes:<node-id>[,<node-id>..]` - applies only to specified comma-separated list of nodes |{{% /show-in %}}
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-ca` | Path to a custom TLS certificate authority (for self-signed or internal certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
@ -51,11 +53,12 @@ For example, to use the [System Metrics](https://github.com/influxdata/influxdb3
You can use the following environment variables to set command options:
| Environment Variable | Option |
| :------------------------ | :----------- |
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| Environment Variable | Option |
| :------------------------ | :---------------- |
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Examples
@ -81,9 +84,7 @@ Replace the following placeholders with your values:
- {{% code-placeholder-key %}}`TRIGGER_NAME`{{% /code-placeholder-key %}}:
Name of the trigger to create
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}:
Name of the table to trigger on
{{% code-placeholders "(DATABASE|TRIGGER)_NAME|AUTH_TOKEN|TABLE_NAME" %}}
Name of the table to trigger on
### Create a trigger for a specific table
@ -91,7 +92,7 @@ Create a trigger that processes data from a specific table.
<!--pytest.mark.skip-->
```bash
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME|PLUGIN_FILENAME|TABLE_NAME|TRIGGER_NAME" }
influxdb3 create trigger \
--database DATABASE_NAME \
--token AUTH_TOKEN \
@ -108,7 +109,7 @@ Create a trigger that applies to all tables in the specified database.
<!--pytest.mark.skip-->
```bash
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME|PLUGIN_FILENAME|TRIGGER_NAME" }
influxdb3 create trigger \
--database DATABASE_NAME \
--token AUTH_TOKEN \
@ -125,7 +126,7 @@ This is useful when you want a trigger to apply to any table in the database, re
Create a trigger that runs at a specific interval using a duration. Supported duration units: `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `M` (months), `y` (years). Maximum interval is 1 year.
```bash
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME|PLUGIN_FILENAME|TRIGGER_NAME" }
influxdb3 create trigger \
--database DATABASE_NAME \
--token AUTH_TOKEN \
@ -150,7 +151,7 @@ Fields:
Example: Run at 6:00 AM every weekday (Monday-Friday):
```bash
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME|PLUGIN_FILENAME|TRIGGER_NAME" }
influxdb3 create trigger \
--database DATABASE_NAME \
--token AUTH_TOKEN \
@ -165,7 +166,7 @@ influxdb3 create trigger \
Create a trigger that provides an API endpoint and processes HTTP requests.
```bash
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME|PLUGIN_FILENAME|REQUEST_PATH|TRIGGER_NAME" }
influxdb3 create trigger \
--database DATABASE_NAME \
--token AUTH_TOKEN \
@ -182,7 +183,7 @@ Create a trigger using a plugin organized in multiple files. The plugin director
<!--pytest.mark.skip-->
```bash
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME|TRIGGER_NAME" }
influxdb3 create trigger \
--database DATABASE_NAME \
--token AUTH_TOKEN \
@ -208,7 +209,7 @@ Upload plugin files from your local machine and create a trigger in a single com
<!--pytest.mark.skip-->
```bash
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME|TABLE_NAME|TRIGGER_NAME" }
# Upload single-file plugin
influxdb3 create trigger \
--database DATABASE_NAME \
@ -237,7 +238,7 @@ For more information, see [Upload plugins from local machine](/influxdb3/version
### Create a trigger with additional arguments
```bash
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME|PLUGIN_FILENAME|TABLE_NAME|TRIGGER_NAME" }
influxdb3 create trigger \
--database DATABASE_NAME \
--token AUTH_TOKEN \
@ -253,7 +254,7 @@ Create a trigger in a disabled state.
<!--pytest.mark.skip-->
```bash
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME|PLUGIN_FILENAME|TABLE_NAME|TRIGGER_NAME" }
influxdb3 create trigger \
--disabled \
--database DATABASE_NAME \
@ -269,7 +270,7 @@ Creating a trigger in a disabled state prevents it from running immediately. You
Log the error to the service output and the `system.processing_engine_logs` table:
```bash
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME|PLUGIN_FILENAME|TABLE_NAME|TRIGGER_NAME" }
influxdb3 create trigger \
--database DATABASE_NAME \
--token AUTH_TOKEN \
@ -281,7 +282,7 @@ influxdb3 create trigger \
Rerun the trigger if it fails:
```bash
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME|PLUGIN_FILENAME|TABLE_NAME|TRIGGER_NAME" }
influxdb3 create trigger \
--database DATABASE_NAME \
--token AUTH_TOKEN \
@ -293,7 +294,7 @@ influxdb3 create trigger \
Disable the trigger if it fails:
```bash
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME|PLUGIN_FILENAME|TABLE_NAME|TRIGGER_NAME" }
influxdb3 create trigger \
--database DATABASE_NAME \
--token AUTH_TOKEN \
@ -302,5 +303,3 @@ influxdb3 create trigger \
--error-behavior disable \
TRIGGER_NAME
```
{{% /code-placeholders %}}

View File

@ -6,6 +6,7 @@ The `influxdb3 delete database` command deletes a database.
<!--pytest.mark.skip-->
```bash
# Syntax
influxdb3 delete database [OPTIONS] <DATABASE_NAME>
```
@ -21,28 +22,48 @@ influxdb3 delete database [OPTIONS] <DATABASE_NAME>
--database-name: internal variable, use positional <DATABASE_NAME>
-->
| Option | | Description |
| :----- | :------------ | :--------------------------------------------------------------------------------------- |
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
| | `--hard-delete` | When to hard delete data (never/now/default/timestamp). Default behavior is a soft delete that allows recovery |
| | `--token` | Authentication token |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
{{% hide-in "enterprise" %}}
| Option | | Description |
| :----- | :---------------- | :--------------------------------------------------------------------------------------- |
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
| | `--hard-delete` | When to hard delete data (never/now/default/timestamp). Default behavior is a soft delete that allows recovery |
| | `--token` | Authentication token |
| | `--tls-ca` | Path to a custom TLS certificate authority (for self-signed or internal certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
{{% /hide-in %}}
{{% show-in "enterprise" %}}
| Option | | Description |
| :----- | :---------------- | :--------------------------------------------------------------------------------------- |
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
| | `--data-only` | Delete only data while preserving schemas and all associated resources (tokens, triggers, caches, etc.). Default behavior deletes everything |
| | `--remove-tables` | Used with `--data-only` to remove table resources (caches) while preserving database-level resources (tokens, triggers, processing engine configurations) |
| | `--hard-delete` | When to hard delete data (never/now/default/timestamp). Default behavior is a soft delete that allows recovery |
| | `--token` | Authentication token |
| | `--tls-ca` | Path to a custom TLS certificate authority (for self-signed or internal certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
{{% /show-in %}}
### Option environment variables
You can use the following environment variables to set command options:
| Environment Variable | Option |
| :------------------------ | :----------- |
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| Environment Variable | Option |
| :------------------------ | :---------------- |
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Examples
- [Delete a database](#delete-a-database)
- [Delete a database while specifying the token inline](#delete-a-database-while-specifying-the-token-inline)
{{% show-in "enterprise" %}}- [Delete database data only (preserve schema and resources)](#delete-database-data-only-preserve-schema-and-resources)
- [Delete database data and tables (preserve database resources)](#delete-database-data-and-tables-preserve-database-resources){{% /show-in %}}
- [Hard delete a database immediately](#hard-delete-a-database-immediately)
- [Hard delete a database at a specific time](#hard-delete-a-database-at-a-specific-time)
@ -50,16 +71,14 @@ In the examples below, replace the following:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
Database name
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
Authentication token
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
### Delete a database
<!--pytest.mark.skip-->
```bash
```bash { placeholders="DATABASE_NAME" }
influxdb3 delete database DATABASE_NAME
```
@ -67,17 +86,50 @@ influxdb3 delete database DATABASE_NAME
<!--pytest.mark.skip-->
```bash
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME" }
influxdb3 delete database --token AUTH_TOKEN DATABASE_NAME
```
{{% show-in "enterprise" %}}
### Delete database data only (preserve schema and resources)
Delete all data from a database while preserving:
- Database schema (tables and columns)
- Authentication tokens
- Processing engine configurations and triggers
- Last value caches (LVC) and distinct value caches (DVC)
This is useful when you want to clear old data and rewrite new data to the same schema without recreating resources.
<!--pytest.mark.skip-->
```bash { placeholders="DATABASE_NAME" }
influxdb3 delete database --data-only DATABASE_NAME
```
### Delete database data and tables (preserve database resources)
Delete all data and table resources (caches) while preserving database-level resources:
- Authentication tokens
- Processing engine triggers
- Processing engine configurations
This is useful when you want to start fresh with a new schema but keep existing authentication and trigger configurations.
<!--pytest.mark.skip-->
```bash { placeholders="DATABASE_NAME" }
influxdb3 delete database --data-only --remove-tables DATABASE_NAME
```
{{% /show-in %}}
### Hard delete a database immediately
Permanently delete a database and all its data immediately without the ability to recover.
<!--pytest.mark.skip-->
```bash
```bash { placeholders="DATABASE_NAME" }
influxdb3 delete database --hard-delete now DATABASE_NAME
```
@ -87,8 +139,6 @@ Schedule a database for permanent deletion at a specific timestamp.
<!--pytest.mark.skip-->
```bash
```bash { placeholders="DATABASE_NAME" }
influxdb3 delete database --hard-delete "2024-01-01T00:00:00Z" DATABASE_NAME
```
{{% /code-placeholders %}}

View File

@ -25,6 +25,7 @@ influxdb3 delete distinct_cache [OPTIONS] \
| | `--token` | _({{< req >}})_ Authentication token |
| `-t` | `--table` | _({{< req >}})_ Table to delete the cache for |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
@ -37,6 +38,7 @@ You can use the following environment variables to set command options:
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Examples

View File

@ -19,6 +19,7 @@ influxdb3 delete file_index [OPTIONS] --database <DATABASE_NAME>
| | `--token` | _({{< req >}})_ Authentication token |
| `-t` | `--table` | Table to delete the file index from |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
@ -31,6 +32,7 @@ You can use the following environment variables to set command options:
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Examples

View File

@ -22,6 +22,7 @@ influxdb3 delete last_cache [OPTIONS] --database <DATABASE_NAME> --table <TABLE>
| | `--token` | _({{< req >}})_ Authentication token |
| `-t` | `--table` | _({{< req >}})_ Table to delete the cache from |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
@ -34,6 +35,7 @@ You can use the following environment variables to set command options:
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Examples

View File

@ -6,6 +6,7 @@ The `influxdb3 delete table` command deletes a table from a database.
<!--pytest.mark.skip-->
```bash
# Syntax
influxdb3 delete table [OPTIONS] --database <DATABASE_NAME> <TABLE_NAME>
```
@ -19,62 +20,96 @@ influxdb3 delete table [OPTIONS] --database <DATABASE_NAME> <TABLE_NAME>
--table-name: internal variable, use positional <TABLE_NAME>
-->
| Option | | Description |
| :----- | :------------ | :--------------------------------------------------------------------------------------- |
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
| | `--hard-delete` | When to hard delete data (never/now/default/timestamp). Default behavior is a soft delete that allows recovery |
| | `--token` | _({{< req >}})_ Authentication token |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
{{% hide-in "enterprise" %}}
| Option | | Description |
| :----- | :---------------- | :--------------------------------------------------------------------------------------- |
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
| | `--hard-delete` | When to hard delete data (never/now/default/timestamp). Default behavior is a soft delete that allows recovery |
| | `--token` | _({{< req >}})_ Authentication token |
| | `--tls-ca` | Path to a custom TLS certificate authority (for self-signed or internal certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
{{% /hide-in %}}
{{% show-in "enterprise" %}}
| Option | | Description |
| :----- | :---------------- | :--------------------------------------------------------------------------------------- |
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
| | `--data-only` | Delete only data while preserving the table schema and all associated resources (caches, etc.). Default behavior deletes everything |
| | `--hard-delete` | When to hard delete data (never/now/default/timestamp). Default behavior is a soft delete that allows recovery |
| | `--token` | _({{< req >}})_ Authentication token |
| | `--tls-ca` | Path to a custom TLS certificate authority (for self-signed or internal certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
{{% /show-in %}}
### Option environment variables
You can use the following environment variables to set command options:
| Environment Variable | Option |
| :------------------------ | :----------- |
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| Environment Variable | Option |
| :------------------------ | :---------------- |
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Examples
### Delete a table
In the examples below, replace the following:
{{% code-placeholders "(DATABASE|TABLE)_NAME|AUTH_TOKEN" %}}
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
Database name
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
Authentication token
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}:
Name of the table to delete
### Delete a table
<!--pytest.mark.skip-->
```bash
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME|TABLE_NAME" }
influxdb3 delete table \
--database DATABASE_NAME \
--token AUTH_TOKEN \
TABLE_NAME
```
{{% show-in "enterprise" %}}
### Delete table data only (preserve schema and resources)
Delete all data from a table while preserving:
- Table schema (column definitions)
- Last value caches (LVC) and distinct value caches (DVC) associated with the table
This is useful when you want to clear old data and rewrite new data to the same schema without recreating the table structure.
<!--pytest.mark.skip-->
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME|TABLE_NAME" }
influxdb3 delete table \
--database DATABASE_NAME \
--token AUTH_TOKEN \
--data-only \
TABLE_NAME
```
{{% /show-in %}}
### Hard delete a table immediately
Permanently delete a table and all its data immediately without the ability to recover.
<!--pytest.mark.skip-->
```bash
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME|TABLE_NAME" }
influxdb3 delete table \
--database DATABASE_NAME \
--token AUTH_TOKEN \
--hard-delete now \
TABLE_NAME
```
{{% /code-placeholders %}}
Replace the following:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
Database name
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
Authentication token
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}:
Name of the table to delete

View File

@ -4,24 +4,33 @@ The `influxdb3 delete token` command deletes an authorization token from the {{%
## Usage
```bash
# Syntax
influxdb3 delete token [OPTIONS]
```
## Options
| Option | Description | Default | Environment |
|----------------|-----------------------------------------------------------------------------------|---------|------------------------|
| `--token` | _({{< req >}})_ The token for authentication with the {{% product-name %}} server | | `INFLUXDB3_AUTH_TOKEN` |
| `--token-name` | _({{< req >}})_ The name of the token to be deleted | | |
| `--tls-ca` | An optional argument to use a custom CA, useful for testing with self-signed certificates | | `INFLUXDB3_TLS_CA` |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
| Option | Description | Default | Environment |
|--------------------|-----------------------------------------------------------------------------------|---------|----------------------------|
| `--token` | _({{< req >}})_ The token for authentication with the {{% product-name %}} server | | `INFLUXDB3_AUTH_TOKEN` |
| `--token-name` | _({{< req >}})_ The name of the token to be deleted | | |
| `--tls-ca` | Path to a custom TLS certificate authority (for self-signed or internal certificates) | | `INFLUXDB3_TLS_CA` |
| `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) | | `INFLUXDB3_TLS_NO_VERIFY` |
| `-h`, `--help` | Print help information | | |
| `--help-all` | Print detailed help information | | |
## Examples
In the examples below, replace the following:
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
Authentication token with permission to delete tokens
- {{% code-placeholder-key %}}`TOKEN_TO_DELETE`{{% /code-placeholder-key %}}:
Name of the token to delete
### Delete a token by name
```bash
```bash { placeholders="AUTH_TOKEN|TOKEN_TO_DELETE" }
influxdb3 delete token --token-name TOKEN_TO_DELETE --token AUTH_TOKEN
```

View File

@ -6,6 +6,7 @@ The `influxdb3 delete trigger` command deletes a processing engine trigger.
<!--pytest.mark.skip-->
```bash
# Syntax
influxdb3 delete trigger [OPTIONS] --database <DATABASE_NAME> <TRIGGER_NAME>
```
@ -19,25 +20,27 @@ influxdb3 delete trigger [OPTIONS] --database <DATABASE_NAME> <TRIGGER_NAME>
--trigger-name: internal variable, use positional <TRIGGER_NAME>
-->
| Option | | Description |
| :----- | :----------- | :--------------------------------------------------------------------------------------- |
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
| | `--token` | _({{< req >}})_ Authentication token |
| | `--force` | Force delete even if the trigger is active |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
| Option | | Description |
| :----- | :---------------- | :--------------------------------------------------------------------------------------- |
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
| | `--token` | _({{< req >}})_ Authentication token |
| | `--force` | Force delete even if the trigger is active |
| | `--tls-ca` | Path to a custom TLS certificate authority (for self-signed or internal certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
### Option environment variables
You can use the following environment variables to set command options:
| Environment Variable | Option |
| :------------------------ | :----------- |
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| Environment Variable | Option |
| :------------------------ | :---------------- |
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Examples
@ -50,16 +53,14 @@ In the examples below, replace the following:
Database name
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
Authentication token
- {{% code-placeholder-key %}}`TRIGGER_NAME`{{% /code-placeholder-key %}}:
- {{% code-placeholder-key %}}`TRIGGER_NAME`{{% /code-placeholder-key %}}:
Name of the trigger to delete
{{% code-placeholders "(DATABASE|TRIGGER)_NAME|AUTH_TOKEN" %}}
### Delete a trigger
<!--pytest.mark.skip-->
```bash
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME|TRIGGER_NAME" }
influxdb3 delete trigger \
--database DATABASE_NAME \
--token AUTH_TOKEN \
@ -70,12 +71,10 @@ influxdb3 delete trigger \
<!--pytest.mark.skip-->
```bash
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME|TRIGGER_NAME" }
influxdb3 delete trigger \
--force \
--database DATABASE_NAME \
--token AUTH_TOKEN \
TRIGGER_NAME
```
{{% /code-placeholders %}}

View File

@ -21,6 +21,7 @@ influxdb3 disable trigger [OPTIONS] --database <DATABASE_NAME> <TRIGGER_NAME>
| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
| | `--token` | _({{< req >}})_ Authentication token |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
@ -33,3 +34,4 @@ You can use the following environment variables to set command options:
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |

View File

@ -21,6 +21,7 @@ influxdb3 enable trigger [OPTIONS] --database <DATABASE_NAME> <TRIGGER_NAME>
| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
| | `--token` | _({{< req >}})_ Authentication token |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
@ -33,3 +34,4 @@ You can use the following environment variables to set command options:
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |

View File

@ -4,6 +4,7 @@ Use this command to add external dependencies that your plugins require, such as
## Usage
```bash
# Syntax
influxdb3 install package [OPTIONS] [PACKAGES]...
```
@ -26,7 +27,8 @@ influxdb3 install package [OPTIONS] [PACKAGES]...
| `--package-manager <PACKAGE_MANAGER>` | Package manager to use: `discover`, `pip`, `uv`, or `disabled` | `discover` | `INFLUXDB3_PACKAGE_MANAGER` |
| `--plugin-repo <PLUGIN_REPO>` | Plugin repository URL | | `INFLUXDB3_PLUGIN_REPO` |
| `-r`, `--requirements <REQUIREMENTS>` | Path to a `requirements.txt` file | | |
| `--tls-ca <CA_CERT>` | Custom CA certificate for TLS (useful for self-signed certificates) | | `INFLUXDB3_TLS_CA` |
| `--tls-ca <CA_CERT>` | Path to a custom TLS certificate authority (for self-signed or internal certificates) | | `INFLUXDB3_TLS_CA` |
| `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) | | `INFLUXDB3_TLS_NO_VERIFY` |
| `-h`, `--help` | Print help information | | |
| `--help-all` | Print detailed help information | | |
@ -59,9 +61,7 @@ influxdb3 install package \
pint pandas
```
Replace the following:
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "admin" %}} for your {{< product-name >}} instance
Replace {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}} with your {{% token-link "admin" %}} for your {{< product-name >}} instance.
### Install packages with a specific package manager

View File

@ -34,6 +34,7 @@ influxdb3 query [OPTIONS] --database <DATABASE_NAME> [QUERY]...
| `-o` | `--output` | Output query results to the specified file |
| `-f` | `--file` | A file that contains the query to execute |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
@ -46,6 +47,7 @@ You can use the following environment variables to set command options:
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Examples

View File

@ -19,6 +19,7 @@ influxdb3 show databases [OPTIONS]
| | `--show-deleted` | Include databases marked as deleted in the output |
| | `--format` | Output format (`pretty` _(default)_, `json`, `jsonl`, `csv`, or `parquet`) |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
@ -31,6 +32,7 @@ You can use the following environment variables to set command options:
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Examples
@ -62,7 +64,7 @@ influxdb3 show databases --show-deleted
influxdb3 show databases --format json
```
### List databases in Parquet-formatted output
### List databases in Parquet format output
[Parquet](https://parquet.apache.org/) is a binary format.
Use the `--output` option to specify the file where you want to save the Parquet data.

View File

@ -5,6 +5,7 @@ The `influxdb3 show nodes` command displays information about nodes in your {{<
<!--pytest.mark.skip-->
```bash
# Syntax
influxdb3 show nodes [OPTIONS]
```
@ -13,9 +14,11 @@ influxdb3 show nodes [OPTIONS]
| Option | | Description |
| :----- | :--------- | :--------------------------------------------------------------------------------------- |
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
| | `--format` | Output format: `pretty` (default), `json`, or `csv` |
| | `--format` | Output format (`pretty` _(default)_, `json`, `jsonl`, `csv`, or `parquet`) |
| | `--output` | Path to the file to save output to when using the `parquet` format |
| | `--token` | Authentication token |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
### Option environment variables
@ -26,6 +29,7 @@ You can use the following environment variables to set command options:
| :--------------------- | :-------- |
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Output
@ -44,6 +48,7 @@ The command displays the following information for each node:
- [List all nodes in pretty format](#list-all-nodes-in-pretty-format)
- [List nodes in JSON format](#list-nodes-in-json-format)
- [Export nodes data to Parquet format](#export-nodes-data-to-parquet-format)
- [List nodes on a remote server](#list-nodes-on-a-remote-server)
### List all nodes in pretty format
@ -106,6 +111,19 @@ The output is similar to the following:
]
```
### Export nodes data to Parquet format
[Parquet](https://parquet.apache.org/) is a binary format.
Use the `--output` option to specify the file where you want to save the Parquet data.
<!--pytest.mark.skip-->
```bash
influxdb3 show nodes \
--format parquet \
--output nodes-data.parquet
```
### List nodes on a remote server
<!--pytest.mark.skip-->

View File

@ -18,6 +18,7 @@ influxdb3 show plugins [OPTIONS]
| | `--format` | Output format (`pretty` _(default)_, `json`, `jsonl`, `csv`, or `parquet`) |
| | `--output` | Path to the file to save output to when using the `parquet` format |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
@ -29,6 +30,7 @@ You can use the following environment variables to set command options:
| :-------------------- | :-------- |
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_AUTH_TOKEN`| `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Output

View File

@ -5,6 +5,7 @@ The `influxdb3 show retention` command displays effective retention periods for
<!--pytest.mark.skip-->
```bash
# Syntax
influxdb3 show retention [OPTIONS]
```
@ -17,6 +18,7 @@ influxdb3 show retention [OPTIONS]
| | `--database` | Filter retention information by database name |
| | `--format` | Output format (`pretty` *(default)*, `json`, `jsonl`, `csv`, or `parquet`) |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
@ -29,31 +31,35 @@ You can use the following environment variables to set command options:
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Examples
- [Show retention for all tables](#show-retention-for-all-tables)
- [Show retention for a specific database](#show-retention-for-a-specific-database)
- [Show retention in JSON format](#show-retention-in-json-format)
- [Export retention data to Parquet format](#export-retention-data-to-parquet-format)
In the examples below, replace {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}} with your authentication token.
### Show retention for all tables
<!--pytest.mark.skip-->
```bash
```bash { placeholders="AUTH_TOKEN" }
influxdb3 show retention \
--host http://localhost:8181 \
--token YOUR_AUTH_TOKEN
--token AUTH_TOKEN
```
### Show retention for a specific database
<!--pytest.mark.skip-->
```bash
```bash { placeholders="AUTH_TOKEN" }
influxdb3 show retention \
--host http://localhost:8181 \
--token YOUR_AUTH_TOKEN \
--token AUTH_TOKEN \
--database mydb
```
@ -61,13 +67,29 @@ influxdb3 show retention \
<!--pytest.mark.skip-->
```bash
```bash { placeholders="AUTH_TOKEN" }
influxdb3 show retention \
--host http://localhost:8181 \
--token YOUR_AUTH_TOKEN \
--token AUTH_TOKEN \
--format json
```
### Export retention data to Parquet format
[Parquet](https://parquet.apache.org/) is a binary format.
When using the `parquet` format, data is written to standard output by default.
Use output redirection or the `--output` option to save the data to a file.
<!--pytest.mark.skip-->
```bash { placeholders="AUTH_TOKEN" }
influxdb3 show retention \
--host http://localhost:8181 \
--token AUTH_TOKEN \
--format parquet \
--output retention-data.parquet
```
## Output
The command displays the following information for each table:

View File

@ -17,6 +17,7 @@ influxdb3 show system --database <DATABASE_NAME> summary [OPTIONS]
| `-l` | `--limit` | Maximum number of entries from each table to display (default is `10`, `0` indicates no limit) |
| | `--format` | Output format (`pretty` _(default)_, `json`, `jsonl`, `csv`, or `parquet`) |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |

View File

@ -15,6 +15,7 @@ influxdb3 show system --database <DATABASE_NAME> table-list [OPTIONS]
| :----- | :----------- | :----------------------------------------------------------------------------------- |
| | `--format` | Output format (`pretty` _(default)_, `json`, `jsonl`, `csv`, or `parquet`) |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |

View File

@ -22,6 +22,7 @@ influxdb3 show system --database <DATABASE_NAME> table [OPTIONS] <SYSTEM_TABLE>
| `-s` | `--select` | Select specific columns from the system table |
| | `--format` | Output format (`pretty` _(default)_, `json`, `jsonl`, `csv`, or `parquet`) |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |

View File

@ -18,6 +18,7 @@ influxdb3 show tokens [OPTIONS]
| | `--format` | Output format (`pretty` _(default)_, `json`, `jsonl`, `csv`, or `parquet`) |
| | `--output` | Path to the file to save output to when using the `parquet` format |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
@ -29,6 +30,7 @@ You can use the following environment variables to set command options:
| :-------------------- | :-------- |
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_AUTH_TOKEN`| `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Examples

View File

@ -21,6 +21,7 @@ influxdb3 stop node [OPTIONS] --node-id <NODE_ID>
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
| | `--token` | Authentication token |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
### Option environment variables
@ -31,6 +32,7 @@ You can use the following environment variables to set command options:
| :--------------------- | :-------- |
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Use case

View File

@ -24,6 +24,7 @@ influxdb3 test schedule_plugin [OPTIONS] --database <DATABASE_NAME> <FILENAME>
| | `--schedule` | Cron schedule to simulate when testing the plugin <br>(default: `* * * * *`) |
| | `--cache-name` | Optional cache name to associate with the test |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Show basic help information |
| | `--help-all` | Show all available help options |
@ -38,6 +39,7 @@ You can use the following environment variables to set command options:
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_CA` | `--tls-ca` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Examples

View File

@ -25,6 +25,7 @@ influxdb3 test wal_plugin [OPTIONS] --database <DATABASE_NAME> <PLUGIN_NAME>
| | `--file` | Line protocol file to use as input |
| | `--input-arguments` | Map of string key-value pairs to use as plugin input arguments |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
@ -38,6 +39,7 @@ You can use the following environment variables to set command options:
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Examples

View File

@ -34,6 +34,7 @@ You can also set the database name using the `INFLUXDB3_DATABASE_NAME` environme
| `-d` | `--database` | The name of the database to update |
| | `--token` | Authentication token |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
{{% /hide-in %}}
@ -46,6 +47,7 @@ You can also set the database name using the `INFLUXDB3_DATABASE_NAME` environme
| | `--token` | Authentication token |
| `-r` | `--retention-period` | The retention period as a [duration](/influxdb3/version/reference/glossary/#duration) value (for example: `30d`, `24h`) or `none` to clear |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
{{% /show-in %}}
@ -60,6 +62,7 @@ You can use the following environment variables instead of providing CLI options
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_CA` | `--tls-ca` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
{{% show-in "enterprise" %}}
## Examples

View File

@ -23,6 +23,7 @@ influxdb3 update table [OPTIONS] --database <DATABASE_NAME> <TABLE_NAME>
| | `--token` | Authentication token |
| `-r` | `--retention-period` | The retention period as a [duration](/influxdb3/version/reference/glossary/#duration) value (for example: `30d`, `24h`) or `none` to clear |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
@ -36,6 +37,7 @@ You can use the following environment variables instead of providing CLI options
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_CA` | `--tls-ca` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Examples

View File

@ -32,6 +32,7 @@ influxdb3 update trigger [OPTIONS] \
| | `--error-behavior` | Error handling behavior: `log`, `retry`, or `disable` |
| | `--token` | Authentication token |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
@ -45,6 +46,7 @@ You can use the following environment variables instead of providing CLI options
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_CA` | `--tls-ca` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Examples

View File

@ -34,6 +34,7 @@ influxdb3 write [OPTIONS] --database <DATABASE_NAME> [LINE_PROTOCOL]...
| | `--no-sync` | Do not wait for WAL sync before acknowledging the write request |
| | `--precision` | Precision of data timestamps (`ns`, `us`, `ms`, or `s`) |
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
| | `--tls-no-verify` | Disable TLS certificate verification (**Not recommended in production**, useful for self-signed certificates) |
| `-h` | `--help` | Print help information |
| | `--help-all` | Print detailed help information |
@ -46,6 +47,7 @@ You can use the following environment variables to set command options:
| `INFLUXDB3_HOST_URL` | `--host` |
| `INFLUXDB3_DATABASE_NAME` | `--database` |
| `INFLUXDB3_AUTH_TOKEN` | `--token` |
| `INFLUXDB3_TLS_NO_VERIFY` | `--tls-no-verify` |
## Examples

View File

@ -54,10 +54,15 @@ If you haven't completed these steps, see [Set up {{% product-name %}}](/influxd
## Activate the processing engine
To activate the processing engine, include the `--plugin-dir <PLUGIN_DIR>` option
when starting the {{% product-name %}} server.
`PLUGIN_DIR` is your file system location for storing [plugin](#plugin) files for
the processing engine to run.
To activate the processing engine, include the `--plugin-dir <PLUGIN_DIR>` option when starting the {{% product-name %}} server.
`PLUGIN_DIR` is your file system location for storing [plugin](#plugin) files for the processing engine to run.
{{% show-in "enterprise" %}}
In a cluster, `--plugin-dir` automatically adds `process` mode to the node.
{{% /show-in %}}
> [!Note]
> **Docker and DEB/RPM installations**: The Processing Engine is already enabled—no additional configuration needed.
> To disable it, see [Enable and disable the Processing Engine](/influxdb3/version/reference/processing-engine/#enable-and-disable-the-processing-engine).
> [!Note]
> If you manually installed {{% product-name %}} from a tar archive, ensure the `influxdb3` binary and `python/` directory remain in the same parent directory. The install script handles this automatically.

View File

@ -36,7 +36,25 @@ Once you have all the prerequisites in place, follow these steps to implement th
## Set up the Processing Engine
To activate the Processing Engine, start your {{% product-name %}} server with the `--plugin-dir` flag. This flag tells InfluxDB where to load your plugin files.
The Processing Engine activates when `--plugin-dir` or `INFLUXDB3_PLUGIN_DIR` is configured.
{{% show-in "enterprise" %}}
In a cluster, this automatically adds `process` mode to the node.
{{% /show-in %}}
### Default behavior by deployment type
| Deployment | Default state | Configuration |
|:-----------|:--------------|:--------------|
| Docker images | **Enabled** | `INFLUXDB3_PLUGIN_DIR=/plugins` |
| DEB/RPM packages | **Enabled** | `plugin-dir="/var/lib/influxdb3/plugins"` |
| Binary/source | Disabled | No `plugin-dir` configured |
If you installed {{% product-name %}} using Docker or a DEB/RPM package, the Processing Engine is already enabled—skip to [Add a Processing Engine plugin](#add-a-processing-engine-plugin).
To disable the Processing Engine, see [Enable and disable the Processing Engine](/influxdb3/version/reference/processing-engine/#enable-and-disable-the-processing-engine).
### Enable the Processing Engine manually
To activate the Processing Engine when running from a binary or source build, start your {{% product-name %}} server with the `--plugin-dir` flag. This flag tells InfluxDB where to load your plugin files.
> [!Important]
> #### Keep the influxdb3 binary with its python directory

View File

@ -1,5 +1,98 @@
The Processing Engine is an embedded Python virtual machine that runs inside an {{% product-name %}} database server. It executes Python code in response to triggers and database events without requiring external application servers or middleware.
## Enable and disable the Processing Engine
The Processing Engine activates when [`--plugin-dir`](/influxdb3/version/reference/cli/influxdb3/serve/#plugin-dir) or `INFLUXDB3_PLUGIN_DIR` is configured.
When not configured, the Python environment and PyO3 bindings aren't initialized, and the server runs without Processing Engine functionality.
{{% show-in "enterprise" %}}
### Process mode and `--plugin-dir`
Setting `--plugin-dir` automatically adds `process` mode to any node, regardless of the [`--mode`](/influxdb3/enterprise/reference/config-options/#mode) setting.
You don't need to explicitly set `--mode=process` when `--plugin-dir` is configured.
Conversely, if you explicitly set `--mode=process`, you **must** also set `--plugin-dir`.
A node with `--mode=process` but no `--plugin-dir` won't function correctly.
For cluster node configuration examples, see [Configure process nodes](/influxdb3/enterprise/admin/clustering/#configure-process-nodes).
{{% /show-in %}}
### Default behavior by deployment type
| Deployment | Default state | Configuration |
|:-----------|:--------------|:--------------|
| Docker images | **Enabled** | `INFLUXDB3_PLUGIN_DIR=/plugins` |
| DEB/RPM packages | **Enabled** | `plugin-dir="/var/lib/influxdb3/plugins"` |
| Binary/source | Disabled | No `plugin-dir` configured |
### Disable in Docker deployments
Docker images set `INFLUXDB3_PLUGIN_DIR=/plugins` by default.
> [!Warning]
> Setting `INFLUXDB3_PLUGIN_DIR=""` (empty string) does **not** disable the Processing Engine.
> You must unset the variable, not set it to empty.
{{% show-in "enterprise" %}}
Use the `INFLUXDB3_UNSET_VARS` feature to unset inherited environment variables:
```bash
docker run -e INFLUXDB3_UNSET_VARS="INFLUXDB3_PLUGIN_DIR" influxdb:3-enterprise
```
`INFLUXDB3_UNSET_VARS` accepts one or more environment variable names (for example, a comma-separated list) and unsets them before the server starts.
Use it to clear any inherited variables that you don't want the InfluxDB 3 Enterprise container to see (for example, `INFLUXDB3_PLUGIN_DIR`, `INFLUXDB3_LOG_LEVEL`) when you can't modify the parent environment directly.
This is useful in orchestration environments (Kubernetes, Docker Compose) where removing an inherited variable isn't straightforward.
{{% /show-in %}}
{{% show-in "core" %}}
Use a custom entrypoint that unsets the variable:
```bash
docker run --entrypoint /bin/sh influxdb:3-core -c 'unset INFLUXDB3_PLUGIN_DIR && exec influxdb3 serve --object-store memory'
```
{{% /show-in %}}
### Disable in systemd deployments (DEB/RPM)
The post-install script sets `plugin-dir="/var/lib/influxdb3/plugins"` in the TOML configuration.
To disable the Processing Engine:
1. Edit the configuration file:
```bash
sudo nano /etc/influxdb3/influxdb3-{{< product-key >}}.conf
```
2. Comment out or remove the `plugin-dir` line:
```toml
# plugin-dir="/var/lib/influxdb3/plugins"
```
> [!Warning]
> Do not set `plugin-dir=""` (empty string)—you must remove or comment out the line.
3. Restart the service:
```bash
sudo systemctl restart influxdb3-{{< product-key >}}
```
> [!Note]
> The `/var/lib/influxdb3/plugins` directory can remain on disk.
> The Processing Engine only activates based on the `plugin-dir` configuration, not directory existence.
### Benefits of disabling
When the Processing Engine is disabled:
- The Python environment and PyO3 bindings are not initialized
- Plugin-related operations return a "No plugin directory configured" error
- The server runs with reduced resource usage
This is useful for deployments that don't require plugin functionality and want a minimal server footprint.
## How it works
### Architecture

View File

@ -61,11 +61,13 @@ during query execution and returned in query results.
The following numeric types are supported:
| SQL data type | Arrow data type | Description |
| :-------------- | :-------------- | :--------------------------- |
| BIGINT | INT64 | 64-bit signed integer |
| BIGINT UNSIGNED | UINT64 | 64-bit unsigned integer |
| DOUBLE | FLOAT64 | 64-bit floating-point number |
| SQL data type | Arrow data type | Description |
| :-------------- | :-------------- | :----------------------------------------- |
| BIGINT | INT64 | 64-bit signed integer |
| BIGINT UNSIGNED | UINT64 | 64-bit unsigned integer |
| DOUBLE          | FLOAT64         | 64-bit floating-point number (~15-16 digits) |
| FLOAT | FLOAT32 | 32-bit floating-point number (~7 digits) |
| REAL | FLOAT32 | 32-bit floating-point number (alias for FLOAT) |
### Integers
@ -101,11 +103,22 @@ Unsigned integer literals are comprised of an integer cast to the `BIGINT UNSIGN
### Floats
InfluxDB SQL supports the 64-bit double floating point values.
Floats can be a decimal point, decimal integer, or decimal fraction.
InfluxDB SQL supports both 32-bit (single precision) and 64-bit (double precision) floating-point values.
| Type | Precision | Significant Digits | Use Case |
| :----- | :-------- | :----------------- | :------- |
| FLOAT | 32-bit | ~7 digits | Memory-efficient storage when full precision isn't needed |
| DOUBLE | 64-bit | ~15-16 digits | Default for most numeric operations |
> [!Note]
> InfluxDB stores float field values as 64-bit (FLOAT64) internally.
> Casting to FLOAT (32-bit) may lose precision for values with more than ~7 significant digits.
> Unlike PostgreSQL, where FLOAT defaults to double precision, InfluxDB SQL treats FLOAT as single precision (32-bit).
##### Example float literals
Float literals are stored as 64-bit double precision:
```sql
23.8
-446.89
@ -113,6 +126,18 @@ Floats can be a decimal point, decimal integer, or decimal fraction.
0.033
```
##### Example float casting
```sql
-- Cast to 32-bit float (may lose precision)
SELECT 3.141592653589793::FLOAT;
-- Returns: 3.1415927 (rounded to ~7 significant digits)
-- Cast to 64-bit double (preserves precision)
SELECT 3.141592653589793::DOUBLE;
-- Returns: 3.141592653589793
```
## Date and time data types
InfluxDB SQL supports the following DATE/TIME data types:

Some files were not shown because too many files have changed in this diff Show More