Merge branch 'master' into pbarnett/small-explorer-quickstart-update

pull/6327/head
Jason Stirnaman 2025-08-21 11:31:54 -05:00 committed by GitHub
commit cecd706d0f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
69 changed files with 6114 additions and 3684 deletions


@ -0,0 +1,74 @@
# Lychee link checker configuration
# Generated by link-checker
[lychee]
# Performance settings
# Maximum number of retries for failed checks
max_retries = 3
# Timeout for each link check (in seconds)
timeout = 30
# Maximum number of concurrent checks
max_concurrency = 128
skip_code_blocks = false
# HTTP settings
# Identify the tool to external services
user_agent = "Mozilla/5.0 (compatible; link-checker)"
# Accept these HTTP status codes as valid
accept = [200, 201, 202, 203, 204, 206, 301, 302, 303, 304,
307, 308]
# Skip these URL schemes
scheme = ["file", "mailto", "tel"]
# Exclude patterns (regex supported)
exclude = [
# Localhost URLs
"^https?://localhost",
"^https?://127\\.0\\.0\\.1",
# Common CI/CD environments
"^https?://.*\\.local",
# Example domains used in documentation
"^https?://example\\.(com|org|net)",
# Placeholder URLs from code block filtering
"https://example.com/REMOVED_FROM_CODE_BLOCK",
"example.com/INLINE_CODE_URL",
# URLs that require authentication
"^https?://.*\\.slack\\.com",
"^https?://.*\\.atlassian\\.net",
# GitHub URLs (often fail due to rate limiting and bot
# detection)
"^https?://github\\.com",
# StackExchange network URLs (often block automated requests)
"^https?://.*\\.stackexchange\\.com",
"^https?://stackoverflow\\.com",
"^https?://.*\\.stackoverflow\\.com",
# Docker Hub URLs (rate limiting and bot detection)
"^https?://hub\\.docker\\.com",
# Common documentation placeholders
"YOUR_.*",
"REPLACE_.*",
"<.*>",
]
# Request headers
[headers]
# Add custom headers here if needed
# "Authorization" = "Bearer $GITHUB_TOKEN"
# Cache settings
cache = true
max_cache_age = "1d"
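As a quick local sanity check, a configuration like this can be generated and then passed to the checker. This is a minimal sketch; the generated config path and the HTML file shown are placeholders, not paths defined by this commit:

```bash
# Generate a starter configuration (sketch; output path is illustrative)
link-checker config

# Check a rendered page against the generated configuration
# (the config and HTML paths below are placeholders)
link-checker check \
  --config .ci/link-checker/lychee.toml \
  public/influxdb3/core/index.html
```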


@ -0,0 +1,116 @@
# Production Link Checker Configuration for InfluxData docs-v2
# Optimized for performance, reliability, and reduced false positives
[lychee]
# Performance settings
# Maximum number of retries for failed checks
max_retries = 3
# Timeout for each link check (in seconds)
timeout = 30
# Maximum number of concurrent checks
max_concurrency = 128
skip_code_blocks = false
# HTTP settings
# Identify the tool to external services
"User-Agent" = "Mozilla/5.0 (compatible; influxdata-link-checker/1.0; +https://github.com/influxdata/docs-v2)"
accept = [200, 201, 202, 203, 204, 206, 301, 302, 303, 304, 307, 308]
# Skip these URL schemes
scheme = ["mailto", "tel"]
# Performance optimizations
cache = true
max_cache_age = "1h"
# Retry configuration for reliability
include_verbatim = false
# Exclusion patterns for docs-v2 (regex supported)
exclude = [
# Localhost URLs
"^https?://localhost",
"^https?://127\\.0\\.0\\.1",
# Common CI/CD environments
"^https?://.*\\.local",
# Example domains used in documentation
"^https?://example\\.(com|org|net)",
# Placeholder URLs from code block filtering
"https://example.com/REMOVED_FROM_CODE_BLOCK",
"example.com/INLINE_CODE_URL",
# URLs that require authentication
"^https?://.*\\.slack\\.com",
"^https?://.*\\.atlassian\\.net",
# GitHub URLs (often fail due to rate limiting and bot
# detection)
"^https?://github\\.com",
# Social media URLs (often block bots)
"^https?://reddit\\.com",
"^https?://.*\\.reddit\\.com",
# StackExchange network URLs (often block automated requests)
"^https?://.*\\.stackexchange\\.com",
"^https?://stackoverflow\\.com",
"^https?://.*\\.stackoverflow\\.com",
# Docker Hub URLs (rate limiting and bot detection)
"^https?://hub\\.docker\\.com",
# InfluxData support URLs (certificate/SSL issues in CI)
"^https?://support\\.influxdata\\.com",
# Common documentation placeholders
"YOUR_.*",
"REPLACE_.*",
"<.*>",
]
# Request headers
[headers]
# Add custom headers here if needed
# "Authorization" = "Bearer $GITHUB_TOKEN"
"Accept" = "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
"Accept-Language" = "en-US,en;q=0.5"
"Accept-Encoding" = "gzip, deflate"
"DNT" = "1"
"Connection" = "keep-alive"
"Upgrade-Insecure-Requests" = "1"
[ci]
# CI-specific settings
[ci.github_actions]
output_format = "json"
create_annotations = true
fail_fast = false
max_annotations = 50 # Limit to avoid overwhelming PR comments
[ci.performance]
# Performance tuning for CI environment
parallel_requests = 32
connection_timeout = 10
read_timeout = 30
# Resource limits
max_memory_mb = 512
max_execution_time_minutes = 10
[reporting]
# Report configuration
include_fragments = false
verbose = false
no_progress = true # Disable progress bar in CI
# Summary settings
show_success_count = true
show_skipped_count = true
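In CI, this file is passed to the link-checker binary. A sketch of the invocation, mirroring the workflow later in this commit (the HTML path is a placeholder):

```bash
# Sketch: check a built page with the production configuration
./link-checker check \
  --config .ci/link-checker/production.lycherc.toml \
  --format json \
  --output link-check-results.json \
  public/influxdb3/core/get-started/index.html
```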


@ -1,103 +0,0 @@
name: 'Report Broken Links'
description: 'Downloads broken link reports, generates PR comment, and posts results'
inputs:
github-token:
description: 'GitHub token for posting comments'
required: false
default: ${{ github.token }}
max-links-per-file:
description: 'Maximum links to show per file in comment'
required: false
default: '20'
include-success-message:
description: 'Include success message when no broken links found'
required: false
default: 'true'
outputs:
has-broken-links:
description: 'Whether broken links were found (true/false)'
value: ${{ steps.generate-comment.outputs.has-broken-links }}
broken-link-count:
description: 'Number of broken links found'
value: ${{ steps.generate-comment.outputs.broken-link-count }}
runs:
using: 'composite'
steps:
- name: Download broken link reports
uses: actions/download-artifact@v4
with:
path: reports
continue-on-error: true
- name: Generate PR comment
id: generate-comment
run: |
# Generate comment using our script
node .github/scripts/comment-generator.js \
--max-links ${{ inputs.max-links-per-file }} \
${{ inputs.include-success-message == 'false' && '--no-success' || '' }} \
--output-file comment.md \
reports/ || echo "No reports found or errors occurred"
# Check if comment file was created and has content
if [[ -f comment.md && -s comment.md ]]; then
echo "comment-generated=true" >> $GITHUB_OUTPUT
# Count broken links by parsing the comment
broken_count=$(grep -o "Found [0-9]* broken link" comment.md | grep -o "[0-9]*" || echo "0")
echo "broken-link-count=$broken_count" >> $GITHUB_OUTPUT
# Check if there are actually broken links (not just a success comment)
if [[ "$broken_count" -gt 0 ]]; then
echo "has-broken-links=true" >> $GITHUB_OUTPUT
else
echo "has-broken-links=false" >> $GITHUB_OUTPUT
fi
else
echo "has-broken-links=false" >> $GITHUB_OUTPUT
echo "broken-link-count=0" >> $GITHUB_OUTPUT
echo "comment-generated=false" >> $GITHUB_OUTPUT
fi
shell: bash
- name: Post PR comment
if: steps.generate-comment.outputs.comment-generated == 'true'
uses: actions/github-script@v7
with:
github-token: ${{ inputs.github-token }}
script: |
const fs = require('fs');
if (fs.existsSync('comment.md')) {
const comment = fs.readFileSync('comment.md', 'utf8');
if (comment.trim()) {
await github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: comment
});
}
}
- name: Report validation results
run: |
has_broken_links="${{ steps.generate-comment.outputs.has-broken-links }}"
broken_count="${{ steps.generate-comment.outputs.broken-link-count }}"
if [ "$has_broken_links" = "true" ]; then
echo "::error::❌ Link validation failed: Found $broken_count broken link(s)"
echo "Check the PR comment for detailed broken link information"
exit 1
else
echo "::notice::✅ Link validation passed successfully"
echo "All links in the changed files are valid"
if [ "${{ steps.generate-comment.outputs.comment-generated }}" = "true" ]; then
echo "PR comment posted with validation summary and cache statistics"
fi
fi
shell: bash


@ -1,106 +0,0 @@
name: 'Validate Links'
description: 'Runs e2e browser-based link validation tests against Hugo site using Cypress'
inputs:
files:
description: 'Space-separated list of files to validate'
required: true
product-name:
description: 'Product name for reporting (optional)'
required: false
default: ''
cache-enabled:
description: 'Enable link validation caching'
required: false
default: 'true'
cache-key:
description: 'Cache key prefix for this validation run'
required: false
default: 'link-validation'
timeout:
description: 'Test timeout in seconds'
required: false
default: '900'
outputs:
failed:
description: 'Whether validation failed (true/false)'
value: ${{ steps.validate.outputs.failed }}
runs:
using: 'composite'
steps:
- name: Restore link validation cache
if: inputs.cache-enabled == 'true'
uses: actions/cache@v4
with:
path: .cache/link-validation
key: ${{ inputs.cache-key }}-${{ runner.os }}-${{ hashFiles('content/**/*.md', 'content/**/*.html') }}
restore-keys: |
${{ inputs.cache-key }}-${{ runner.os }}-
${{ inputs.cache-key }}-
- name: Run link validation
shell: bash
run: |
# Set CI-specific environment variables
export CI=true
export GITHUB_ACTIONS=true
export NODE_OPTIONS="--max-old-space-size=4096"
# Set test runner timeout for Hugo shutdown
export HUGO_SHUTDOWN_TIMEOUT=5000
# Add timeout to prevent hanging (timeout command syntax: timeout DURATION COMMAND)
timeout ${{ inputs.timeout }}s node cypress/support/run-e2e-specs.js ${{ inputs.files }} \
--spec cypress/e2e/content/article-links.cy.js || {
exit_code=$?
# Handle timeout specifically
if [ $exit_code -eq 124 ]; then
echo "::error::Link validation timed out after ${{ inputs.timeout }} seconds"
echo "::notice::This may indicate Hugo server startup issues or very slow link validation"
else
echo "::error::Link validation failed with exit code $exit_code"
fi
# Check for specific error patterns and logs (but don't dump full content)
if [ -f /tmp/hugo_server.log ]; then
echo "Hugo server log available for debugging"
fi
if [ -f hugo.log ]; then
echo "Additional Hugo log available for debugging"
fi
if [ -f /tmp/broken_links_report.json ]; then
# Only show summary, not full report (full report is uploaded as artifact)
broken_count=$(grep -o '"url":' /tmp/broken_links_report.json | wc -l || echo "0")
echo "Broken links report contains $broken_count entries"
fi
exit $exit_code
}
# Report success if we get here
echo "::notice::✅ Link validation completed successfully"
echo "No broken links detected in the tested files"
- name: Upload logs on failure
if: failure()
uses: actions/upload-artifact@v4
with:
name: validation-logs-${{ inputs.product-name && inputs.product-name || 'default' }}
path: |
hugo.log
/tmp/hugo_server.log
if-no-files-found: ignore
- name: Upload broken links report
if: always()
uses: actions/upload-artifact@v4
with:
name: broken-links-report${{ inputs.product-name && format('-{0}', inputs.product-name) || '' }}
path: /tmp/broken_links_report.json
if-no-files-found: ignore


@ -1,134 +1,282 @@
# Instructions for InfluxData Documentation
# InfluxData Documentation Repository (docs-v2)
## Purpose and scope
Always follow these instructions first and fall back to additional search and context gathering only when the information provided here is incomplete or found to be in error.
Help document InfluxData products by creating clear, accurate technical content with proper code examples, frontmatter, and formatting.
## Working Effectively
## Documentation structure
### Collaboration approach
Be a critical thinking partner, provide honest feedback, and identify potential issues.
### Bootstrap, Build, and Test the Repository
Execute these commands in order to set up a complete working environment:
1. **Install Node.js dependencies** (takes ~4 seconds):
```bash
# Skip Cypress binary download due to network restrictions in CI environments
CYPRESS_INSTALL_BINARY=0 yarn install
```
2. **Build the static site** (takes ~75 seconds, NEVER CANCEL - set timeout to 180+ seconds):
```bash
npx hugo --quiet
```
3. **Start the development server** (builds in ~92 seconds, NEVER CANCEL - set timeout to 150+ seconds):
```bash
npx hugo server --bind 0.0.0.0 --port 1313
```
- Access at: http://localhost:1313/
- Serves 5,359+ pages and 441 static files
- Auto-rebuilds on file changes
4. **Alternative Docker development setup** (use if local Hugo fails):
```bash
docker compose up local-dev
```
**Note**: May fail in restricted network environments due to Alpine package manager issues.
### Testing (CRITICAL: NEVER CANCEL long-running tests)
#### Code Block Testing (takes 5-15 minutes per product, NEVER CANCEL - set timeout to 30+ minutes):
```bash
# Build test environment first (takes ~30 seconds, may fail due to network restrictions)
docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest .
# Test all products (takes 15-45 minutes total)
yarn test:codeblocks:all
# Test specific products
yarn test:codeblocks:cloud
yarn test:codeblocks:v2
yarn test:codeblocks:telegraf
```
#### Link Validation (takes 1-5 minutes):
Runs automatically on pull requests.
Requires the **link-checker** binary from the repo release artifacts.
```bash
# Test specific files/products (faster)
# JSON format is required for accurate reporting
link-checker map content/influxdb3/core/**/*.md \
| link-checker check \
--config .ci/link-checker/production.lycherc.toml \
--format json
```
#### Style Linting (takes 30-60 seconds):
```bash
# Basic Vale linting
docker compose run -T vale content/**/*.md
# Product-specific linting with custom configurations
docker compose run -T vale --config=content/influxdb3/cloud-dedicated/.vale.ini --minAlertLevel=error content/influxdb3/cloud-dedicated/**/*.md
```
#### JavaScript and CSS Linting (takes 5-10 seconds):
```bash
yarn eslint assets/js/**/*.js
yarn prettier --check "**/*.{css,js,ts,jsx,tsx}"
```
### Pre-commit Hooks (automatically run, can be skipped if needed):
```bash
# Run all pre-commit checks manually
yarn lint
# Skip pre-commit hooks if necessary (not recommended)
git commit -m "message" --no-verify
```
## Validation Scenarios
Always test these scenarios after making changes to ensure full functionality:
### 1. Documentation Rendering Test
```bash
# Start Hugo server
npx hugo server --bind 0.0.0.0 --port 1313
# Verify key pages load correctly (200 status)
curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/influxdb3/core/
curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/influxdb/v2/
curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/telegraf/v1/
# Verify content contains expected elements
curl -s http://localhost:1313/influxdb3/core/ | grep -i "influxdb"
```
### 2. Build Output Validation
```bash
# Verify build completes successfully
npx hugo --quiet
# Check build output exists and has reasonable size (~529MB)
ls -la public/
du -sh public/
# Verify key files exist
file public/index.html
file public/influxdb3/core/index.html
```
### 3. Shortcode and Formatting Test
```bash
# Test shortcode examples page
yarn test:links content/example.md
```
## Repository Structure and Key Locations
### Content Organization
- **InfluxDB 3**: `/content/influxdb3/` (core, enterprise, cloud-dedicated, cloud-serverless, clustered, explorer)
- **InfluxDB v2**: `/content/influxdb/` (v2, cloud, enterprise_influxdb, v1)
- **Telegraf**: `/content/telegraf/v1/`
- **Other tools**: `/content/kapacitor/`, `/content/chronograf/`, `/content/flux/`
- **Shared content**: `/content/shared/`
- **Examples**: `/content/example.md` (comprehensive shortcode reference)
### Configuration Files
- **Hugo config**: `/config/_default/`
- **Package management**: `package.json`, `yarn.lock`
- **Docker**: `compose.yaml`, `Dockerfile.pytest`
- **Git hooks**: `lefthook.yml`
- **Testing**: `cypress.config.js`, `pytest.ini` (in test directories)
- **Linting**: `.vale.ini`, `.prettierrc.yaml`, `eslint.config.js`
### Build and Development
- **Hugo binary**: Available via `npx hugo` (version 0.148.2+)
- **Static assets**: `/assets/` (JavaScript, CSS, images)
- **Build output**: `/public/` (generated, ~529MB)
- **Layouts**: `/layouts/` (Hugo templates)
- **Data files**: `/data/` (YAML/JSON data for templates)
## Technology Stack
- **Static Site Generator**: Hugo (0.148.2+ extended)
- **Package Manager**: Yarn (1.22.22+) with Node.js (20.19.4+)
- **Testing Framework**:
- Pytest with pytest-codeblocks (for code examples)
- Cypress (for E2E tests)
- influxdata/docs-link-checker (for link validation)
- Vale (for style and writing guidelines)
- **Containerization**: Docker with Docker Compose
- **Linting**: ESLint, Prettier, Vale
- **Git Hooks**: Lefthook
## Common Tasks and Build Times
### Network Connectivity Issues
In restricted environments, these commands may fail due to external dependency downloads:
- `docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest .` (InfluxData repositories, HashiCorp repos)
- `docker compose up local-dev` (Alpine package manager)
- Cypress binary installation (use `CYPRESS_INSTALL_BINARY=0`)
Document these limitations but proceed with available functionality.
### Validation Commands for CI
Always run these before committing changes:
```bash
# Format and lint code
yarn prettier --write "**/*.{css,js,ts,jsx,tsx}"
yarn eslint assets/js/**/*.js
# Test Hugo build
npx hugo --quiet
# Test development server startup
timeout 150 npx hugo server --bind 0.0.0.0 --port 1313 &
sleep 120
curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/
pkill hugo
```
## Key Projects in This Codebase
1. **InfluxDB 3 Documentation** (Core, Enterprise, Clustered, Cloud Dedicated, Cloud Serverless, and InfluxDB 3 plugins for Core and Enterprise)
2. **InfluxDB 3 Explorer** (UI)
3. **InfluxDB v2 Documentation** (OSS and Cloud)
4. **InfluxDB v1 Documentation** (OSS and Enterprise)
5. **Telegraf Documentation** (agent and plugins)
6. **Supporting Tools Documentation** (Kapacitor, Chronograf, Flux)
7. **API Reference Documentation** (`/api-docs/`)
8. **Shared Documentation Components** (`/content/shared/`)
## Important Locations for Frequent Tasks
- **Shortcode reference**: `/content/example.md`
- **Contributing guide**: `CONTRIBUTING.md`
- **Testing guide**: `TESTING.md`
- **Product configurations**: `/data/products.yml`
- **Vale style rules**: `/.ci/vale/styles/`
- **GitHub workflows**: `/.github/workflows/`
- **Test scripts**: `/test/scripts/`
- **Hugo layouts and shortcodes**: `/layouts/`
- **CSS/JS assets**: `/assets/`
## Content Guidelines and Style
### Documentation Structure
- **Product version data**: `/data/products.yml`
- **InfluxData products**:
- InfluxDB 3 Explorer
- Documentation source path: `/content/influxdb3/explorer`
- Published for the web: https://docs.influxdata.com/influxdb3/explorer/
- InfluxDB 3 Core
- Documentation source path: `/content/influxdb3/core`
- Published for the web: https://docs.influxdata.com/influxdb3/core/
- Code repositories: https://github.com/influxdata/influxdb, https://github.com/influxdata/influxdb3_core
- InfluxDB 3 Enterprise
- Documentation source path: `/content/influxdb3/enterprise`
- Published for the web: https://docs.influxdata.com/influxdb3/enterprise/
- Code repositories: https://github.com/influxdata/influxdb, https://github.com/influxdata/influxdb3_enterprise
- InfluxDB Cloud Dedicated
- Documentation source path: `/content/influxdb3/cloud-dedicated`
- Published for the web: https://docs.influxdata.com/influxdb3/cloud-dedicated/
- Code repository: https://github.com/influxdata/influxdb
- InfluxDB Cloud Serverless
- Documentation source path: `/content/influxdb3/cloud-serverless`
- Published for the web: https://docs.influxdata.com/influxdb3/cloud-serverless/
- Code repository: https://github.com/influxdata/idpe
- InfluxDB Cloud v2 (TSM)
- Documentation source path: `/content/influxdb/cloud`
- Published for the web: https://docs.influxdata.com/influxdb/cloud/
- Code repository: https://github.com/influxdata/idpe
- InfluxDB Clustered
- Documentation source path: `/content/influxdb3/clustered`
- Published for the web: https://docs.influxdata.com/influxdb3/clustered/
- Code repository: https://github.com/influxdata/influxdb
- InfluxDB Enterprise v1 (1.x)
- Documentation source path: `/content/influxdb/enterprise_influxdb`
- Published for the web: https://docs.influxdata.com/enterprise_influxdb/v1/
- Code repository: https://github.com/influxdata/influxdb
- InfluxDB OSS 1.x
- Documentation source path: `/content/influxdb/v1`
- Published for the web: https://docs.influxdata.com/influxdb/v1/
- Code repository: https://github.com/influxdata/influxdb
- InfluxDB OSS 2.x
- Documentation source path: `/content/influxdb/v2`
- Published for the web: https://docs.influxdata.com/influxdb/v2/
- Code repository: https://github.com/influxdata/influxdb
- Telegraf
- Documentation source path: `/content/telegraf/v1`
- Published for the web: https://docs.influxdata.com/telegraf/v1/
- Code repository: https://github.com/influxdata/telegraf
- Kapacitor
- Documentation source path: `/content/kapacitor/v1`
- Published for the web: https://docs.influxdata.com/kapacitor/v1/
- Code repository: https://github.com/influxdata/kapacitor
- Chronograf
- Documentation source path: `/content/chronograf/v1`
- Published for the web: https://docs.influxdata.com/chronograf/v1/
- Code repository: https://github.com/influxdata/chronograf
- Flux
- Documentation source path: `/content/flux/v0`
- Published for the web: https://docs.influxdata.com/flux/v0/
- Code repository: https://github.com/influxdata/flux
- **InfluxData-supported tools**:
- InfluxDB API client libraries
- Code repositories: https://github.com/InfluxCommunity
- InfluxDB 3 processing engine plugins
- Code repository: https://github.com/influxdata/influxdb3_plugins
- **Query Languages**: SQL, InfluxQL, Flux (use appropriate language per product version)
- **Documentation Site**: https://docs.influxdata.com
- **Repository**: https://github.com/influxdata/docs-v2
- **Framework**: Hugo static site generator
## Abbreviations and shortcuts
- `gdd`: Google Developer Documentation style
- `3core`: InfluxDB 3 Core
- `3ent`: InfluxDB 3 Enterprise
## Style guidelines
### Style Guidelines
- Follow Google Developer Documentation style guidelines
- For API references, follow YouTube Data API style
- Use semantic line feeds (one sentence per line)
- Format code examples to fit within 80 characters
- Command line examples:
- Should be formatted as code blocks
- Should use long options (e.g., `--option` instead of `-o`)
- Use cURL for API examples (see the sketch after this list)
- Format to fit within 80 characters
- Should use `--data-urlencode` for query parameters
- Should use `--header` for headers
- Use only h2-h6 headings in content (h1 comes from frontmatter title properties)
- Use sentence case for headings
- Use GitHub callout syntax
- Use long options in command line examples (`--option` instead of `-o`)
- Use GitHub callout syntax for notes and warnings
- Image naming: `project/version-context-description.png`
- Use appropriate product names and versions consistently
- Follow InfluxData vocabulary guidelines
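For example, a minimal cURL sketch that follows these conventions (the host, database name, and token values are placeholders, not values defined in this repository):

```bash
# Query the v1-compatible API using long options and URL-encoded parameters
curl --get "http://localhost:8086/query" \
  --header "Authorization: Token API_TOKEN" \
  --data-urlencode "db=DATABASE_NAME" \
  --data-urlencode "q=SELECT * FROM home WHERE time > now() - 1h"
```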
## Markdown and shortcodes
### Markdown and Shortcodes
- Include proper frontmatter for Markdown pages in `content/**/*.md` (except for
shared content files in `content/shared/`):
Include proper frontmatter for all content pages:
```yaml
title: # Page title (h1)
seotitle: # SEO title
list_title: # Title for article lists
description: # SEO description
menu:
product_version:
weight: # Page order (1-99, 101-199, etc.)
```
- Follow the shortcode examples in `content/example.md` and the documentation
for docs-v2 contributors in `CONTRIBUTING.md`
- Use provided shortcodes correctly:
- Notes/warnings: `{{% note %}}`, `{{% warn %}}`
- Product-specific: `{{% enterprise %}}`, `{{% cloud %}}`
- Tabbed content: `{{< tabs-wrapper >}}`, `{{% tabs %}}`, `{{% tab-content %}}`
- Tabbed content for code examples (without additional text): `{{< code-tabs-wrapper >}}`, `{{% code-tabs %}}`, `{{% code-tab-content %}}`
- Version links: `{{< latest >}}`, `{{< latest-patch >}}`
- API endpoints: `{{< api-endpoint >}}`
- Required elements: `{{< req >}}`
- Navigation: `{{< page-nav >}}`
- Diagrams: `{{< diagram >}}`, `{{< filesystem-diagram >}}`
## Code examples and testing
Key shortcodes (see `/content/example.md` for full reference):
- Provide complete, working examples with proper testing annotations:
- Notes/warnings (GitHub syntax): `> [!Note]`, `> [!Warning]`
- Tabbed content: `{{< tabs-wrapper >}}`, `{{% tabs %}}`, `{{% tab-content %}}`
- Code examples: `{{< code-tabs-wrapper >}}`, `{{% code-tabs %}}`, `{{% code-tab-content %}}`
- Required elements: `{{< req >}}`
- API endpoints: `{{< api-endpoint >}}`
### Code Examples and Testing
Provide complete, working examples with pytest annotations:
```python
print("Hello, world!")
```
@ -140,67 +288,21 @@
```
Hello, world!
```
- CLI command example:
```sh
influx query 'from(bucket:"example") |> range(start:-1h)'
```
<!--pytest-codeblocks:expected-output-->
```
Table: keys: [_start, _stop, _field, _measurement]
_start:time _stop:time _field:string _measurement:string _time:time _value:float
------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ----------------------------
```
## Troubleshooting Common Issues
1. **"Pytest collected 0 items"**: Use `python` (not `py`) for code block language identifiers
2. **Hugo build errors**: Check `/config/_default/` for configuration issues
3. **Docker build failures**: Expected in restricted networks - document and continue with local Hugo
4. **Cypress installation failures**: Use `CYPRESS_INSTALL_BINARY=0 yarn install`
5. **Link validation slow**: Use file-specific testing: `yarn test:links content/specific-file.md`
6. **Vale linting errors**: Check `.ci/vale/styles/config/vocabularies` for accepted/rejected terms
- Include necessary environment variables
- Show proper credential handling for authenticated commands
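For example, a minimal sketch of credential handling with an environment variable (the token value and bucket name are placeholders):

```bash
# Store the token in an environment variable instead of the command itself
export INFLUX_TOKEN=API_TOKEN

# Pass the token explicitly so the example works in a clean shell
influx query --token "${INFLUX_TOKEN}" \
  'from(bucket:"example") |> range(start:-1h)'
```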
## API documentation
- `/api-docs` contains OpenAPI spec files used for API reference documentation
- Follow OpenAPI specification patterns
- Match REST API examples to current implementation
- Include complete request/response examples
- Document required headers and authentication
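A sketch of a complete request/response pair in that spirit (the endpoint, org, bucket, and token are placeholders; the 204 status reflects the documented v2 write behavior):

```bash
# Request: write a single line protocol point
curl --request POST \
  "http://localhost:8086/api/v2/write?org=ORG_NAME&bucket=BUCKET_NAME" \
  --header "Authorization: Token API_TOKEN" \
  --header "Content-Type: text/plain; charset=utf-8" \
  --data-binary 'home,room=Kitchen temp=72.0'

# Response: HTTP/1.1 204 No Content (success, no body)
```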
## Versioning and product differentiation
- Clearly distinguish between different InfluxDB versions (1.x, 2.x, 3.x)
- Use correct terminology for each product variant
- Apply appropriate UI descriptions and screenshots
- Reference appropriate query language per version
## Development tools
- Vale.sh linter for style checking
- Configuration file: `.vale.ini`
- Docker for local development and testing
- pytest and pytest-codeblocks for validating code examples
- Use cypress for testing documentation UI and links
- Prettier for code formatting
- ESLint for JavaScript and TypeScript linting
- Lefthook (NPM package) for managing pre-commit hooks for quality assurance
## Code style
- Use modern JavaScript (ES6+) syntax
## Related repositories
- **Internal documentation assistance requests**: https://github.com/influxdata/DAR/issues
## Additional instruction files
## Additional Instruction Files
For specific workflows and content types, also refer to:
- **InfluxDB 3 code placeholders**: `.github/instructions/influxdb3-code-placeholders.instructions.md` - Guidelines for placeholder formatting, descriptions, and shortcode usage in InfluxDB 3 documentation
- **Contributing guidelines**: `.github/instructions/contributing.instructions.md` - Detailed style guidelines, shortcode usage, frontmatter requirements, and development workflows
- **Content-specific instructions**: Check `.github/instructions/` directory for specialized guidelines covering specific documentation patterns and requirements
- **InfluxDB 3 code placeholders**: `.github/instructions/influxdb3-code-placeholders.instructions.md`
- **Contributing guidelines**: `.github/instructions/contributing.instructions.md`
- **Content-specific instructions**: Check `.github/instructions/` directory
## Integration with specialized instructions
When working on InfluxDB 3 documentation (Core/Enterprise), prioritize the placeholder guidelines from `influxdb3-code-placeholders.instructions.md`.
For general documentation structure, shortcodes, and development workflows, follow the comprehensive guidelines in `contributing.instructions.md`.
Remember: This is a large documentation site with complex build processes. Patience with build times is essential, and NEVER CANCEL long-running operations.

.github/workflows/pr-link-check.yml

@ -0,0 +1,241 @@
name: Link Check PR Changes
on:
pull_request:
paths:
- 'content/**/*.md'
- 'data/**/*.yml'
- 'layouts/**/*.html'
types: [opened, synchronize, reopened]
jobs:
link-check:
name: Check links in affected files
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Detect content changes
id: detect
run: |
echo "🔍 Detecting changes between ${{ github.base_ref }} and ${{ github.sha }}"
# For PRs, use the GitHub Files API to get changed files
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
echo "Using GitHub API to detect PR changes..."
curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
"https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.number }}/files" \
| jq -r '.[].filename' > all_changed_files.txt
else
echo "Using git diff to detect changes..."
git diff --name-only ${{ github.event.before }}..${{ github.sha }} > all_changed_files.txt
fi
# Filter for content markdown files
CHANGED_FILES=$(grep '^content/.*\.md$' all_changed_files.txt || true)
echo "📁 All changed files:"
cat all_changed_files.txt
echo ""
echo "📝 Content markdown files:"
echo "$CHANGED_FILES"
if [[ -n "$CHANGED_FILES" ]]; then
echo "✅ Found $(echo "$CHANGED_FILES" | wc -l) changed content file(s)"
echo "has-changes=true" >> $GITHUB_OUTPUT
echo "changed-content<<EOF" >> $GITHUB_OUTPUT
echo "$CHANGED_FILES" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
# Check if any shared content files were modified
SHARED_CHANGES=$(echo "$CHANGED_FILES" | grep '^content/shared/' || true)
if [[ -n "$SHARED_CHANGES" ]]; then
echo "has-shared-content=true" >> $GITHUB_OUTPUT
echo "🔄 Detected shared content changes: $SHARED_CHANGES"
else
echo "has-shared-content=false" >> $GITHUB_OUTPUT
fi
else
echo "❌ No content changes detected"
echo "has-changes=false" >> $GITHUB_OUTPUT
echo "has-shared-content=false" >> $GITHUB_OUTPUT
fi
- name: Skip if no content changes
if: steps.detect.outputs.has-changes == 'false'
run: |
echo "No content changes detected in this PR - skipping link check"
echo "✅ **No content changes detected** - link check skipped" >> $GITHUB_STEP_SUMMARY
- name: Setup Node.js
if: steps.detect.outputs.has-changes == 'true'
uses: actions/setup-node@v4
with:
node-version: '20'
cache: 'yarn'
- name: Install dependencies
if: steps.detect.outputs.has-changes == 'true'
run: yarn install --frozen-lockfile
- name: Build Hugo site
if: steps.detect.outputs.has-changes == 'true'
run: npx hugo --minify
- name: Download link-checker binary
if: steps.detect.outputs.has-changes == 'true'
run: |
echo "Downloading link-checker binary from docs-v2 releases..."
# Download from docs-v2's own releases (always accessible)
curl -L -H "Accept: application/vnd.github+json" \
-H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
-o link-checker-info.json \
"https://api.github.com/repos/influxdata/docs-v2/releases/tags/link-checker-v1.2.2"
# Extract download URL for linux binary
DOWNLOAD_URL=$(jq -r '.assets[] | select(.name | test("link-checker.*linux")) | .url' link-checker-info.json)
if [[ "$DOWNLOAD_URL" == "null" || -z "$DOWNLOAD_URL" ]]; then
echo "❌ No linux binary found in release"
echo "Available assets:"
jq -r '.assets[].name' link-checker-info.json
exit 1
fi
echo "📥 Downloading: $DOWNLOAD_URL"
curl -L -H "Accept: application/octet-stream" \
-H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
-o link-checker "$DOWNLOAD_URL"
chmod +x link-checker
./link-checker --version
- name: Verify link checker config exists
if: steps.detect.outputs.has-changes == 'true'
run: |
if [[ ! -f .ci/link-checker/production.lycherc.toml ]]; then
echo "❌ Configuration file .ci/link-checker/production.lycherc.toml not found"
echo "Please copy production.lycherc.toml from docs-tooling/link-checker/"
exit 1
fi
echo "✅ Using configuration: .ci/link-checker/production.lycherc.toml"
- name: Map changed content to public files
if: steps.detect.outputs.has-changes == 'true'
id: mapping
run: |
echo "Mapping changed content files to public HTML files..."
# Create temporary file with changed content files
echo "${{ steps.detect.outputs.changed-content }}" > changed-files.txt
# Map content files to public files
PUBLIC_FILES=$(cat changed-files.txt | xargs -r ./link-checker map --existing-only)
if [[ -n "$PUBLIC_FILES" ]]; then
echo "Found affected public files:"
echo "$PUBLIC_FILES"
echo "public-files<<EOF" >> $GITHUB_OUTPUT
echo "$PUBLIC_FILES" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
# Count files for summary
FILE_COUNT=$(echo "$PUBLIC_FILES" | wc -l)
echo "file-count=$FILE_COUNT" >> $GITHUB_OUTPUT
else
echo "No public files found to check"
echo "public-files=" >> $GITHUB_OUTPUT
echo "file-count=0" >> $GITHUB_OUTPUT
fi
- name: Run link checker
if: steps.detect.outputs.has-changes == 'true' && steps.mapping.outputs.public-files != ''
id: link-check
run: |
echo "Checking links in ${{ steps.mapping.outputs.file-count }} affected files..."
# Create temporary file with public files list
echo "${{ steps.mapping.outputs.public-files }}" > public-files.txt
# Run link checker with detailed JSON output
set +e # Don't fail immediately on error
cat public-files.txt | xargs -r ./link-checker check \
--config .ci/link-checker/production.lycherc.toml \
--format json \
--output link-check-results.json
EXIT_CODE=$?
if [[ -f link-check-results.json ]]; then
# Parse results
BROKEN_COUNT=$(jq -r '.summary.broken_count // 0' link-check-results.json)
TOTAL_COUNT=$(jq -r '.summary.total_checked // 0' link-check-results.json)
SUCCESS_RATE=$(jq -r '.summary.success_rate // 0' link-check-results.json)
echo "broken-count=$BROKEN_COUNT" >> $GITHUB_OUTPUT
echo "total-count=$TOTAL_COUNT" >> $GITHUB_OUTPUT
echo "success-rate=$SUCCESS_RATE" >> $GITHUB_OUTPUT
if [[ $BROKEN_COUNT -gt 0 ]]; then
echo "❌ Found $BROKEN_COUNT broken links out of $TOTAL_COUNT total links"
echo "check-result=failed" >> $GITHUB_OUTPUT
else
echo "✅ All $TOTAL_COUNT links are valid"
echo "check-result=passed" >> $GITHUB_OUTPUT
fi
else
echo "❌ Link check failed to generate results"
echo "check-result=error" >> $GITHUB_OUTPUT
fi
exit $EXIT_CODE
- name: Process and report results
if: always() && steps.detect.outputs.has-changes == 'true' && steps.mapping.outputs.public-files != ''
run: |
if [[ -f link-check-results.json ]]; then
# Create detailed error annotations for broken links
if [[ "${{ steps.link-check.outputs.check-result }}" == "failed" ]]; then
echo "Creating error annotations for broken links..."
jq -r '.broken_links[]? |
"::error file=\(.file // "unknown"),line=\(.line // 1)::Broken link: \(.url) - \(.error // "Unknown error")"' \
link-check-results.json || true
fi
# Generate summary comment
cat >> $GITHUB_STEP_SUMMARY << 'EOF'
## Link Check Results
**Files Checked:** ${{ steps.mapping.outputs.file-count }}
**Total Links:** ${{ steps.link-check.outputs.total-count }}
**Broken Links:** ${{ steps.link-check.outputs.broken-count }}
**Success Rate:** ${{ steps.link-check.outputs.success-rate }}%
EOF
if [[ "${{ steps.link-check.outputs.check-result }}" == "failed" ]]; then
echo "❌ **Link check failed** - see annotations above for details" >> $GITHUB_STEP_SUMMARY
else
echo "✅ **All links are valid**" >> $GITHUB_STEP_SUMMARY
fi
else
echo "⚠️ **Link check could not complete** - no results file generated" >> $GITHUB_STEP_SUMMARY
fi
- name: Upload detailed results
if: always() && steps.detect.outputs.has-changes == 'true' && steps.mapping.outputs.public-files != ''
uses: actions/upload-artifact@v4
with:
name: link-check-results
path: |
link-check-results.json
changed-files.txt
public-files.txt
retention-days: 30


@ -1,148 +0,0 @@
# PR Link Validation Workflow
# Provides basic and parallel workflows
# with smart strategy selection based on change volume
name: PR Link Validation
on:
pull_request:
paths:
- 'content/**/*.md'
- 'content/**/*.html'
- 'api-docs/**/*.yml'
- 'assets/**/*.js'
- 'layouts/**/*.html'
jobs:
# TEMPORARILY DISABLED - Remove this condition to re-enable link validation
disabled-check:
if: false # Set to true to re-enable the workflow
runs-on: ubuntu-latest
steps:
- run: echo "Link validation is temporarily disabled"
setup:
name: Setup and Strategy Detection
runs-on: ubuntu-latest
if: false # TEMPORARILY DISABLED - Remove this condition to re-enable
outputs:
strategy: ${{ steps.determine-strategy.outputs.strategy }}
has-changes: ${{ steps.determine-strategy.outputs.has-changes }}
matrix: ${{ steps.determine-strategy.outputs.matrix }}
all-files: ${{ steps.changed-files.outputs.all_changed_files }}
cache-hit-rate: ${{ steps.determine-strategy.outputs.cache-hit-rate }}
cache-hits: ${{ steps.determine-strategy.outputs.cache-hits }}
cache-misses: ${{ steps.determine-strategy.outputs.cache-misses }}
original-file-count: ${{ steps.determine-strategy.outputs.original-file-count }}
validation-file-count: ${{ steps.determine-strategy.outputs.validation-file-count }}
cache-message: ${{ steps.determine-strategy.outputs.message }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup docs environment
uses: ./.github/actions/setup-docs-env
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v41
with:
files: |
content/**/*.md
content/**/*.html
api-docs/**/*.yml
- name: Determine validation strategy
id: determine-strategy
run: |
if [[ "${{ steps.changed-files.outputs.any_changed }}" != "true" ]]; then
echo "No relevant files changed"
echo "strategy=none" >> $GITHUB_OUTPUT
echo "has-changes=false" >> $GITHUB_OUTPUT
echo "matrix={\"include\":[]}" >> $GITHUB_OUTPUT
echo "cache-hit-rate=100" >> $GITHUB_OUTPUT
echo "cache-hits=0" >> $GITHUB_OUTPUT
echo "cache-misses=0" >> $GITHUB_OUTPUT
exit 0
fi
# Use our matrix generator with cache awareness
files="${{ steps.changed-files.outputs.all_changed_files }}"
echo "🔍 Analyzing ${files} for cache-aware validation..."
# Generate matrix and capture outputs
result=$(node .github/scripts/matrix-generator.js \
--min-files-parallel 10 \
--max-concurrent 5 \
--output-format github \
$files)
# Parse all outputs from matrix generator
while IFS='=' read -r key value; do
case "$key" in
strategy|has-changes|cache-hit-rate|cache-hits|cache-misses|original-file-count|validation-file-count|message)
echo "$key=$value" >> $GITHUB_OUTPUT
;;
matrix)
echo "matrix=$value" >> $GITHUB_OUTPUT
;;
esac
done <<< "$result"
# Extract values for logging
strategy=$(echo "$result" | grep "^strategy=" | cut -d'=' -f2)
cache_hit_rate=$(echo "$result" | grep "^cache-hit-rate=" | cut -d'=' -f2)
cache_message=$(echo "$result" | grep "^message=" | cut -d'=' -f2-)
echo "📊 Selected strategy: $strategy"
if [[ -n "$cache_hit_rate" ]]; then
echo "📈 Cache hit rate: ${cache_hit_rate}%"
fi
if [[ -n "$cache_message" ]]; then
echo "$cache_message"
fi
validate:
name: ${{ matrix.name }}
needs: setup
if: false # TEMPORARILY DISABLED - Original condition: needs.setup.outputs.has-changes == 'true'
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix: ${{ fromJson(needs.setup.outputs.matrix) }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup docs environment
uses: ./.github/actions/setup-docs-env
- name: Validate links
uses: ./.github/actions/validate-links
with:
files: ${{ matrix.files || needs.setup.outputs.all-files }}
product-name: ${{ matrix.product }}
cache-enabled: ${{ matrix.cacheEnabled || 'true' }}
cache-key: link-validation-${{ hashFiles(matrix.files || needs.setup.outputs.all-files) }}
timeout: 900
report:
name: Report Results
needs: [setup, validate]
if: false # TEMPORARILY DISABLED - Original condition: always() && needs.setup.outputs.has-changes == 'true'
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup docs environment
uses: ./.github/actions/setup-docs-env
- name: Report broken links
uses: ./.github/actions/report-broken-links
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
max-links-per-file: 20


@ -0,0 +1,68 @@
name: Sync Link Checker Binary from docs-tooling
on:
workflow_dispatch:
inputs:
version:
description: 'Link checker version to sync (e.g., v1.2.2)'
required: true
type: string
jobs:
sync-binary:
name: Sync link-checker binary from docs-tooling
runs-on: ubuntu-latest
steps:
- name: Download binary from docs-tooling release
run: |
echo "Downloading link-checker ${{ inputs.version }} from docs-tooling..."
# Download binary from docs-tooling release
curl -L -H "Accept: application/octet-stream" \
-H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
-o link-checker-linux-x86_64 \
"https://github.com/influxdata/docs-tooling/releases/download/link-checker-${{ inputs.version }}/link-checker-linux-x86_64"
# Download checksums
curl -L -H "Accept: application/octet-stream" \
-H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
-o checksums.txt \
"https://github.com/influxdata/docs-tooling/releases/download/link-checker-${{ inputs.version }}/checksums.txt"
# Verify downloads
ls -la link-checker-linux-x86_64 checksums.txt
- name: Create docs-v2 release
run: |
echo "Creating link-checker-${{ inputs.version }} release in docs-v2..."
gh release create \
--title "Link Checker Binary ${{ inputs.version }}" \
--notes "Link validation tooling binary for docs-v2 GitHub Actions workflows.
This binary is distributed from the docs-tooling repository release link-checker-${{ inputs.version }}.
### Usage in GitHub Actions
The binary is automatically downloaded by docs-v2 workflows for link validation.
### Manual Usage
\`\`\`bash
# Download and make executable
curl -L -o link-checker https://github.com/influxdata/docs-v2/releases/download/link-checker-${{ inputs.version }}/link-checker-linux-x86_64
chmod +x link-checker
# Verify installation
./link-checker --version
\`\`\`
### Changes in ${{ inputs.version }}
See the [docs-tooling release](https://github.com/influxdata/docs-tooling/releases/tag/link-checker-${{ inputs.version }}) for detailed changelog." \
link-checker-${{ inputs.version }} \
link-checker-linux-x86_64 \
checksums.txt
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.gitignore

@ -3,11 +3,14 @@
public
.*.swp
node_modules
package-lock.json
.config*
**/.env*
*.log
/resources
.hugo_build.lock
# Content generation
/content/influxdb*/**/api/**/*.html
!api-docs/**/.config.yml
/api-docs/redoc-static.html*
@ -16,18 +19,22 @@ node_modules
!telegraf-build/templates
!telegraf-build/scripts
!telegraf-build/README.md
# CI/CD tool files
/cypress/downloads/*
/cypress/screenshots/*
/cypress/videos/*
.lycheecache
test-results.xml
/influxdb3cli-build-scripts/content
tmp
# IDE files
.vscode/*
!.vscode/launch.json
.idea
**/config.toml
package-lock.json
tmp
# Context files for LLMs and AI tools
# User context files for AI assistant tools
.context/*
!.context/README.md

.vscode/settings.json

@ -14,17 +14,6 @@
},
"vale.valeCLI.config": "${workspaceFolder}/.vale.ini",
"vale.valeCLI.minAlertLevel": "warning",
"github.copilot.chat.codeGeneration.useInstructionFiles": true,
"github.copilot.chat.codeGeneration.instructions": [
{
"file": "${workspaceFolder}/.github/copilot-instructions.md",
}
],
"github.copilot.chat.pullRequestDescriptionGeneration.instructions": [
{
"file": "${workspaceFolder}/.github/copilot-instructions.md",
}
],
"cSpell.words": [
"influxctl"
]


@ -121,96 +121,251 @@ Potential causes:
# This is ignored
```
## Link Validation Testing
## Link Validation with Link-Checker
Link validation uses Cypress for e2e browser-based testing against the Hugo site to ensure all internal and external links work correctly.
Link validation uses the `link-checker` tool to validate internal and external links in documentation files.
### Basic Usage
#### Installation
**Option 1: Build from source (macOS/local development)**
For local development on macOS, build the link-checker from source:
```bash
# Clone and build link-checker
git clone https://github.com/influxdata/docs-tooling.git
cd docs-tooling/link-checker
cargo build --release

# Copy binary to your PATH or use directly
cp target/release/link-checker /usr/local/bin/
# OR use directly: ./target/release/link-checker
```

```bash
# Test specific files
yarn test:links content/influxdb3/core/**/*.md

# Test all links (may take a long time)
yarn test:links

# Test by product (may take a long time)
yarn test:links:v3
yarn test:links:v2
yarn test:links:telegraf
yarn test:links:chronograf
yarn test:links:kapacitor
```
### How Link Validation Works
The tests:
1. Start a Hugo development server
2. Navigate to each page in a browser
3. Check all links for validity
4. Report broken or invalid links
**Option 2: Download pre-built binary (GitHub Actions/Linux)**
The link-checker binary is distributed via docs-v2 releases for reliable access from GitHub Actions workflows:
```bash
# Download Linux binary from docs-v2 releases
curl -L -o link-checker \
https://github.com/influxdata/docs-v2/releases/download/link-checker-v1.0.0/link-checker-linux-x86_64
chmod +x link-checker
# Verify installation
./link-checker --version
```
> [!Note]
> Pre-built binaries are currently Linux x86_64 only. For macOS development, use Option 1 to build from source.
#### Binary Release Process
**For maintainers:** To create a new link-checker release in docs-v2:
1. **Create release in docs-tooling** (builds and releases binary automatically):
```bash
cd docs-tooling
git tag link-checker-v1.2.x
git push origin link-checker-v1.2.x
```
2. **Manually distribute to docs-v2** (required due to private repository access):
```bash
# Download binary from docs-tooling release
curl -L -H "Authorization: Bearer $(gh auth token)" \
-o link-checker-linux-x86_64 \
"https://github.com/influxdata/docs-tooling/releases/download/link-checker-v1.2.x/link-checker-linux-x86_64"
curl -L -H "Authorization: Bearer $(gh auth token)" \
-o checksums.txt \
"https://github.com/influxdata/docs-tooling/releases/download/link-checker-v1.2.x/checksums.txt"
# Create docs-v2 release
gh release create \
--repo influxdata/docs-v2 \
--title "Link Checker Binary v1.2.x" \
--notes "Link validation tooling binary for docs-v2 GitHub Actions workflows." \
link-checker-v1.2.x \
link-checker-linux-x86_64 \
checksums.txt
```
3. **Update workflow reference** (if needed):
```bash
# Update .github/workflows/pr-link-check.yml line 98 to use new version
sed -i 's/link-checker-v[0-9.]*/link-checker-v1.2.x/' .github/workflows/pr-link-check.yml
```
> [!Note]
> The manual distribution is required because docs-tooling is a private repository and the default GitHub token doesn't have cross-repository access for private repos.
#### Core Commands
```bash
# Map content files to public HTML files
link-checker map content/path/to/file.md
# Check links in HTML files
link-checker check public/path/to/file.html
# Generate configuration file
link-checker config
```
### Link Resolution Behavior
The link-checker automatically handles relative link resolution based on the input type:
**Local Files → Local Resolution**
```bash
# When checking local files, relative links resolve to the local filesystem
link-checker check public/influxdb3/core/admin/scale-cluster/index.html
# Relative link /influxdb3/clustered/tags/kubernetes/ becomes:
# → /path/to/public/influxdb3/clustered/tags/kubernetes/index.html
```
**URLs → Production Resolution**
```bash
# When checking URLs, relative links resolve to the production site
link-checker check https://docs.influxdata.com/influxdb3/core/admin/scale-cluster/
# Relative link /influxdb3/clustered/tags/kubernetes/ becomes:
# → https://docs.influxdata.com/influxdb3/clustered/tags/kubernetes/
```
**Why This Matters**
- **Testing new content**: Tag pages generated locally will be found when testing local files
- **Production validation**: Production URLs validate against the live site
- **No false positives**: New content won't appear broken when testing locally before deployment
### Content Mapping Workflows
#### Scenario 1: Map and check InfluxDB 3 Core content
```bash
# Map Markdown files to HTML
link-checker map content/influxdb3/core/get-started/
# Check links in mapped HTML files
link-checker check public/influxdb3/core/get-started/
```
#### Scenario 2: Map and check shared CLI content
```bash
# Map shared content files
link-checker map content/shared/influxdb3-cli/
# Check the mapped output files
# (link-checker map outputs the HTML file paths)
link-checker map content/shared/influxdb3-cli/ | \
xargs link-checker check
```
#### Scenario 3: Direct HTML checking
```bash
# Check HTML files directly without mapping
link-checker check public/influxdb3/core/get-started/
```
#### Combined workflow for changed files
```bash
# Check only files changed in the last commit
git diff --name-only HEAD~1 HEAD | grep '\.md$' | \
xargs link-checker map | \
xargs link-checker check
```
### Configuration Options
#### Local usage (default configuration)
```bash
# Uses default settings or test.lycherc.toml if present
link-checker check public/influxdb3/core/get-started/
```
#### Production usage (GitHub Actions)
```bash
# Use production configuration with comprehensive exclusions
link-checker check \
--config .ci/link-checker/production.lycherc.toml \
public/influxdb3/core/get-started/
```
### GitHub Actions Integration
#### Composite Action
The `.github/actions/validate-links/` composite action provides reusable link validation:
**Automated Integration (docs-v2)**
The docs-v2 repository includes automated link checking for pull requests:
- **Trigger**: Runs automatically on PRs that modify content files
- **Binary distribution**: Downloads latest pre-built binary from docs-v2 releases
- **Smart detection**: Only checks files affected by PR changes
- **Production config**: Uses optimized settings with exclusions for GitHub, social media, etc.
- **Results reporting**: Broken links reported as GitHub annotations with detailed summaries
The workflow automatically:
1. Detects content changes in PRs using GitHub Files API
2. Downloads latest link-checker binary from docs-v2 releases
3. Builds Hugo site and maps changed content to public HTML files
4. Runs link checking with production configuration
5. Reports results with annotations and step summaries
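For example, step 1 above corresponds to a GitHub Files API call along these lines (the PR number is a placeholder; the filter matches the pattern used in the workflow):

```bash
# List files changed in a PR and keep only content Markdown files
curl -s -H "Authorization: token $GITHUB_TOKEN" \
  "https://api.github.com/repos/influxdata/docs-v2/pulls/123/files" \
  | jq -r '.[].filename' \
  | grep '^content/.*\.md$'
```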
**Manual Integration (other repositories)**
For other repositories, you can integrate link checking manually:
```yaml
- uses: ./.github/actions/validate-links
  with:
    files: "content/influxdb3/core/file.md content/influxdb/v2/file2.md"
    product-name: "core"
    cache-enabled: "true"
    cache-key: "link-validation"
```

```yaml
name: Link Check
on:
  pull_request:
    paths:
      - 'content/**/*.md'

jobs:
  link-check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Download link-checker
        run: |
          curl -L -o link-checker \
            https://github.com/influxdata/docs-tooling/releases/latest/download/link-checker-linux-x86_64
          chmod +x link-checker

      - name: Build Hugo site
        run: |
          npm install
          npx hugo --minify

      - name: Check changed files
        run: |
          git diff --name-only origin/main HEAD | \
            grep '\.md$' | \
            xargs ./link-checker map | \
            xargs ./link-checker check \
              --config .ci/link-checker/production.lycherc.toml
```
#### Matrix Generator
The `.github/scripts/matrix-generator.js` script provides intelligent strategy selection:
- **Sequential validation**: For small changes (< 10 files) or single-product changes
- **Parallel validation**: For large changes across multiple products (up to 5 concurrent jobs)
Test locally:
```bash
node .github/scripts/matrix-generator.js content/influxdb3/core/file1.md content/influxdb/v2/file2.md
```
Configuration options:
- `--max-concurrent <n>`: Maximum parallel jobs (default: 5)
- `--force-sequential`: Force sequential execution
- `--min-files-parallel <n>`: Minimum files for parallel (default: 10)
### Caching for Link Validation
Link validation supports caching to improve performance:
- **Cache location**: `.cache/link-validation/` (local), GitHub Actions cache (CI)
- **Cache keys**: Based on content file hashes
- **TTL**: 30 days by default, configurable
#### Cache Configuration Options
```bash
# Use 7-day cache for more frequent validation
yarn test:links --cache-ttl=7 content/influxdb3/**/*.md
# Use 1-day cache via environment variable
LINK_CACHE_TTL_DAYS=1 yarn test:links content/**/*.md
# Clean up expired cache entries
node .github/scripts/incremental-validator.js --cleanup
```
#### How Caching Works
- **Cache key**: Based on file path + content hash (file changes invalidate cache immediately)
- **External links**: Cached for the TTL period since URLs rarely change
- **Internal links**: Effectively cached until file content changes
- **Automatic cleanup**: Expired entries are removed on access and via `--cleanup`
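A rough sketch of the cache-key idea described above (illustrative only, not the actual implementation; the file path is a placeholder):

```bash
# Combine the file path with a hash of its content so any edit to the file
# immediately invalidates its cached validation results.
file="content/influxdb3/core/get-started/_index.md"
key="${file}:$(sha256sum "${file}" | cut -d' ' -f1)"
echo "cache key: ${key}"
```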
## Style Linting (Vale)
Style linting uses [Vale](https://vale.sh/) to enforce documentation writing standards, branding guidelines, and vocabulary consistency.


@ -66,7 +66,22 @@ paths:
schema:
type: string
required: true
description: Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy.
description: |
The database to write to.
**Database targeting:** In Cloud Dedicated, databases can be named using the `database_name/retention_policy_name` convention for InfluxQL compatibility. Cloud Dedicated does not use DBRP mappings. The db and rp parameters are used to construct the target database name following this naming convention.
**Auto-creation behavior:** Cloud Dedicated requires databases to be created before writing data. The v1 `/write` API does not automatically create databases. If the specified
database does not exist, the write request will fail.
Authentication: Requires a valid API token with _write_ permissions for the target database.
### Related
- [Write data to InfluxDB Cloud Dedicated](/influxdb3/cloud-dedicated/write-data/)
- [Manage databases in InfluxDB Cloud Dedicated](/influxdb3/cloud-dedicated/admin/databases/)
- [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention)
- [InfluxQL data retention policy mapping differences](/influxdb3/cloud-serverless/guides/prototype-evaluation/#influxql-data-retention-policy-mapping-differences)
- in: query
name: rp
schema:
@ -137,6 +152,160 @@ paths:
schema:
$ref: '#/components/schemas/Error'
/query:
get:
operationId: GetQueryV1
tags:
- Query
summary: Query using the InfluxDB v1 HTTP API
parameters:
- $ref: '#/components/parameters/TraceSpan'
- $ref: '#/components/parameters/AuthUserV1'
- $ref: '#/components/parameters/AuthPassV1'
- in: header
name: Accept
schema:
type: string
description: Specifies how query results should be encoded in the response. **Note:** With `application/csv`, query results include epoch timestamps instead of RFC3339 timestamps.
default: application/json
enum:
- application/json
- application/csv
- text/csv
- application/x-msgpack
- in: header
name: Accept-Encoding
description: The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand.
schema:
type: string
description: Specifies that the query response in the body should be encoded with gzip or not encoded with identity.
default: identity
enum:
- gzip
- identity
- in: query
name: chunked
description: |
If true, the response is divided into chunks of size `chunk_size`.
schema:
type: boolean
default: false
- in: query
name: chunk_size
description: |
The number of records that will go into a chunk.
This parameter is only used if `chunked=true`.
schema:
type: integer
default: 10000
- in: query
name: db
schema:
type: string
required: true
description: The database to query from.
- in: query
name: pretty
description: |
If true, the JSON response is formatted in a human-readable format.
schema:
type: boolean
default: false
- in: query
name: q
description: Defines the InfluxQL query to run.
required: true
schema:
type: string
- in: query
name: rp
schema:
type: string
description: |
The retention policy name for InfluxQL compatibility.
Optional parameter that, when combined with the db parameter, forms the complete database name to query. In InfluxDB Cloud Dedicated, databases can be named using the
database_name/retention_policy_name convention for InfluxQL compatibility.
When a request specifies both `db` and `rp`, Cloud Dedicated combines them as `db/rp` to target the database--for example:
- If `db=mydb` and `rp=autogen`, the query targets the database named `mydb/autogen`
- If only `db=mydb` is provided (no `rp`), the query targets the database named `mydb`
Unlike InfluxDB v1 and Cloud Serverless, Cloud Dedicated does not use DBRP mappings or separate retention policy objects. This parameter exists solely for v1 API
compatibility and database naming conventions.
_Note: The retention policy name does not control data retention in Cloud Dedicated. Data retention is determined by the database's **retention period** setting._
### Related
- [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention)
- [InfluxQL data retention policy mapping differences](/influxdb3/cloud-serverless/guides/prototype-evaluation/#influxql-data-retention-policy-mapping-differences)
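To illustrate (a sketch with placeholder host and token), an InfluxQL query that targets `mydb/autogen` can combine `db` and `rp` like this:

```sh
curl --get "https://cluster-id.a.influxdb.io/query" \
  --header "Authorization: Bearer DATABASE_TOKEN" \
  --data-urlencode "db=mydb" \
  --data-urlencode "rp=autogen" \
  --data-urlencode "q=SELECT * FROM home WHERE time > now() - 1h"
```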
- name: epoch
description: |
Formats timestamps as unix (epoch) timestamps with the specified precision
instead of RFC3339 timestamps with nanosecond precision.
in: query
schema:
type: string
enum:
- h
- m
- s
- ms
- u
- µ
- ns
responses:
'200':
description: Query results
headers:
Content-Encoding:
description: The Content-Encoding header indicates which encodings (usually compression algorithms) were applied to the response body.
schema:
type: string
description: Indicates whether the response body is compressed with gzip (`gzip`) or uncompressed (`identity`).
default: identity
enum:
- gzip
- identity
Trace-Id:
description: The Trace-Id header reports the request's trace ID, if one was generated.
schema:
type: string
description: Specifies the request's trace ID.
content:
application/csv:
schema:
$ref: '#/components/schemas/InfluxQLCSVResponse'
text/csv:
schema:
$ref: '#/components/schemas/InfluxQLCSVResponse'
application/json:
schema:
$ref: '#/components/schemas/InfluxQLResponse'
examples:
influxql-chunk_size_2:
value: |
{"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:55Z",90,"1"],["2016-05-19T18:37:56Z",90,"1"]],"partial":true}],"partial":true}]}
{"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:57Z",90,"1"],["2016-05-19T18:37:58Z",90,"1"]]}]}]}
application/x-msgpack:
schema:
type: string
format: binary
'429':
description: Token is temporarily over quota. The Retry-After header describes when to try the read again.
headers:
Retry-After:
description: A non-negative decimal integer indicating the seconds to delay after the response is received.
schema:
type: integer
format: int32
default:
description: Error processing query
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
post:
operationId: PostQueryV1
tags:
@ -148,6 +317,83 @@ paths:
text/plain:
schema:
type: string
application/json:
schema:
type: object
properties:
db:
type: string
description: |
The database name for InfluxQL queries.
Required parameter that specifies the database to query.
In InfluxDB Cloud Dedicated, this can be either:
- A simple database name (for example, `mydb`)
- The database portion of a `database_name/retention_policy_name` naming convention (used together with the `rp` parameter)
When used alone, `db` specifies the complete database name to query. When used with the `rp` parameter, they combine to form the full database name as `db/rp`--for example, if `db=mydb` and `rp=autogen`, the query targets the database named `mydb/autogen`.
Unlike InfluxDB Cloud Serverless, Cloud Dedicated does not use DBRP mappings. The database name directly corresponds to an existing database in your Cloud Dedicated cluster.
Examples:
- `db=mydb` - queries the database named `mydb`
- `db=mydb` with `rp=autogen` - queries the database named `mydb/autogen`
_Note: The specified database must exist in your Cloud Dedicated cluster. Queries will fail if the database does not exist._
### Related
- [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention)
- [Migrate data from InfluxDB 1.x to Cloud Dedicated](/influxdb3/cloud-dedicated/guides/migrate-data/migrate-1x-to-cloud-dedicated/)
- [InfluxQL data retention policy mapping differences between InfluxDB Cloud Dedicated and Cloud Serverless](/influxdb3/cloud-serverless/guides/prototype-evaluation/#influxql-data-retention-policy-mapping-differences)
rp:
description: |
The retention policy name for InfluxQL compatibility.
Optional parameter that, when combined with the `db` parameter, forms the complete database name to query. In InfluxDB Cloud Dedicated, databases can be named using the
`database_name/retention_policy_name` convention for InfluxQL compatibility.
When a request specifies both `db` and `rp`, Cloud Dedicated combines them as `db/rp` to target the database--for example:
- If `db=mydb` and `rp=autogen`, the query targets the database named `mydb/autogen`
- If only `db=mydb` is provided (no `rp`), the query targets the database named `mydb`
Unlike InfluxDB v1 and Cloud Serverless, Cloud Dedicated does not use DBRP mappings or separate retention policy objects. This parameter exists solely for v1 API
compatibility and database naming conventions.
_Note: The retention policy name does not control data retention in Cloud Dedicated. Data retention is determined by the database's **retention period** setting._
### Related
- [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention)
- [Migrate data from InfluxDB 1.x to Cloud Dedicated](/influxdb3/cloud-dedicated/guides/migrate-data/migrate-1x-to-cloud-dedicated/)
- [InfluxQL data retention policy mapping differences](/influxdb3/cloud-serverless/guides/prototype-evaluation/#influxql-data-retention-policy-mapping-differences)
type: string
q:
description: Defines the InfluxQL query to run.
type: string
chunked:
description: |
If true, the response is divided into chunks of size `chunk_size`.
type: boolean
chunk_size:
description: |
The number of records that will go into a chunk.
This parameter is only used if `chunked=true`.
type: integer
default: 10000
epoch:
description: |
A unix timestamp precision.
type: string
enum:
- h
- m
- s
- ms
- u
- µ
- ns
parameters:
- $ref: '#/components/parameters/TraceSpan'
- $ref: '#/components/parameters/AuthUserV1'
@ -184,7 +430,7 @@ paths:
schema:
type: string
required: true
description: Bucket to query.
description: Database to query.
- in: query
name: rp
schema:

View File

@ -65,7 +65,7 @@ paths:
schema:
type: string
required: true
description: Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy.
description: Database to write to. If none exists, InfluxDB creates a database with a default 3-day retention policy.
- in: query
name: rp
schema:
@ -136,6 +136,188 @@ paths:
schema:
$ref: '#/components/schemas/Error'
/query:
get:
operationId: GetQueryV1
tags:
- Query
summary: Query using the InfluxDB v1 HTTP API
parameters:
- $ref: '#/components/parameters/TraceSpan'
- $ref: '#/components/parameters/AuthUserV1'
- $ref: '#/components/parameters/AuthPassV1'
- in: header
name: Accept
schema:
type: string
description: Specifies how query results should be encoded in the response. **Note:** With `application/csv`, query results include epoch timestamps instead of RFC3339 timestamps.
default: application/json
enum:
- application/json
- application/csv
- text/csv
- application/x-msgpack
- in: header
name: Accept-Encoding
description: The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand.
schema:
type: string
description: Indicates whether the query response body should be compressed with gzip (`gzip`) or left uncompressed (`identity`).
default: identity
enum:
- gzip
- identity
- in: query
name: chunked
description: |
If true, the response is divided into chunks of size `chunk_size`.
schema:
type: boolean
default: false
- in: query
name: chunk_size
description: |
The number of records that will go into a chunk.
This parameter is only used if `chunked=true`.
schema:
type: integer
default: 10000
- in: query
name: db
schema:
type: string
required: true
description: |
The database name for InfluxQL queries
Required parameter that specifies the database to query via DBRP (Database Retention Policy) mapping. In Cloud Serverless, this parameter is used together with DBRP
mappings to identify which bucket to query.
The `db` parameter (optionally combined with `rp`) must have an existing DBRP mapping that points to a bucket. Without a valid DBRP mapping, queries will fail with an
authorization error.
**DBRP mapping requirements:**
- A DBRP mapping must exist before querying
- Mappings can be created automatically when writing data with the v1 API (if your token has permissions)
- Mappings can be created manually using the InfluxDB CLI or API
### Examples
- `db=mydb` - uses the default DBRP mapping for `mydb`
- `db=mydb` with `rp=weekly` - uses the DBRP mapping for `mydb/weekly`
_Note: Unlike the v1 `/write` endpoint which can auto-create buckets and mappings, the `/query` endpoint requires pre-existing DBRP mappings. The actual data is stored in and
queried from the bucket that the DBRP mapping points to._
### Related
- [Use the InfluxDB v1 query API and InfluxQL in Cloud Serverless](/influxdb3/cloud-serverless/query-data/execute-queries/v1-http/)
- [Map v1 databases and retention policies to buckets in Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v1/#map-v1-databases-and-retention-policies-to-buckets)
- [Migrate from InfluxDB 1.x to Cloud Serverless](/influxdb3/cloud-serverless/guides/migrate-data/migrate-1x-to-serverless/)
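For example, a mapping for `mydb/weekly` could be created manually with the `influx` CLI before querying (the bucket ID shown is a placeholder); this is a sketch, not a required workflow:

```sh
influx v1 dbrp create \
  --db mydb \
  --rp weekly \
  --bucket-id 12ab34cd56ef \
  --default
```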
- in: query
name: pretty
description: |
If true, the JSON response is formatted in a human-readable format.
schema:
type: boolean
default: false
- in: query
name: q
description: Defines the InfluxQL query to run.
required: true
schema:
type: string
- in: query
name: rp
schema:
type: string
description: |
The retention policy name for InfluxQL queries
Optional parameter that specifies the retention policy to use when querying data with InfluxQL. In Cloud Serverless, this parameter works with DBRP (Database Retention
Policy) mappings to identify the target bucket.
When provided together with the `db` parameter, Cloud Serverless uses the DBRP mapping to determine which bucket to query. The combination of `db` and `rp` must have an
existing DBRP mapping that points to a bucket. If no `rp` is specified, Cloud Serverless uses the default retention policy mapping for the database.
Requirements: A DBRP mapping must exist for the db/rp combination before you can query data. DBRP mappings can be created:
- Automatically when writing data with the v1 API (if your token has sufficient permissions)
- Manually using the InfluxDB CLI or API
Example: If `db=mydb` and `rp=weekly`, the query uses the DBRP mapping for `mydb/weekly` to determine which bucket to query.
_Note: The retention policy name is used only for DBRP mapping. Actual data retention is controlled by the target bucket's retention period setting, not by the retention
policy name._
### Related
- [Use the InfluxDB v1 query API and InfluxQL in Cloud Serverless](/influxdb3/cloud-serverless/query-data/execute-queries/v1-http/)
- [Map v1 databases and retention policies to buckets in Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v1/#map-v1-databases-and-retention-policies-to-buckets)
- [Migrate from InfluxDB 1.x to Cloud Serverless](/influxdb3/cloud-serverless/guides/migrate-data/migrate-1x-to-serverless/)
- name: epoch
description: |
Formats timestamps as unix (epoch) timestamps with the specified precision
instead of RFC3339 timestamps with nanosecond precision.
in: query
schema:
type: string
enum:
- h
- m
- s
- ms
- u
- µ
- ns
responses:
'200':
description: Query results
headers:
Content-Encoding:
description: The Content-Encoding header indicates which encodings (usually compression algorithms) were applied to the response body.
schema:
type: string
description: Indicates whether the response body is compressed with gzip (`gzip`) or uncompressed (`identity`).
default: identity
enum:
- gzip
- identity
Trace-Id:
description: The Trace-Id header reports the request's trace ID, if one was generated.
schema:
type: string
description: Specifies the request's trace ID.
content:
application/csv:
schema:
$ref: '#/components/schemas/InfluxQLCSVResponse'
text/csv:
schema:
$ref: '#/components/schemas/InfluxQLCSVResponse'
application/json:
schema:
$ref: '#/components/schemas/InfluxQLResponse'
examples:
influxql-chunk_size_2:
value: |
{"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:55Z",90,"1"],["2016-05-19T18:37:56Z",90,"1"]],"partial":true}],"partial":true}]}
{"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:57Z",90,"1"],["2016-05-19T18:37:58Z",90,"1"]]}]}]}
application/x-msgpack:
schema:
type: string
format: binary
'429':
description: Token is temporarily over quota. The Retry-After header describes when to try the read again.
headers:
Retry-After:
description: A non-negative decimal integer indicating the seconds to delay after the response is received.
schema:
type: integer
format: int32
default:
description: Error processing query
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
post:
operationId: PostQueryV1
tags:
@ -147,6 +329,87 @@ paths:
text/plain:
schema:
type: string
application/json:
schema:
type: object
properties:
db:
type: string
description: |
The database name for InfluxQL queries
Required parameter that specifies the database to query via DBRP (Database Retention Policy) mapping. In Cloud Serverless, this parameter is used together with DBRP
mappings to identify which bucket to query.
The `db` parameter (optionally combined with `rp`) must have an existing DBRP mapping that points to a bucket. Without a valid DBRP mapping, queries will fail with an
authorization error.
**DBRP mapping requirements:**
- A DBRP mapping must exist before querying
- Mappings can be created automatically when writing data with the v1 API (if your token has permissions)
- Mappings can be created manually using the InfluxDB CLI or API
### Examples
- `db=mydb` - uses the default DBRP mapping for `mydb`
- `db=mydb` with `rp=weekly` - uses the DBRP mapping for `mydb/weekly`
_Note: Unlike the v1 `/write` endpoint which can auto-create buckets and mappings, the `/query` endpoint requires pre-existing DBRP mappings. The actual data is stored in and
queried from the bucket that the DBRP mapping points to._
### Related
- [Execute InfluxQL queries using the v1 API](/influxdb3/cloud-serverless/query-data/execute-queries/influxql/api/v1-http/)
- [Map v1 databases and retention policies to buckets in Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v1/#map-v1-databases-and-retention-policies-to-buckets)
- [Manage DBRP mappings in Cloud Serverless](/influxdb3/cloud-serverless/admin/dbrp/)
rp:
description: |
The retention policy name for InfluxQL queries
Optional parameter that specifies the retention policy to use when querying data with InfluxQL. In Cloud Serverless, this parameter works with DBRP (Database Retention
Policy) mappings to identify the target bucket.
When provided together with the `db` parameter, Cloud Serverless uses the DBRP mapping to determine which bucket to query. The combination of `db` and `rp` must have an
existing DBRP mapping that points to a bucket. If no `rp` is specified, Cloud Serverless uses the default retention policy mapping for the database.
Requirements: A DBRP mapping must exist for the db/rp combination before you can query data. DBRP mappings can be created:
- Automatically when writing data with the v1 API (if your token has sufficient permissions)
- Manually using the InfluxDB CLI or API
Example: If `db=mydb` and `rp=weekly`, the query uses the DBRP mapping for `mydb/weekly` to determine which bucket to query.
_Note: The retention policy name is used only for DBRP mapping. Actual data retention is controlled by the target bucket's retention period setting, not by the retention policy name._
### Related
- [Execute InfluxQL queries using the v1 API](/influxdb3/cloud-serverless/query-data/execute-queries/influxql/api/v1-http/)
- [Map v1 databases and retention policies to buckets in Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v1/#map-v1-databases-and-retention-policies-to-buckets)
- [Manage DBRP mappings in Cloud Serverless](/influxdb3/cloud-serverless/admin/dbrp/)
type: string
q:
description: Defines the InfluxQL query to run.
type: string
chunked:
description: |
If true, the response is divided into chunks of size `chunk_size`.
type: boolean
chunk_size:
description: |
The number of records that will go into a chunk.
This parameter is only used if `chunked=true`.
type: integer
default: 10000
epoch:
description: |
A unix timestamp precision.
type: string
enum:
- h
- m
- s
- ms
- u
- µ
- ns
parameters:
- $ref: '#/components/parameters/TraceSpan'
- $ref: '#/components/parameters/AuthUserV1'

View File

@ -65,7 +65,23 @@ paths:
schema:
type: string
required: true
description: Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy.
description: |
The database to write to.
**Database targeting:** In InfluxDB Clustered, databases can be named using the `database_name/retention_policy_name` convention for InfluxQL compatibility. InfluxDB Clustered does not use DBRP mappings. The `db` and `rp` parameters are used to construct the target database name following this naming convention.
**Auto-creation behavior:** InfluxDB Clustered requires databases to be created before writing data. The v1 `/write` API does not automatically create databases. If the specified
database does not exist, the write request will fail.
Authentication: Requires a valid API token with _write_ permissions for the target database.
### Related
- [Write data to InfluxDB Clustered](/influxdb3/clustered/write-data/)
- [Use the InfluxDB v1 API with InfluxDB Clustered](/influxdb3/clustered/guides/api-compatibility/v1/)
- [Manage databases in InfluxDB Clustered](/influxdb3/clustered/admin/databases/)
- [InfluxQL DBRP naming convention in InfluxDB Clustered](/influxdb3/clustered/admin/databases/create/#influxql-dbrp-naming-convention)
- [Migrate data from InfluxDB v1 to InfluxDB Clustered](/influxdb3/clustered/guides/migrate-data/migrate-1x-to-clustered/)
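As a sketch (the cluster host and token are placeholders), a v1 write that targets a Clustered database named `mydb/autogen` might look like this:

```sh
curl --request POST "https://cluster-host.com/write?db=mydb&rp=autogen&precision=s" \
  --header "Authorization: Bearer DATABASE_TOKEN" \
  --data-binary 'home,room=kitchen temp=21.5 1717430400'
```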
- in: query
name: rp
schema:
@ -136,6 +152,141 @@ paths:
schema:
$ref: '#/components/schemas/Error'
/query:
get:
operationId: GetQueryV1
tags:
- Query
summary: Query using the InfluxDB v1 HTTP API
parameters:
- $ref: '#/components/parameters/TraceSpan'
- $ref: '#/components/parameters/AuthUserV1'
- $ref: '#/components/parameters/AuthPassV1'
- in: header
name: Accept
schema:
type: string
description: Specifies how query results should be encoded in the response. **Note:** With `application/csv`, query results include epoch timestamps instead of RFC3339 timestamps.
default: application/json
enum:
- application/json
- application/csv
- text/csv
- application/x-msgpack
- in: header
name: Accept-Encoding
description: The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand.
schema:
type: string
description: Indicates whether the query response body should be compressed with gzip (`gzip`) or left uncompressed (`identity`).
default: identity
enum:
- gzip
- identity
- in: query
name: chunked
description: |
If true, the response is divided into chunks of size `chunk_size`.
schema:
type: boolean
default: false
- in: query
name: chunk_size
description: |
The number of records that will go into a chunk.
This parameter is only used if `chunked=true`.
schema:
type: integer
default: 10000
- in: query
name: db
schema:
type: string
required: true
description: The database to query from.
- in: query
name: pretty
description: |
If true, the JSON response is formatted in a human-readable format.
schema:
type: boolean
default: false
- in: query
name: q
description: Defines the InfluxQL query to run.
required: true
schema:
type: string
- in: query
name: rp
schema:
type: string
description: Retention policy name.
- name: epoch
description: |
Formats timestamps as unix (epoch) timestamps with the specified precision
instead of RFC3339 timestamps with nanosecond precision.
in: query
schema:
type: string
enum:
- h
- m
- s
- ms
- u
- µ
- ns
responses:
'200':
description: Query results
headers:
Content-Encoding:
description: The Content-Encoding header indicates which encodings (usually compression algorithms) were applied to the response body.
schema:
type: string
description: Indicates whether the response body is compressed with gzip (`gzip`) or uncompressed (`identity`).
default: identity
enum:
- gzip
- identity
Trace-Id:
description: The Trace-Id header reports the request's trace ID, if one was generated.
schema:
type: string
description: Specifies the request's trace ID.
content:
application/csv:
schema:
$ref: '#/components/schemas/InfluxQLCSVResponse'
text/csv:
schema:
$ref: '#/components/schemas/InfluxQLCSVResponse'
application/json:
schema:
$ref: '#/components/schemas/InfluxQLResponse'
examples:
influxql-chunk_size_2:
value: |
{"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:55Z",90,"1"],["2016-05-19T18:37:56Z",90,"1"]],"partial":true}],"partial":true}]}
{"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:57Z",90,"1"],["2016-05-19T18:37:58Z",90,"1"]]}]}]}
application/x-msgpack:
schema:
type: string
format: binary
'429':
description: Token is temporarily over quota. The Retry-After header describes when to try the read again.
headers:
Retry-After:
description: A non-negative decimal integer indicating the seconds to delay after the response is received.
schema:
type: integer
format: int32
default:
description: Error processing query
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
post:
operationId: PostQueryV1
tags:
@ -147,6 +298,64 @@ paths:
text/plain:
schema:
type: string
application/json:
schema:
type: object
properties:
db:
type: string
description: Database to query.
rp:
description: |
The retention policy name for InfluxQL compatibility.
Optional parameter that, when combined with the `db` parameter, forms the complete database name to query. In InfluxDB Clustered, databases can be named using the
`database_name/retention_policy_name` convention for InfluxQL compatibility.
When a request specifies both `db` and `rp`, InfluxDB Clustered combines them as `db/rp` to target the database--for example:
- If `db=mydb` and `rp=autogen`, the query targets the database named `mydb/autogen`
- If only `db=mydb` is provided (no `rp`), the query targets the database named `mydb`
Unlike InfluxDB v1 and Cloud Serverless, InfluxDB Clustered does not use DBRP mappings or separate retention policy objects. This parameter exists solely for v1 API
compatibility and database naming conventions.
Note: The retention policy name does not control data retention in InfluxDB Clustered. Data retention is determined by the database's _retention period_ setting.
### Related
- [Use the v1 query API and InfluxQL to query data in InfluxDB Clustered](/influxdb3/clustered/query-data/execute-queries/influxdb-v1-api/)
- [Use the InfluxDB v1 API with InfluxDB Clustered](/influxdb3/clustered/guides/api-compatibility/v1/)
- [Manage databases in InfluxDB Clustered](/influxdb3/clustered/admin/databases/)
- [InfluxQL DBRP naming convention in InfluxDB Clustered](/influxdb3/clustered/admin/databases/create/#influxql-dbrp-naming-convention)
- [Migrate data from InfluxDB v1 to InfluxDB Clustered](/influxdb3/clustered/guides/migrate-data/migrate-1x-to-clustered/)
type: string
q:
description: |
Defines the InfluxQL query to run.
type: string
chunked:
description: |
If true, the response is divided into chunks of size `chunk_size`.
type: boolean
chunk_size:
description: |
The number of records that will go into a chunk.
This parameter is only used if `chunked=true`.
type: integer
default: 10000
epoch:
description: |
A unix timestamp precision.
type: string
enum:
- h
- m
- s
- ms
- u
- µ
- ns
parameters:
- $ref: '#/components/parameters/TraceSpan'
- $ref: '#/components/parameters/AuthUserV1'

View File

@ -349,7 +349,6 @@ services:
- --data-dir=/var/lib/influxdb3/data
- --plugin-dir=/var/lib/influxdb3/plugins
environment:
- INFLUXDB3_ENTERPRISE_LICENSE_EMAIL=${INFLUXDB3_ENTERPRISE_LICENSE_EMAIL}
- INFLUXDB3_AUTH_TOKEN=/run/secrets/influxdb3-enterprise-admin-token
volumes:
- type: bind

View File

@ -6,7 +6,7 @@ description: >
monitoring data and easily create alerting and automation rules.
menu:
chronograf_v1:
name: Chronograf v1.10
name: Chronograf
weight: 1
---

View File

@ -10,6 +10,12 @@ aliases:
- /chronograf/v1/about_the_project/release-notes-changelog/
---
## v1.10.8 {date="2025-08-15"}
### Bug Fixes
- Fix missing retention policies on the Databases page.
## v1.10.7 {date="2025-04-15"}
### Bug Fixes

View File

@ -1,5 +1,5 @@
---
title: InfluxDB Enterprise 1.11 release notes
title: InfluxDB Enterprise v1 release notes
description: >
Important changes and what's new in each version InfluxDB Enterprise.
menu:
@ -7,9 +7,16 @@ menu:
name: Release notes
weight: 10
parent: About the project
alt_links:
v1: /influxdb/v1/about_the_project/release-notes/
---
## v1.12.1 {date="2025-06-26"}
## v1.12.x {date="TBD"}
> [!Important]
> #### Pre-release documentation
>
> This release is not yet available. [**v{{% latest-patch %}}**](#v1118) is the latest InfluxDB Enterprise v1 release.
> [!Important]
> #### Upgrade meta nodes first
@ -22,31 +29,53 @@ menu:
- Add additional log output when using
[`influx_inspect buildtsi`](/enterprise_influxdb/v1/tools/influx_inspect/#buildtsi) to
rebuild the TSI index.
<!-- TODO: Uncomment with 1.12.x release:
- Use [`influx_inspect export`](/enterprise_influxdb/v1/tools/influx_inspect/#export) with
[`-tsmfile` option](/enterprise_influxdb/v1/tools/influx_inspect/#--tsmfile-tsm_file-) to
export a single TSM file.
-->
<!-- TODO: Remove with 1.12.x release: -->
- Use [`influx_inspect export`](/enterprise_influxdb/v1/tools/influx_inspect/#export) with
`-tsmfile` option to
export a single TSM file.
- Add `-m` flag to the [`influxd-ctl show-shards` command](/enterprise_influxdb/v1/tools/influxd-ctl/show-shards/)
to output inconsistent shards.
- Allow the specification of a write window for retention policies.
- Add `fluxQueryRespBytes` metric to the `/debug/vars` metrics endpoint.
- Log whenever meta gossip times exceed expiration.
<!-- TODO: Uncomment with 1.12.x release:
- Add [`query-log-path` configuration option](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#query-log-path)
to data nodes.
- Add [`aggressive-points-per-block` configuration option](/influxdb/v1/administration/config/#aggressive-points-per-block)
to prevent TSM files from not getting fully compacted.
-->
<!-- TODO: Remove with 1.12.x release: -->
- Add `query-log-path` configuration option to data nodes.
- Add `aggressive-points-per-block` configuration option to prevent TSM files from not getting fully compacted.
- Log TLS configuration settings on startup.
- Check for TLS certificate and private key permissions.
- Add a warning if the TLS certificate is expired.
- Add authentication to the Raft portal and add the following related _data_
node configuration options:
<!-- Uncomment with 1.12.x release
- [`[meta].raft-portal-auth-required`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#raft-portal-auth-required)
- [`[meta].raft-dialer-auth-required`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#raft-dialer-auth-required)
-->
<!-- TODO: Remove with 1.12.x release: -->
- `[meta].raft-portal-auth-required`
- `[meta].raft-dialer-auth-required`
- Improve error handling.
- InfluxQL updates:
- Delete series by retention policy.
<!-- TODO: Uncomment with 1.12.x release:
- Allow retention policies to discard writes that fall within their range, but
outside of [`FUTURE LIMIT`](/enterprise_influxdb/v1/query_language/manage-database/#future-limit)
and [`PAST LIMIT`](/enterprise_influxdb/v1/query_language/manage-database/#past-limit).
-->
<!-- TODO: Remove with 1.12.x release: -->
- Allow retention policies to discard writes that fall within their range, but
outside of `FUTURE LIMIT` and `PAST LIMIT`.
## Bug fixes

View File

@ -326,7 +326,7 @@ Very useful for troubleshooting, but will log any sensitive data contained withi
Environment variable: `INFLUXDB_DATA_QUERY_LOG_ENABLED`
#### query-log-path
#### query-log-path {metadata="v1.12.0+"}
Default is `""`.
@ -352,7 +352,7 @@ The following is an example of a `logrotate` configuration:
```
Environment variable: `INFLUXDB_DATA_QUERY_LOG_PATH`
-->
#### wal-fsync-delay
Default is `"0s"`.

View File

@ -306,7 +306,7 @@ See
[Shard group duration management](/enterprise_influxdb/v1/concepts/schema_and_data_layout/#shard-group-duration-management)
for recommended configurations.
##### `PAST LIMIT`
##### `PAST LIMIT` {metadata="v1.12.0+"}
The `PAST LIMIT` clause defines a time boundary before and relative to _now_
in which points written to the retention policy are accepted. If a point has a
@ -317,7 +317,7 @@ For example, if a write request tries to write data to a retention policy with a
`PAST LIMIT 6h` and there are points in the request with timestamps older than
6 hours, those points are rejected.
##### `FUTURE LIMIT`
##### `FUTURE LIMIT` {metadata="v1.12.0+"}
The `FUTURE LIMIT` clause defines a time boundary after and relative to _now_
in which points written to the retention policy are accepted. If a point has a

View File

@ -453,7 +453,7 @@ Default value is `$HOME/.influxdb/wal`.
See the [file system layout](/enterprise_influxdb/v1/concepts/file-system-layout/#file-system-layout)
for InfluxDB on your system.
##### [ `-tsmfile <tsm_file>` ]
##### [ `-tsmfile <tsm_file>` ] {metadata="v1.12.0+"}
Path to a single tsm file to export. This requires both `-database` and
`-retention` to be specified.
@ -472,7 +472,7 @@ influx_inspect export -compress
influx_inspect export -database DATABASE_NAME -retention RETENTION_POLICY
```
##### Export data from a single TSM file
##### Export data from a single TSM file {metadata="v1.12.0+"}
```bash
influx_inspect export \

View File

@ -44,6 +44,8 @@ ID Database Retention Policy Desired Replicas Shard Group Start
{{% /expand %}}
{{< /expand-wrapper >}}
#### Show inconsistent shards {metadata="v1.12.0+"}
You can also use the `-m` flag to output "inconsistent" shards--shards that exist
either in metadata but not on disk, or on disk but not in metadata.
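For example, run the following on a meta node to include inconsistent shards in the output:

```sh
influxd-ctl show-shards -m
```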
@ -52,10 +54,8 @@ that are either in metadata but not on disk or on disk but not in metadata.
| Flag | Description |
| :--- | :-------------------------------- |
| `-v` | Return detailed shard information |
| `-m` | Return inconsistent shards |
| `-m` | Return inconsistent shards |
{{% caption %}}
_Also see [`influxd-ctl` global flags](/enterprise_influxdb/v1/tools/influxd-ctl/#influxd-ctl-global-flags)._
{{% /caption %}}
## Examples

View File

@ -16,4 +16,4 @@ source: /shared/influxdb-v2/write-data/replication/replicate-data.md
---
<!-- The content of this file is at
// SOURCE content/shared/influxdb-v2/write-data/replication/replicate-data.md-->
// SOURCE content/shared/influxdb-v2/write-data/replication/replicate-data.md -->

View File

@ -10,27 +10,50 @@ aliases:
- /influxdb/v1/about_the_project/releasenotes-changelog/
alt_links:
v2: /influxdb/v2/reference/release-notes/influxdb/
enterprise_v1: /enterprise_influxdb/v1/about-the-project/release-notes/
---
## v1.12.1 {date="2025-06-26"}
## v1.12.x {date="TBD"}
> [!Important]
> #### Pre-release documentation
>
> This release is not yet available. [**v{{% latest-patch %}}**](#v1118) is the latest InfluxDB v1 release.
## Features
- Add additional log output when using
[`influx_inspect buildtsi`](/influxdb/v1/tools/influx_inspect/#buildtsi) to
rebuild the TSI index.
<!-- TODO: Uncomment with 1.12.x release:
- Use [`influx_inspect export`](/influxdb/v1/tools/influx_inspect/#export) with
[`-tsmfile` option](/influxdb/v1/tools/influx_inspect/#--tsmfile-tsm_file-) to
export a single TSM file.
-->
<!-- TODO: Remove with 1.12.x release: -->
- Use [`influx_inspect export`](/influxdb/v1/tools/influx_inspect/#export) with
`-tsmfile` option to
export a single TSM file.
- Add `fluxQueryRespBytes` metric to the `/debug/vars` metrics endpoint.
<!-- TODO: Uncomment with 1.12.x release:
- Add [`aggressive-points-per-block` configuration option](/influxdb/v1/administration/config/#aggressive-points-per-block)
to prevent TSM files from not getting fully compacted.
-->
<!-- TODO: Remove with 1.12.x release: -->
- Add `aggressive-points-per-block` configuration option
to prevent TSM files from not getting fully compacted.
- Improve error handling.
- InfluxQL updates:
- Delete series by retention policy.
<!-- TODO: Uncomment with 1.12.x release:
- Allow retention policies to discard writes that fall within their range, but
outside of [`FUTURE LIMIT`](/influxdb/v1/query_language/manage-database/#future-limit)
and [`PAST LIMIT`](/influxdb/v1/query_language/manage-database/#past-limit).
-->
<!-- TODO: Remove with 1.12.x release: -->
- Allow retention policies to discard writes that fall within their range, but
outside of `FUTURE LIMIT` and `PAST LIMIT`.
## Bug fixes

View File

@ -75,8 +75,8 @@ For Ubuntu/Debian users, add the InfluxData repository with the following comman
# Primary key fingerprint: 24C9 75CB A61A 024E E1B6 3178 7C3D 5715 9FC2 F927
# Subkey fingerprint: 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E
wget -q https://repos.influxdata.com/influxdata-archive.key
gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null
echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list
gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/keyrings/influxdata-archive.gpg > /dev/null
echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list
```
{{% /code-tab-content %}}
@ -86,8 +86,8 @@ echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repo
# Primary key fingerprint: 24C9 75CB A61A 024E E1B6 3178 7C3D 5715 9FC2 F927
# Subkey fingerprint: 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E
curl --silent --location -O https://repos.influxdata.com/influxdata-archive.key
gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null
echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list
gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/keyrings/influxdata-archive.gpg > /dev/null
echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list
```
{{% /code-tab-content %}}
{{< /code-tabs-wrapper >}}

View File

@ -307,7 +307,7 @@ See
[Shard group duration management](/influxdb/v1/concepts/schema_and_data_layout/#shard-group-duration-management)
for recommended configurations.
##### `PAST LIMIT`
##### `PAST LIMIT` {metadata="v1.12.0+"}
The `PAST LIMIT` clause defines a time boundary before and relative to _now_
in which points written to the retention policy are accepted. If a point has a
@ -318,7 +318,7 @@ For example, if a write request tries to write data to a retention policy with a
`PAST LIMIT 6h` and there are points in the request with timestamps older than
6 hours, those points are rejected.
##### `FUTURE LIMIT`
##### `FUTURE LIMIT` {metadata="v1.12.0+"}
The `FUTURE LIMIT` clause defines a time boundary after and relative to _now_
in which points written to the retention policy are accepted. If a point has a

View File

@ -449,7 +449,7 @@ Default value is `$HOME/.influxdb/wal`.
See the [file system layout](/influxdb/v1/concepts/file-system-layout/#file-system-layout)
for InfluxDB on your system.
##### [ `-tsmfile <tsm_file>` ]
##### [ `-tsmfile <tsm_file>` ] {metadata="v1.12.0+"}
Path to a single tsm file to export. This requires both `-database` and
`-retention` to be specified.
@ -468,7 +468,7 @@ influx_inspect export -compress
influx_inspect export -database DATABASE_NAME -retention RETENTION_POLICY
```
##### Export data from a single TSM file
##### Export data from a single TSM file {metadata="v1.12.0+"}
```bash
influx_inspect export \

View File

@ -112,7 +112,7 @@ _If `gpg` isn't available on your system, see
The following steps guide you through using GPG to verify InfluxDB
binary releases:
1. [Choose the InfluxData key-pair for your OS version](#choose-the-influxdata-key-pair-for-your-system).
1. [Choose the InfluxData key-pair for your OS version](#choose-the-influxdata-key-pair-for-your-os-version).
2. Download and import the InfluxData public key.
`gpg --import` outputs to stderr.
@ -354,8 +354,8 @@ To install {{% product-name %}} on Linux, do one of the following:
| grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' \
&& cat influxdata-archive.key \
| gpg --dearmor \
| sudo tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null \
&& echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' \
| sudo tee /etc/apt/keyrings/influxdata-archive.gpg > /dev/null \
&& echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' \
| sudo tee /etc/apt/sources.list.d/influxdata.list
# Install influxdb
sudo apt-get update && sudo apt-get install influxdb2
@ -376,7 +376,7 @@ To install {{% product-name %}} on Linux, do one of the following:
cat <<EOF | tee /etc/yum.repos.d/influxdata.repo
[influxdata]
name = InfluxData Repository - Stable
baseurl = https://repos.influxdata.com/stable/${basearch}/main
baseurl = https://repos.influxdata.com/stable/\${basearch}/main
enabled = 1
gpgcheck = 1
gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-influxdata
@ -473,7 +473,7 @@ _If necessary, adjust the example file paths and utilities for your system._
https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_linux_arm64.tar.gz
```
2. [Choose the InfluxData key-pair for your OS version](#choose-the-influxdata-key-pair-for-your-system).
2. [Choose the InfluxData key-pair for your OS version](#choose-the-influxdata-key-pair-for-your-os-version).
3. {{< req text="Recommended:" color="magenta" >}}: Verify the authenticity of the downloaded binary--for example,
enter the following command in your terminal.
@ -675,7 +675,7 @@ data isn't deleted if you delete the container._
flags for initial setup options and file system mounts.
_If you don't specify InfluxDB initial setup options, you can
[set up manually](#set-up-influxdb) later using the UI or CLI in a running
[set up manually](/influxdb/v2/get-started/setup/) later using the UI or CLI in a running
container._
{{% code-placeholders "ADMIN_(USERNAME|PASSWORD)|ORG_NAME|BUCKET_NAME" %}}
@ -731,7 +731,8 @@ and _[Operator token](/influxdb/v2/admin/tokens/#operator-token)_, and logs to s
You can view the Operator token in the `/etc/influxdb2/influx-configs` file and
use it to authorize
[creating an All Access token](#optional-create-all-access-tokens).
[creating an All Access token](#examples).
For more information, see [API token types](/influxdb/v2/admin/tokens/#api-token-types).
_To run the InfluxDB container in
[detached mode](https://docs.docker.com/engine/reference/run/#detached-vs-foreground),
@ -761,6 +762,13 @@ docker exec -it <CONTAINER_NAME> <CLI_NAME> <COMMAND>`
<!--pytest.mark.skip-->
```bash
# Create an All Access token
docker exec -it influxdb2 influx auth create \
--all-access \
--token OPERATOR_TOKEN
```
```bash
# List CLI configurations
docker exec -it influxdb2 influx config ls

View File

@ -176,8 +176,8 @@ To download the Linux `influxctl` package, do one of the following:
# Primary key fingerprint: 24C9 75CB A61A 024E E1B6 3178 7C3D 5715 9FC2 F927
# Subkey fingerprint: 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E
wget -q https://repos.influxdata.com/influxdata-archive.key
gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null
echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list
gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/keyrings/influxdata-archive.gpg > /dev/null
echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list
sudo apt-get update && sudo apt-get install influxctl
```

View File

@ -8,9 +8,11 @@ menu:
parent: Administer InfluxDB Clustered
name: Scale your cluster
weight: 207
influxdb3/clustered/tags: [scale]
influxdb3/clustered/tags: [scale, performance, Kubernetes]
related:
- /influxdb3/clustered/reference/internals/storage-engine/
- /influxdb3/clustered/write-data/best-practices/data-lifecycle/
- /influxdb3/clustered/query-data/troubleshoot-and-optimize/optimize-queries/
- https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits, Kubernetes resource requests and limits
---
@ -559,11 +561,14 @@ concurrency demands or reaches the hardware limits of your underlying nodes.
### Compactor
- **Recommended**: Maintain **1 Compactor pod** and use [vertical scaling](#vertical-scaling) (especially
increasing the available CPU) for the Compactor.
- **Recommended**: Maintain **1 Compactor pod** and use [vertical scaling](#vertical-scaling) for the Compactor.
Scale CPU and memory resources together, as compactor concurrency settings scale based on memory, not CPU count.
- Because compaction is a compute-heavy process, horizontal scaling increases compaction throughput, but not as
efficiently as vertical scaling.
> [!Important]
> When scaling the Compactor, scale CPU and memory resources together.
### Garbage collector
The [Garbage collector](/influxdb3/clustered/reference/internals/storage-engine/#garbage-collector) is a lightweight process that typically doesn't require

View File

@ -166,8 +166,8 @@ To download the Linux `influxctl` package, do one of the following:
# Primary key fingerprint: 24C9 75CB A61A 024E E1B6 3178 7C3D 5715 9FC2 F927
# Subkey fingerprint: 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E
wget -q https://repos.influxdata.com/influxdata-archive.key
gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null
echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list
gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/keyrings/influxdata-archive.gpg > /dev/null
echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list
```
{{% /code-tab-content %}}

View File

@ -61,6 +61,31 @@ directory. This new directory contains artifacts associated with the specified r
---
## 20250814-1819052 {date="2025-08-14"}
### Quickstart
```yaml
spec:
package:
image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20250814-1819052
```
### Bug Fixes
- Fix incorrect service address for tokens in Clustered auth sidecar. If you were overriding the `AUTHZ_TOKEN_SVC_ADDRESS` environment variable in your `AppInstance`, you can now remove that override.
- Remove default `fallbackScrapeProtocol` environment variable for prometheus-operator.
- Update Grafana to `12.1.1` to address CVE-2025-6023 and CVE-2025-6197.
### Changes
#### Database Engine
- Update DataFusion to `48`.
- Tweak compaction to reduce write amplification and querier cache churn in some circumstances.
---
## 20250721-1796368 {date="2025-07-21"}
### Quickstart

View File

@ -18,6 +18,7 @@ prepend: |
> [!Note]
> InfluxDB 3 Core is purpose-built for real-time data monitoring and recent data.
> InfluxDB 3 Enterprise builds on top of Core with support for historical data
> analysis and extended features.
> querying, high availability, read replicas, and more.
> Enterprise will soon unlock
> enhanced security, row-level deletions, an administration UI, and more.

View File

@ -0,0 +1,19 @@
---
title: Usage telemetry
seotitle: InfluxDB 3 Core usage telemetry
description: >
InfluxData collects telemetry data to help improve {{< product-name >}}.
Learn what data {{< product-name >}} collects and sends to InfluxData, how it's used, and
how you can opt out.
menu:
influxdb3_core:
parent: Reference
weight: 108
influxdb3/core/tags: [telemetry, monitoring, metrics, observability]
source: /shared/influxdb3-reference/telemetry.md
---
<!--
The content of this file is located at
//SOURCE - content/shared/influxdb3-reference/telemetry.md
-->

View File

@ -13,4 +13,4 @@ source: /shared/influxdb3-cli/config-options.md
<!-- The content of this file is at
//SOURCE - content/shared/influxdb3-cli/config-options.md
-->
-->

View File

@ -0,0 +1,19 @@
---
title: Usage telemetry
seotitle: InfluxDB 3 Enterprise usage telemetry
description: >
InfluxData collects telemetry data to help improve {{< product-name >}}.
Learn what data {{< product-name >}} collects and sends to InfluxData, how it's used, and
how you can opt out.
menu:
influxdb3_enterprise:
parent: Reference
weight: 108
influxdb3/enterprise/tags: [telemetry, monitoring, metrics, observability]
source: /shared/influxdb3-reference/telemetry.md
---
<!--
The content of this file is located at
//SOURCE - content/shared/influxdb3-reference/telemetry.md
-->

View File

@ -5,7 +5,7 @@ description: >
create alerts, run ETL jobs and detect anomalies.
menu:
kapacitor_v1:
name: Kapacitor v1.7
name: Kapacitor
weight: 1
---

View File

@ -704,7 +704,7 @@ name: data
## ATAN2()
Returns the the arctangent of `y/x` in radians.
Returns the arctangent of `y/x` in radians.
### Basic syntax
@ -1609,7 +1609,7 @@ SELECT DERIVATIVE(<function> ([ * | <field_key> | /<regular_expression>/ ]) [ ,
The advanced syntax requires a [`GROUP BY time()` clause](/influxdb/version/query-data/influxql/explore-data/group-by/#group-by-time-intervals) and a nested InfluxQL function.
The query first calculates the results for the nested function at the specified `GROUP BY time()` interval and then applies the `DERIVATIVE()` function to those results.
The `unit` argument is an integer followed by a [duration](//influxdb/version/reference/glossary/#duration) and it is optional.
The `unit` argument is an integer followed by a [duration](/influxdb/version/reference/glossary/#duration) and it is optional.
If the query does not specify the `unit` the `unit` defaults to the `GROUP BY time()` interval.
Note that this behavior is different from the [basic syntax's](#basic-syntax-1) default behavior.

View File

@ -1,9 +1,9 @@
Use InfluxDB replication streams (InfluxDB Edge Data Replication) to replicate
the incoming data of select buckets to one or more buckets on a remote
InfluxDB OSS, InfluxDB Cloud, or InfluxDB Enterprise instance.
InfluxDB OSS, InfluxDB Cloud, or InfluxDB Enterprise v1 instance.
Replicate data from InfluxDB OSS to InfluxDB Cloud, InfluxDB OSS, or InfluxDB Enterprise.
Replicate data from InfluxDB OSS to InfluxDB Cloud, InfluxDB OSS, or InfluxDB Enterprise v1.
- [Configure a replication stream](#configure-a-replication-stream)
- [Replicate downsampled or processed data](#replicate-downsampled-or-processed-data)
@ -17,10 +17,9 @@ Use the [`influx` CLI](/influxdb/version/tools/influx-cli/) or the
[InfluxDB {{< current-version >}} API](/influxdb/version/reference/api/) to configure
a replication stream.
{{% note %}}
To replicate data to InfluxDB OSS or InfluxDB Enterprise, adjust the
remote connection values accordingly.
{{% /note %}}
> [!Note]
> To replicate data to InfluxDB OSS or InfluxDB Enterprise v1, adjust the
> remote connection values accordingly.
{{< tabs-wrapper >}}
{{% tabs %}}
@ -30,156 +29,202 @@ remote connection values accordingly.
{{% tab-content %}}
<!--------------------------------- BEGIN CLI --------------------------------->
### Step 1: Create or find a remote connection
1. In your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS instance, use
the `influx remote create` command to create a remote connection to replicate data to.
- [Create a remote connection](#create-a-remote-connection-cli)
- [Use an existing remote connection](#use-an-existing-remote-connection-cli)
**Provide the following:**
#### Create a remote connection (CLI)
- Remote connection name
{{% show-in "v2" %}}- Remote InfluxDB instance URL{{% /show-in %}}
{{% show-in "v2" %}}- Remote InfluxDB API token _(API token must have write access to the target bucket)_{{% /show-in %}}
{{% show-in "v2" %}}- Remote InfluxDB organization ID{{% /show-in %}}
{{% show-in "cloud,cloud-serverless" %}}- [InfluxDB Cloud region URL](/influxdb/cloud/reference/regions/){{% /show-in %}}
{{% show-in "cloud,cloud-serverless" %}}- InfluxDB Cloud API token _(API token must have write access to the target bucket)_{{% /show-in %}}
{{% show-in "cloud,cloud-serverless" %}}- InfluxDB Cloud organization ID{{% /show-in %}}
In your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS instance, use
the `influx remote create` command and provide the following arguments for the remote instance:
```sh
influx remote create \
--name example-remote-name \
--remote-url https://cloud2.influxdata.com \
--remote-api-token mYsuP3r5Ecr37t0k3n \
--remote-org-id 00xoXXoxXX00
```
{{% show-in "v2" %}}
- Remote connection name
- Remote InfluxDB instance URL
- Remote InfluxDB API token _(API token must have write access to the target bucket)_
- Remote InfluxDB organization ID
{{% /show-in %}}
{{% show-in "cloud,cloud-serverless" %}}
- Remote connection name
- [InfluxDB Cloud region URL](/influxdb/cloud/reference/regions/)
- InfluxDB Cloud API token _(API token must have write access to the target bucket)_
- InfluxDB Cloud organization ID
{{% /show-in %}}
If you already have remote InfluxDB connections configured, you can use an existing connection. To view existing connections, run `influx remote list`.
```sh
influx remote create \
--name example-remote-name \
--remote-url https://cloud2.influxdata.com \
--remote-api-token mYsuP3r5Ecr37t0k3n \
--remote-org-id 00xoXXoxXX00
```
2. In your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS instance, use the
`influx replication create` command to create a replication stream.
#### Use an existing remote connection (CLI)
Alternatively, you can use an existing connection that you have already configured.
To retrieve existing connections, run `influx remote list`.
### Step 2: Create a replication stream (CLI)
In your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS instance, use the
`influx replication create` command and provide the following arguments:
**Provide the following:**
{{% show-in "v2" %}}
- Replication stream name
- Remote connection ID (created in the previous step)
- Local bucket ID to replicate writes from
- Remote bucket name or ID to replicate writes to. If replicating to **InfluxDB Enterprise v1**, use the `db-name/rp-name` bucket name syntax.{{% /show-in %}}
{{% show-in "cloud,cloud-serverless" %}}
- Replication stream name
- Remote connection ID (created in the previous step)
- InfluxDB OSS bucket ID to replicate writes from
- InfluxDB Cloud bucket ID to replicate writes to
{{% /show-in %}}
- Replication stream name
{{% show-in "v2" %}}- Remote connection ID{{% /show-in %}}
{{% show-in "v2" %}}- Local bucket ID to replicate writes from{{% /show-in %}}
{{% show-in "v2" %}}- Remote bucket name or ID to replicate writes to. If replicating to **InfluxDB Enterprise**, use the `db-name/rp-name` bucket name syntax.{{% /show-in %}}
{{% show-in "cloud,cloud-serverless" %}}- Remote connection ID{{% /show-in %}}
{{% show-in "cloud,cloud-serverless" %}}- InfluxDB OSS bucket ID to replicate writes from{{% /show-in %}}
{{% show-in "cloud,cloud-serverless" %}}- InfluxDB Cloud bucket ID to replicate writes to{{% /show-in %}}
```sh
influx replication create \
--name REPLICATION_STREAM_NAME \
--remote-id REPLICATION_REMOTE_ID \
--local-bucket-id INFLUX_BUCKET_ID \
--remote-bucket REMOTE_INFLUX_BUCKET_NAME
```
```sh
influx replication create \
--name REPLICATION_STREAM_NAME \
--remote-id REPLICATION_REMOTE_ID \
--local-bucket-id INFLUX_BUCKET_ID \
--remote-bucket REMOTE_INFLUX_BUCKET_NAME
```
Once a replication stream is created, InfluxDB {{% show-in "v2" %}}OSS{{% /show-in %}}
will replicate all writes to the specified bucket to the {{% show-in "v2" %}}remote {{% /show-in %}}
After you create the replication stream, InfluxDB {{% show-in "v2" %}}OSS{{% /show-in %}}
replicates all writes from the specified local bucket to the {{% show-in "v2" %}}remote {{% /show-in %}}
InfluxDB {{% show-in "cloud,cloud-serverless" %}}Cloud {{% /show-in %}}bucket.
Use the `influx replication list` command to view information such as the current queue size,
max queue size, and latest status code.
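For example, to check the status of all replication streams on the local instance:

```sh
# List replication streams with queue size and status details
influx replication list
```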
<!---------------------------------- END CLI ---------------------------------->
{{% /tab-content %}}
{{% tab-content %}}
<!--------------------------------- BEGIN API --------------------------------->
1. Send a `POST` request to your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS `/api/v2/remotes` endpoint to create a remote connection to replicate data to.
### Step 1: Create or find a remote connection (API)
{{< keep-url >}}
{{< api-endpoint endpoint="localhost:8086/api/v2/remotes" method="POST" api-ref="/influxdb/version/api/#operation/PostRemoteConnection" >}}
- [Create a remote connection](#create-a-remote-connection-api)
- [Use an existing remote connection](#use-an-existing-remote-connection-api)
Include the following in your request:
#### Create a remote connection (API)
- **Request method:** `POST`
- **Headers:**
- **Authorization:** `Token` scheme with your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS [API token](/influxdb/version/admin/tokens/)
- **Content-type:** `application/json`
- **Request body:** JSON object with the following fields:
{{< req type="key" >}}
- {{< req "\*" >}} **allowInsecureTLS:** Allow insecure TLS connections
- **description:** Remote description
- {{< req "\*" >}} **name:** Remote connection name
- {{< req "\*" >}} **orgID:** {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS organization ID
{{% show-in "v2" %}}- {{< req "\*" >}} **remoteAPIToken:** Remote InfluxDB API token _(API token must have write access to the target bucket)_{{% /show-in %}}
{{% show-in "v2" %}}- {{< req "\*" >}} **remoteOrgID:** Remote InfluxDB organization ID{{% /show-in %}}
{{% show-in "v2" %}}- {{< req "\*" >}} **remoteURL:** Remote InfluxDB instance URL{{% /show-in %}}
{{% show-in "cloud,cloud-serverless" %}}- {{< req "\*" >}} **remoteAPIToken:** InfluxDB Cloud API token _(API token must have write access to the target bucket)_{{% /show-in %}}
{{% show-in "cloud,cloud-serverless" %}}- {{< req "\*" >}} **remoteOrgID:** InfluxDB Cloud organization ID{{% /show-in %}}
{{% show-in "cloud,cloud-serverless" %}}- {{< req "\*" >}} **remoteURL:** [InfluxDB Cloud region URL](/influxdb/cloud/reference/regions/){{% /show-in %}}
{{< keep-url >}}
```sh
curl --request POST http://localhost:8086/api/v2/remotes \
--header 'Authorization: Token INFLUX_OSS_TOKEN' \
--data '{
"allowInsecureTLS": false,
"description": "Example remote description",
"name": "Example remote name",
"orgID": "INFLUX_OSS_ORG_ID",
"remoteAPIToken": "REMOTE_INFLUX_TOKEN",
"remoteOrgID": "REMOTE_INFLUX_ORG_ID",
"remoteURL": "https://cloud2.influxdata.com"
}'
```
#### Use an existing remote connection (API)
Alternatively, you can use an existing connection that you have already configured.
To retrieve existing connections, use the `/api/v2/remotes` endpoint with the `GET` request method:
{{< keep-url >}}
{{< api-endpoint endpoint="localhost:8086/api/v2/remotes" method="GET" api-ref="/influxdb/version/api/#operation/GetRemoteConnections" >}}
Include the following parameters in your request:
- **Request method:** `GET`
- **Headers:**
  - **Authorization:** `Token` scheme with your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS [API token](/influxdb/version/admin/tokens/)
- **Query parameters:**
  - **orgID:** {{% show-in "v2" %}}Local{{% /show-in %}} InfluxDB OSS organization ID
{{< keep-url >}}
```sh
curl --request GET \
  http://localhost:8086/api/v2/remotes?orgID=INFLUX_OSS_ORG_ID \
  --header 'Authorization: Token INFLUX_OSS_TOKEN'
```
### Step 2: Create a replication stream (API)
Send a `POST` request to your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS
`/api/v2/replications` endpoint to create a replication stream.
{{< keep-url >}}
{{< api-endpoint endpoint="localhost:8086/api/v2/remotes" method="POST" api-ref="/influxdb/version/api/#operation/PostRemoteConnection" >}}
Include the following parameters in your request:
- **Request method:** `POST`
- **Headers:**
- **Authorization:** `Token` scheme with your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS [API token](/influxdb/version/admin/tokens/)
- **Content-type:** `application/json`
{{% show-in "v2" %}}
- **Request body:** JSON object with the following fields:
{{< req type="key" >}}
- **dropNonRetryableData:** Drop data when a non-retryable error is encountered.
- {{< req "\*" >}} **localBucketID:** Local InfluxDB OSS bucket ID to replicate writes from.
- {{< req "\*" >}} **maxAgeSeconds:** Maximum age of data in seconds before it is dropped (default is `604800`, must be greater than or equal to `0`).
- {{< req "\*" >}} **maxQueueSizeBytes:** Maximum replication queue size in bytes (default is `67108860`, must be greater than or equal to `33554430`).
- {{< req "\*" >}} **name:** Replication stream name.
- {{< req "\*" >}} **orgID:** Local InfluxDB OSS organization ID.
- {{< req "\*" >}} **remoteBucketID:** Remote bucket ID to replicate writes to.
- {{< req "\*" >}} **remoteBucketName:** Remote bucket name to replicate writes to. If replicating to **InfluxDB Enterprise v1**, use the `db-name/rp-name` bucket name syntax.
{{% /show-in %}}
{{% show-in "cloud,cloud-serverless" %}}
- **Request body:** JSON object with the following fields:
{{< req type="key" >}}
- **dropNonRetryableData:** Drop data when a non-retryable error is encountered
- {{< req "\*" >}} **localBucketID:** InfluxDB OSS bucket ID to replicate writes from
- {{< req "\*" >}} **maxAgeSeconds:** Maximum age of data in seconds before it is dropped (default is `604800`, must be greater than or equal to `0`)
- {{< req "\*" >}} **maxQueueSizeBytes:** Maximum replication queue size in bytes (default is `67108860`, must be greater than or equal to `33554430`)
- {{< req "\*" >}} **name:** Replication stream name
- {{< req "\*" >}} **orgID:** InfluxDB OSS organization ID
- {{< req "\*" >}} **remoteBucketID:** InfluxDB Cloud bucket ID to replicate writes to (mutually exclusive with `remoteBucketName`)
- {{< req "\*" >}} **remoteBucketName:** InfluxDB Cloud bucket name to replicate writes to (mutually exclusive with `remoteBucketID`)
- {{< req "\*" >}} **remoteID:** Remote connection ID
{{% /show-in %}}
> [!Note]
> `remoteBucketID` and `remoteBucketName` are mutually exclusive.
> {{% show-in "v2" %}}If replicating to **InfluxDB Enterprise v1**, use `remoteBucketName` with the `db-name/rp-name` bucket name syntax.{{% /show-in %}}
{{< keep-url >}}
```sh
curl --request POST http://localhost:8086/api/v2/replications \
}'
```
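For reference, a complete request using the fields above might look like the following sketch. The IDs, token, and queue size shown are placeholders and illustrative values, not output from a real instance:
{{< keep-url >}}
```sh
curl --request POST http://localhost:8086/api/v2/replications \
  --header 'Authorization: Token INFLUX_OSS_TOKEN' \
  --header 'Content-type: application/json' \
  --data '{
    "name": "REPLICATION_STREAM_NAME",
    "orgID": "INFLUX_OSS_ORG_ID",
    "remoteID": "REPLICATION_REMOTE_ID",
    "localBucketID": "INFLUX_BUCKET_ID",
    "remoteBucketName": "REMOTE_INFLUX_BUCKET_NAME",
    "maxQueueSizeBytes": 67108860
  }'
```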
After you create a replication stream, InfluxDB {{% show-in "v2" %}}OSS{{% /show-in %}}
replicates all writes from the specified local bucket to the {{% show-in "v2" %}}remote {{% /show-in %}}
InfluxDB {{% show-in "cloud,cloud-serverless" %}}Cloud {{% /show-in %}}bucket.
To get information such as the current queue size, max queue size, and latest status
code for each replication stream, send a `GET` request to your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS `/api/v2/replications` endpoint:
{{< keep-url >}}
{{< api-endpoint endpoint="localhost:8086/api/v2/replications" method="GET" api-ref="/influxdb/version/api/#operation/GetReplications" >}}
Include the following parameters in your request:
- **Request method:** `GET`
- **Headers:**
- **Authorization:** `Token` scheme with your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS [API token](/influxdb/version/admin/tokens/)
- **Query parameters:**

View File

@ -13,7 +13,7 @@ stored. Each database can contain multiple tables.
> **If coming from InfluxDB v2, InfluxDB Cloud (TSM), or InfluxDB Cloud Serverless**,
> _database_ and _bucket_ are synonymous.
{{% show-in "enterprise" %}}
## Retention periods
A database **retention period** is the maximum age of data stored in the database.
@ -22,10 +22,9 @@ When a point's timestamp is beyond the retention period (relative to now), the
point is marked for deletion and is removed from the database the next time the
retention enforcement service runs.
The _minimum_ retention period for an InfluxDB database is 1 hour.
The _maximum_ retention period is infinite (`none`), meaning data does not expire
and is never removed by the retention enforcement service.
{{% /show-in %}}
## Database, table, and column limits
@ -40,9 +39,11 @@ never be removed by the retention enforcement service.
**Maximum number of tables across all databases**: {{% influxdb3/limit "table" %}}
{{< product-name >}} limits the number of tables you can have across _all_
databases to {{% influxdb3/limit "table" %}}. There is no specific limit on how
many tables you can have in an individual database, as long as the total across
all databases is below the limit.
databases to {{% influxdb3/limit "table" %}}{{% show-in "enterprise" %}} by default{{% /show-in %}}.
{{% show-in "enterprise" %}}You can configure the table limit using the
[`--num-table-limit` configuration option](/influxdb3/enterprise/reference/config-options/#num-table-limit).{{% /show-in %}}
InfluxDB doesn't limit how many tables you can have in an individual database,
as long as the total across all databases is below the limit.
Having more tables affects your {{% product-name %}} installation in the
following ways:
@ -64,7 +65,8 @@ persists data to Parquet files. Each `PUT` request incurs a monetary cost and
increases the operating cost of {{< product-name >}}.
{{% /expand %}}
{{% expand "**More work for the compactor** _(Enterprise only)_ <em style='opacity:.5;font-weight:normal;'>View more info</em>" %}}
{{% show-in "enterprise" %}}
{{% expand "**More work for the compactor** <em style='opacity:.5;font-weight:normal;'>View more info</em>" %}}
To optimize storage over time, InfluxDB 3 Enterprise has a compactor that
routinely compacts Parquet files.
@ -72,6 +74,7 @@ With more tables and Parquet files to compact, the compactor may need to be scal
to keep up with demand, adding to the operating cost of InfluxDB 3 Enterprise.
{{% /expand %}}
{{% /show-in %}}
{{< /expand-wrapper >}}
### Column limit
@ -80,11 +83,17 @@ to keep up with demand, adding to the operating cost of InfluxDB 3 Enterprise.
Each row must include a time column, with the remaining columns representing
tags and fields.
As a result,{{% show-in "enterprise" %}} by default,{{% /show-in %}} a table can
have one time column and up to {{% influxdb3/limit "column" -1 %}}
_combined_ field and tag columns.
If you attempt to write to a table and exceed the column limit, the write
request fails and InfluxDB returns an error.
{{% show-in "enterprise" %}}
You can configure the maximum number of columns per
table using the [`num-total-columns-per-table-limit` configuration option](/influxdb3/enterprise/reference/config-options/#num-total-columns-per-table-limit).
{{% /show-in %}}
A higher number of columns has the following side effects:
{{< expand-wrapper >}}

View File

@ -130,7 +130,12 @@ database_name/retention_policy_name
## Database limit
{{% show-in "enterprise" %}}
**Default maximum number of databases**: {{% influxdb3/limit "database" %}}
{{% /show-in %}}
{{% show-in "core" %}}
**Maximum number of databases**: {{% influxdb3/limit "database" %}}
{{% /show-in %}}
_For more information about {{< product-name >}} database, table, and column limits,
see [Database, table, and column limits](/influxdb3/version/admin/databases/#database-table-and-column-limits)._

View File

@ -69,6 +69,59 @@ influxdb3 create distinct_cache \
<!--------------------------- END ENTERPRISE EXAMPLE -------------------------->
{{% /show-in %}}
## Use the HTTP API
To use the HTTP API to create a Distinct Value Cache, send a `POST` request to the `/api/v3/configure/distinct_cache` endpoint.
{{% api-endpoint method="POST" endpoint="/api/v3/configure/distinct_cache" api-ref="/influxdb3/version/api/v3/#operation/PostConfigureDistinctCache" %}}
{{% code-placeholders "(DATABASE|TABLE|DVC)_NAME|AUTH_TOKEN|COLUMNS|MAX_(CARDINALITY|AGE)" %}}
```bash
curl -X POST "https://localhost:8181/api/v3/configure/distinct_cache" \
--header "Authorization: Bearer AUTH_TOKEN" \
--json '{
"db": "DATABASE_NAME",
"table": "TABLE_NAME",
"name": "DVC_NAME",
"columns": ["COLUMNS"],
"max_cardinality": MAX_CARDINALITY,
"max_age": MAX_AGE
}'
```
{{% /code-placeholders %}}
### Example
```bash
curl -X POST "https://localhost:8181/api/v3/configure/distinct_cache" \
--header "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" \
--json '{
"db": "example-db",
"table": "wind_data",
"name": "windDistinctCache",
"columns": ["country", "county", "city"],
"max_cardinality": 10000,
"max_age": 86400
}'
```
**Response codes:**
- `201` : Success. The distinct cache has been created.
- `204` : Not created. A distinct cache with this configuration already exists.
- `400` : Bad request.
> [!Note]
> #### API parameter differences
>
> - **Columns format**: The API uses a JSON array (`["country", "county", "city"]`)
> instead of the CLI's comma-delimited format (`country,county,city`).
> - **Maximum age format**: The API uses seconds (`86400`) instead of the CLI's
> [humantime format](https://docs.rs/humantime/latest/humantime/fn.parse_duration.html) (`24h`, `1 day`).
Replace the following:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:

View File

@ -31,3 +31,37 @@ FROM
WHERE
country = 'Spain'
```
## Use the HTTP API
To use the HTTP API to query cached data, send a `GET` or `POST` request to the `/api/v3/query_sql` endpoint and include the [`distinct_cache()`](/influxdb3/version/reference/sql/functions/cache/#distinct_cache) function in your query.
{{% api-endpoint method="GET" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/GetExecuteQuerySQL" %}}
{{% api-endpoint method="POST" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/PostExecuteQuerySQL" %}}
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TABLE_NAME|CACHE_NAME" %}}
```bash
curl -X POST "https://localhost:8181/api/v3/query_sql" \
--header "Authorization: Bearer AUTH_TOKEN" \
--json '{
"db": "DATABASE_NAME",
"q": "SELECT * FROM distinct_cache('\''TABLE_NAME'\'', '\''CACHE_NAME'\'')",
"format": "json"
}'
```
{{% /code-placeholders %}}
### Example with WHERE clause
```bash
curl -X POST "https://localhost:8181/api/v3/query_sql" \
  --header "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" \
  --json '{
    "db": "example-db",
    "q": "SELECT * FROM distinct_cache('\''wind_data'\'', '\''windDistinctCache'\'') WHERE country = '\''Spain'\''",
    "format": "json"
  }'
```

View File

@ -67,3 +67,44 @@ In the examples above, replace the following:
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
your {{< product-name >}} {{% show-in "enterprise" %}}admin {{% /show-in %}}
authentication token
## Use the HTTP API
To use the HTTP API to query and output cache information from the system table, send a `GET` or `POST` request to the `/api/v3/query_sql` endpoint.
{{% api-endpoint method="GET" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/GetExecuteQuerySQL" %}}
{{% api-endpoint method="POST" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/PostExecuteQuerySQL" %}}
### Query all caches
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
```bash
curl -X POST "https://localhost:8181/api/v3/query_sql" \
--header "Authorization: Bearer AUTH_TOKEN" \
--json '{
"db": "DATABASE_NAME",
"q": "SELECT * FROM system.distinct_caches",
"format": "json"
}'
```
{{% /code-placeholders %}}
### Query specific cache details
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|CACHE_NAME" %}}
```bash
curl -X POST "https://localhost:8181/api/v3/query_sql" \
--header "Authorization: Bearer AUTH_TOKEN" \
--json '{
"db": "DATABASE_NAME",
"q": "SELECT * FROM system.distinct_caches WHERE name = '\''CACHE_NAME'\''",
"format": "json"
}'
```
{{% /code-placeholders %}}

View File

@ -80,6 +80,59 @@ influxdb3 create last_cache \
<!--------------------------- END ENTERPRISE EXAMPLE -------------------------->
{{% /show-in %}}
## Use the HTTP API
To use the HTTP API to create a Last Value Cache, send a `POST` request to the `/api/v3/configure/last_cache` endpoint.
{{% api-endpoint method="POST" endpoint="/api/v3/configure/last_cache" api-ref="/influxdb3/version/api/v3/#operation/PostConfigureLastCache" %}}
{{% code-placeholders "(DATABASE|TABLE|LVC)_NAME|AUTH_TOKEN|(KEY|VALUE)_COLUMNS|COUNT|TTL" %}}
```bash
curl -X POST "https://localhost:8181/api/v3/configure/last_cache" \
--header "Authorization: Bearer AUTH_TOKEN" \
--json '{
"db": "DATABASE_NAME",
"table": "TABLE_NAME",
"name": "LVC_NAME",
"key_columns": ["KEY_COLUMNS"],
"value_columns": ["VALUE_COLUMNS"],
"count": COUNT,
"ttl": TTL
}'
```
{{% /code-placeholders %}}
### Example
```bash
curl -X POST "https://localhost:8181/api/v3/configure/last_cache" \
--header "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" \
--json '{
"db": "example-db",
"table": "home",
"name": "homeLastCache",
"key_columns": ["room", "wall"],
"value_columns": ["temp", "hum", "co"],
"count": 5,
"ttl": 14400
}'
```
**Response codes:**
- `201` : Success. Last cache created.
- `400` : Bad request.
- `401` : Unauthorized.
- `404` : Cache not found.
- `409` : Cache already exists.
> [!Note]
> #### API parameter differences
>
> - **Column format**: The API uses a JSON array (`["room", "wall"]`)
>   instead of the CLI's comma-delimited format (`room,wall`).
> - **TTL format**: The API uses seconds (`14400`) instead of the CLI's
>   [humantime format](https://docs.rs/humantime/latest/humantime/fn.parse_duration.html) (`4h`, `4 hours`).
Replace the following:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
@ -116,4 +169,4 @@ The cache imports the distinct values from the table and starts caching them.
>
> The LVC is stored in memory, so it's important to consider the size and persistence
> of the cache. For more information, see
> [Important things to know about the Last Value Cache](/influxdb3/version/admin/last-value-cache/#important-things-to-know-about-the-last-value-cache).

View File

@ -23,6 +23,33 @@ influxdb3 delete last_cache \
```
{{% /code-placeholders %}}
## Use the HTTP API
To use the HTTP API to delete a Last Value Cache, send a `DELETE` request to the `/api/v3/configure/last_cache` endpoint with query parameters.
{{% api-endpoint method="DELETE" endpoint="/api/v3/configure/last_cache" api-ref="/influxdb3/core/api/v3/#operation/DeleteConfigureLastCache" %}}
{{% code-placeholders "(DATABASE|TABLE|LVC)_NAME|AUTH_TOKEN" %}}
```bash
curl -X DELETE "https://localhost:8181/api/v3/configure/last_cache?db=DATABASE_NAME&table=TABLE_NAME&name=LVC_NAME" \
--header "Authorization: Bearer AUTH_TOKEN"
```
{{% /code-placeholders %}}
### Example
```bash
curl -X DELETE "https://localhost:8181/api/v3/configure/last_cache?db=example-db&table=home&name=homeLastCache" \
--header "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0"
```
**Response codes:**
- `200` : Success. The last cache has been deleted.
- `400` : Bad request.
- `401` : Unauthorized.
- `404` : Cache not found.
Replace the following:
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:

View File

@ -66,3 +66,43 @@ In the examples above, replace the following:
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
your {{< product-name >}} {{% show-in "enterprise" %}}admin {{% /show-in %}}
authentication token
## Use the HTTP API
To use the HTTP API to query and output cache information from the system table, send a `GET` or `POST` request to the `/api/v3/query_sql` endpoint.
{{% api-endpoint method="GET" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/GetExecuteQuerySQL" %}}
{{% api-endpoint method="POST" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/PostExecuteQuerySQL" %}}
### Query all last value caches
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
```bash
curl -X POST "https://localhost:8181/api/v3/query_sql" \
--header "Authorization: Bearer AUTH_TOKEN" \
--json '{
"db": "DATABASE_NAME",
"q": "SELECT * FROM system.last_caches",
"format": "json"
}'
```
{{% /code-placeholders %}}
### Query specific cache details
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|CACHE_NAME" %}}
```bash
curl -X POST "https://localhost:8181/api/v3/query_sql" \
--header "Authorization: Bearer AUTH_TOKEN" \
--json '{
"db": "DATABASE_NAME",
"q": "SELECT * FROM system.last_caches WHERE name = '\''CACHE_NAME'\''",
"format": "json"
}'
```
{{% /code-placeholders %}}

View File

@ -53,6 +53,10 @@ influxdb3 serve
- [tls-minimum-versions](#tls-minimum-version)
- [without-auth](#without-auth)
- [disable-authz](#disable-authz)
{{% show-in "enterprise" %}}
- [num-database-limit](#num-database-limit)
- [num-table-limit](#num-table-limit)
- [num-total-columns-per-table-limit](#num-total-columns-per-table-limit){{% /show-in %}}
- [AWS](#aws)
- [aws-access-key-id](#aws-access-key-id)
- [aws-secret-access-key](#aws-secret-access-key)
@ -204,7 +208,7 @@ This value must be different than the [`--node-id`](#node-id) value.
#### data-dir
For the `file` object store, defines the location {{< product-name >}} uses to store files locally.
Required when using the `file` [object store](#object-store).
| influxdb3 serve option | Environment variable |
@ -216,7 +220,7 @@ Required when using the `file` [object store](#object-store).
{{% show-in "enterprise" %}}
#### license-email
Specifies the email address to associate with your {{< product-name >}} license
and automatically responds to the interactive email prompt when the server starts.
This option is mutually exclusive with [license-file](#license-file).
@ -228,7 +232,7 @@ This option is mutually exclusive with [license-file](#license-file).
#### license-file
Specifies the path to a license file for {{< product-name >}}. When provided, the license
file's contents are used instead of requesting a new license.
This option is mutually exclusive with [license-email](#license-email).
@ -361,10 +365,44 @@ The server processes all requests without requiring tokens or authentication.
Optionally disable authz by passing in a comma-separated list of resources.
Valid values are `health`, `ping`, and `metrics`.
| influxdb3 serve option | Environment variable |
| :--------------------- | :------------------------ |
| `--disable-authz` | `INFLUXDB3_DISABLE_AUTHZ` |
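For example, the following sketch starts the server with authorization disabled only for the `health` and `ping` endpoints (other flags required by your deployment are omitted):
```sh
influxdb3 serve --disable-authz health,ping
```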
{{% show-in "enterprise" %}}
---
#### num-database-limit
Limits the total number of active databases.
Default is {{% influxdb3/limit "database" %}}.
| influxdb3 serve option | Environment variable |
| :---------------------- | :---------------------------------------- |
| `--num-database-limit` | `INFLUXDB3_ENTERPRISE_NUM_DATABASE_LIMIT` |
---
#### num-table-limit
Limits the total number of active tables across all databases.
Default is {{% influxdb3/limit "table" %}}.
| influxdb3 serve option | Environment variable |
| :--------------------- | :------------------------------------- |
| `--num-table-limit` | `INFLUXDB3_ENTERPRISE_NUM_TABLE_LIMIT` |
---
#### num-total-columns-per-table-limit
Limits the total number of columns per table.
Default is {{% influxdb3/limit "column" %}}.
| influxdb3 serve option | Environment variable |
| :------------------------------------ | :------------------------------------------------------- |
| `--num-total-columns-per-table-limit` | `INFLUXDB3_ENTERPRISE_NUM_TOTAL_COLUMNS_PER_TABLE_LIMIT` |
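For example, a minimal sketch that raises these limits through environment variables before starting the server (the values shown are illustrative, not recommendations):
```sh
export INFLUXDB3_ENTERPRISE_NUM_DATABASE_LIMIT=10
export INFLUXDB3_ENTERPRISE_NUM_TABLE_LIMIT=5000
export INFLUXDB3_ENTERPRISE_NUM_TOTAL_COLUMNS_PER_TABLE_LIMIT=1000
influxdb3 serve
```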
{{% /show-in %}}
---
### AWS

View File

@ -0,0 +1,93 @@
InfluxData collects information, or _telemetry data_, about the usage of {{% product-name %}} to help improve the product.
Learn what data {{% product-name %}} collects and sends to InfluxData, how it's used, and
how you can opt out.
## What data is collected
{{< product-name >}} collects the following telemetry data:
### System metrics
- **CPU utilization**: Process-specific CPU usage
- **Memory usage**: Process memory consumption in MB
- **Cores**: Number of CPU cores in use
- **OS**: Operating system information
- **Version**: {{< product-name >}} version
- **Uptime**: Server uptime in seconds
### Write metrics
- **Write requests**: Number of write operations
- **Write lines**: Number of lines written
- **Write bytes**: Amount of data written in MB
### Query metrics
- **Query requests**: Number of query operations
### Storage metrics
- **Parquet file count**: Number of Parquet files
- **Parquet file size**: Total size of Parquet files in MB
- **Parquet row count**: Total number of rows in Parquet files
### Processing engine metrics
- **WAL triggers**: Write-Ahead Log trigger counts
- **Schedule triggers**: Scheduled processing trigger counts
- **Request triggers**: Request-based processing trigger counts
### Instance information
- **Instance ID**: Unique identifier for the server instance
- **Cluster UUID**: Unique identifier for the cluster
- **Storage type**: Type of object storage being used
{{% show-in "core" %}}
- **Product type**: "Core"
{{% /show-in %}}
{{% show-in "enterprise" %}}
- **Product type**: "Enterprise"
{{% /show-in %}}
## Collection frequency
- **System metrics** (CPU, memory): Collected every 60 seconds
- **Write and query metrics**: Collected per operation, rolled up every 60 seconds
- **Storage and processing engine metrics**: Collected at snapshot time (when available)
- **Instance information**: Static data collected once
Telemetry data is transmitted once per hour.
## Disable telemetry
To "opt-out" of collecting and sending {{% product-name %}} telemetry data,
include the `--disable-telemetry-upload` flag or set the `INFLUXDB3_TELEMETRY_DISABLE_UPLOAD` environment variable
when starting {{% product-name %}}.
**Default:** `false`
| influxdb3 flag | Environment variable |
| :------------- | :------------------- |
| `--disable-telemetry-upload` | `INFLUXDB3_TELEMETRY_DISABLE_UPLOAD` |
#### Command line flag
```sh
influxdb3 serve --disable-telemetry-upload
```
#### Environment variable
```sh
export INFLUXDB3_TELEMETRY_DISABLE_UPLOAD=true
```
When telemetry is disabled, no usage data is collected or transmitted.
## Data handling
The telemetry data is used by InfluxData to understand product usage patterns, improve product performance and reliability, prioritize feature development, and identify/resolve issues. No personally identifiable information (PII) is collected.
## Privacy and security
All telemetry data is transmitted securely via HTTPS. No database contents, queries, or user data are collected; only operational metrics and system information are transmitted.
All data collection follows InfluxData's privacy policy.

View File

@ -5,7 +5,7 @@ description: >
time series platform, used to collect and report metrics. Telegraf supports four categories of plugins -- input, output, aggregator, and processor.
menu:
telegraf_v1:
name: Telegraf
weight: 1
related:
- /resources/videos/intro-to-telegraf/

View File

@ -15,7 +15,7 @@ To install Telegraf, do the following:
- [Review requirements](#requirements)
- [Download and install Telegraf](#download-and-install-telegraf)
- [Custom compile Telegraf](#custom-compile-telegraf)
## Requirements
@ -121,7 +121,7 @@ InfluxData uses [GPG (GnuPG)](https://www.gnupg.org/software/) to sign released
public key and encrypted private key (`.key` file) pairs that you can use to
verify the integrity of packages and binaries from the InfluxData repository.
Before running the [install](#download-and-install-instructions) sample code, substitute the key-pair compatible with your OS version:
For newer OS releases (for example, Ubuntu 20.04 LTS and newer, Debian Buster
and newer) that support subkey verification:
@ -180,8 +180,8 @@ gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 \
| grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' \
&& cat influxdata-archive.key \
| gpg --dearmor \
| sudo tee /etc/apt/keyrings/influxdata-archive.gpg > /dev/null \
&& echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' \
| sudo tee /etc/apt/sources.list.d/influxdata.list
sudo apt-get update && sudo apt-get install telegraf
```
@ -198,8 +198,8 @@ gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive_compat.key
| grep -q '^fpr:\+9D539D90D3328DC7D6C8D3B9D8FF8E1F7DF8B07E:$' \
&& cat influxdata-archive_compat.key \
| gpg --dearmor \
| sudo tee /etc/apt/keyrings/influxdata-archive_compat.gpg > /dev/null
echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' \
| sudo tee /etc/apt/sources.list.d/influxdata.list
sudo apt-get update && sudo apt-get install telegraf
```
@ -329,7 +329,7 @@ Replace the following:
Choose from the following options to install Telegraf binary files for Linux ARM:
- To install on Linux ARMv7(32-bit), see the [downloads page](https://www.influxdata.com/downloads/#telegraf).
- [Download and install on Linux ARMv8 (64-bit)](#download-and-install-on-linux-armv8)
### Download and install on Linux ARMv8
@ -388,7 +388,7 @@ To install using Homebrew, do the following:
3. Choose one of the following methods to start Telegraf and begin collecting and processing metrics:
- [Run Telegraf in your terminal](#run-telegraf-in-your-terminal)
- [Run Telegraf as a service](#run-telegraf-as-a-background-service)
### Run Telegraf in your terminal
@ -627,7 +627,7 @@ Use the Telegraf custom builder tool to compile Telegraf with only the plugins y
### Prerequisites
- Follow the instructions to install [Go](https://go.dev/) for your system.
- [Create your Telegraf configuration file](#generate-a-configuration-file) with the plugins you want to use.
### Build the custom builder tool

View File

@ -2,14 +2,6 @@ import { defineConfig } from 'cypress';
import { cwd as _cwd } from 'process';
import * as fs from 'fs';
import * as yaml from 'js-yaml';
import {
BROKEN_LINKS_FILE,
FIRST_BROKEN_LINK_FILE,
initializeReport,
readBrokenLinksReport,
saveCacheStats,
saveValidationStrategy,
} from './cypress/support/link-reporter.js';
export default defineConfig({
e2e: {
@ -88,98 +80,6 @@ export default defineConfig({
}
},
// Broken links reporting tasks
initializeBrokenLinksReport() {
return initializeReport();
},
// Special case domains are now handled directly in the test without additional reporting
// This task is kept for backward compatibility but doesn't do anything special
reportSpecialCaseLink(linkData) {
console.log(
`✅ Expected status code: ${linkData.url} (status: ${linkData.status}) is valid for this domain`
);
return true;
},
reportBrokenLink(linkData) {
try {
// Validate link data
if (!linkData || !linkData.url || !linkData.page) {
console.error('Invalid link data provided');
return false;
}
// Read current report
const report = readBrokenLinksReport();
// Find or create entry for this page
let pageReport = report.find((r) => r.page === linkData.page);
if (!pageReport) {
pageReport = { page: linkData.page, links: [] };
report.push(pageReport);
}
// Check if link is already in the report to avoid duplicates
const isDuplicate = pageReport.links.some(
(link) => link.url === linkData.url && link.type === linkData.type
);
if (!isDuplicate) {
// Add the broken link to the page's report
pageReport.links.push({
url: linkData.url,
status: linkData.status,
type: linkData.type,
linkText: linkData.linkText,
});
// Write updated report back to file
fs.writeFileSync(
BROKEN_LINKS_FILE,
JSON.stringify(report, null, 2)
);
// Store first broken link if not already recorded
const firstBrokenLinkExists =
fs.existsSync(FIRST_BROKEN_LINK_FILE) &&
fs.readFileSync(FIRST_BROKEN_LINK_FILE, 'utf8').trim() !== '';
if (!firstBrokenLinkExists) {
// Store first broken link with complete information
const firstBrokenLink = {
url: linkData.url,
status: linkData.status,
type: linkData.type,
linkText: linkData.linkText,
page: linkData.page,
time: new Date().toISOString(),
};
fs.writeFileSync(
FIRST_BROKEN_LINK_FILE,
JSON.stringify(firstBrokenLink, null, 2)
);
console.error(
`🔴 FIRST BROKEN LINK: ${linkData.url} (${linkData.status}) - ${linkData.type} on page ${linkData.page}`
);
}
// Log the broken link immediately to console
console.error(
`❌ BROKEN LINK: ${linkData.url} (${linkData.status}) - ${linkData.type} on page ${linkData.page}`
);
}
return true;
} catch (error) {
console.error(`Error reporting broken link: ${error.message}`);
// Even if there's an error, we want to ensure the test knows there was a broken link
return true;
}
},
// Cache and incremental validation tasks
saveCacheStatistics(stats) {
try {

View File

@ -1,370 +0,0 @@
/// <reference types="cypress" />
describe('Article', () => {
let subjects = Cypress.env('test_subjects')
? Cypress.env('test_subjects')
.split(',')
.filter((s) => s.trim() !== '')
: [];
// Cache will be checked during test execution at the URL level
// Always use HEAD for downloads to avoid timeouts
const useHeadForDownloads = true;
// Set up initialization for tests
before(() => {
// Initialize the broken links report
cy.task('initializeBrokenLinksReport');
// Clean up expired cache entries
cy.task('cleanupCache').then((cleaned) => {
if (cleaned > 0) {
cy.log(`🧹 Cleaned up ${cleaned} expired cache entries`);
}
});
});
// Display cache statistics after all tests complete
after(() => {
cy.task('getCacheStats').then((stats) => {
cy.log('📊 Link Validation Cache Statistics:');
cy.log(` • Cache hits: ${stats.hits}`);
cy.log(` • Cache misses: ${stats.misses}`);
cy.log(` • New entries stored: ${stats.stores}`);
cy.log(` • Hit rate: ${stats.hitRate}`);
cy.log(` • Total validations: ${stats.total}`);
if (stats.total > 0) {
const message = stats.hits > 0
? `✨ Cache optimization saved ${stats.hits} link validations`
: '🔄 No cache hits - all links were validated fresh';
cy.log(message);
}
// Save cache statistics for the reporter to display
cy.task('saveCacheStatsForReporter', {
hitRate: parseFloat(stats.hitRate.replace('%', '')),
cacheHits: stats.hits,
cacheMisses: stats.misses,
totalValidations: stats.total,
newEntriesStored: stats.stores,
cleanups: stats.cleanups
});
});
});
// Helper function to identify download links
function isDownloadLink(href) {
// Check for common download file extensions
const downloadExtensions = [
'.pdf',
'.zip',
'.tar.gz',
'.tgz',
'.rar',
'.exe',
'.dmg',
'.pkg',
'.deb',
'.rpm',
'.xlsx',
'.csv',
'.doc',
'.docx',
'.ppt',
'.pptx',
];
// Check for download domains or paths
const downloadDomains = ['dl.influxdata.com', 'downloads.influxdata.com'];
// Check if URL contains a download extension
const hasDownloadExtension = downloadExtensions.some((ext) =>
href.toLowerCase().endsWith(ext)
);
// Check if URL is from a download domain
const isFromDownloadDomain = downloadDomains.some((domain) =>
href.toLowerCase().includes(domain)
);
// Return true if either condition is met
return hasDownloadExtension || isFromDownloadDomain;
}
// Helper function for handling failed links
function handleFailedLink(url, status, type, redirectChain = '', linkText = '', pageUrl = '') {
// Report the broken link
cy.task('reportBrokenLink', {
url: url + redirectChain,
status,
type,
linkText,
page: pageUrl,
});
// Throw error for broken links
throw new Error(
`BROKEN ${type.toUpperCase()} LINK: ${url} (status: ${status})${redirectChain} on ${pageUrl}`
);
}
// Helper function to test a link with cache integration
function testLink(href, linkText = '', pageUrl) {
// Check cache first
return cy.task('isLinkCached', href).then((isCached) => {
if (isCached) {
cy.log(`✅ Cache hit: ${href}`);
return cy.task('getLinkCache', href).then((cachedResult) => {
if (cachedResult && cachedResult.result && cachedResult.result.status >= 400) {
// Cached result shows this link is broken
handleFailedLink(href, cachedResult.result.status, cachedResult.result.type || 'cached', '', linkText, pageUrl);
}
// For successful cached results, just return - no further action needed
});
} else {
// Not cached, perform actual validation
return performLinkValidation(href, linkText, pageUrl);
}
});
}
// Helper function to perform actual link validation and cache the result
function performLinkValidation(href, linkText = '', pageUrl) {
// Common request options for both methods
const requestOptions = {
failOnStatusCode: true,
timeout: 15000, // Increased timeout for reliability
followRedirect: true, // Explicitly follow redirects
retryOnNetworkFailure: true, // Retry on network issues
retryOnStatusCodeFailure: true, // Retry on 5xx errors
};
if (useHeadForDownloads && isDownloadLink(href)) {
cy.log(`** Testing download link with HEAD: ${href} **`);
return cy.request({
method: 'HEAD',
url: href,
...requestOptions,
}).then((response) => {
// Prepare result for caching
const result = {
status: response.status,
type: 'download',
timestamp: new Date().toISOString()
};
// Check final status after following any redirects
if (response.status >= 400) {
const redirectInfo =
response.redirects && response.redirects.length > 0
? ` (redirected to: ${response.redirects.join(' -> ')})`
: '';
// Cache the failed result
cy.task('setLinkCache', { url: href, result });
handleFailedLink(href, response.status, 'download', redirectInfo, linkText, pageUrl);
} else {
// Cache the successful result
cy.task('setLinkCache', { url: href, result });
}
});
} else {
cy.log(`** Testing link: ${href} **`);
return cy.request({
url: href,
...requestOptions,
}).then((response) => {
// Prepare result for caching
const result = {
status: response.status,
type: 'regular',
timestamp: new Date().toISOString()
};
if (response.status >= 400) {
const redirectInfo =
response.redirects && response.redirects.length > 0
? ` (redirected to: ${response.redirects.join(' -> ')})`
: '';
// Cache the failed result
cy.task('setLinkCache', { url: href, result });
handleFailedLink(href, response.status, 'regular', redirectInfo, linkText, pageUrl);
} else {
// Cache the successful result
cy.task('setLinkCache', { url: href, result });
}
});
}
}
// Test setup validation
it('Test Setup Validation', function () {
cy.log(`📋 Test Configuration:`);
cy.log(` • Test subjects: ${subjects.length}`);
cy.log(` • Cache: URL-level caching with 30-day TTL`);
cy.log(` • Link validation: Internal, anchor, and allowed external links`);
cy.log('✅ Test setup validation completed');
});
subjects.forEach((subject) => {
it(`${subject} has valid internal links`, function () {
// Add error handling for page visit failures
cy.visit(`${subject}`, { timeout: 20000 }).then(() => {
cy.log(`✅ Successfully loaded page: ${subject}`);
});
// Test internal links
cy.get('article, .api-content').then(($article) => {
// Find links without failing the test if none are found
const $links = $article.find('a[href^="/"]');
if ($links.length === 0) {
cy.log('No internal links found on this page');
return;
}
cy.log(`🔍 Testing ${$links.length} internal links on ${subject}`);
// Now test each link
cy.wrap($links).each(($a) => {
const href = $a.attr('href');
const linkText = $a.text().trim();
try {
testLink(href, linkText, subject);
} catch (error) {
cy.log(`❌ Error testing link ${href}: ${error.message}`);
throw error; // Re-throw to fail the test
}
});
});
});
it(`${subject} has valid anchor links`, function () {
cy.visit(`${subject}`).then(() => {
cy.log(`✅ Successfully loaded page for anchor testing: ${subject}`);
});
// Define selectors for anchor links to ignore, such as behavior triggers
const ignoreLinks = ['.tabs a[href^="#"]', '.code-tabs a[href^="#"]'];
const anchorSelector =
'a[href^="#"]:not(' + ignoreLinks.join('):not(') + ')';
cy.get('article, .api-content').then(($article) => {
const $anchorLinks = $article.find(anchorSelector);
if ($anchorLinks.length === 0) {
cy.log('No anchor links found on this page');
return;
}
cy.log(`🔗 Testing ${$anchorLinks.length} anchor links on ${subject}`);
cy.wrap($anchorLinks).each(($a) => {
const href = $a.prop('href');
const linkText = $a.text().trim();
if (href && href.length > 1) {
// Get just the fragment part
const url = new URL(href);
const anchorId = url.hash.substring(1); // Remove the # character
if (!anchorId) {
cy.log(`Skipping empty anchor in ${href}`);
return;
}
// Use DOM to check if the element exists
cy.window().then((win) => {
const element = win.document.getElementById(anchorId);
if (!element) {
cy.task('reportBrokenLink', {
url: `#${anchorId}`,
status: 404,
type: 'anchor',
linkText,
page: subject,
});
cy.log(`⚠️ Missing anchor target: #${anchorId}`);
}
});
}
});
});
});
it(`${subject} has valid external links`, function () {
// Check if we should skip external links entirely
if (Cypress.env('skipExternalLinks') === true) {
cy.log(
'Skipping all external links as configured by skipExternalLinks'
);
return;
}
cy.visit(`${subject}`).then(() => {
cy.log(
`✅ Successfully loaded page for external link testing: ${subject}`
);
});
// Define allowed external domains to test
const allowedExternalDomains = ['github.com', 'kapa.ai'];
// Test external links
cy.get('article, .api-content').then(($article) => {
// Find links without failing the test if none are found
const $links = $article.find('a[href^="http"]');
if ($links.length === 0) {
cy.log('No external links found on this page');
return;
}
cy.log(`🔍 Found ${$links.length} total external links on ${subject}`);
// Filter links to only include allowed domains
const $allowedLinks = $links.filter((_, el) => {
const href = el.getAttribute('href');
try {
const url = new URL(href);
return allowedExternalDomains.some(
(domain) =>
url.hostname === domain || url.hostname.endsWith(`.${domain}`)
);
} catch (urlError) {
cy.log(`⚠️ Invalid URL found: ${href}`);
return false;
}
});
if ($allowedLinks.length === 0) {
cy.log('No links to allowed external domains found on this page');
cy.log(` • Allowed domains: ${allowedExternalDomains.join(', ')}`);
return;
}
cy.log(
`🌐 Testing ${$allowedLinks.length} links to allowed external domains`
);
cy.wrap($allowedLinks).each(($a) => {
const href = $a.attr('href');
const linkText = $a.text().trim();
try {
testLink(href, linkText, subject);
} catch (error) {
cy.log(`❌ Error testing external link ${href}: ${error.message}`);
throw error;
}
});
});
});
});
});

View File

@ -1,215 +0,0 @@
/**
* Link Cache Manager for Cypress Tests
* Manages caching of link validation results at the URL level
*/
import fs from 'fs';
import path from 'path';
import crypto from 'crypto';
const CACHE_VERSION = 'v2';
const CACHE_KEY_PREFIX = 'link-validation';
const LOCAL_CACHE_DIR = path.join(process.cwd(), '.cache', 'link-validation');
/**
* Cache manager for individual link validation results
*/
export class LinkCacheManager {
constructor(options = {}) {
this.localCacheDir = options.localCacheDir || LOCAL_CACHE_DIR;
// Configurable cache TTL - default 30 days
this.cacheTTLDays =
options.cacheTTLDays || parseInt(process.env.LINK_CACHE_TTL_DAYS) || 30;
this.maxAge = this.cacheTTLDays * 24 * 60 * 60 * 1000;
this.ensureLocalCacheDir();
// Track cache statistics
this.stats = {
hits: 0,
misses: 0,
stores: 0,
cleanups: 0
};
}
ensureLocalCacheDir() {
if (!fs.existsSync(this.localCacheDir)) {
fs.mkdirSync(this.localCacheDir, { recursive: true });
}
}
/**
* Generate cache key for a URL
* @param {string} url - The URL to cache
* @returns {string} Cache key
*/
generateCacheKey(url) {
const urlHash = crypto
.createHash('sha256')
.update(url)
.digest('hex')
.substring(0, 16);
return `${CACHE_KEY_PREFIX}-${CACHE_VERSION}-${urlHash}`;
}
/**
* Get cache file path for a URL
* @param {string} url - The URL
* @returns {string} File path
*/
getCacheFilePath(url) {
const cacheKey = this.generateCacheKey(url);
return path.join(this.localCacheDir, `${cacheKey}.json`);
}
/**
* Check if a URL's validation result is cached
* @param {string} url - The URL to check
* @returns {Object|null} Cached result or null
*/
get(url) {
const cacheFile = this.getCacheFilePath(url);
if (!fs.existsSync(cacheFile)) {
this.stats.misses++;
return null;
}
try {
const content = fs.readFileSync(cacheFile, 'utf8');
const cached = JSON.parse(content);
// TTL check
const age = Date.now() - new Date(cached.cachedAt).getTime();
if (age > this.maxAge) {
fs.unlinkSync(cacheFile);
this.stats.misses++;
this.stats.cleanups++;
return null;
}
this.stats.hits++;
return cached;
} catch (error) {
// Clean up corrupted cache
try {
fs.unlinkSync(cacheFile);
this.stats.cleanups++;
} catch (cleanupError) {
// Ignoring cleanup errors as they are non-critical, but logging for visibility
console.warn(`Failed to clean up corrupted cache file: ${cleanupError.message}`);
}
this.stats.misses++;
return null;
}
}
/**
* Store validation result for a URL
* @param {string} url - The URL
* @param {Object} result - Validation result
* @returns {boolean} True if successfully cached, false otherwise
*/
set(url, result) {
const cacheFile = this.getCacheFilePath(url);
const cacheData = {
url,
result,
cachedAt: new Date().toISOString(),
ttl: new Date(Date.now() + this.maxAge).toISOString()
};
try {
fs.writeFileSync(cacheFile, JSON.stringify(cacheData, null, 2));
this.stats.stores++;
return true;
} catch (error) {
console.warn(`Failed to cache validation result for ${url}: ${error.message}`);
return false;
}
}
/**
* Check if a URL is cached and valid
* @param {string} url - The URL to check
* @returns {boolean} True if cached and valid
*/
isCached(url) {
return this.get(url) !== null;
}
/**
* Get cache statistics
* @returns {Object} Cache statistics
*/
getStats() {
const total = this.stats.hits + this.stats.misses;
const hitRate = total > 0 ? (this.stats.hits / total * 100).toFixed(1) : 0;
return {
...this.stats,
total,
hitRate: `${hitRate}%`
};
}
/**
* Clean up expired cache entries
* @returns {number} Number of entries cleaned up
*/
cleanup() {
let cleaned = 0;
try {
const files = fs.readdirSync(this.localCacheDir);
const cacheFiles = files.filter(file =>
file.startsWith(CACHE_KEY_PREFIX) && file.endsWith('.json')
);
for (const file of cacheFiles) {
const filePath = path.join(this.localCacheDir, file);
try {
const content = fs.readFileSync(filePath, 'utf8');
const cached = JSON.parse(content);
const age = Date.now() - new Date(cached.cachedAt).getTime();
if (age > this.maxAge) {
fs.unlinkSync(filePath);
cleaned++;
}
} catch (error) {
console.warn(`Failed to process cache file "${filePath}": ${error.message}`);
// Remove corrupted files
fs.unlinkSync(filePath);
cleaned++;
}
}
} catch (error) {
console.warn(`Cache cleanup failed: ${error.message}`);
}
this.stats.cleanups += cleaned;
return cleaned;
}
}
/**
* Cypress task helper to integrate cache with Cypress tasks
*/
export const createCypressCacheTasks = (options = {}) => {
const cache = new LinkCacheManager(options);
return {
getLinkCache: (url) => cache.get(url),
setLinkCache: ({ url, result }) => cache.set(url, result),
isLinkCached: (url) => cache.isCached(url),
getCacheStats: () => cache.getStats(),
cleanupCache: () => cache.cleanup()
};
};

View File

@ -1,310 +0,0 @@
/**
* Broken Links Reporter
* Handles collecting, storing, and reporting broken links found during tests
*/
import fs from 'fs';
export const BROKEN_LINKS_FILE = '/tmp/broken_links_report.json';
export const FIRST_BROKEN_LINK_FILE = '/tmp/first_broken_link.json';
const SOURCES_FILE = '/tmp/test_subjects_sources.json';
const CACHE_STATS_FILE = '/tmp/cache_statistics.json';
const VALIDATION_STRATEGY_FILE = '/tmp/validation_strategy.json';
/**
* Reads the broken links report from the file system
* @returns {Array} Parsed report data or empty array if file doesn't exist
*/
export function readBrokenLinksReport() {
if (!fs.existsSync(BROKEN_LINKS_FILE)) {
return [];
}
try {
const fileContent = fs.readFileSync(BROKEN_LINKS_FILE, 'utf8');
// Check if the file is empty or contains only an empty array
if (!fileContent || fileContent.trim() === '' || fileContent === '[]') {
return [];
}
// Try to parse the JSON content
try {
const parsedContent = JSON.parse(fileContent);
// Ensure the parsed content is an array
if (!Array.isArray(parsedContent)) {
console.error('Broken links report is not an array');
return [];
}
return parsedContent;
} catch (parseErr) {
console.error(
`Error parsing broken links report JSON: ${parseErr.message}`
);
return [];
}
} catch (err) {
console.error(`Error reading broken links report: ${err.message}`);
return [];
}
}
/**
* Reads the sources mapping file
* @returns {Object} A mapping from URLs to their source files
*/
function readSourcesMapping() {
try {
if (fs.existsSync(SOURCES_FILE)) {
const sourcesData = JSON.parse(fs.readFileSync(SOURCES_FILE, 'utf8'));
return sourcesData.reduce((acc, item) => {
if (item.url && item.source) {
acc[item.url] = item.source;
}
return acc;
}, {});
}
} catch (err) {
console.warn(`Warning: Could not read sources mapping: ${err.message}`);
}
return {};
}
/**
* Read cache statistics from file
* @returns {Object|null} Cache statistics or null if not found
*/
function readCacheStats() {
try {
if (fs.existsSync(CACHE_STATS_FILE)) {
const content = fs.readFileSync(CACHE_STATS_FILE, 'utf8');
return JSON.parse(content);
}
} catch (err) {
console.warn(`Warning: Could not read cache stats: ${err.message}`);
}
return null;
}
/**
* Read validation strategy from file
* @returns {Object|null} Validation strategy or null if not found
*/
function readValidationStrategy() {
try {
if (fs.existsSync(VALIDATION_STRATEGY_FILE)) {
const content = fs.readFileSync(VALIDATION_STRATEGY_FILE, 'utf8');
return JSON.parse(content);
}
} catch (err) {
console.warn(`Warning: Could not read validation strategy: ${err.message}`);
}
return null;
}
/**
* Save cache statistics for reporting
* @param {Object} stats - Cache statistics to save
*/
export function saveCacheStats(stats) {
try {
fs.writeFileSync(CACHE_STATS_FILE, JSON.stringify(stats, null, 2));
} catch (err) {
console.warn(`Warning: Could not save cache stats: ${err.message}`);
}
}
/**
* Save validation strategy for reporting
* @param {Object} strategy - Validation strategy to save
*/
export function saveValidationStrategy(strategy) {
try {
fs.writeFileSync(
VALIDATION_STRATEGY_FILE,
JSON.stringify(strategy, null, 2)
);
} catch (err) {
console.warn(`Warning: Could not save validation strategy: ${err.message}`);
}
}
/**
* Formats and displays the broken links report to the console
* @param {Array} brokenLinksReport - The report data to display
* @returns {number} The total number of broken links found
*/
export function displayBrokenLinksReport(brokenLinksReport = null) {
// If no report provided, read from file
if (!brokenLinksReport) {
brokenLinksReport = readBrokenLinksReport();
}
// Read cache statistics and validation strategy
const cacheStats = readCacheStats();
const validationStrategy = readValidationStrategy();
// Display cache performance first
if (cacheStats) {
console.log('\n📊 Link Validation Cache Performance:');
console.log('=======================================');
console.log(`Cache hit rate: ${cacheStats.hitRate}%`);
console.log(`Cache hits: ${cacheStats.cacheHits}`);
console.log(`Cache misses: ${cacheStats.cacheMisses}`);
console.log(`Total validations: ${cacheStats.totalValidations || cacheStats.cacheHits + cacheStats.cacheMisses}`);
console.log(`New entries stored: ${cacheStats.newEntriesStored || 0}`);
if (cacheStats.cleanups > 0) {
console.log(`Expired entries cleaned: ${cacheStats.cleanups}`);
}
if (cacheStats.totalValidations > 0) {
const message = cacheStats.cacheHits > 0
? `✨ Cache optimization saved ${cacheStats.cacheHits} link validations`
: '🔄 No cache hits - all links were validated fresh';
console.log(message);
}
if (validationStrategy) {
console.log(`Files analyzed: ${validationStrategy.total}`);
console.log(
`Links needing validation: ${validationStrategy.newLinks.length}`
);
}
console.log(''); // Add spacing after cache stats
}
// Check both the report and first broken link file to determine if we have broken links
const firstBrokenLink = readFirstBrokenLink();
// Only report "no broken links" if both checks pass
if (
(!brokenLinksReport || brokenLinksReport.length === 0) &&
!firstBrokenLink
) {
console.log('\n✅ No broken links detected in the validation report');
return 0;
}
// Special case: a first broken link was recorded but the report itself is empty
if (
firstBrokenLink &&
(!brokenLinksReport || brokenLinksReport.length === 0)
) {
console.error(
'\n⚠ Warning: First broken link record exists but no links in the report.'
);
console.error('This could indicate a reporting issue.');
}
// Load sources mapping
const sourcesMapping = readSourcesMapping();
// Print a prominent header
console.error('\n\n' + '='.repeat(80));
console.error(' 🚨 BROKEN LINKS DETECTED 🚨 ');
console.error('='.repeat(80));
// Show first failing link if available
if (firstBrokenLink) {
console.error('\n🔴 FIRST FAILING LINK:');
console.error(` URL: ${firstBrokenLink.url}`);
console.error(` Status: ${firstBrokenLink.status}`);
console.error(` Type: ${firstBrokenLink.type}`);
console.error(` Page: ${firstBrokenLink.page}`);
if (firstBrokenLink.linkText) {
console.error(
` Link text: "${firstBrokenLink.linkText.substring(0, 50)}${firstBrokenLink.linkText.length > 50 ? '...' : ''}"`
);
}
console.error('-'.repeat(40));
}
let totalBrokenLinks = 0;
brokenLinksReport.forEach((report) => {
console.error(`\n📄 PAGE: ${report.page}`);
// Add source information if available
const source = sourcesMapping[report.page];
if (source) {
console.error(` PAGE CONTENT SOURCE: ${source}`);
}
console.error('-'.repeat(40));
report.links.forEach((link) => {
console.error(`${link.url}`);
console.error(` - Status: ${link.status}`);
console.error(` - Type: ${link.type}`);
if (link.linkText) {
console.error(
` - Link text: "${link.linkText.substring(0, 50)}${link.linkText.length > 50 ? '...' : ''}"`
);
}
console.error('');
totalBrokenLinks++;
});
});
// Print a prominent summary footer
console.error('='.repeat(80));
console.error(`📊 TOTAL BROKEN LINKS FOUND: ${totalBrokenLinks}`);
console.error('='.repeat(80) + '\n');
return totalBrokenLinks;
}
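// Illustrative shape of a report entry consumed above (hypothetical values):
// [{
//   page: '/influxdb3/core/get-started/',
//   links: [
//     { url: 'https://example.com/broken', status: 404, type: 'external', linkText: 'example link' },
//   ],
// }]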
/**
* Reads the first broken link info from the file system
* @returns {Object|null} First broken link data or null if not found
*/
export function readFirstBrokenLink() {
if (!fs.existsSync(FIRST_BROKEN_LINK_FILE)) {
return null;
}
try {
const fileContent = fs.readFileSync(FIRST_BROKEN_LINK_FILE, 'utf8');
// Check if the file is empty or contains whitespace only
if (!fileContent || fileContent.trim() === '') {
return null;
}
// Try to parse the JSON content
try {
return JSON.parse(fileContent);
} catch (parseErr) {
console.error(
`Error parsing first broken link JSON: ${parseErr.message}`
);
return null;
}
} catch (err) {
console.error(`Error reading first broken link: ${err.message}`);
return null;
}
}
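// Illustrative contents of FIRST_BROKEN_LINK_FILE (hypothetical values), mirroring the fields logged above:
// { "url": "https://example.com/broken", "status": 404, "type": "external", "page": "/influxdb3/core/get-started/", "linkText": "example link" }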
/**
* Initialize the broken links report files
* @returns {boolean} True if initialization was successful
*/
export function initializeReport() {
try {
// Create an empty array for the broken links report
fs.writeFileSync(BROKEN_LINKS_FILE, '[]', 'utf8');
// Reset the first broken link file by creating an empty file
// Using empty string as a clear indicator that no broken link has been recorded yet
fs.writeFileSync(FIRST_BROKEN_LINK_FILE, '', 'utf8');
console.debug('🔄 Initialized broken links reporting system');
return true;
} catch (err) {
console.error(`Error initializing broken links report: ${err.message}`);
return false;
}
}

View File

@ -2,34 +2,10 @@
* InfluxData Documentation E2E Test Runner
*
* This script automates running Cypress end-to-end tests for the InfluxData documentation site.
* It handles starting a local Hugo server, mapping content files to their URLs, running Cypress tests,
* It handles starting a local Hugo server, mapping content files to their URLs, and running Cypress tests,
* and reporting broken links.
*
* Usage: node run-e2e-specs.js [file paths...] [--spec test specs...]
*
* Example: node run-e2e-specs.js content/influxdb/v2/write-data.md --spec cypress/e2e/content/article-links.cy.js
*/
// Display broken links report
const brokenLinksCount = displayBrokenLinksReport();
// Check if we might have special case failures
const hasSpecialCaseFailures =
results &&
results.totalFailed > 0 &&
brokenLinksCount === 0;
if (hasSpecialCaseFailures) {
console.warn(
` Note: Tests failed (${results.totalFailed}) but no broken links were reported. This may be due to special case URLs (like Reddit) that return expected status codes.`
);
}
if (
(results && results.totalFailed && results.totalFailed > 0 && !hasSpecialCaseFailures) ||
brokenLinksCount > 0
) {
console.error(
`⚠️ Tests failed: ${results.totalFailed || 0} test(s) failed, ${brokenLinksCount || 0} broken links found`
);
cypressFailed = true;
exitCode = 1;
import { spawn } from 'child_process';
@ -39,7 +15,6 @@ import path from 'path';
import cypress from 'cypress';
import net from 'net';
import { Buffer } from 'buffer';
import { displayBrokenLinksReport, initializeReport } from './link-reporter.js';
import {
HUGO_ENVIRONMENT,
HUGO_PORT,
@ -119,7 +94,7 @@ async function main() {
let exitCode = 0;
let hugoStarted = false;
// (Lines 124-126 removed; no replacement needed)
// Add this signal handler to ensure cleanup on unexpected termination
const cleanupAndExit = (code = 1) => {
@ -364,10 +339,6 @@ async function main() {
// 4. Run Cypress tests
let cypressFailed = false;
try {
// Initialize/clear broken links report before running tests
console.log('Initializing broken links report...');
initializeReport();
console.log(`Running Cypress tests for ${urlList.length} URLs...`);
// Add CI-specific configuration
@ -426,19 +397,13 @@ async function main() {
clearInterval(hugoHealthCheckInterval);
}
// Process broken links report
const brokenLinksCount = displayBrokenLinksReport();
// Determine why tests failed
const testFailureCount = results?.totalFailed || 0;
if (testFailureCount > 0 && brokenLinksCount === 0) {
if (testFailureCount > 0) {
console.warn(
` Note: ${testFailureCount} test(s) failed but no broken links were detected in the report.`
);
console.warn(
' This usually indicates test errors unrelated to link validation.'
);
// Provide detailed failure analysis
if (results) {
@ -531,14 +496,8 @@ async function main() {
// but we'll still report other test failures
cypressFailed = true;
exitCode = 1;
} else if (brokenLinksCount > 0) {
console.error(
`⚠️ Tests failed: ${brokenLinksCount} broken link(s) detected`
);
cypressFailed = true;
exitCode = 1;
} else if (results) {
console.log('✅ Tests completed successfully');
console.log('✅ e2e tests completed successfully');
}
} catch (err) {
console.error(`❌ Cypress execution error: ${err.message}`);
@ -609,9 +568,6 @@ async function main() {
console.error(' • Check if test URLs are accessible manually');
console.error(' • Review Cypress screenshots/videos if available');
// Still try to display broken links report if available
displayBrokenLinksReport();
cypressFailed = true;
exitCode = 1;
} finally {

View File

@ -100,7 +100,7 @@ influxdb:
latest: v2.7
latest_patches:
v2: 2.7.12
v1: 1.12.1
v1: 1.11.8
latest_cli:
v2: 2.7.5
ai_sample_questions:
@ -157,7 +157,7 @@ chronograf:
versions: [v1]
latest: v1.10
latest_patches:
v1: 1.10.7
v1: 1.10.8
ai_sample_questions:
- How do I configure Chronograf for InfluxDB v1?
- How do I create a dashboard in Chronograf?
@ -183,9 +183,9 @@ enterprise_influxdb:
menu_category: self-managed
list_order: 5
versions: [v1]
latest: v1.12
latest: v1.11
latest_patches:
v1: 1.12.1
v1: 1.11.8
ai_sample_questions:
- How can I configure my InfluxDB v1 Enterprise server?
- How do I replicate data between InfluxDB v1 Enterprise and OSS?

View File

@ -2940,8 +2940,8 @@ output:
Explorer](https://docs.microsoft.com/en-us/azure/data-explorer), [Azure
Synapse Data
Explorer](https://docs.microsoft.com/en-us/azure/synapse-analytics/data-explorer/data-explorer-overview),
and [Real time analytics in
Fabric](https://learn.microsoft.com/en-us/fabric/real-time-analytics/overview)
and [Real-Time Intelligence in
Fabric](https://learn.microsoft.com/fabric/real-time-intelligence/overview)
services.
Azure Data Explorer is a distributed, columnar store, purpose built for

View File

@ -1,373 +0,0 @@
# InfluxDB 3 Monolith (Core and Enterprise) Helper Scripts
This directory contains helper scripts specifically for InfluxDB 3 Core and Enterprise (monolith deployments), as opposed to distributed/clustered deployments.
## Overview
These scripts help with documentation workflows for InfluxDB 3 Core and Enterprise, including CLI change detection, authentication setup, API analysis, and release preparation.
## Prerequisites
- **Docker and Docker Compose**: For running InfluxDB 3 containers
- **Node.js 16+**: For running JavaScript ESM scripts
- **Active containers**: InfluxDB 3 Core and/or Enterprise containers running via `docker compose`
- **Secret files**: Docker Compose secrets for auth tokens (`~/.env.influxdb3-core-admin-token` and `~/.env.influxdb3-enterprise-admin-token`)
## Scripts
### 🔐 Authentication & Setup
#### `setup-auth-tokens.sh`
Creates and configures authentication tokens for InfluxDB 3 containers.
**Usage:**
```bash
./setup-auth-tokens.sh [core|enterprise|both]
```
**What it does:**
- Checks existing tokens in secret files (`~/.env.influxdb3-core-admin-token` and `~/.env.influxdb3-enterprise-admin-token`)
- Starts containers if not running
- Creates admin tokens using `influxdb3 create token --admin`
- Updates appropriate secret files with new tokens
- Tests tokens to ensure they work
**Example:**
```bash
# Set up both Core and Enterprise tokens
./setup-auth-tokens.sh both
# Set up only Enterprise
./setup-auth-tokens.sh enterprise
```
### 🔍 CLI Documentation Audit
#### `audit-cli-documentation.js`
JavaScript ESM script that audits InfluxDB 3 CLI commands against existing documentation to identify missing or outdated content.
**Usage:**
```bash
node audit-cli-documentation.js [core|enterprise|both] [version|local]
```
**Features:**
- Compares actual CLI help output with documented commands
- Identifies missing documentation for new CLI options
- Finds documented options that no longer exist in the CLI
- Supports both released versions and local containers
- Generates detailed audit reports with recommendations
- Handles authentication automatically using Docker secrets
**Examples:**
```bash
# Audit Core documentation against local container
node audit-cli-documentation.js core local
# Audit Enterprise documentation against specific version
node audit-cli-documentation.js enterprise v3.2.0
# Audit both products against local containers
node audit-cli-documentation.js both local
```
**Output:**
- `../output/cli-audit/documentation-audit-{product}-{version}.md` - Detailed audit report
- `../output/cli-audit/parsed-cli-{product}-{version}.md` - Parsed CLI structure
- `../output/cli-audit/patches/{product}/` - Generated patches for missing documentation
### 🛠️ CLI Documentation Updates
#### `apply-cli-patches.js`
JavaScript ESM script that applies generated patches to update CLI documentation with missing options.
**Usage:**
```bash
node apply-cli-patches.js [core|enterprise|both] [--dry-run]
```
**Features:**
- Applies patches generated by `audit-cli-documentation.js`
- Updates CLI reference documentation with missing options
- Supports dry-run mode to preview changes
- Maintains existing documentation structure and formatting
- Creates backups before applying changes
**Examples:**
```bash
# Preview changes without applying (dry run)
node apply-cli-patches.js core --dry-run
# Apply patches to Enterprise documentation
node apply-cli-patches.js enterprise
# Apply patches to both products
node apply-cli-patches.js both
```
**Output:**
- Updates CLI reference documentation files in place
- Creates backup files with `.backup` extension
- Logs all changes made to the documentation
## Quick Start Guide
### 1. Initial Setup
```bash
# Navigate to the monolith scripts directory
cd helper-scripts/influxdb3-monolith
# Make scripts executable
chmod +x *.sh
# Set up authentication for both products
./setup-auth-tokens.sh both
# Restart containers to load new secrets
docker compose down && docker compose up -d influxdb3-core influxdb3-enterprise
```
### 2. CLI Documentation Audit
```bash
# Start your containers
docker compose up -d influxdb3-core influxdb3-enterprise
# Audit CLI documentation
node audit-cli-documentation.js core local
node audit-cli-documentation.js enterprise local
# Review the output
ls ../output/cli-audit/
```
### 3. Development Workflow
```bash
# Audit documentation for both products
node audit-cli-documentation.js both local
# Check the audit results
cat ../output/cli-audit/documentation-audit-core-local.md
cat ../output/cli-audit/documentation-audit-enterprise-local.md
# Apply patches if needed (dry run first)
node apply-cli-patches.js both --dry-run
```
### 4. Release Documentation Updates
For release documentation, use the audit and patch workflow:
```bash
# Audit against released version
node audit-cli-documentation.js enterprise v3.2.0
# Review missing documentation
cat ../output/cli-audit/documentation-audit-enterprise-v3.2.0.md
# Apply patches to update documentation
node apply-cli-patches.js enterprise
# Verify changes look correct
git diff content/influxdb3/enterprise/reference/cli/
```
## Container Integration
The scripts work with your Docker Compose setup (a minimal sketch follows these lists):
**Expected container names:**
- `influxdb3-core` (port 8282)
- `influxdb3-enterprise` (port 8181)
**Docker Compose secrets:**
- `influxdb3-core-admin-token` - Admin token for Core (stored in `~/.env.influxdb3-core-admin-token`)
- `influxdb3-enterprise-admin-token` - Admin token for Enterprise (stored in `~/.env.influxdb3-enterprise-admin-token`)
- `INFLUXDB3_LICENSE_EMAIL` - Enterprise license email (set in `.env.3ent` env_file)
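A minimal compose sketch, assuming the names above (service names, ports, secret files, and the `.env.3ent` env_file); this is not the repository's actual compose file, so align it with the real one before use:
```yaml
# Hypothetical sketch only — adjust to match the repository's actual compose file.
services:
  influxdb3-core:
    image: influxdb:3-core
    ports:
      - "8282:8282"   # Core is assumed to serve on 8282 inside the container (see setup-auth-tokens.sh)
    secrets:
      - influxdb3-core-admin-token
  influxdb3-enterprise:
    image: influxdb:3-enterprise
    ports:
      - "8181:8181"
    env_file:
      - .env.3ent     # provides INFLUXDB3_LICENSE_EMAIL
    secrets:
      - influxdb3-enterprise-admin-token
secrets:
  influxdb3-core-admin-token:
    file: ${HOME}/.env.influxdb3-core-admin-token
  influxdb3-enterprise-admin-token:
    file: ${HOME}/.env.influxdb3-enterprise-admin-token
```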
## Use Cases
### 📋 Release Documentation
1. **Pre-release audit:**
```bash
node audit-cli-documentation.js core v3.2.0
```
2. **Review audit results and update documentation**
3. **Apply patches for missing content**
4. **Test documented commands work correctly**
### 🔬 Development Testing
1. **Audit local development:**
```bash
node audit-cli-documentation.js enterprise local
```
2. **Verify new features are documented**
3. **Test authentication setup**
4. **Apply patches to keep docs current**
### 🚀 Release Preparation
1. **Final audit before release:**
```bash
node audit-cli-documentation.js both local
```
2. **Apply all pending patches**
3. **Update examples and tutorials**
4. **Verify all CLI commands work as documented**
## Output Structure
```
helper-scripts/
├── output/
│ └── cli-audit/
│ ├── documentation-audit-core-local.md # CLI documentation audit report
│ ├── documentation-audit-enterprise-v3.2.0.md # CLI documentation audit report
│ ├── parsed-cli-core-local.md # Parsed CLI structure
│ ├── parsed-cli-enterprise-v3.2.0.md # Parsed CLI structure
│ └── patches/
│ ├── core/ # Generated patches for Core
│ │ ├── influxdb3-cli-patch-001.md
│ │ └── influxdb3-cli-patch-002.md
│ └── enterprise/ # Generated patches for Enterprise
│ ├── influxdb3-cli-patch-001.md
│ └── influxdb3-cli-patch-002.md
└── influxdb3-monolith/
├── README.md # This file
├── setup-auth-tokens.sh # Auth setup
├── audit-cli-documentation.js # CLI documentation audit
└── apply-cli-patches.js # CLI documentation patches
```
## Error Handling
### Common Issues
**Container not running:**
```bash
# Check status
docker compose ps
# Start specific service
docker compose up -d influxdb3-core
```
**Authentication failures:**
```bash
# Recreate tokens
./setup-auth-tokens.sh both
# Test manually
docker exec influxdb3-core influxdb3 create token --admin
```
**Version not found:**
```bash
# Check available versions
docker pull influxdb:3.2.0-core
docker pull influxdb:3.2.0-enterprise
```
### Debug Mode
Enable debug output for troubleshooting:
```bash
DEBUG=1 node audit-cli-documentation.js core local
```
## Integration with CI/CD
### GitHub Actions Example
```yaml
- name: Audit CLI Documentation
run: |
cd helper-scripts/influxdb3-monolith
node audit-cli-documentation.js core ${{ env.VERSION }}
- name: Upload CLI Audit Results
uses: actions/upload-artifact@v3
with:
name: cli-audit
path: helper-scripts/output/cli-audit/
```
### CircleCI Example
```yaml
- run:
name: CLI Documentation Audit
command: |
cd helper-scripts/influxdb3-monolith
node audit-cli-documentation.js enterprise v3.2.0
- store_artifacts:
path: helper-scripts/output/cli-audit/
```
## Best Practices
### 🔒 Security
- Secret files (`~/.env.influxdb3-*-admin-token`) are stored in your home directory and not in version control
- Rotate auth tokens regularly by re-running `setup-auth-tokens.sh`
- Use minimal token permissions when possible
### 📚 Documentation
- Run audits early in release cycle
- Review all audit reports for missing content
- Apply patches to keep documentation current
- Test all documented commands work correctly
### 🔄 Workflow
- Use `local` version for development testing
- Audit against released versions for release prep
- Generate patches before documentation updates
- Validate changes with stakeholders
## Troubleshooting
### Script Permissions
```bash
chmod +x *.sh
```
### Missing Dependencies
```bash
# Node.js dependencies
node --version # Should be 16 or higher
# Docker Compose
docker compose version
```
### Container Health
```bash
# Check container logs
docker logs influxdb3-core
docker logs influxdb3-enterprise
# Test basic connectivity
docker exec influxdb3-core influxdb3 --version
```
## Contributing
When adding new scripts to this directory:
1. **Follow naming conventions**: Use lowercase with hyphens
2. **Add usage documentation**: Include help text in scripts
3. **Handle errors gracefully**: Use proper exit codes
4. **Test with both products**: Ensure Core and Enterprise compatibility
5. **Update this README**: Document new functionality
## Related Documentation
- [InfluxDB 3 Core CLI Reference](/influxdb3/core/reference/cli/)
- [InfluxDB 3 Enterprise CLI Reference](/influxdb3/enterprise/reference/cli/)

View File

@ -1,277 +0,0 @@
#!/usr/bin/env node
/**
* Apply CLI documentation patches generated by audit-cli-documentation.js
* Usage: node apply-cli-patches.js [core|enterprise|both] [--dry-run]
*/
import { promises as fs } from 'fs';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
import process from 'node:process';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
// Color codes
const Colors = {
RED: '\x1b[0;31m',
GREEN: '\x1b[0;32m',
YELLOW: '\x1b[1;33m',
BLUE: '\x1b[0;34m',
NC: '\x1b[0m', // No Color
};
async function fileExists(path) {
try {
await fs.access(path);
return true;
} catch {
return false;
}
}
async function ensureDir(dir) {
await fs.mkdir(dir, { recursive: true });
}
async function extractFrontmatter(content) {
const lines = content.split('\n');
if (lines[0] !== '---') return { frontmatter: null, content };
const frontmatterLines = [];
let i = 1;
while (i < lines.length && lines[i] !== '---') {
frontmatterLines.push(lines[i]);
i++;
}
if (i >= lines.length) return { frontmatter: null, content };
const frontmatterText = frontmatterLines.join('\n');
const remainingContent = lines.slice(i + 1).join('\n');
return { frontmatter: frontmatterText, content: remainingContent };
}
async function getActualDocumentationPath(docPath, projectRoot) {
// Check if the documentation file exists and has a source field
const fullPath = join(projectRoot, docPath);
if (await fileExists(fullPath)) {
const content = await fs.readFile(fullPath, 'utf8');
const { frontmatter } = await extractFrontmatter(content);
if (frontmatter) {
// Look for source: field in frontmatter
const sourceMatch = frontmatter.match(/^source:\s*(.+)$/m);
if (sourceMatch) {
const sourcePath = sourceMatch[1].trim();
return sourcePath;
}
}
}
return docPath;
}
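// Illustrative example (hypothetical stub): if content/influxdb3/enterprise/reference/cli/influxdb3/query.md
// contains frontmatter such as
//   ---
//   source: /shared/influxdb3-cli/query.md
//   ---
// this resolves to the value of `source` (here '/shared/influxdb3-cli/query.md') instead of the stub path.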
async function applyPatches(product, dryRun = false) {
const patchDir = join(
dirname(__dirname),
'output',
'cli-audit',
'patches',
product
);
const projectRoot = join(__dirname, '..', '..');
console.log(
`${Colors.BLUE}📋 Applying CLI documentation patches for ${product}${Colors.NC}`
);
if (dryRun) {
console.log(
`${Colors.YELLOW}🔍 DRY RUN - No files will be created${Colors.NC}`
);
}
console.log();
// Check if patch directory exists
if (!(await fileExists(patchDir))) {
console.log(`${Colors.YELLOW}No patches found for ${product}.${Colors.NC}`);
console.log("Run 'yarn audit:cli' first to generate patches.");
return;
}
// Read all patch files
const patchFiles = await fs.readdir(patchDir);
const mdFiles = patchFiles.filter((f) => f.endsWith('.md'));
if (mdFiles.length === 0) {
console.log(
`${Colors.YELLOW}No patch files found in ${patchDir}${Colors.NC}`
);
return;
}
console.log(`Found ${mdFiles.length} patch file(s) to apply:\n`);
// Map patch files to their destination
const baseCliPath = `content/influxdb3/${product}/reference/cli/influxdb3`;
const commandToFile = {
'create-database.md': `${baseCliPath}/create/database.md`,
'create-token.md': `${baseCliPath}/create/token/_index.md`,
'create-token-admin.md': `${baseCliPath}/create/token/admin.md`,
'create-trigger.md': `${baseCliPath}/create/trigger.md`,
'create-table.md': `${baseCliPath}/create/table.md`,
'create-last_cache.md': `${baseCliPath}/create/last_cache.md`,
'create-distinct_cache.md': `${baseCliPath}/create/distinct_cache.md`,
'show-databases.md': `${baseCliPath}/show/databases.md`,
'show-tokens.md': `${baseCliPath}/show/tokens.md`,
'delete-database.md': `${baseCliPath}/delete/database.md`,
'delete-table.md': `${baseCliPath}/delete/table.md`,
'query.md': `${baseCliPath}/query.md`,
'write.md': `${baseCliPath}/write.md`,
};
let applied = 0;
let skipped = 0;
for (const patchFile of mdFiles) {
const destinationPath = commandToFile[patchFile];
if (!destinationPath) {
console.log(
`${Colors.YELLOW}⚠️ Unknown patch file: ${patchFile}${Colors.NC}`
);
continue;
}
// Get the actual documentation path (handles source: frontmatter)
const actualPath = await getActualDocumentationPath(
destinationPath,
projectRoot
);
const fullDestPath = join(projectRoot, actualPath);
const patchPath = join(patchDir, patchFile);
// Check if destination already exists
if (await fileExists(fullDestPath)) {
console.log(
`${Colors.YELLOW}⏭️ Skipping${Colors.NC} ${patchFile} - destination already exists:`
);
console.log(` ${actualPath}`);
skipped++;
continue;
}
if (dryRun) {
console.log(`${Colors.BLUE}🔍 Would create${Colors.NC} ${actualPath}`);
console.log(` from patch: ${patchFile}`);
if (actualPath !== destinationPath) {
console.log(` (resolved from: ${destinationPath})`);
}
applied++;
} else {
try {
// Ensure destination directory exists
await ensureDir(dirname(fullDestPath));
// Copy patch to destination
const content = await fs.readFile(patchPath, 'utf8');
// Update the menu configuration based on product
let updatedContent = content;
if (product === 'enterprise') {
updatedContent = content
.replace('influxdb3/core/tags:', 'influxdb3/enterprise/tags:')
.replace(
'influxdb3_core_reference:',
'influxdb3_enterprise_reference:'
);
}
await fs.writeFile(fullDestPath, updatedContent);
console.log(`${Colors.GREEN}✅ Created${Colors.NC} ${actualPath}`);
console.log(` from patch: ${patchFile}`);
if (actualPath !== destinationPath) {
console.log(` (resolved from: ${destinationPath})`);
}
applied++;
} catch (error) {
console.log(
`${Colors.RED}❌ Error${Colors.NC} creating ${actualPath}:`
);
console.log(` ${error.message}`);
}
}
}
console.log();
console.log(`${Colors.BLUE}Summary:${Colors.NC}`);
console.log(`- Patches ${dryRun ? 'would be' : ''} applied: ${applied}`);
console.log(`- Files skipped (already exist): ${skipped}`);
console.log(`- Total patch files: ${mdFiles.length}`);
if (!dryRun && applied > 0) {
console.log();
console.log(
`${Colors.GREEN}✨ Success!${Colors.NC} Created ${applied} new ` +
'documentation file(s).'
);
console.log();
console.log('Next steps:');
console.log('1. Review the generated files and customize the content');
console.log('2. Add proper examples with placeholders');
console.log('3. Update descriptions and add any missing options');
console.log('4. Run tests: yarn test:links');
}
}
async function main() {
const args = process.argv.slice(2);
const product =
args.find((arg) => ['core', 'enterprise', 'both'].includes(arg)) || 'both';
const dryRun = args.includes('--dry-run');
if (args.includes('--help') || args.includes('-h')) {
console.log(
'Usage: node apply-cli-patches.js [core|enterprise|both] [--dry-run]'
);
console.log();
console.log('Options:');
console.log(
' --dry-run Show what would be done without creating files'
);
console.log();
console.log('Examples:');
console.log(
' node apply-cli-patches.js # Apply patches for both products'
);
console.log(
' node apply-cli-patches.js core --dry-run # Preview core patches'
);
console.log(
' node apply-cli-patches.js enterprise # Apply enterprise patches'
);
process.exit(0);
}
try {
if (product === 'both') {
await applyPatches('core', dryRun);
console.log();
await applyPatches('enterprise', dryRun);
} else {
await applyPatches(product, dryRun);
}
} catch (error) {
console.error(`${Colors.RED}Error:${Colors.NC}`, error.message);
process.exit(1);
}
}
// Run if called directly
if (import.meta.url === `file://${process.argv[1]}`) {
main();
}

View File

@ -1,974 +0,0 @@
#!/usr/bin/env node
/**
* Audit CLI documentation against current CLI help output
* Usage: node audit-cli-documentation.js [core|enterprise|both] [version]
* Example: node audit-cli-documentation.js core 3.2.0
*/
import { spawn } from 'child_process';
import { existsSync, promises as fs } from 'fs';
import { homedir } from 'os';
import { join, dirname } from 'path';
import { fileURLToPath } from 'url';
import {
validateVersionInputs,
getRepositoryRoot,
} from '../common/validate-tags.js';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
// Color codes
const Colors = {
RED: '\x1b[0;31m',
GREEN: '\x1b[0;32m',
YELLOW: '\x1b[1;33m',
BLUE: '\x1b[0;34m',
NC: '\x1b[0m', // No Color
};
class CLIDocAuditor {
constructor(product = 'both', version = 'local') {
this.product = product;
this.version = version;
this.outputDir = join(dirname(__dirname), 'output', 'cli-audit');
// Token paths - check environment variables first (Docker Compose), then fall back to local files
const coreTokenEnv = process.env.INFLUXDB3_CORE_TOKEN;
const enterpriseTokenEnv = process.env.INFLUXDB3_ENTERPRISE_TOKEN;
if (coreTokenEnv && existsSync(coreTokenEnv)) {
// Running in Docker Compose with secrets
this.coreTokenFile = coreTokenEnv;
this.enterpriseTokenFile = enterpriseTokenEnv;
} else {
// Running locally
this.coreTokenFile = join(homedir(), '.env.influxdb3-core-admin-token');
this.enterpriseTokenFile = join(
homedir(),
'.env.influxdb3-enterprise-admin-token'
);
}
// Commands to extract help for
this.mainCommands = [
'create',
'delete',
'disable',
'enable',
'query',
'show',
'test',
'update',
'write',
];
this.subcommands = [
'create database',
'create token admin',
'create token',
'create trigger',
'create last_cache',
'create distinct_cache',
'create table',
'show databases',
'show tokens',
'show system',
'delete database',
'delete table',
'delete trigger',
'update database',
'test wal_plugin',
'test schedule_plugin',
];
// Map for command tracking during option parsing
this.commandOptionsMap = {};
}
async fileExists(path) {
try {
await fs.access(path);
return true;
} catch {
return false;
}
}
async ensureDir(dir) {
await fs.mkdir(dir, { recursive: true });
}
async loadTokens() {
let coreToken = null;
let enterpriseToken = null;
try {
if (await this.fileExists(this.coreTokenFile)) {
const stat = await fs.stat(this.coreTokenFile);
if (stat.size > 0) {
coreToken = (await fs.readFile(this.coreTokenFile, 'utf8')).trim();
}
}
} catch {
// Token file doesn't exist or can't be read
}
try {
if (await this.fileExists(this.enterpriseTokenFile)) {
const stat = await fs.stat(this.enterpriseTokenFile);
if (stat.size > 0) {
enterpriseToken = (
await fs.readFile(this.enterpriseTokenFile, 'utf8')
).trim();
}
}
} catch {
// Token file doesn't exist or can't be read
}
return { coreToken, enterpriseToken };
}
runCommand(cmd, args = []) {
return new Promise((resolve) => {
const child = spawn(cmd, args, { encoding: 'utf8' });
let stdout = '';
let stderr = '';
child.stdout.on('data', (data) => {
stdout += data.toString();
});
child.stderr.on('data', (data) => {
stderr += data.toString();
});
child.on('close', (code) => {
resolve({ code, stdout, stderr });
});
child.on('error', (err) => {
resolve({ code: 1, stdout: '', stderr: err.message });
});
});
}
async extractCurrentCLI(product, outputFile) {
process.stdout.write(
`Extracting current CLI help from influxdb3-${product}...`
);
await this.loadTokens();
if (this.version === 'local') {
const containerName = `influxdb3-${product}`;
// Check if container is running
const { code, stdout } = await this.runCommand('docker', [
'ps',
'--format',
'{{.Names}}',
]);
if (code !== 0 || !stdout.includes(containerName)) {
console.log(` ${Colors.RED}✗${Colors.NC}`);
console.log(`Error: Container ${containerName} is not running.`);
console.log(`Start it with: docker compose up -d influxdb3-${product}`);
return false;
}
// Extract comprehensive help
let fileContent = '';
// Main help
const mainHelp = await this.runCommand('docker', [
'exec',
containerName,
'influxdb3',
'--help',
]);
fileContent += mainHelp.code === 0 ? mainHelp.stdout : mainHelp.stderr;
// Extract all subcommand help
for (const cmd of this.mainCommands) {
fileContent += `\n\n===== influxdb3 ${cmd} --help =====\n`;
const cmdHelp = await this.runCommand('docker', [
'exec',
containerName,
'influxdb3',
cmd,
'--help',
]);
fileContent += cmdHelp.code === 0 ? cmdHelp.stdout : cmdHelp.stderr;
}
// Extract detailed subcommand help
for (const subcmd of this.subcommands) {
fileContent += `\n\n===== influxdb3 ${subcmd} --help =====\n`;
const cmdParts = [
'exec',
containerName,
'influxdb3',
...subcmd.split(' '),
'--help',
];
const subcmdHelp = await this.runCommand('docker', cmdParts);
fileContent +=
subcmdHelp.code === 0 ? subcmdHelp.stdout : subcmdHelp.stderr;
}
await fs.writeFile(outputFile, fileContent);
console.log(` ${Colors.GREEN}✓${Colors.NC}`);
} else {
// Use specific version image
const image = `influxdb:${this.version}-${product}`;
process.stdout.write(`Extracting CLI help from ${image}...`);
// Pull image if needed
const pullResult = await this.runCommand('docker', ['pull', image]);
if (pullResult.code !== 0) {
console.log(` ${Colors.RED}✗${Colors.NC}`);
console.log(`Error: Failed to pull image ${image}`);
return false;
}
// Extract help from specific version
let fileContent = '';
// Main help
const mainHelp = await this.runCommand('docker', [
'run',
'--rm',
image,
'influxdb3',
'--help',
]);
fileContent += mainHelp.code === 0 ? mainHelp.stdout : mainHelp.stderr;
// Extract subcommand help
for (const cmd of this.mainCommands) {
fileContent += `\n\n===== influxdb3 ${cmd} --help =====\n`;
const cmdHelp = await this.runCommand('docker', [
'run',
'--rm',
image,
'influxdb3',
cmd,
'--help',
]);
fileContent += cmdHelp.code === 0 ? cmdHelp.stdout : cmdHelp.stderr;
}
await fs.writeFile(outputFile, fileContent);
console.log(` ${Colors.GREEN}✓${Colors.NC}`);
}
return true;
}
async parseCLIHelp(helpFile, parsedFile) {
const content = await fs.readFile(helpFile, 'utf8');
const lines = content.split('\n');
let output = '# CLI Commands and Options\n\n';
let currentCommand = '';
let inOptions = false;
for (const line of lines) {
// Detect command headers
if (line.startsWith('===== influxdb3') && line.endsWith('--help =====')) {
currentCommand = line
.replace('===== ', '')
.replace(' --help =====', '')
.trim();
output += `## ${currentCommand}\n\n`;
inOptions = false;
// Initialize options list for this command
this.commandOptionsMap[currentCommand] = [];
}
// Detect options sections
else if (line.trim() === 'Options:') {
output += '### Options:\n\n';
inOptions = true;
}
// Parse option lines
else if (inOptions && /^\s*-/.test(line)) {
// Extract option and description
const optionMatch = line.match(/--[a-z][a-z0-9-]*/);
const shortMatch = line.match(/\s-[a-zA-Z],/);
if (optionMatch) {
const option = optionMatch[0];
const shortOption = shortMatch
? shortMatch[0].replace(/[,\s]/g, '')
: null;
// Extract description by removing option parts
let description = line.replace(/^\s*-[^\s]*\s*/, '');
description = description.replace(/^\s*--[^\s]*\s*/, '').trim();
if (shortOption) {
output += `- \`${shortOption}, ${option}\`: ${description}\n`;
} else {
output += `- \`${option}\`: ${description}\n`;
}
// Store option with its command context
if (currentCommand && option) {
this.commandOptionsMap[currentCommand].push(option);
}
}
}
// Reset options flag for new sections
else if (/^[A-Z][a-z]+:$/.test(line.trim())) {
inOptions = false;
}
}
await fs.writeFile(parsedFile, output);
}
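// Illustrative example of the transformation above (hypothetical help text, not actual CLI output):
//   Input section:
//     ===== influxdb3 create database --help =====
//     Options:
//       -d, --database <DATABASE_NAME>  Name of the database to create
//   Parsed markdown produced:
//     ## influxdb3 create database
//     ### Options:
//     - `-d, --database`: <DATABASE_NAME>  Name of the database to create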
findDocsPath(product) {
if (product === 'core') {
return 'content/influxdb3/core/reference/cli/influxdb3';
} else if (product === 'enterprise') {
return 'content/influxdb3/enterprise/reference/cli/influxdb3';
}
return '';
}
async extractCommandHelp(content, command) {
// Find the section for this specific command in the CLI help
const lines = content.split('\n');
let inCommand = false;
let helpText = [];
const commandHeader = `===== influxdb3 ${command} --help =====`;
for (let i = 0; i < lines.length; i++) {
if (lines[i] === commandHeader) {
inCommand = true;
continue;
}
if (inCommand && lines[i].startsWith('===== influxdb3')) {
break;
}
if (inCommand) {
helpText.push(lines[i]);
}
}
return helpText.join('\n').trim();
}
async generateDocumentationTemplate(command, helpText) {
// Parse the help text to extract description and options
const lines = helpText.split('\n');
let description = '';
let usage = '';
let options = [];
let inOptions = false;
for (let i = 0; i < lines.length; i++) {
const line = lines[i];
if (i === 0 && !line.startsWith('Usage:') && line.trim()) {
description = line.trim();
}
if (line.startsWith('Usage:')) {
usage = line.replace('Usage:', '').trim();
}
if (line.trim() === 'Options:') {
inOptions = true;
continue;
}
if (inOptions && /^\s*-/.test(line)) {
const optionMatch = line.match(/--([a-z][a-z0-9-]*)/);
const shortMatch = line.match(/\s-([a-zA-Z]),/);
if (optionMatch) {
const optionName = optionMatch[1];
const shortOption = shortMatch ? shortMatch[1] : null;
let optionDesc = line
.replace(/^\s*-[^\s]*\s*/, '')
.replace(/^\s*--[^\s]*\s*/, '')
.trim();
options.push({
name: optionName,
short: shortOption,
description: optionDesc,
});
}
}
}
// Generate markdown template
let template = `---
title: influxdb3 ${command}
description: >
The \`influxdb3 ${command}\` command ${description.toLowerCase()}.
influxdb3/core/tags: [cli]
menu:
influxdb3_core_reference:
parent: influxdb3 cli
weight: 201
---
# influxdb3 ${command}
${description}
## Usage
\`\`\`bash
${usage || `influxdb3 ${command} [OPTIONS]`}
\`\`\`
`;
if (options.length > 0) {
template += `## Options
| Option | Description |
|--------|-------------|
`;
for (const opt of options) {
const optionDisplay = opt.short
? `\`-${opt.short}\`, \`--${opt.name}\``
: `\`--${opt.name}\``;
template += `| ${optionDisplay} | ${opt.description} |\n`;
}
}
template += `
## Examples
### Example 1: Basic usage
{{% code-placeholders "PLACEHOLDER1|PLACEHOLDER2" %}}
\`\`\`bash
influxdb3 ${command} --example PLACEHOLDER1
\`\`\`
{{% /code-placeholders %}}
Replace the following:
- {{% code-placeholder-key %}}\`PLACEHOLDER1\`{{% /code-placeholder-key %}}: Description of placeholder
`;
return template;
}
async extractFrontmatter(content) {
const lines = content.split('\n');
if (lines[0] !== '---') return { frontmatter: null, content };
const frontmatterLines = [];
let i = 1;
while (i < lines.length && lines[i] !== '---') {
frontmatterLines.push(lines[i]);
i++;
}
if (i >= lines.length) return { frontmatter: null, content };
const frontmatterText = frontmatterLines.join('\n');
const remainingContent = lines.slice(i + 1).join('\n');
return { frontmatter: frontmatterText, content: remainingContent };
}
async getActualContentPath(filePath) {
// Get the actual content path, resolving source fields
try {
const content = await fs.readFile(filePath, 'utf8');
const { frontmatter } = await this.extractFrontmatter(content);
if (frontmatter) {
const sourceMatch = frontmatter.match(/^source:\s*(.+)$/m);
if (sourceMatch) {
let sourcePath = sourceMatch[1].trim();
// Handle relative paths from project root
if (sourcePath.startsWith('/shared/')) {
sourcePath = `content${sourcePath}`;
}
return sourcePath;
}
}
return null; // No source field found
} catch {
return null;
}
}
async parseDocumentedOptions(filePath) {
// Parse a documentation file to extract all documented options
try {
const content = await fs.readFile(filePath, 'utf8');
const options = [];
// Look for options in various patterns:
// 1. Markdown tables with option columns
// 2. Option lists with backticks
// 3. Code examples with --option flags
// Pattern 1: Markdown tables (| Option | Description |)
const tableMatches = content.match(/\|\s*`?--[a-z][a-z0-9-]*`?\s*\|/gi);
if (tableMatches) {
for (const match of tableMatches) {
const option = match.match(/--[a-z][a-z0-9-]*/i);
if (option) {
options.push(option[0]);
}
}
}
// Pattern 2: Backtick-enclosed options in text
const backtickMatches = content.match(/`--[a-z][a-z0-9-]*`/gi);
if (backtickMatches) {
for (const match of backtickMatches) {
const option = match.replace(/`/g, '');
options.push(option);
}
}
// Pattern 3: Options in code blocks
const codeBlockMatches = content.match(/```[\s\S]*?```/g);
if (codeBlockMatches) {
for (const block of codeBlockMatches) {
const blockOptions = block.match(/--[a-z][a-z0-9-]*/gi);
if (blockOptions) {
options.push(...blockOptions);
}
}
}
// Pattern 4: Environment variable mappings (INFLUXDB3_* to --option)
const envMatches = content.match(
/\|\s*`INFLUXDB3_[^`]*`\s*\|\s*`--[a-z][a-z0-9-]*`\s*\|/gi
);
if (envMatches) {
for (const match of envMatches) {
const option = match.match(/--[a-z][a-z0-9-]*/);
if (option) {
options.push(option[0]);
}
}
}
// Remove duplicates and return sorted
return [...new Set(options)].sort();
} catch {
return [];
}
}
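// Illustrative example of text matched above (hypothetical documentation content):
//   | `--database` | Database name |      -> matched by the table pattern
//   Use `--token` to authenticate.        -> matched by the inline-backtick pattern
// Result (de-duplicated and sorted): ['--database', '--token']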
async auditDocs(product, cliFile, auditFile) {
const docsPath = this.findDocsPath(product);
const sharedPath = 'content/shared/influxdb3-cli';
const patchDir = join(this.outputDir, 'patches', product);
await this.ensureDir(patchDir);
let output = `# CLI Documentation Audit - ${product}\n`;
output += `Generated: ${new Date().toISOString()}\n\n`;
// GitHub base URL for edit links
const githubBase = 'https://github.com/influxdata/docs-v2/edit/master';
const githubNewBase = 'https://github.com/influxdata/docs-v2/new/master';
// VSCode links for local editing
const vscodeBase = 'vscode://file';
const projectRoot = join(__dirname, '..', '..');
// Check for missing documentation
output += '## Missing Documentation\n\n';
let missingCount = 0;
const missingDocs = [];
// Map commands to expected documentation files
const commandToFile = {
'create database': 'create/database.md',
'create token': 'create/token/_index.md',
'create token admin': 'create/token/admin.md',
'create trigger': 'create/trigger.md',
'create table': 'create/table.md',
'create last_cache': 'create/last_cache.md',
'create distinct_cache': 'create/distinct_cache.md',
'show databases': 'show/databases.md',
'show tokens': 'show/tokens.md',
'delete database': 'delete/database.md',
'delete table': 'delete/table.md',
query: 'query.md',
write: 'write.md',
};
// Extract commands from CLI help
const content = await fs.readFile(cliFile, 'utf8');
const lines = content.split('\n');
for (const line of lines) {
if (line.startsWith('===== influxdb3') && line.endsWith('--help =====')) {
const command = line
.replace('===== influxdb3 ', '')
.replace(' --help =====', '');
if (commandToFile[command]) {
const expectedFile = commandToFile[command];
const productFile = join(docsPath, expectedFile);
const sharedFile = join(sharedPath, expectedFile);
const productExists = await this.fileExists(productFile);
const sharedExists = await this.fileExists(sharedFile);
let needsContent = false;
let targetPath = null;
let stubPath = null;
if (!productExists && !sharedExists) {
// Completely missing
needsContent = true;
targetPath = productFile;
} else if (productExists) {
// Check if it has a source field pointing to missing content
const actualPath = await this.getActualContentPath(productFile);
if (actualPath && !(await this.fileExists(actualPath))) {
needsContent = true;
targetPath = actualPath;
stubPath = productFile;
}
} else if (sharedExists) {
// Shared file exists, check if it has content
const actualPath = await this.getActualContentPath(sharedFile);
if (actualPath && !(await this.fileExists(actualPath))) {
needsContent = true;
targetPath = actualPath;
stubPath = sharedFile;
}
}
if (needsContent && targetPath) {
const githubNewUrl = `${githubNewBase}/${targetPath}`;
const localPath = join(projectRoot, targetPath);
output += `- **Missing**: Documentation for \`influxdb3 ${command}\`\n`;
if (stubPath) {
output += ` - Stub exists at: \`${stubPath}\`\n`;
output += ` - Content needed at: \`${targetPath}\`\n`;
} else {
output += ` - Expected: \`${targetPath}\` or \`${sharedFile}\`\n`;
}
output += ` - [Create on GitHub](${githubNewUrl})\n`;
output += ` - Local: \`${localPath}\`\n`;
// Generate documentation template
const helpText = await this.extractCommandHelp(content, command);
const docTemplate = await this.generateDocumentationTemplate(
command,
helpText
);
// Save patch file
const patchFileName = `${command.replace(/ /g, '-')}.md`;
const patchFile = join(patchDir, patchFileName);
await fs.writeFile(patchFile, docTemplate);
output += ` - **Template generated**: \`${patchFile}\`\n`;
missingDocs.push({ command, file: targetPath, patchFile });
missingCount++;
}
}
}
}
if (missingCount === 0) {
output += 'No missing documentation files detected.\n';
} else {
output += '\n### Quick Actions\n\n';
output +=
'Copy and paste these commands to create missing documentation:\n\n';
output += '```bash\n';
for (const doc of missingDocs) {
const relativePatch = join(
'helper-scripts/output/cli-audit/patches',
product,
`${doc.command.replace(/ /g, '-')}.md`
);
output += `# Create ${doc.command} documentation\n`;
output += `mkdir -p $(dirname ${doc.file})\n`;
output += `cp ${relativePatch} ${doc.file}\n\n`;
}
output += '```\n';
}
output += '\n';
// Check for outdated options in existing docs
output += '## Existing Documentation Review\n\n';
// Parse CLI help first to populate commandOptionsMap
const parsedFile = join(
this.outputDir,
`parsed-cli-${product}-${this.version}.md`
);
await this.parseCLIHelp(cliFile, parsedFile);
// For each command, check if documentation exists and compare content
const existingDocs = [];
for (const [command, expectedFile] of Object.entries(commandToFile)) {
const productFile = join(docsPath, expectedFile);
const sharedFile = join(sharedPath, expectedFile);
let docFile = null;
let actualContentFile = null;
// Find the documentation file
if (await this.fileExists(productFile)) {
docFile = productFile;
// Check if it's a stub with source field
const actualPath = await this.getActualContentPath(productFile);
actualContentFile = actualPath
? join(projectRoot, actualPath)
: join(projectRoot, productFile);
} else if (await this.fileExists(sharedFile)) {
docFile = sharedFile;
actualContentFile = join(projectRoot, sharedFile);
}
if (docFile && (await this.fileExists(actualContentFile))) {
const githubEditUrl = `${githubBase}/${docFile}`;
const localPath = join(projectRoot, docFile);
const vscodeUrl = `${vscodeBase}/${localPath}`;
// Get CLI options for this command
const cliOptions = this.commandOptionsMap[`influxdb3 ${command}`] || [];
// Parse documentation content to find documented options
const documentedOptions =
await this.parseDocumentedOptions(actualContentFile);
// Find missing options (in CLI but not in docs)
const missingOptions = cliOptions.filter(
(opt) => !documentedOptions.includes(opt)
);
// Find extra options (in docs but not in CLI)
const extraOptions = documentedOptions.filter(
(opt) => !cliOptions.includes(opt)
);
existingDocs.push({
command,
file: docFile,
actualContentFile: actualContentFile.replace(
join(projectRoot, ''),
''
),
githubUrl: githubEditUrl,
localPath,
vscodeUrl,
cliOptions,
documentedOptions,
missingOptions,
extraOptions,
});
}
}
if (existingDocs.length > 0) {
output += 'Review these existing documentation files for accuracy:\n\n';
for (const doc of existingDocs) {
output += `### \`influxdb3 ${doc.command}\`\n`;
output += `- **File**: \`${doc.file}\`\n`;
if (doc.actualContentFile !== doc.file) {
output += `- **Content**: \`${doc.actualContentFile}\`\n`;
}
output += `- [Edit on GitHub](${doc.githubUrl})\n`;
output += `- [Open in VS Code](${doc.vscodeUrl})\n`;
output += `- **Local**: \`${doc.localPath}\`\n`;
// Show option analysis
if (doc.missingOptions.length > 0) {
output += `- **⚠️ Missing from docs** (${doc.missingOptions.length} options):\n`;
for (const option of doc.missingOptions.sort()) {
output += ` - \`${option}\`\n`;
}
}
if (doc.extraOptions.length > 0) {
output += `- **Documented but not in CLI** (${doc.extraOptions.length} options):\n`;
for (const option of doc.extraOptions.sort()) {
output += ` - \`${option}\`\n`;
}
}
if (doc.missingOptions.length === 0 && doc.extraOptions.length === 0) {
output += `- **✅ Options match** (${doc.cliOptions.length} options)\n`;
}
if (doc.cliOptions.length > 0) {
output += `- **All CLI Options** (${doc.cliOptions.length}):\n`;
const uniqueOptions = [...new Set(doc.cliOptions)].sort();
for (const option of uniqueOptions) {
const status = doc.missingOptions.includes(option) ? '❌' : '✅';
output += ` - ${status} \`${option}\`\n`;
}
}
output += '\n';
}
}
output += '\n## Summary\n';
output += `- Missing documentation files: ${missingCount}\n`;
output += `- Existing documentation files: ${existingDocs.length}\n`;
output += `- Generated templates: ${missingCount}\n`;
output += '- Options are grouped by command for easier review\n\n';
output += '## Automation Suggestions\n\n';
output +=
'1. **Use generated templates**: Check the `patches` directory for pre-filled documentation templates\n';
output +=
'2. **Batch creation**: Use the shell commands above to quickly create all missing files\n';
output +=
'3. **CI Integration**: Add this audit to your CI pipeline to catch missing docs early\n';
output +=
'4. **Auto-PR**: Create a GitHub Action that runs this audit and opens PRs for missing docs\n\n';
await fs.writeFile(auditFile, output);
console.log(`📄 Audit complete: ${auditFile}`);
if (missingCount > 0) {
console.log(
`📝 Generated ${missingCount} documentation templates in: ${patchDir}`
);
}
}
async run() {
console.log(
`${Colors.BLUE}🔍 InfluxDB 3 CLI Documentation Audit${Colors.NC}`
);
console.log('=======================================');
console.log(`Product: ${this.product}`);
console.log(`Version: ${this.version}`);
console.log();
// Ensure output directory exists
await this.ensureDir(this.outputDir);
if (this.product === 'core') {
const cliFile = join(
this.outputDir,
`current-cli-core-${this.version}.txt`
);
const auditFile = join(
this.outputDir,
`documentation-audit-core-${this.version}.md`
);
if (await this.extractCurrentCLI('core', cliFile)) {
await this.auditDocs('core', cliFile, auditFile);
}
} else if (this.product === 'enterprise') {
const cliFile = join(
this.outputDir,
`current-cli-enterprise-${this.version}.txt`
);
const auditFile = join(
this.outputDir,
`documentation-audit-enterprise-${this.version}.md`
);
if (await this.extractCurrentCLI('enterprise', cliFile)) {
await this.auditDocs('enterprise', cliFile, auditFile);
}
} else if (this.product === 'both') {
// Core
const cliFileCore = join(
this.outputDir,
`current-cli-core-${this.version}.txt`
);
const auditFileCore = join(
this.outputDir,
`documentation-audit-core-${this.version}.md`
);
if (await this.extractCurrentCLI('core', cliFileCore)) {
await this.auditDocs('core', cliFileCore, auditFileCore);
}
// Enterprise
const cliFileEnt = join(
this.outputDir,
`current-cli-enterprise-${this.version}.txt`
);
const auditFileEnt = join(
this.outputDir,
`documentation-audit-enterprise-${this.version}.md`
);
if (await this.extractCurrentCLI('enterprise', cliFileEnt)) {
await this.auditDocs('enterprise', cliFileEnt, auditFileEnt);
}
} else {
console.error(`Error: Invalid product '${this.product}'`);
console.error(
'Usage: node audit-cli-documentation.js [core|enterprise|both] [version]'
);
process.exit(1);
}
console.log();
console.log(
`${Colors.GREEN}✅ CLI documentation audit complete!${Colors.NC}`
);
console.log();
console.log('Next steps:');
console.log(`1. Review the audit reports in: ${this.outputDir}`);
console.log('2. Update missing documentation files');
console.log('3. Verify options match current CLI behavior');
console.log('4. Update examples and usage patterns');
}
}
// Main execution
async function main() {
const args = process.argv.slice(2);
const product = args[0] || 'both';
const version = args[1] || 'local';
// Validate product
if (!['core', 'enterprise', 'both'].includes(product)) {
console.error(`Error: Invalid product '${product}'`);
console.error(
'Usage: node audit-cli-documentation.js [core|enterprise|both] [version]'
);
console.error('Example: node audit-cli-documentation.js core 3.2.0');
process.exit(1);
}
// Validate version tag
try {
const repoRoot = await getRepositoryRoot();
await validateVersionInputs(version, null, repoRoot);
} catch (error) {
console.error(`Version validation failed: ${error.message}`);
process.exit(1);
}
const auditor = new CLIDocAuditor(product, version);
await auditor.run();
}
// Run if called directly
if (import.meta.url === `file://${process.argv[1]}`) {
main().catch((err) => {
console.error('Error:', err);
process.exit(1);
});
}
export { CLIDocAuditor };

View File

@ -1,164 +0,0 @@
#!/bin/bash
# Set up authentication tokens for InfluxDB 3 Core and Enterprise containers
# Usage: ./setup-auth-tokens.sh [core|enterprise|both]
set -e
# Color codes
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Parse arguments
TARGET=${1:-both}
echo -e "${BLUE}🔐 InfluxDB 3 Authentication Setup${NC}"
echo "=================================="
echo ""
# Check for and load existing secret files
SECRET_CORE_FILE="$HOME/.env.influxdb3-core-admin-token"
SECRET_ENT_FILE="$HOME/.env.influxdb3-enterprise-admin-token"
if [ -f "$SECRET_CORE_FILE" ]; then
echo "✅ Found existing Core token secret file"
else
echo "📝 Creating new Core token secret file: $SECRET_CORE_FILE"
touch "$SECRET_CORE_FILE"
fi
if [ -f "$SECRET_ENT_FILE" ]; then
echo "✅ Found existing Enterprise token secret file"
else
echo "📝 Creating new Enterprise token secret file: $SECRET_ENT_FILE"
touch "$SECRET_ENT_FILE"
fi
echo ""
# Function to setup auth for a product
setup_auth() {
local product=$1
local container_name="influxdb3-${product}"
local port
local secret_file
case "$product" in
"core")
port="8282"
secret_file="$SECRET_CORE_FILE"
;;
"enterprise")
port="8181"
secret_file="$SECRET_ENT_FILE"
;;
esac
echo -e "${BLUE}Setting up $(echo ${product} | awk '{print toupper(substr($0,1,1)) tolower(substr($0,2))}') authentication...${NC}"
# Check if token already exists in secret file
if [ -s "$secret_file" ]; then
local existing_token=$(cat "$secret_file")
echo "✅ Token already exists in secret file"
echo " Token: ${existing_token:0:20}..."
# Test if the token works
echo -n "🧪 Testing existing token..."
if docker exec "${container_name}" influxdb3 show databases --token "${existing_token}" --host "http://localhost:${port}" > /dev/null 2>&1; then
echo -e " ${GREEN}✓ Working${NC}"
return 0
else
echo -e " ${YELLOW}⚠ Not working, will create new token${NC}"
fi
fi
# Check if container is running
if ! docker ps --format '{{.Names}}' | grep -q "^${container_name}$"; then
echo "🚀 Starting ${container_name} container..."
if ! docker compose up -d "${container_name}"; then
echo -e "${RED}❌ Failed to start container${NC}"
return 1
fi
echo -n "⏳ Waiting for container to be ready..."
sleep 5
echo -e " ${GREEN}${NC}"
else
echo "✅ Container ${container_name} is running"
fi
# Create admin token
echo "🔑 Creating admin token..."
local token_output
if token_output=$(docker exec "${container_name}" influxdb3 create token --admin 2>&1); then
# Extract the token from the "Token: " line
local new_token=$(echo "$token_output" | grep "^Token: " | sed 's/^Token: //' | tr -d '\r\n')
echo -e "${GREEN}Token created successfully!${NC}"
echo " Token: ${new_token:0:20}..."
# Update secret file
echo "${new_token}" > "$secret_file"
echo "📝 Updated secret file: $secret_file"
# Test the new token
echo -n "🧪 Testing new token..."
if docker exec "${container_name}" influxdb3 show databases --token "${new_token}" --host "http://localhost:${port}" > /dev/null 2>&1; then
echo -e " ${GREEN}✓ Working${NC}"
else
echo -e " ${YELLOW}⚠ Test failed, but token was created${NC}"
fi
else
echo -e "${RED}❌ Failed to create token${NC}"
echo "Error output: $token_output"
return 1
fi
echo ""
}
# Main execution
case "$TARGET" in
"core")
setup_auth "core"
;;
"enterprise")
setup_auth "enterprise"
;;
"both")
setup_auth "core"
setup_auth "enterprise"
;;
*)
echo "Usage: $0 [core|enterprise|both]"
exit 1
;;
esac
echo -e "${GREEN}🎉 Authentication setup complete!${NC}"
echo ""
echo "📋 Next steps:"
echo "1. Restart containers to load new secrets:"
echo " docker compose down && docker compose up -d influxdb3-core influxdb3-enterprise"
echo "2. Test CLI commands with authentication:"
echo " ./detect-cli-changes.sh core 3.1.0 local"
echo " ./detect-cli-changes.sh enterprise 3.1.0 local"
echo ""
echo "📄 Your secret files now contain:"
# Show Core tokens
if [ -f "$SECRET_CORE_FILE" ] && [ -s "$SECRET_CORE_FILE" ]; then
token_preview=$(head -c 20 "$SECRET_CORE_FILE")
echo " $SECRET_CORE_FILE: ${token_preview}..."
fi
# Show Enterprise tokens
if [ -f "$SECRET_ENT_FILE" ] && [ -s "$SECRET_ENT_FILE" ]; then
token_preview=$(head -c 20 "$SECRET_ENT_FILE")
echo " $SECRET_ENT_FILE: ${token_preview}..."
fi

View File

@ -5,7 +5,7 @@
{{ $productKey := cond (eq $product "influxdb3") (print "influxdb3_" (replaceRE "-" "_" $version)) $product }}
{{ $productData := index $.Site.Data.products $productKey }}
{{ $displayName := $productData.name }}
{{ $earlyAccessList := slice "influxdb3/explorer" }}
{{ $earlyAccessList := slice "" }}
{{ if in $earlyAccessList (print $product "/" $version )}}
<div class="block special-state">

View File

@ -111,16 +111,6 @@ pre-push:
node cypress/support/run-e2e-specs.js --spec "cypress/e2e/content/article-links.cy.js" content/example.md
exit $?
# Link validation runs in GitHub actions.
# You can still run it locally for development.
# e2e-links:
# tags: test,links
# glob: 'content/*.{md,html}'
# run: |
# echo "Running link checker for: {staged_files}"
# yarn test:links {staged_files}
# exit $?
# Manage Docker containers
prune-legacy-containers:
priority: 1

View File

@ -36,7 +36,7 @@
"js-yaml": "^4.1.0",
"lefthook": "^1.10.10",
"markdown-link": "^0.1.1",
"mermaid": "^11.4.1",
"mermaid": "^11.10.0",
"vanillajs-datepicker": "^1.3.4"
},
"scripts": {
@ -55,21 +55,7 @@
"test:codeblocks:v2": "docker compose run --rm --name v2-pytest v2-pytest",
"test:codeblocks:stop-monitors": "./test/scripts/monitor-tests.sh stop cloud-dedicated-pytest && ./test/scripts/monitor-tests.sh stop clustered-pytest",
"test:e2e": "node cypress/support/run-e2e-specs.js",
"test:links": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\"",
"test:links:v1": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/influxdb/{v1,enterprise_influxdb}/**/*.{md,html}",
"test:links:v2": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/influxdb/{cloud,v2}/**/*.{md,html}",
"test:links:v3": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/influxdb3/**/*.{md,html}",
"test:links:chronograf": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/chronograf/**/*.{md,html}",
"test:links:kapacitor": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/kapacitor/**/*.{md,html}",
"test:links:telegraf": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/telegraf/**/*.{md,html}",
"test:links:shared": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/shared/**/*.{md,html}",
"test:links:api-docs": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" /influxdb3/core/api/,/influxdb3/enterprise/api/,/influxdb3/cloud-dedicated/api/,/influxdb3/cloud-dedicated/api/v1/,/influxdb/cloud-dedicated/api/v1/,/influxdb/cloud-dedicated/api/management/,/influxdb3/cloud-dedicated/api/management/",
"test:shortcode-examples": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/example.md",
"audit:cli": "node ./helper-scripts/influxdb3-monolith/audit-cli-documentation.js both local",
"audit:cli:3core": "node ./helper-scripts/influxdb3-monolith/audit-cli-documentation.js core local",
"audit:cli:3ent": "node ./helper-scripts/influxdb3-monolith/audit-cli-documentation.js enterprise local",
"audit:cli:apply": "node ./helper-scripts/influxdb3-monolith/apply-cli-patches.js both",
"audit:cli:apply:dry": "node ./helper-scripts/influxdb3-monolith/apply-cli-patches.js both --dry-run"
"test:shortcode-examples": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/example.md"
},
"type": "module",
"browserslist": [

View File

@ -0,0 +1,342 @@
# yaml-language-server: $schema=app-instance-schema.json
apiVersion: kubecfg.dev/v1alpha1
kind: AppInstance
metadata:
name: influxdb
namespace: influxdb
spec:
# One or more secrets that are used to pull the images from an authenticated registry.
# This will either be the secret provided to you, if using our registry, or a secret for your own registry
# if self-hosting the images.
imagePullSecrets:
- name: <name of the secret>
package:
# The version of the clustered package that will be used.
# This determines the version of all of the individual components.
# When a new version of the product is released, this version should be updated and any
# new config options should be updated below.
image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20250814-1819052
apiVersion: influxdata.com/v1alpha1
spec:
# # Provides a way to pass down hosting-environment-specific configuration, such as a role ARN when using EKS IRSA.
# # This section contains three mutually-exclusive "blocks". Uncomment the block named after the hosting environment
# # you run: "aws", "openshift" or "gke".
# hostingEnvironment:
# # # Uncomment this block if you're running in EKS.
# # aws:
# # eksRoleArn: 'arn:aws:iam::111111111111:role/your-influxdb-clustered-role'
# #
# # # Uncomment this block if you're running inside OpenShift.
# # # Note: there are currently no OpenShift-specific parameters. You have to pass an empty object
# # # as a marker that you're choosing OpenShift as hosting environment.
# # openshift: {}
# #
# # # Uncomment this block if you're running in GKE:
# # gke:
# # # Authenticate to Google Cloud services via workload identity, this
# # # annotates the 'iox' ServiceAccount with the role name you specify.
# # # NOTE: This setting just enables the GKE-specific authentication mechanism;
# # # you still need to enable `spec.objectStore.google` below if you want to use GCS.
# # workloadIdentity:
# # # Google Service Account name to use for the workload identity.
# # serviceAccountEmail: <service-account>@<project-name>.iam.gserviceaccount.com
catalog:
# A PostgreSQL-style DSN that points at a PostgreSQL-compatible database.
# eg: postgres://[user[:password]@][netloc][:port][/dbname][?param1=value1&...]
dsn:
valueFrom:
secretKeyRef:
name: <your secret name here>
key: <the key in the secret that contains the dsn>
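# A minimal sketch of creating the referenced Secret with kubectl; the Secret
# name, key, and DSN below are assumptions, not defaults:
#   kubectl create secret generic catalog-dsn --namespace influxdb \
#     --from-literal=dsn='postgres://user:password@db-host:5432/influxdb'
# The reference above would then use name: catalog-dsn and key: dsn.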
# images:
# # This can be used to override a specific image name with its FQIN
# # (Fully Qualified Image Name) for testing. eg.
# overrides:
# - name: influxdb2-artifacts/iox/iox
# newFQIN: mycompany/test-iox-build:aninformativetag
#
# # Set this variable to the prefix of your internal registry. This will be prefixed to all expected images.
# # eg. us-docker.pkg.dev/iox:latest => registry.mycompany.io/us-docker.pkg.dev/iox:latest
# registryOverride: <the domain name portion of your registry (registry.mycompany.io in the example above)>
objectStore:
# Bucket that the parquet files will be stored in
bucket: <bucket name>
# Uncomment one of the following (s3, azure)
# to enable the configuration of your object store
s3:
# URL for S3 Compatible object store
endpoint: <S3 url>
# Set to true to allow communication over HTTP (instead of HTTPS)
allowHttp: "false"
# S3 Access Key
# This can also be provided as a valueFrom: secretKeyRef:
accessKey:
value: <your access key>
# S3 Secret Key
# This can also be provided as a valueFrom: secretKeyRef:
secretKey:
value: <your secret>
# This value is required for AWS S3; it may or may not be required for other providers.
region: <region>
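# For example, to pull the keys from a Kubernetes Secret instead of inline
# values (the Secret and key names below are assumptions):
# accessKey:
#   valueFrom:
#     secretKeyRef:
#       name: object-store-credentials
#       key: access-key
# secretKey:
#   valueFrom:
#     secretKeyRef:
#       name: object-store-credentials
#       key: secret-key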
# azure:
# Azure Blob Storage Access Key
# This can also be provided as a valueFrom: secretKeyRef:
# accessKey:
# value: <your access key>
# Azure Blob Storage Account
# This can also be provided as a valueFrom: secretKeyRef:
# account:
# value: <your access key>
# There are two main ways you can access Google Cloud Storage:
#
# a) GKE Workload Identity: configure workload identity in the top level `hostingEnvironment.gke` section.
# b) Explicit service account secret (JSON) file: use the `serviceAccountSecret` field here
#
# If you pick (a) you may not need to uncomment anything else in this section,
# but you still need to tell InfluxDB that you intend to use Google Cloud Storage,
# so you need to specify an empty object. Uncomment the following line:
#
# google: {}
#
#
# If you pick (b), uncomment the following block:
#
# google:
# # If you're authenticating to Google Cloud services using a Service Account credentials file, as opposed
# # to using workload identity (see above), you need to provide a reference to a k8s Secret containing the credentials file.
# serviceAccountSecret:
# # Kubernetes Secret name containing the credentials for a Google IAM Service Account.
# name: <secret name>
# # The key within the Secret containing the credentials.
# key: <key name>
# Parameters to tune observability configuration, such as Prometheus ServiceMonitors.
observability: {}
# retention: 12h
# serviceMonitor:
# interval: 10s
# scrapeTimeout: 30s
# Ingester pods have a volume attached.
ingesterStorage:
# (Optional) Set the storage class. This will differ based on the K8s environment and desired storage characteristics.
# If not set, the default storage class will be used.
# storageClassName: <storage-class>
# Set the storage size (minimum 2Gi recommended)
storage: <storage-size>
# Monitoring pods have a volume attached.
monitoringStorage:
# (Optional) Set the storage class. This will differ based on the K8s environment and desired storage characteristics.
# If not set, the default storage class will be used.
# storageClassName: <storage-class>
# Set the storage size (minimum 10Gi recommended)
storage: <storage-size>
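# For example (illustrative sizes at the recommended minimums above):
# ingesterStorage.storage: 2Gi
# monitoringStorage.storage: 10Gi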
# Uncomment the following block if using our provided Ingress.
#
# We currently only support the ingress-nginx ingress controller: https://github.com/kubernetes/ingress-nginx
#
# ingress:
# hosts:
# # This is the host on which you will access InfluxDB 3.0, for both reads and writes
# - <influxdb-host>
# (Optional)
# The name of the Kubernetes Secret containing a TLS certificate; this should exist in the same namespace as the Clustered installation.
# If you are using cert-manager, enter a name for the Secret it should create.
# tlsSecretName: <secret-name>
# http:
# # Usually you have only one ingress controller installed in a given cluster.
# # In case you have more than one, you have to specify the "class name" of the ingress controller you want to use
# className: nginx
# grpc:
# # Usually you have only one ingress controller installed in a given cluster.
# # In case you have more than one, you have to specify the "class name" of the ingress controller you want to use
# className: nginx
#
# Enables specifying which 'type' of Ingress to use, alongside whether to place additional annotations
# onto those objects. This is useful for third-party software in your environment, such as cert-manager.
# template:
# apiVersion: 'route.openshift.io/v1'
# kind: 'Route'
# metadata:
# annotations:
# 'example-annotation': 'annotation-value'
# Enables specifying customizations for the various components in InfluxDB 3.0.
# components:
# # router:
# # template:
# # containers:
# # iox:
# # env:
# # INFLUXDB_IOX_MAX_HTTP_REQUESTS: "5000"
# # nodeSelector:
# # disktype: ssd
# # tolerations:
# # - effect: NoSchedule
# # key: example
# # operator: Exists
# # Common customizations for all components go in a pseudo-component called "common"
# # common:
# # template:
# # # Metadata contains custom annotations (and labels) to be added to a component. E.g.:
# # metadata:
# # annotations:
# # telegraf.influxdata.com/class: "foo"
# Example of setting nodeAffinity for the querier component to ensure it runs on nodes with specific labels
# components:
# # querier:
# # template:
# # affinity:
# # nodeAffinity:
# # requiredDuringSchedulingIgnoredDuringExecution:
# # Node must have these labels to be considered for scheduling
# # nodeSelectorTerms:
# # - matchExpressions:
# # - key: required
# # operator: In
# # values:
# # - ssd
# # preferredDuringSchedulingIgnoredDuringExecution:
# # Scheduler will prefer nodes with these labels but they're not required
# # - weight: 1
# # preference:
# # matchExpressions:
# # - key: preferred
# # operator: In
# # values:
# # - postgres
# Example of setting podAntiAffinity for the querier component so that multiple querier pods are not scheduled onto the same node
# components:
# # querier:
# # template:
# # affinity:
# # podAntiAffinity:
# # requiredDuringSchedulingIgnoredDuringExecution:
# # Ensures that the pod will not be scheduled on a node if another pod matching the labelSelector is already running there
# # - labelSelector:
# # matchExpressions:
# # - key: app
# # operator: In
# # values:
# # - querier
# # topologyKey: "kubernetes.io/hostname"
# # preferredDuringSchedulingIgnoredDuringExecution:
# # Scheduler will prefer not to schedule pods together but may do so if necessary
# # - weight: 1
# # podAffinityTerm:
# # labelSelector:
# # matchExpressions:
# # - key: app
# # operator: In
# # values:
# # - querier
# # topologyKey: "kubernetes.io/hostname"
# Uncomment the following block to tune the various pods for their cpu/memory/replicas based on workload needs.
# Only uncomment the specific resources you want to change; anything left commented out will use the package default.
# (You can read more about k8s resources and limits in https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits)
#
# resources:
# # The ingester handles data being written
# ingester:
# requests:
# cpu: <cpu amount>
# memory: <ram amount>
# replicas: <num replicas> # The default for ingesters is 3 to increase availability
#
# # optionally you can specify the resource limits which improves isolation.
# # (see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits)
# # limits:
# # cpu: <cpu amount>
# # memory: <ram amount>
# # The compactor reorganizes old data to improve query and storage efficiency.
# compactor:
# requests:
# cpu: <cpu amount>
# memory: <ram amount>
# replicas: <num replicas> # the default is 1
# # The querier handles querying data.
# querier:
# requests:
# cpu: <cpu amount>
# memory: <ram amount>
# replicas: <num replicas> # the default is 3
# # The router performs some api routing.
# router:
# requests:
# cpu: <cpu amount>
# memory: <ram amount>
# replicas: <num replicas> # the default is 3
admin:
# The list of users to grant access to Clustered via influxctl
users:
# First name of user
- firstName: <first-name>
# Last name of user
lastName: <last-name>
# Email of user
email: <email>
# The ID that the configured Identity Provider uses for the user in oauth flows
id: <id>
# Optional list of user groups to assign to the user, rather than the default groups. The following groups are currently supported: Admin, Auditor, Member
userGroups:
- <group-name>
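# For example (all values below are placeholders):
# - firstName: Jane
#   lastName: Doe
#   email: jane.doe@example.com
#   id: auth0|1234567890
#   userGroups:
#     - Admin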
# The dsn for the postgres compatible database (note this is the same as defined above)
dsn:
valueFrom:
secretKeyRef:
name: <secret name>
key: <dsn key>
# The identity provider to be used e.g. "keycloak", "auth0", "azure", etc
# Note for Azure Active Directory it must be exactly "azure"
identityProvider: <identity-provider>
# The JWKS endpoint provided by the Identity Provider
jwksEndpoint: <endpoint>
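# For example, with Keycloak (the host and realm below are placeholders, not defaults):
#   identityProvider: keycloak
#   jwksEndpoint: https://keycloak.example.com/auth/realms/influxdb/protocol/openid-connect/certs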
# # This (optional) section controls how InfluxDB issues outbound requests to other services
# egress:
# # If you're using a custom CA you will need to specify the full custom CA bundle here.
# #
# # NOTE: the custom CA is currently only honoured for outbound requests used to obtain
# # the JWT public keys from your identity provider (see `jwksEndpoint`).
# customCertificates:
# valueFrom:
# configMapKeyRef:
# key: ca.pem
# name: custom-ca
# We also include the ability to enable some features that are not yet ready for general availability,
# or for which we don't yet have a proper place in the configuration file to turn on as an optional feature.
# To turn these on, include the name of the feature flag in the `featureFlags` array.
#
# featureFlags:
# # Uncomment to install a Grafana deployment.
# # Depends on one of the prometheus features being deployed.
# # - grafana
# # The following 2 flags should be uncommented for k8s API 1.21 support.
# # Note that this is an experimental configuration.
# # - noMinReadySeconds
# # - noGrpcProbes
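# A minimal sketch of applying this configuration once the placeholders are
# filled in (the filename is an assumption):
#   kubectl apply --namespace influxdb -f myinfluxdb.yml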

View File

@ -3667,10 +3667,10 @@ merge2@^1.3.0:
resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae"
integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==
mermaid@^11.4.1:
version "11.9.0"
resolved "https://registry.yarnpkg.com/mermaid/-/mermaid-11.9.0.tgz#fdc055d0f2a7f2afc13a78cb3e3c9b1374614e2e"
integrity sha512-YdPXn9slEwO0omQfQIsW6vS84weVQftIyyTGAZCwM//MGhPzL1+l6vO6bkf0wnP4tHigH1alZ5Ooy3HXI2gOag==
mermaid@^11.10.0:
version "11.10.0"
resolved "https://registry.yarnpkg.com/mermaid/-/mermaid-11.10.0.tgz#4949f98d08cfdc4cda429372ed2f843a64c99946"
integrity sha512-oQsFzPBy9xlpnGxUqLbVY8pvknLlsNIJ0NWwi8SUJjhbP1IT0E0o1lfhU4iYV3ubpy+xkzkaOyDUQMn06vQElQ==
dependencies:
"@braintree/sanitize-url" "^7.0.4"
"@iconify/utils" "^2.1.33"