Merge branch 'master' into patch-1

pull/5924/head
Jason Stirnaman 2025-08-19 11:17:02 -05:00 committed by GitHub
commit a16a1ea286
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
1182 changed files with 147527 additions and 21758 deletions

View File

@ -0,0 +1,74 @@
# Lychee link checker configuration
# Generated by link-checker
[lychee]
# Performance settings
# Maximum number of retries for failed checks
max_retries = 3
# Timeout for each link check (in seconds)
timeout = 30
# Maximum number of concurrent checks
max_concurrency = 128
skip_code_blocks = false
# HTTP settings
# Identify the tool to external services
user_agent = "Mozilla/5.0 (compatible; link-checker)"
# Accept these HTTP status codes as valid
accept = [200, 201, 202, 203, 204, 206, 301, 302, 303, 304,
307, 308]
# Skip these URL schemes
scheme = ["file", "mailto", "tel"]
# Exclude patterns (regex supported)
exclude = [
# Localhost URLs
"^https?://localhost",
"^https?://127\\.0\\.0\\.1",
# Common CI/CD environments
"^https?://.*\\.local",
# Example domains used in documentation
"^https?://example\\.(com|org|net)",
# Placeholder URLs from code block filtering
"https://example.com/REMOVED_FROM_CODE_BLOCK",
"example.com/INLINE_CODE_URL",
# URLs that require authentication
"^https?://.*\\.slack\\.com",
"^https?://.*\\.atlassian\\.net",
# GitHub URLs (often fail due to rate limiting and bot
# detection)
"^https?://github\\.com",
# StackExchange network URLs (often block automated requests)
"^https?://.*\\.stackexchange\\.com",
"^https?://stackoverflow\\.com",
"^https?://.*\\.stackoverflow\\.com",
# Docker Hub URLs (rate limiting and bot detection)
"^https?://hub\\.docker\\.com",
# Common documentation placeholders
"YOUR_.*",
"REPLACE_.*",
"<.*>",
]
# Request headers
[headers]
# Add custom headers here if needed
# "Authorization" = "Bearer $GITHUB_TOKEN"
# Cache settings
cache = true
max_cache_age = "1d"

View File

@ -0,0 +1,116 @@
# Production Link Checker Configuration for InfluxData docs-v2
# Optimized for performance, reliability, and reduced false positives
[lychee]
# Performance settings
# Maximum number of retries for failed checks
max_retries = 3
# Timeout for each link check (in seconds)
timeout = 30
# Maximum number of concurrent checks
max_concurrency = 128
skip_code_blocks = false
# HTTP settings
# Identify the tool to external services
"User-Agent" = "Mozilla/5.0 (compatible; influxdata-link-checker/1.0; +https://github.com/influxdata/docs-v2)"
accept = [200, 201, 202, 203, 204, 206, 301, 302, 303, 304, 307, 308]
# Skip these URL schemes
scheme = ["mailto", "tel"]
# Performance optimizations
cache = true
max_cache_age = "1h"
# Retry configuration for reliability
include_verbatim = false
# Exclusion patterns for docs-v2 (regex supported)
exclude = [
# Localhost URLs
"^https?://localhost",
"^https?://127\\.0\\.0\\.1",
# Common CI/CD environments
"^https?://.*\\.local",
# Example domains used in documentation
"^https?://example\\.(com|org|net)",
# Placeholder URLs from code block filtering
"https://example.com/REMOVED_FROM_CODE_BLOCK",
"example.com/INLINE_CODE_URL",
# URLs that require authentication
"^https?://.*\\.slack\\.com",
"^https?://.*\\.atlassian\\.net",
# GitHub URLs (often fail due to rate limiting and bot
# detection)
"^https?://github\\.com",
# Social media URLs (often block bots)
"^https?://reddit\\.com",
"^https?://.*\\.reddit\\.com",
# StackExchange network URLs (often block automated requests)
"^https?://.*\\.stackexchange\\.com",
"^https?://stackoverflow\\.com",
"^https?://.*\\.stackoverflow\\.com",
# Docker Hub URLs (rate limiting and bot detection)
"^https?://hub\\.docker\\.com",
# InfluxData support URLs (certificate/SSL issues in CI)
"^https?://support\\.influxdata\\.com",
# Common documentation placeholders
"YOUR_.*",
"REPLACE_.*",
"<.*>",
]
# Request headers
[headers]
# Add custom headers here if needed
# "Authorization" = "Bearer $GITHUB_TOKEN"
"Accept" = "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
"Accept-Language" = "en-US,en;q=0.5"
"Accept-Encoding" = "gzip, deflate"
"DNT" = "1"
"Connection" = "keep-alive"
"Upgrade-Insecure-Requests" = "1"
[ci]
# CI-specific settings
[ci.github_actions]
output_format = "json"
create_annotations = true
fail_fast = false
max_annotations = 50 # Limit to avoid overwhelming PR comments
[ci.performance]
# Performance tuning for CI environment
parallel_requests = 32
connection_timeout = 10
read_timeout = 30
# Resource limits
max_memory_mb = 512
max_execution_time_minutes = 10
[reporting]
# Report configuration
include_fragments = false
verbose = false
no_progress = true # Disable progress bar in CI
# Summary settings
show_success_count = true
show_skipped_count = true

View File

@ -31,7 +31,7 @@ LogicalPlan
[Mm]onitor
MBs?
PBs?
Parquet
Parquet|\b\w*-*parquet-\w*\b|\b--\w*parquet\w*\b|`[^`]*parquet[^`]*`
Redoc
SQLAlchemy
SQLAlchemy

View File

@ -1,4 +1,4 @@
version: 2
version: 2.1
jobs:
build:
docker:
@ -31,17 +31,17 @@ jobs:
command: cd api-docs && bash generate-api-docs.sh
- run:
name: Inject Flux stdlib frontmatter
command: node ./flux-build-scripts/inject-flux-stdlib-frontmatter.js
command: node ./flux-build-scripts/inject-flux-stdlib-frontmatter.cjs
- run:
name: Update Flux/InfluxDB versions
command: node ./flux-build-scripts/update-flux-versions.js
command: node ./flux-build-scripts/update-flux-versions.cjs
- save_cache:
key: install-{{ .Environment.CACHE_VERSION }}-{{ checksum ".circleci/config.yml" }}
paths:
- /home/circleci/bin
- run:
name: Hugo Build
command: npx hugo --logLevel info --minify --destination workspace/public
command: yarn hugo --environment production --logLevel info --gc --destination workspace/public
- persist_to_workspace:
root: workspace
paths:
@ -68,7 +68,6 @@ jobs:
when: on_success
workflows:
version: 2
build:
jobs:
- build

View File

@ -0,0 +1,25 @@
analyze-api-source:
<product_name> <endpoint_name> <parameter_name>
Analyze source code in the specified repo to determine:
1. HTTP method and endpoint path
2. Parameters for the given endpoint
3. Whether the specified parameter is supported for the given API endpoint
4. Parameter format, valid values, and default behavior
5. Any limitations or quirks of the parameter
For product InfluxDB 3 Core and Enterprise,
Search repo influxdata/influxdb
Search through:
- HTTP endpoint handlers in
influxdb3_server/src/http/
- Parameter structs and deserialization
- Request routing and processing logic
- Type definitions in influxdb3_types/src/
In the output, provide:
- Comparison across v1, v2, and v3 API compatibility
- Concrete examples of endpoint and parameter usage
Cite specific source code locations.

View File

@ -0,0 +1,192 @@
# enhance-release-notes
Analyze GitHub PRs referenced in release notes and enhance descriptions following Google Developer Documentation style.
## Overview
This command improves release note descriptions by:
1. Fetching PR data from GitHub API
2. Analyzing code changes and PR content
3. Generating clear, action-oriented descriptions
4. Following Google Developer Documentation principles
5. Creating a descriptive commit message
## Usage
```
enhance-release-notes <release-notes-file> [--dry-run]
```
## Process
### 1. Extract PR References
- Scan the release notes file for GitHub PR links
- Extract PR numbers and repository information
- Example pattern: `([#26574](https://github.com/influxdata/influxdb/pull/26574))`
### 2. Fetch PR Data
For each PR, collect:
- PR title and description
- Files modified (to determine component scope)
- Labels and metadata
- Code change statistics
### 3. Analyze and Categorize
**Component Detection** (based on file paths):
- `src/database/`, `catalog/`, `schema/` → Database operations
- `cmd/`, `cli/` → CLI commands
- `api/`, `http/` → HTTP API
- `src/query/`, `sql/` → Query engine
- `src/auth/`, `token/` → Authentication
- `storage/`, `parquet/`, `wal/` → Storage engine
- `license/` → License management
**Change Type Detection**:
- `feat:` or "add", "new" → Feature
- `fix:` or "resolve", "correct" → Bug fix
- `perf:` or "optim", "faster" → Performance improvement
### 4. Generate Google Developer Documentation Style Descriptions
**Principles**:
- Clear, concise, action-oriented language
- Focus on what developers can do
- Avoid marketing speak ("enhanced", "improved", "better")
- Use specific, concrete benefits
- Start with action verbs when possible
**Templates**:
**Database Operations**:
- `hard.*delet.*date` → "Set custom hard deletion dates for deleted databases and tables"
- `retention.*period` → "Configure automatic data expiration for databases"
- `schema.*updat` → "Modify database schema after creation"
**CLI Commands**:
- `help.*text` → "Access help documentation for commands"
- `show.*license` → "View license details including expiration and limits"
- `object.*store.*required` → "Specify object store configuration when starting the server"
**HTTP API**:
- `v1.*query.*endpoint.*ns` → "Use nanosecond precision by default in V1 API CSV responses"
- `trigger.*request_path` → "Configure processing engine triggers with request paths"
**Query Engine**:
- `csv.*precision` → "Get consistent timestamp precision in CSV output"
- `query.*performance` → "Execute queries without performance degradation"
**Authentication**:
- `token.*creation` → "Generate tokens with additional configuration options"
- `admin.*token.*expiration` → "Set expiration dates for admin tokens"
**Storage Engine**:
- `aws.*credential.*reload` → "Automatically refresh AWS credentials from files"
- `wal.*replay.*concurrency` → "Control memory usage during database startup"
- `corrupt.*wal.*recovery` → "Recover from corrupted write-ahead log files"
**Fallback Patterns**:
- Features: "Use [functionality] to [specific action]"
- Bug fixes: "Avoid [specific problem] when [specific action]"
- Performance: "Execute [operation] without [specific issue]"
### 5. Enhancement Format
Transform:
```markdown
- **Database management**: Allow hard_deleted date of deleted schema to be updated ([#26574](https://github.com/influxdata/influxdb/pull/26574))
```
Into:
```markdown
- **Database operations**: Set custom hard deletion dates for deleted databases and tables ([#26574](https://github.com/influxdata/influxdb/pull/26574))
```
### 6. Output Processing
**Dry Run Mode**:
- Show before/after comparison
- List all proposed changes
- Don't modify the file
**Apply Mode**:
- Replace descriptions in the original file
- Preserve all formatting and PR links
- Log successful enhancements
### 7. Create Descriptive Commit Message
After enhancing the release notes, generate a commit message:
**Format**:
```
docs: enhance release notes with specific user benefits
- Transform generic descriptions into action-oriented language
- Add specific benefits following Google Developer Documentation style
- Focus on what developers can do with each change
- Enhanced [X] descriptions across [Y] components
Enhanced components: [list of components modified]
```
**Example**:
```
docs: enhance v3.2.1 release notes with specific user benefits
- Transform generic descriptions into action-oriented language
- Add specific benefits following Google Developer Documentation style
- Focus on what developers can do with each change
- Enhanced 8 descriptions across database, CLI, and API components
Enhanced components: Database operations, CLI commands, HTTP API
```
## Error Handling
- **Missing GitHub token**: Warn about rate limits, continue with public API
- **Private repos**: Skip PRs that can't be accessed
- **Invalid PR URLs**: Log error and skip enhancement
- **API rate limits**: Implement exponential backoff
- **Network issues**: Retry with fallback to original description
## Configuration
**Environment Variables**:
- `GITHUB_TOKEN`: Personal access token for GitHub API access
**GitHub Enterprise Support**:
- Detect GitHub Enterprise URLs in PR links
- Use appropriate API base URL
## Implementation Notes
1. **Rate Limiting**: Respect GitHub API rate limits (5000/hour authenticated, 60/hour unauthenticated)
2. **Caching**: Consider caching PR data to avoid repeated API calls during development
3. **Validation**: Verify PR URLs match expected format before API calls
4. **Preservation**: Maintain all existing formatting, spacing, and non-PR content
5. **Atomic Updates**: Only modify the file if all enhancements succeed (or provide partial success options)
## Example Usage
```bash
# Dry run to see proposed changes
enhance-release-notes release-notes-v3.2.1.md --dry-run
# Apply enhancements
enhance-release-notes release-notes-v3.2.1.md
# With verbose output
enhance-release-notes release-notes-v3.2.1.md --verbose
```
## Success Criteria
1. All PR descriptions follow Google Developer Documentation style
2. Descriptions focus on specific developer actions and benefits
3. No marketing language or vague improvements
4. Component categories are accurate based on code changes
5. Original formatting and PR links are preserved
6. Commit message clearly describes the enhancement approach

View File

@ -0,0 +1,16 @@
Please analyze and fix the GitHub issue: $ARGUMENTS.
Follow these steps:
1. Use `gh issue view` to get the issue details
2. Understand the problem described in the issue
3. Search the codebase for relevant files, using your knowledge of the project structure and the issue description
4. Implement the necessary changes to fix the issue
5. Write and run tests (store in `tests/` directory) to verify the fix
6. Create a descriptive commit message
7. Ensure code passes linting and type checking
8. Push
9. Ensure code passes pre-push tests
10. Create a PR
Remember to use the GitHub CLI (`gh`) for all GitHub-related tasks.

44
.context/README.md Normal file
View File

@ -0,0 +1,44 @@
# Context Files for LLMs and AI Tools
This directory contains plans, reports, and other context files that are:
- Used to provide context to LLMs during development
- Not committed to the repository
- May be transient or belong in other repositories
## Directory Structure
- `plans/` - Documentation plans and roadmaps
- `reports/` - Generated reports and analyses
- `research/` - Research notes and findings
- `templates/` - Reusable templates for Claude interactions
## Usage
Place files here that you want to reference—for example, using @ mentions in Claude—such as:
- Documentation planning documents
- API migration guides
- Performance reports
- Architecture decisions
## Example Structure
```
.context/
├── plans/
│ ├── v3.2-release-plan.md
│ └── api-migration-guide.md
├── reports/
│ ├── weekly-progress-2025-07.md
│ └── pr-summary-2025-06.md
├── research/
│ └── competitor-analysis.md
└── templates/
└── release-notes-template.md
```
## Best Practices
1. Use descriptive filenames that indicate the content and date
2. Keep files organized in appropriate subdirectories
3. Consider using date prefixes for time-sensitive content (e.g., `2025-07-01-meeting-notes.md`)
4. Remove outdated files periodically to keep the context relevant

View File

@ -0,0 +1,29 @@
name: 'Setup Documentation Environment'
description: 'Sets up Node.js environment and installs dependencies for documentation workflows'
runs:
using: 'composite'
steps:
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
cache: 'yarn'
- name: Install dependencies
run: yarn install
shell: bash
- name: Verify Hugo installation
run: |
echo "Checking Hugo availability..."
if command -v hugo &> /dev/null; then
echo "✅ Hugo found on PATH: $(which hugo)"
hugo version
else
echo "⚠️ Hugo not found on PATH, will use project-local Hugo via yarn"
fi
echo "Checking yarn hugo command..."
yarn hugo version || echo "⚠️ Project Hugo not available via yarn"
shell: bash

View File

@ -1,98 +1,282 @@
# GitHub Copilot Instructions for InfluxData Documentation
# InfluxData Documentation Repository (docs-v2)
## Purpose and Scope
Always follow these instructions first and fallback to additional search and context gathering only when the information provided here is incomplete or found to be in error.
GitHub Copilot should help document InfluxData products by creating clear, accurate technical content with proper code examples, frontmatter, and formatting.
## Working Effectively
## Documentation Structure
### Collaboration approach
Be a critical thinking partner, provide honest feedback, and identify potential issues.
### Bootstrap, Build, and Test the Repository
Execute these commands in order to set up a complete working environment:
1. **Install Node.js dependencies** (takes ~4 seconds):
```bash
# Skip Cypress binary download due to network restrictions in CI environments
CYPRESS_INSTALL_BINARY=0 yarn install
```
2. **Build the static site** (takes ~75 seconds, NEVER CANCEL - set timeout to 180+ seconds):
```bash
npx hugo --quiet
```
3. **Start the development server** (builds in ~92 seconds, NEVER CANCEL - set timeout to 150+ seconds):
```bash
npx hugo server --bind 0.0.0.0 --port 1313
```
- Access at: http://localhost:1313/
- Serves 5,359+ pages and 441 static files
- Auto-rebuilds on file changes
4. **Alternative Docker development setup** (use if local Hugo fails):
```bash
docker compose up local-dev
```
**Note**: May fail in restricted network environments due to Alpine package manager issues.
### Testing (CRITICAL: NEVER CANCEL long-running tests)
#### Code Block Testing (takes 5-15 minutes per product, NEVER CANCEL - set timeout to 30+ minutes):
```bash
# Build test environment first (takes ~30 seconds, may fail due to network restrictions)
docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest .
# Test all products (takes 15-45 minutes total)
yarn test:codeblocks:all
# Test specific products
yarn test:codeblocks:cloud
yarn test:codeblocks:v2
yarn test:codeblocks:telegraf
```
#### Link Validation (takes 1-5 minutes):
Runs automatically on pull requests.
Requires the **link-checker** binary from the repo release artifacts.
```bash
# Test specific files/products (faster)
# JSON format is required for accurate reporting
link-checker map content/influxdb3/core/**/*.md \
| link-checker check \
--config .ci/link-checker/production.lycherc.toml \
--format json
```
#### Style Linting (takes 30-60 seconds):
```bash
# Basic Vale linting
docker compose run -T vale content/**/*.md
# Product-specific linting with custom configurations
docker compose run -T vale --config=content/influxdb3/cloud-dedicated/.vale.ini --minAlertLevel=error content/influxdb3/cloud-dedicated/**/*.md
```
#### JavaScript and CSS Linting (takes 5-10 seconds):
```bash
yarn eslint assets/js/**/*.js
yarn prettier --check "**/*.{css,js,ts,jsx,tsx}"
```
### Pre-commit Hooks (automatically run, can be skipped if needed):
```bash
# Run all pre-commit checks manually
yarn lint
# Skip pre-commit hooks if necessary (not recommended)
git commit -m "message" --no-verify
```
## Validation Scenarios
Always test these scenarios after making changes to ensure full functionality:
### 1. Documentation Rendering Test
```bash
# Start Hugo server
npx hugo server --bind 0.0.0.0 --port 1313
# Verify key pages load correctly (200 status)
curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/influxdb3/core/
curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/influxdb/v2/
curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/telegraf/v1/
# Verify content contains expected elements
curl -s http://localhost:1313/influxdb3/core/ | grep -i "influxdb"
```
### 2. Build Output Validation
```bash
# Verify build completes successfully
npx hugo --quiet
# Check build output exists and has reasonable size (~529MB)
ls -la public/
du -sh public/
# Verify key files exist
file public/index.html
file public/influxdb3/core/index.html
```
### 3. Shortcode and Formatting Test
```bash
# Test shortcode examples page
yarn test:links content/example.md
```
## Repository Structure and Key Locations
### Content Organization
- **InfluxDB 3**: `/content/influxdb3/` (core, enterprise, cloud-dedicated, cloud-serverless, clustered, explorer)
- **InfluxDB v2**: `/content/influxdb/` (v2, cloud, enterprise_influxdb, v1)
- **Telegraf**: `/content/telegraf/v1/`
- **Other tools**: `/content/kapacitor/`, `/content/chronograf/`, `/content/flux/`
- **Shared content**: `/content/shared/`
- **Examples**: `/content/example.md` (comprehensive shortcode reference)
### Configuration Files
- **Hugo config**: `/config/_default/`
- **Package management**: `package.json`, `yarn.lock`
- **Docker**: `compose.yaml`, `Dockerfile.pytest`
- **Git hooks**: `lefthook.yml`
- **Testing**: `cypress.config.js`, `pytest.ini` (in test directories)
- **Linting**: `.vale.ini`, `.prettierrc.yaml`, `eslint.config.js`
### Build and Development
- **Hugo binary**: Available via `npx hugo` (version 0.148.2+)
- **Static assets**: `/assets/` (JavaScript, CSS, images)
- **Build output**: `/public/` (generated, ~529MB)
- **Layouts**: `/layouts/` (Hugo templates)
- **Data files**: `/data/` (YAML/JSON data for templates)
## Technology Stack
- **Static Site Generator**: Hugo (0.148.2+ extended)
- **Package Manager**: Yarn (1.22.22+) with Node.js (20.19.4+)
- **Testing Framework**:
- Pytest with pytest-codeblocks (for code examples)
- Cypress (for E2E tests)
- influxdata/docs-link-checker (for link validation)
- Vale (for style and writing guidelines)
- **Containerization**: Docker with Docker Compose
- **Linting**: ESLint, Prettier, Vale
- **Git Hooks**: Lefthook
## Common Tasks and Build Times
### Network Connectivity Issues
In restricted environments, these commands may fail due to external dependency downloads:
- `docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest .` (InfluxData repositories, HashiCorp repos)
- `docker compose up local-dev` (Alpine package manager)
- Cypress binary installation (use `CYPRESS_INSTALL_BINARY=0`)
Document these limitations but proceed with available functionality.
### Validation Commands for CI
Always run these before committing changes:
```bash
# Format and lint code
yarn prettier --write "**/*.{css,js,ts,jsx,tsx}"
yarn eslint assets/js/**/*.js
# Test Hugo build
npx hugo --quiet
# Test development server startup
timeout 150 npx hugo server --bind 0.0.0.0 --port 1313 &
sleep 120
curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/
pkill hugo
```
## Key Projects in This Codebase
1. **InfluxDB 3 Documentation** (Core, Enterprise, Clustered, Cloud Dedicated, Cloud Serverless, and InfluxDB 3 plugins for Core and Enterprise)
2. **InfluxDB 3 Explorer** (UI)
3. **InfluxDB v2 Documentation** (OSS and Cloud)
4. **InfluxDB v1 Documentation** (OSS and Enterprise)
5. **Telegraf Documentation** (agent and plugins)
6. **Supporting Tools Documentation** (Kapacitor, Chronograf, Flux)
7. **API Reference Documentation** (`/api-docs/`)
8. **Shared Documentation Components** (`/content/shared/`)
## Important Locations for Frequent Tasks
- **Shortcode reference**: `/content/example.md`
- **Contributing guide**: `CONTRIBUTING.md`
- **Testing guide**: `TESTING.md`
- **Product configurations**: `/data/products.yml`
- **Vale style rules**: `/.ci/vale/styles/`
- **GitHub workflows**: `/.github/workflows/`
- **Test scripts**: `/test/scripts/`
- **Hugo layouts and shortcodes**: `/layouts/`
- **CSS/JS assets**: `/assets/`
## Content Guidelines and Style
### Documentation Structure
- **Product version data**: `/data/products.yml`
- **Products**:
- InfluxDB OSS 1.x
- Documentation source path: `/content/influxdb/v1`
- Code repository: https://github.com/influxdata/influxdb
- InfluxDB OSS 2.x
- Documentation source path: `/content/influxdb/v2`
- Code repository: https://github.com/influxdata/influxdb
- InfluxDB 3 Core
- Documentation source path: `/content/influxdb3/core`
- Code repositories: https://github.com/influxdata/influxdb, https://github.com/influxdata/influxdb3_core
- InfluxDB Enterprise v1 (1.x)
- Documentation source path: `/content/influxdb/enterprise_influxdb`
- Code repository: https://github.com/influxdata/influxdb
- InfluxDB Cloud v2 (TSM)
- Documentation source path: `/content/influxdb/cloud`
- Code repository: https://github.com/influxdata/idpe
- InfluxDB 3 Cloud Dedicated
- Documentation source path: `/content/influxdb3/cloud-dedicated`
- Code repository: https://github.com/influxdata/influxdb
- InfluxDB 3 Cloud Serverless
- Documentation source path: `/content/influxdb3/cloud-serverless`
- Code repository: https://github.com/influxdata/idpe
- InfluxDB 3 Clustered
- Documentation source path: `/content/influxdb3/clustered`
- Code repository: https://github.com/influxdata/influxdb
- Telegraf
- Documentation source path: `/content/telegraf/v1`
- Code repository: https://github.com/influxdata/telegraf
- Kapacitor
- Documentation source path: `/content/kapacitor/v1`
- Code repository: https://github.com/influxdata/kapacitor
- Chronograf
- Documentation source path: `/content/chronograf/v1`
- Code repository: https://github.com/influxdata/chronograf
- Flux
- Documentation source path: `/content/flux/v0`
- Code repository: https://github.com/influxdata/flux
- **InfluxData-supported tools**:
- InfluxDB API client libraries
- Code repositories: https://github.com/InfluxCommunity
- InfluxDB 3 processing engine plugins
- Code repository: https://github.com/influxdata/influxdb3_plugins
- **Query Languages**: SQL, InfluxQL, Flux (use appropriate language per product version)
- **Documentation Site**: https://docs.influxdata.com
- **Repository**: https://github.com/influxdata/docs-v2
- **Framework**: Hugo static site generator
## Style Guidelines
### Style Guidelines
- Follow Google Developer Documentation style guidelines
- For API references, follow YouTube Data API style
- Use semantic line feeds (one sentence per line)
- Use only h2-h6 headings in content (h1 comes from frontmatter title properties)
- Use sentence case for headings
- Use GitHub callout syntax
- Format code examples to fit within 80 characters
- Use long options in command line examples (`--option` instead of `-o`)
- Use GitHub callout syntax for notes and warnings
- Image naming: `project/version-context-description.png`
- Use appropriate product names and versions consistently
- Follow InfluxData vocabulary guidelines
## Markdown and Shortcodes
### Markdown and Shortcodes
- Include proper frontmatter for each page:
Include proper frontmatter for all content pages:
```yaml
title: # Page title (h1)
seotitle: # SEO title
list_title: # Title for article lists
description: # SEO description
menu:
product_version:
weight: # Page order (1-99, 101-199, etc.)
```
```yaml
title: # Page title (h1)
seotitle: # SEO title
description: # SEO description
menu:
product_version:
weight: # Page order (1-99, 101-199, etc.)
```
- Use provided shortcodes correctly:
- Notes/warnings: `{{% note %}}`, `{{% warn %}}`
- Product-specific: `{{% enterprise %}}`, `{{% cloud %}}`
- Tabbed content: `{{< tabs-wrapper >}}`, `{{% tabs %}}`, `{{% tab-content %}}`
- Version links: `{{< latest >}}`, `{{< latest-patch >}}`
- API endpoints: `{{< api-endpoint >}}`
- Required elements: `{{< req >}}`
- Navigation: `{{< page-nav >}}`
- Diagrams: `{{< diagram >}}`, `{{< filesystem-diagram >}}`
Key shortcodes (see `/content/example.md` for full reference):
## Code Examples and Testing
- Notes/warnings (GitHub syntax): `> [!Note]`, `> [!Warning]`
- Tabbed content: `{{< tabs-wrapper >}}`, `{{% tabs %}}`, `{{% tab-content %}}`
- Code examples: `{{< code-tabs-wrapper >}}`, `{{% code-tabs %}}`, `{{% code-tab-content %}}`
- Required elements: `{{< req >}}`
- API endpoints: `{{< api-endpoint >}}`
- Provide complete, working examples with proper testing annotations:
### Code Examples and Testing
Provide complete, working examples with pytest annotations:
```python
print("Hello, world!")
@ -104,44 +288,21 @@ print("Hello, world!")
Hello, world!
```
- CLI command example:
## Troubleshooting Common Issues
```sh
influx query 'from(bucket:"example") |> range(start:-1h)'
```
1. **"Pytest collected 0 items"**: Use `python` (not `py`) for code block language identifiers
2. **Hugo build errors**: Check `/config/_default/` for configuration issues
3. **Docker build failures**: Expected in restricted networks - document and continue with local Hugo
4. **Cypress installation failures**: Use `CYPRESS_INSTALL_BINARY=0 yarn install`
5. **Link validation slow**: Use file-specific testing: `yarn test:links content/specific-file.md`
6. **Vale linting errors**: Check `.ci/vale/styles/config/vocabularies` for accepted/rejected terms
<!--pytest-codeblocks:expected-output-->
## Additional Instruction Files
```
Table: keys: [_start, _stop, _field, _measurement]
_start:time _stop:time _field:string _measurement:string _time:time _value:float
------------------------------ ------------------------------ ---------------------- ---------------------- ------------------------------ ----------------------------
```
For specific workflows and content types, also refer to:
- Include necessary environment variables
- Show proper credential handling for authenticated commands
- **InfluxDB 3 code placeholders**: `.github/instructions/influxdb3-code-placeholders.instructions.md`
- **Contributing guidelines**: `.github/instructions/contributing.instructions.md`
- **Content-specific instructions**: Check `.github/instructions/` directory
## API Documentation
- Follow OpenAPI specification patterns
- Match REST API examples to current implementation
- Include complete request/response examples
- Document required headers and authentication
## Versioning and Product Differentiation
- Clearly distinguish between different InfluxDB versions (1.x, 2.x, 3.x)
- Use correct terminology for each product variant
- Apply appropriate UI descriptions and screenshots
- Reference appropriate query language per version
## Development Tools
- Vale.sh linter for style checking
- Docker for local development and testing
- pytest and pytest-codeblocks for validating code examples
- Pre-commit hooks for quality assurance
## Related repositories
- **Internal documentation assistance requests**: https://github.com/influxdata/DAR/issues
Remember: This is a large documentation site with complex build processes. Patience with build times is essential, and NEVER CANCEL long-running operations.

View File

@ -0,0 +1,287 @@
---
applyTo: "content/**/*.md, layouts/**/*.html"
---
# Contributing instructions for InfluxData Documentation
## Purpose and scope
Help document InfluxData products
by creating clear, accurate technical content with proper
code examples, frontmatter, shortcodes, and formatting.
## Quick Start
Ready to contribute? Here's the essential workflow:
1. [Sign the InfluxData CLA](#sign-the-influxdata-cla) (for substantial changes)
2. [Fork and clone](#fork-and-clone-influxdata-documentation-repository) this repository
3. [Install dependencies](#development-environment-setup) (Node.js, Yarn, Docker)
4. Make your changes following [style guidelines](#making-changes)
5. [Test your changes](TESTING.md) (pre-commit and pre-push hooks run automatically)
6. [Submit a pull request](#submission-process)
For detailed setup and reference information, see the sections below.
---
### Sign the InfluxData CLA
The InfluxData Contributor License Agreement (CLA) is part of the legal framework
for the open source ecosystem that protects both you and InfluxData.
To make substantial contributions to InfluxData documentation, first sign the InfluxData CLA.
What constitutes a "substantial" change is at the discretion of InfluxData documentation maintainers.
[Sign the InfluxData CLA](https://www.influxdata.com/legal/cla/)
_**Note:** Typo and broken link fixes are greatly appreciated and do not require signing the CLA._
_If you're new to contributing or you're looking for an easy update, see [`docs-v2` good-first-issues](https://github.com/influxdata/docs-v2/issues?q=is%3Aissue+is%3Aopen+label%3Agood-first-issue)._
### Fork and clone InfluxData Documentation Repository
[Fork this repository](https://help.github.com/articles/fork-a-repo/) and
[clone it](https://help.github.com/articles/cloning-a-repository/) to your local machine.
---
### Prerequisites
docs-v2 automatically runs format (Markdown, JS, and CSS) linting and code block tests for staged files that you try to commit.
For the linting and tests to run, you need to install:
- **Node.js and Yarn**: For managing dependencies and running build scripts
- **Docker**: For running Vale linter and code block tests
- **VS Code extensions** (optional): For enhanced editing experience
```sh
git commit -m "<COMMIT_MESSAGE>" --no-verify
```
# ... (see full CONTRIBUTING.md for complete example)
```bash
docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest .
```
### Install Visual Studio Code extensions
- Comment Anchors: recognizes tags (for example, `//SOURCE`) and makes links and filepaths clickable in comments.
- Vale: shows linter errors and suggestions in the editor.
- YAML Schemas: validates frontmatter attributes.
_See full CONTRIBUTING.md for complete details._
#### Markdown
Most docs-v2 documentation content uses [Markdown](https://en.wikipedia.org/wiki/Markdown).
_Some parts of the documentation, such as `./api-docs`, contain Markdown within YAML and rely on additional tooling._
#### Semantic line feeds
```diff
-Data is taking off. This data is time series. You need a database that specializes in time series. You should check out InfluxDB.
+Data is taking off. This data is time series. You need a database that specializes in time series. You need InfluxDB.
# ... (see full CONTRIBUTING.md for complete example)
```
### Essential Frontmatter Reference
```yaml
title: # Title of the page used in the page's h1
description: # Page description displayed in search engine results
# ... (see full CONTRIBUTING.md for complete example)
```
_See full CONTRIBUTING.md for complete details._
#### Notes and warnings
```md
> [!Note]
> Insert note markdown content here.
> [!Warning]
> Insert warning markdown content here.
> [!Caution]
> Insert caution markdown content here.
> [!Important]
> Insert important markdown content here.
> [!Tip]
> Insert tip markdown content here.
```
#### Tabbed content
```md
{{< tabs-wrapper >}}
{{% tabs %}}
[Button text for tab 1](#)
[Button text for tab 2](#)
{{% /tabs %}}
{{% tab-content %}}
Markdown content for tab 1.
{{% /tab-content %}}
{{% tab-content %}}
Markdown content for tab 2.
{{% /tab-content %}}
{{< /tabs-wrapper >}}
```
#### Required elements
```md
{{< req >}}
{{< req type="key" >}}
- {{< req "\*" >}} **This element is required**
- {{< req "\*" >}} **This element is also required**
- **This element is NOT required**
```
For the complete shortcodes reference with all available shortcodes, see [Complete Shortcodes Reference](#complete-shortcodes-reference).
---
### InfluxDB API documentation
docs-v2 includes the InfluxDB API reference documentation in the `/api-docs` directory.
To edit the API documentation, edit the YAML files in `/api-docs`.
InfluxData uses [Redoc](https://github.com/Redocly/redoc) to generate the full
InfluxDB API documentation when documentation is deployed.
Redoc generates HTML documentation using the InfluxDB `swagger.yml`.
For more information about generating InfluxDB API documentation, see the
[API Documentation README](https://github.com/influxdata/docs-v2/tree/master/api-docs#readme).
---
## Testing & Quality Assurance
For comprehensive testing information, including code block testing, link validation, style linting, and advanced testing procedures, see **[TESTING.md](TESTING.md)**.
### Quick Testing Reference
```bash
# Test code blocks
yarn test:codeblocks:all
# Test links
yarn test:links content/influxdb3/core/**/*.md
# Run style linting
docker compose run -T vale content/**/*.md
```
Pre-commit hooks run automatically when you commit changes, testing your staged files with Vale, Prettier, Cypress, and Pytest. To skip hooks if needed:
```sh
git commit -m "<COMMIT_MESSAGE>" --no-verify
```
---
### Commit Guidelines
When creating commits, follow these guidelines:
- Use a clear, descriptive commit message that explains the change
- Start with a type prefix: `fix()`, `feat()`, `style()`, `refactor()`, `test()`, `chore()`
- For product-specific changes, include the product in parentheses: `fix(enterprise)`, `fix(influxdb3)`, `fix(core)`
- Keep the first line under 72 characters
- Reference issues with "closes" or "fixes": `closes #123` or `closes influxdata/DAR#123`
- For multiple issues, use comma separation: `closes influxdata/DAR#517, closes influxdata/DAR#518`
**Examples:**
```
fix(enterprise): correct Docker environment variable name for license email
fix(influxdb3): correct Docker environment variable and compose examples for monolith
feat(telegraf): add new plugin documentation
chore(ci): update Vale configuration
```
## Reference Sections
_See full CONTRIBUTING.md for complete details._
### Complete Frontmatter Reference
_For the complete Complete Frontmatter Reference reference, see frontmatter-reference.instructions.md._
### Complete Shortcodes Reference
_For the complete Complete Shortcodes Reference reference, see shortcodes-reference.instructions.md._
#### Vale style linting configuration
docs-v2 includes Vale writing style linter configurations to enforce documentation writing style rules, guidelines, branding, and vocabulary terms.
**Advanced Vale usage:**
```sh
docker compose run -T vale --config=content/influxdb/cloud-dedicated/.vale.ini --minAlertLevel=error content/influxdb/cloud-dedicated/write-data/**/*.md
```
- **Error**:
- **Warning**: General style guide rules and best practices
- **Suggestion**: Style preferences that may require refactoring or updates to an exceptions list
#### Configure style rules
_See full CONTRIBUTING.md for complete details._
#### JavaScript in the documentation UI
The InfluxData documentation UI uses JavaScript with ES6+ syntax and
`assets/js/main.js` as the entry point to import modules from
1. In your HTML file, add a `data-component` attribute to the element that
# ... (see full CONTRIBUTING.md for complete example)
```js
import { debugLog, debugBreak, debugInspect } from './utils/debug-helpers.js';
const data = debugInspect(someData, 'Data');
debugLog('Processing data', 'myFunction');
function processData() {
// Add a breakpoint that works with DevTools
debugBreak();
// Your existing code...
}
```
3. Start Hugo in development mode--for example:
```bash
yarn hugo server
```
4. In VS Code, go to Run > Start Debugging, and select the "Debug JS (debug-helpers)" configuration.
Your system uses the configuration in `launch.json` to launch the site in Chrome
and attach the debugger to the Developer Tools console.
Make sure to remove the debug statements before merging your changes.
The debug helpers are designed to be used in development and should not be used in production.
_See full CONTRIBUTING.md for complete details._

View File

@ -0,0 +1,198 @@
---
applyTo: "content/**/*.md, layouts/**/*.html"
---
### Complete Frontmatter Reference
Every documentation page includes frontmatter which specifies information about the page.
Frontmatter populates variables in page templates and the site's navigation menu.
```yaml
title: # Title of the page used in the page's h1
seotitle: # Page title used in the html <head> title and used in search engine results
list_title: # Title used in article lists generated using the {{< children >}} shortcode
description: # Page description displayed in search engine results
menu:
influxdb_2_0:
name: # Article name that only appears in the left nav
parent: # Specifies a parent group and nests navigation items
weight: # Determines sort order in both the nav tree and in article lists
draft: # If true, will not render page on build
product/v2.x/tags: # Tags specific to each version (replace "product" and "v2.x" with the appropriate product and minor version)
related: # Creates links to specific internal and external content at the bottom of the page
- /path/to/related/article
- https://external-link.com, This is an external link
external_url: # Used in children shortcode type="list" for page links that are external
list_image: # Image included with article descriptions in children type="articles" shortcode
list_note: # Used in children shortcode type="list" to add a small note next to listed links
list_code_example: # Code example included with article descriptions in children type="articles" shortcode
list_query_example: # Code examples included with article descriptions in children type="articles" shortcode,
# References to examples in data/query_examples
canonical: # Path to canonical page, overrides auto-gen'd canonical URL
v2: # Path to v2 equivalent page
alt_links: # Alternate pages in other products/versions for cross-product navigation
cloud-dedicated: /influxdb3/cloud-dedicated/path/to/page/
core: /influxdb3/core/path/to/page/
prepend: # Prepend markdown content to an article (especially powerful with cascade)
block: # (Optional) Wrap content in a block style (note, warn, cloud)
content: # Content to prepend to article
append: # Append markdown content to an article (especially powerful with cascade)
block: # (Optional) Wrap content in a block style (note, warn, cloud)
content: # Content to append to article
metadata: [] # List of metadata messages to include under the page h1
updated_in: # Product and version the referenced feature was updated in (displayed as a unique metadata)
source: # Specify a file to pull page content from (typically in /content/shared/)
```
#### Title usage
##### `title`
The `title` frontmatter populates each page's HTML `h1` heading tag.
It shouldn't be overly long, but should set the context for users coming from outside sources.
##### `seotitle`
The `seotitle` frontmatter populates each page's HTML `title` attribute.
Search engines use this in search results (not the page's h1) and therefore it should be keyword optimized.
##### `list_title`
The `list_title` frontmatter determines an article title when in a list generated
by the [`{{< children >}}` shortcode](#generate-a-list-of-children-articles).
##### `menu > name`
The `name` attribute under the `menu` frontmatter determines the text used in each page's link in the site navigation.
It should be short and assume the context of its parent if it has one.
#### Page Weights
To ensure pages are sorted both by weight and their depth in the directory
structure, pages should be weighted in "levels."
All top level pages are weighted 1-99.
The next level is 101-199.
Then 201-299 and so on.
_**Note:** `_index.md` files should be weighted one level up from the other `.md` files in the same directory._
#### Related content
Use the `related` frontmatter to include links to specific articles at the bottom of an article.
- If the page exists inside of this documentation, just include the path to the page.
It will automatically detect the title of the page.
- If the page exists inside of this documentation, but you want to customize the link text,
include the path to the page followed by a comma, and then the custom link text.
The path and custom text must be in that order and separated by a comma and a space.
- If the page exists outside of this documentation, include the full URL and a title for the link.
The link and title must be in that order and separated by a comma and a space.
```yaml
related:
- /v2.0/write-data/quick-start
- /v2.0/write-data/quick-start, This is custom text for an internal link
- https://influxdata.com, This is an external link
```
#### Canonical URLs
Search engines use canonical URLs to accurately rank pages with similar or identical content.
The `canonical` HTML meta tag identifies which page should be used as the source of truth.
By default, canonical URLs are automatically generated for each page in the InfluxData
documentation using the latest version of the current product and the current path.
Use the `canonical` frontmatter to override the auto-generated canonical URL.
_**Note:** The `canonical` frontmatter supports the [`{{< latest >}}` shortcode](#latest-links)._
```yaml
canonical: /path/to/canonical/doc/
# OR
canonical: /{{< latest "influxdb" "v2" >}}/path/to/canonical/doc/
```
#### v2 equivalent documentation
To display a notice on a 1.x page that links to an equivalent 2.0 page,
add the following frontmatter to the 1.x page:
```yaml
v2: /influxdb/v2.0/get-started/
```
#### Alternative links for cross-product navigation
Use the `alt_links` frontmatter to specify equivalent pages in other InfluxDB products,
for example, when a page exists at a different path in a different version or if
the feature doesn't exist in that product.
This enables the product switcher to navigate users to the corresponding page when they
switch between products. If a page doesn't exist in another product (for example, an
Enterprise-only feature), point to the nearest parent page if relevant.
```yaml
alt_links:
cloud-dedicated: /influxdb3/cloud-dedicated/admin/tokens/create-token/
cloud-serverless: /influxdb3/cloud-serverless/admin/tokens/create-token/
core: /influxdb3/core/reference/cli/influxdb3/update/ # Points to parent if exact page doesn't exist
```
Supported product keys for InfluxDB 3:
- `core`
- `enterprise`
- `cloud-serverless`
- `cloud-dedicated`
- `clustered`
#### Prepend and append content to a page
Use the `prepend` and `append` frontmatter to add content to the top or bottom of a page.
Each has the following fields:
```yaml
append: |
> [!Note]
> #### This is example markdown content
> This is just an example note block that gets appended to the article.
```
Use this frontmatter with [cascade](#cascade) to add the same content to
all children pages as well.
```yaml
cascade:
append: |
> [!Note]
> #### This is example markdown content
> This is just an example note block that gets appended to the article.
```
#### Cascade
To automatically apply frontmatter to a page and all of its children, use the
[`cascade` frontmatter](https://gohugo.io/content-management/front-matter/#front-matter-cascade)
built into Hugo.
```yaml
title: Example page
description: Example description
cascade:
layout: custom-layout
```
`cascade` applies the frontmatter to all children unless the child already includes
those frontmatter keys. Frontmatter defined on the page overrides frontmatter
"cascaded" from a parent.
#### Use shared content in a page
Use the `source` frontmatter to specify a shared file to use to populate the
page content. Shared files are typically stored in the `/content/shared` directory.
When building shared content, use the `show-in` and `hide-in` shortcodes to show
or hide blocks of content based on the current InfluxDB product/version.
For more information, see [show-in](#show-in) and [hide-in](#hide-in).

View File

@ -0,0 +1,89 @@
---
mode: 'edit'
applyTo: "content/{influxdb3/core,influxdb3/enterprise,shared/influxdb3*}/**"
---
## Best Practices
- Use UPPERCASE for placeholders to make them easily identifiable
- Don't use pronouns in placeholders (e.g., "your", "this")
- List placeholders in the same order they appear in the code
- Provide clear descriptions including:
  - Expected data type or format
  - Purpose of the value
  - Any constraints or requirements
- Mark optional placeholders as "Optional:" in their descriptions
- Placeholder key descriptions should fit the context of the code snippet
- Include examples for complex formats
## Writing Placeholder Descriptions
Descriptions should follow consistent patterns:
1. **Admin Authentication tokens**:
- Recommended: "a {{% token-link "admin" %}} for your {{< product-name >}} instance"
- Avoid: "your token", "the token", "an authorization token"
2. **Database resource tokens**:
- Recommended: "your {{% token-link "database" %}}"{{% show-in "enterprise" %}} with permissions on the specified database{{% /show-in %}}
- Avoid: "your token", "the token", "an authorization token"
3. **Database names**:
- Recommended: "the name of the database to [action]"
- Avoid: "your database", "the database name"
4. **Conditional content**:
- Use `{{% show-in "enterprise" %}}` for content specific to enterprise versions
- Example: "your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}}"
## Common placeholders for InfluxDB 3
- `AUTH_TOKEN`: your {{% token-link %}}
- `DATABASE_NAME`: the database to use
- `TABLE_NAME`: Name of the table/measurement to query or write to
- `NODE_ID`: Node ID for a specific node in a cluster
- `CLUSTER_ID`: Cluster ID for a specific cluster
- `HOST`: InfluxDB server hostname or URL
- `PORT`: InfluxDB server port (typically 8181)
- `QUERY`: SQL or InfluxQL query string
- `LINE_PROTOCOL`: Line protocol data for writes
- `PLUGIN_FILENAME`: Name of plugin file to use
- `CACHE_NAME`: Name for a new or existing cache
## Hugo shortcodes in Markdown
- `{{% code-placeholders "PLACEHOLDER1|PLACEHOLDER2" %}}`: Use this shortcode to define placeholders in code snippets.
- `{{% /code-placeholders %}}`: End the shortcode.
- `{{% code-placeholder-key %}}`: Use this shortcode to define a specific placeholder key.
- `{{% /code-placeholder-key %}}`: End the specific placeholder key shortcode.
## Language-Specific Placeholder Formatting
- **Bash/Shell**: Use uppercase variables with no quotes or prefix
```bash
--database DATABASE_NAME
```
- Python: Use string literals with quotes
```python
database_name='DATABASE_NAME'
```
- JSON: Use key-value pairs with quotes
```json
{
"database": "DATABASE_NAME"
}
```
## Real-World Examples from Documentation
### InfluxDB CLI Commands
This pattern appears frequently in CLI documentation:
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
```bash
influxdb3 write \
--database DATABASE_NAME \
--token AUTH_TOKEN \
  --precision ns
```
{{% /code-placeholders %}}
Replace the following placeholders with your values:
{{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to write to
{{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with write permissions on the specified database{{% /show-in %}}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,15 @@
---
applyTo: "content/**/*.md, layouts/**/*.html"
---
### Detailed Testing Setup
For comprehensive testing information, including:
- Code block testing setup and configuration
- Link validation testing procedures
- Style linting with Vale
- Pre-commit hooks and GitHub Actions integration
- Advanced testing procedures and troubleshooting
Please refer to the main **[TESTING.md](../../TESTING.md)** file.

177
.github/scripts/cache-manager.cjs vendored Normal file
View File

@ -0,0 +1,177 @@
#!/usr/bin/env node
/**
* Simple Cache Manager for Link Validation Results
* Uses GitHub Actions cache API or local file storage
*/
const fs = require('fs');
const path = require('path');
const crypto = require('crypto');
const process = require('process');
const CACHE_VERSION = 'v1';
const CACHE_KEY_PREFIX = 'link-validation';
const LOCAL_CACHE_DIR = path.join(process.cwd(), '.cache', 'link-validation');
/**
* Simple cache interface
*/
class CacheManager {
  /**
   * @param {Object} [options]
   * @param {boolean} [options.useGitHubCache] - Set to false to force local file storage.
   * @param {string} [options.localCacheDir] - Directory for local cache files.
   * @param {number} [options.cacheTTLDays] - Cache entry lifetime in days (default 30).
   */
  constructor(options = {}) {
    // Only use the GitHub Actions cache when actually running in CI and the
    // caller has not explicitly opted out.
    this.useGitHubCache =
      options.useGitHubCache !== false && process.env.GITHUB_ACTIONS;
    this.localCacheDir = options.localCacheDir || LOCAL_CACHE_DIR;
    // Configurable cache TTL - default 30 days, support environment variable.
    // Parse with an explicit radix; NaN (unset or garbage env var) falls back to 30.
    this.cacheTTLDays =
      options.cacheTTLDays ||
      Number.parseInt(process.env.LINK_CACHE_TTL_DAYS, 10) ||
      30;
    this.maxAge = this.cacheTTLDays * 24 * 60 * 60 * 1000;
    if (!this.useGitHubCache) {
      this.ensureLocalCacheDir();
    }
  }

  /** Create the local cache directory if it does not already exist. */
  ensureLocalCacheDir() {
    if (!fs.existsSync(this.localCacheDir)) {
      fs.mkdirSync(this.localCacheDir, { recursive: true });
    }
  }

  /**
   * Build a deterministic cache key from a file path and content hash.
   * The path is hashed (first 8 hex chars) to keep keys short and filesystem-safe.
   * @param {string} filePath - Path of the validated file.
   * @param {string} fileHash - Content hash of the file.
   * @returns {string} Key of the form `link-validation-v1-<pathHash>-<fileHash>`.
   */
  generateCacheKey(filePath, fileHash) {
    const pathHash = crypto
      .createHash('sha256')
      .update(filePath)
      .digest('hex')
      .substring(0, 8);
    return `${CACHE_KEY_PREFIX}-${CACHE_VERSION}-${pathHash}-${fileHash}`;
  }

  /**
   * Retrieve cached validation results for a file, or null on a miss.
   * @param {string} filePath
   * @param {string} fileHash
   * @returns {Promise<Object|null>}
   */
  async get(filePath, fileHash) {
    if (this.useGitHubCache) {
      return await this.getFromGitHubCache(filePath, fileHash);
    }
    return await this.getFromLocalCache(filePath, fileHash);
  }

  /**
   * Store validation results for a file.
   * @param {string} filePath
   * @param {string} fileHash
   * @param {Object} results - Validation results to cache.
   * @returns {Promise<boolean>} True when the results were stored.
   */
  async set(filePath, fileHash, results) {
    if (this.useGitHubCache) {
      return await this.setToGitHubCache(filePath, fileHash, results);
    }
    return await this.setToLocalCache(filePath, fileHash, results);
  }

  async getFromGitHubCache(filePath, fileHash) {
    // TODO: This method is a placeholder for GitHub Actions cache integration
    // GitHub Actions cache is handled directly in the workflow via actions/cache
    // This method should either be implemented or removed in future versions
    console.warn(
      '[PLACEHOLDER] getFromGitHubCache: Using placeholder implementation - always returns null'
    );
    return null;
  }

  async setToGitHubCache(filePath, fileHash, results) {
    // TODO: This method is a placeholder for GitHub Actions cache integration
    // GitHub Actions cache is handled directly in the workflow via actions/cache
    // This method should either be implemented or removed in future versions
    console.warn(
      '[PLACEHOLDER] setToGitHubCache: Using placeholder implementation - always returns true'
    );
    return true;
  }

  /**
   * Read results from the local cache, enforcing the configured TTL.
   * Expired or corrupted entries are deleted and treated as misses.
   * @returns {Promise<Object|null>} Cached results, or null on miss.
   */
  async getFromLocalCache(filePath, fileHash) {
    const cacheKey = this.generateCacheKey(filePath, fileHash);
    const cacheFile = path.join(this.localCacheDir, `${cacheKey}.json`);
    if (!fs.existsSync(cacheFile)) {
      return null;
    }
    try {
      const content = fs.readFileSync(cacheFile, 'utf8');
      const cached = JSON.parse(content);
      // TTL check using configured cache duration
      const age = Date.now() - new Date(cached.cachedAt).getTime();
      if (age > this.maxAge) {
        fs.unlinkSync(cacheFile);
        return null;
      }
      return cached.results;
    } catch {
      // Clean up corrupted cache entries so they do not poison future runs.
      try {
        fs.unlinkSync(cacheFile);
      } catch {
        // Ignore cleanup errors
      }
      return null;
    }
  }

  /**
   * Write results to the local cache with a timestamp used for TTL checks.
   * @returns {Promise<boolean>} False (with a warning) when the write fails.
   */
  async setToLocalCache(filePath, fileHash, results) {
    const cacheKey = this.generateCacheKey(filePath, fileHash);
    const cacheFile = path.join(this.localCacheDir, `${cacheKey}.json`);
    const cacheData = {
      filePath,
      fileHash,
      results,
      cachedAt: new Date().toISOString(),
    };
    try {
      fs.writeFileSync(cacheFile, JSON.stringify(cacheData, null, 2));
      return true;
    } catch (error) {
      console.warn(`Cache save failed: ${error.message}`);
      return false;
    }
  }

  /**
   * Remove expired (and unreadable) entries from the local cache directory.
   * @returns {Promise<{removed: number, note?: string}>} Count of removed files.
   */
  async cleanup() {
    if (this.useGitHubCache) {
      return { removed: 0, note: 'GitHub Actions cache auto-managed' };
    }
    let removed = 0;
    if (!fs.existsSync(this.localCacheDir)) {
      return { removed };
    }
    const files = fs.readdirSync(this.localCacheDir);
    for (const file of files) {
      if (!file.endsWith('.json')) continue;
      const filePath = path.join(this.localCacheDir, file);
      try {
        const stat = fs.statSync(filePath);
        if (Date.now() - stat.mtime.getTime() > this.maxAge) {
          fs.unlinkSync(filePath);
          removed++;
        }
      } catch {
        // Remove files we cannot stat (corrupted or concurrently deleted).
        try {
          fs.unlinkSync(filePath);
          removed++;
        } catch {
          // Ignore errors
        }
      }
    }
    return { removed };
  }
}
module.exports = CacheManager;
module.exports.CacheManager = CacheManager;

177
.github/scripts/cache-manager.js vendored Normal file
View File

@ -0,0 +1,177 @@
#!/usr/bin/env node
/**
 * Simple Cache Manager for Link Validation Results
 * Uses GitHub Actions cache API or local file storage
 */
import fs from 'fs';
import path from 'path';
import crypto from 'crypto';
import process from 'process';

// Bump the version to invalidate all previously written cache entries.
const CACHE_VERSION = 'v1';
const CACHE_KEY_PREFIX = 'link-validation';
const LOCAL_CACHE_DIR = path.join(process.cwd(), '.cache', 'link-validation');

/**
 * Simple cache interface for link-validation results, backed either by the
 * GitHub Actions cache (in CI) or by local JSON files (during development).
 */
class CacheManager {
  /**
   * @param {Object} [options]
   * @param {boolean} [options.useGitHubCache] - Set to false to force local file storage.
   * @param {string} [options.localCacheDir] - Directory for local cache files.
   * @param {number} [options.cacheTTLDays] - Cache entry lifetime in days (default 30).
   */
  constructor(options = {}) {
    // Only use the GitHub Actions cache when actually running in CI and the
    // caller has not explicitly opted out.
    this.useGitHubCache =
      options.useGitHubCache !== false && process.env.GITHUB_ACTIONS;
    this.localCacheDir = options.localCacheDir || LOCAL_CACHE_DIR;
    // Configurable cache TTL - default 30 days, support environment variable.
    // Parse with an explicit radix; NaN (unset or garbage env var) falls back to 30.
    this.cacheTTLDays =
      options.cacheTTLDays ||
      Number.parseInt(process.env.LINK_CACHE_TTL_DAYS, 10) ||
      30;
    this.maxAge = this.cacheTTLDays * 24 * 60 * 60 * 1000;
    if (!this.useGitHubCache) {
      this.ensureLocalCacheDir();
    }
  }

  /** Create the local cache directory if it does not already exist. */
  ensureLocalCacheDir() {
    if (!fs.existsSync(this.localCacheDir)) {
      fs.mkdirSync(this.localCacheDir, { recursive: true });
    }
  }

  /**
   * Build a deterministic cache key from a file path and content hash.
   * The path is hashed (first 8 hex chars) to keep keys short and filesystem-safe.
   * @param {string} filePath - Path of the validated file.
   * @param {string} fileHash - Content hash of the file.
   * @returns {string} Key of the form `link-validation-v1-<pathHash>-<fileHash>`.
   */
  generateCacheKey(filePath, fileHash) {
    const pathHash = crypto
      .createHash('sha256')
      .update(filePath)
      .digest('hex')
      .substring(0, 8);
    return `${CACHE_KEY_PREFIX}-${CACHE_VERSION}-${pathHash}-${fileHash}`;
  }

  /**
   * Retrieve cached validation results for a file, or null on a miss.
   * @param {string} filePath
   * @param {string} fileHash
   * @returns {Promise<Object|null>}
   */
  async get(filePath, fileHash) {
    if (this.useGitHubCache) {
      return await this.getFromGitHubCache(filePath, fileHash);
    }
    return await this.getFromLocalCache(filePath, fileHash);
  }

  /**
   * Store validation results for a file.
   * @param {string} filePath
   * @param {string} fileHash
   * @param {Object} results - Validation results to cache.
   * @returns {Promise<boolean>} True when the results were stored.
   */
  async set(filePath, fileHash, results) {
    if (this.useGitHubCache) {
      return await this.setToGitHubCache(filePath, fileHash, results);
    }
    return await this.setToLocalCache(filePath, fileHash, results);
  }

  async getFromGitHubCache(filePath, fileHash) {
    // TODO: This method is a placeholder for GitHub Actions cache integration
    // GitHub Actions cache is handled directly in the workflow via actions/cache
    // This method should either be implemented or removed in future versions
    console.warn(
      '[PLACEHOLDER] getFromGitHubCache: Using placeholder implementation - always returns null'
    );
    return null;
  }

  async setToGitHubCache(filePath, fileHash, results) {
    // TODO: This method is a placeholder for GitHub Actions cache integration
    // GitHub Actions cache is handled directly in the workflow via actions/cache
    // This method should either be implemented or removed in future versions
    console.warn(
      '[PLACEHOLDER] setToGitHubCache: Using placeholder implementation - always returns true'
    );
    return true;
  }

  /**
   * Read results from the local cache, enforcing the configured TTL.
   * Expired or corrupted entries are deleted and treated as misses.
   * @returns {Promise<Object|null>} Cached results, or null on miss.
   */
  async getFromLocalCache(filePath, fileHash) {
    const cacheKey = this.generateCacheKey(filePath, fileHash);
    const cacheFile = path.join(this.localCacheDir, `${cacheKey}.json`);
    if (!fs.existsSync(cacheFile)) {
      return null;
    }
    try {
      const content = fs.readFileSync(cacheFile, 'utf8');
      const cached = JSON.parse(content);
      // TTL check using configured cache duration
      const age = Date.now() - new Date(cached.cachedAt).getTime();
      if (age > this.maxAge) {
        fs.unlinkSync(cacheFile);
        return null;
      }
      return cached.results;
    } catch {
      // Clean up corrupted cache entries so they do not poison future runs.
      try {
        fs.unlinkSync(cacheFile);
      } catch {
        // Ignore cleanup errors
      }
      return null;
    }
  }

  /**
   * Write results to the local cache with a timestamp used for TTL checks.
   * @returns {Promise<boolean>} False (with a warning) when the write fails.
   */
  async setToLocalCache(filePath, fileHash, results) {
    const cacheKey = this.generateCacheKey(filePath, fileHash);
    const cacheFile = path.join(this.localCacheDir, `${cacheKey}.json`);
    const cacheData = {
      filePath,
      fileHash,
      results,
      cachedAt: new Date().toISOString(),
    };
    try {
      fs.writeFileSync(cacheFile, JSON.stringify(cacheData, null, 2));
      return true;
    } catch (error) {
      console.warn(`Cache save failed: ${error.message}`);
      return false;
    }
  }

  /**
   * Remove expired (and unreadable) entries from the local cache directory.
   * @returns {Promise<{removed: number, note?: string}>} Count of removed files.
   */
  async cleanup() {
    if (this.useGitHubCache) {
      return { removed: 0, note: 'GitHub Actions cache auto-managed' };
    }
    let removed = 0;
    if (!fs.existsSync(this.localCacheDir)) {
      return { removed };
    }
    const files = fs.readdirSync(this.localCacheDir);
    for (const file of files) {
      if (!file.endsWith('.json')) continue;
      const filePath = path.join(this.localCacheDir, file);
      try {
        const stat = fs.statSync(filePath);
        if (Date.now() - stat.mtime.getTime() > this.maxAge) {
          fs.unlinkSync(filePath);
          removed++;
        }
      } catch {
        // Remove files we cannot stat (corrupted or concurrently deleted).
        try {
          fs.unlinkSync(filePath);
          removed++;
        } catch {
          // Ignore errors
        }
      }
    }
    return { removed };
  }
}

export default CacheManager;
export { CacheManager };

329
.github/scripts/comment-generator.js vendored Normal file
View File

@ -0,0 +1,329 @@
/**
* Comment Generator for Link Validation Results
* Standardizes PR comment generation across workflows
* Includes cache performance metrics and optimization info
*/
import fs from 'fs';
import path from 'path';
import process from 'process';
import { fileURLToPath } from 'url';
/**
 * Normalize broken link data from different report formats.
 *
 * Accepts an array whose entries are either wrappers of the form
 * `{ sourceFile, links: [...] }` or flat link objects, and flattens both
 * shapes into a single list of link records.
 *
 * @param {Object|Array} reportData - Raw report data
 * @returns {Array} - Normalized array of broken links
 */
function normalizeBrokenLinks(reportData) {
  if (!Array.isArray(reportData)) {
    return [];
  }
  const normalized = [];
  for (const entry of reportData) {
    const sourceFile = entry.sourceFile || entry.page || 'Unknown';
    // Wrapper entries carry their links in a `links` array; otherwise the
    // entry itself is the link record.
    const rawLinks = Array.isArray(entry.links) ? entry.links : [entry];
    for (const raw of rawLinks) {
      normalized.push({
        sourceFile,
        url: raw.url || raw.href,
        linkText: raw.linkText || raw.url || raw.href,
        status: raw.status,
        error: raw.error,
        type: raw.type,
      });
    }
  }
  return normalized;
}
/**
 * Group broken links by the source file they were found in.
 * Links without a `sourceFile` are grouped under "Unknown".
 *
 * @param {Array} brokenLinks - Array of normalized broken links
 * @returns {Object} - Object with source files as keys
 */
function groupLinksBySource(brokenLinks) {
  return brokenLinks.reduce((grouped, link) => {
    const key = link.sourceFile || 'Unknown';
    if (!grouped[key]) {
      grouped[key] = [];
    }
    grouped[key].push(link);
    return grouped;
  }, {});
}
/**
* Generate markdown comment for PR
* @param {Array} allBrokenLinks - Array of all broken links
* @param {Object} options - Generation options
* @returns {string} - Markdown comment content
*/
/**
 * Load cache statistics from reports directory
 * @param {string} reportsDir - Directory containing reports
 * @returns {Object|null} Cache statistics or null if not found
 */
function loadCacheStats(reportsDir) {
  try {
    const statsFile = path.join(reportsDir, 'cache_statistics.json');
    if (fs.existsSync(statsFile)) {
      return JSON.parse(fs.readFileSync(statsFile, 'utf8'));
    }
  } catch (error) {
    // Missing or malformed stats are non-fatal; callers treat null as "no stats".
    console.warn(`Warning: Could not load cache stats: ${error.message}`);
  }
  return null;
}
/**
 * Generate the markdown PR comment summarizing link-validation results.
 * @param {Array} allBrokenLinks - Normalized broken links (may be empty/null)
 * @param {Object} options - Generation options
 * @param {boolean} [options.includeSuccessMessage=true] - Emit success body when nothing is broken
 * @param {boolean} [options.includeStats=true] - Emit the broken-link count line
 * @param {boolean} [options.includeActionRequired=true] - Emit the trailing footer
 * @param {number} [options.maxLinksPerFile=20] - Links shown per file before collapsing
 * @param {Object} [options.cacheStats=null] - Pre-loaded cache statistics
 * @param {string} [options.reportsDir=null] - Directory to read cache_statistics.json from
 * @returns {string} Markdown comment content
 */
function generateComment(allBrokenLinks, options = {}) {
  const {
    includeSuccessMessage = true,
    includeStats = true,
    includeActionRequired = true,
    maxLinksPerFile = 20,
    cacheStats = null,
    reportsDir = null,
  } = options;
  // Prefer explicitly supplied stats; otherwise try loading them from disk.
  const stats = cacheStats || (reportsDir ? loadCacheStats(reportsDir) : null);
  const parts = [];
  // Cache performance header goes first when stats are available.
  if (stats) {
    parts.push('## 📊 Link Validation Performance\n\n');
    parts.push(`- **Cache Hit Rate:** ${stats.hitRate}%\n`);
    parts.push(`- **Files Cached:** ${stats.cacheHits} (skipped validation)\n`);
    parts.push(`- **Files Validated:** ${stats.cacheMisses}\n`);
    if (stats.hitRate >= 50) {
      parts.push(
        '- **Performance:** 🚀 Cache optimization saved significant validation time!\n'
      );
    } else if (stats.hitRate > 0) {
      parts.push(
        '- **Performance:** ⚡ Some files were cached, improving validation speed\n'
      );
    }
    parts.push('\n');
  }
  // Success path: no broken links at all.
  if (!allBrokenLinks || allBrokenLinks.length === 0) {
    parts.push('## ✅ Link Validation Passed\n\n');
    parts.push('All links in the changed files are valid!');
    if (stats && stats.hitRate === 100) {
      parts.push('\n\n✨ **All files were cached** - no validation was needed!');
    }
    return includeSuccessMessage ? parts.join('') : '';
  }
  parts.push('## 🔗 Broken Links Found\n\n');
  if (includeStats) {
    parts.push(
      `Found ${allBrokenLinks.length} broken link(s) in the changed files:\n\n`
    );
  }
  // One section per source file; overflow beyond maxLinksPerFile collapses
  // into a <details> element.
  const bySource = groupLinksBySource(allBrokenLinks);
  for (const [source, links] of Object.entries(bySource)) {
    parts.push(`### ${source}\n\n`);
    const visible = links.slice(0, maxLinksPerFile);
    const hiddenCount = links.length - visible.length;
    for (const link of visible) {
      const url = link.url || 'Unknown URL';
      const linkText = link.linkText || url;
      const status = link.status || 'Unknown';
      parts.push(`- [ ] **${linkText}** → \`${url}\`\n`);
      parts.push(`  - Status: ${status}\n`);
      if (link.type) {
        parts.push(`  - Type: ${link.type}\n`);
      }
      if (link.error) {
        parts.push(`  - Error: ${link.error}\n`);
      }
      parts.push('\n');
    }
    if (hiddenCount > 0) {
      parts.push(
        `<details>\n<summary>... and ${hiddenCount} more broken link(s)</summary>\n\n`
      );
      for (const link of links.slice(maxLinksPerFile)) {
        const url = link.url || 'Unknown URL';
        const linkText = link.linkText || url;
        const status = link.status || 'Unknown';
        parts.push(`- [ ] **${linkText}** → \`${url}\` (Status: ${status})\n`);
      }
      parts.push('\n</details>\n\n');
    }
  }
  if (includeActionRequired) {
    parts.push('\n---\n');
    parts.push(
      '**Action Required:** Please fix the broken links before merging this PR.'
    );
  }
  return parts.join('');
}
/**
 * Load and merge broken link reports from artifacts
 * @param {string} reportsDir - Directory containing report artifacts
 * @returns {Array} Flat array of normalized broken links from every report
 */
function loadBrokenLinkReports(reportsDir) {
  const merged = [];
  if (!fs.existsSync(reportsDir)) {
    return merged;
  }
  try {
    // Each artifact directory is named broken-links-<id>.
    for (const entry of fs.readdirSync(reportsDir)) {
      if (!entry.startsWith('broken-links-')) {
        continue;
      }
      const reportPath = path.join(
        reportsDir,
        entry,
        'broken_links_report.json'
      );
      if (!fs.existsSync(reportPath)) {
        continue;
      }
      try {
        const parsed = JSON.parse(fs.readFileSync(reportPath, 'utf8'));
        merged.push(...normalizeBrokenLinks(parsed));
      } catch (e) {
        // A single unreadable report should not abort the merge.
        console.error(`Error reading ${reportPath}: ${e.message}`);
      }
    }
  } catch (e) {
    console.error(
      `Error reading reports directory ${reportsDir}: ${e.message}`
    );
  }
  return merged;
}
/**
 * CLI interface for the comment generator.
 *
 * Reads broken-link report artifacts from a directory, renders the PR
 * comment, and writes it to stdout or a file. Exits non-zero when any
 * broken links were found so CI can fail the check.
 */
function main() {
  const args = process.argv.slice(2);
  if (args.includes('--help') || args.includes('-h')) {
    console.log(`
Usage: node comment-generator.js [options] <reports-dir>
Options:
  --no-success          Don't include success message when no broken links
  --no-stats            Don't include broken link statistics
  --no-action-required  Don't include action required message
  --max-links <n>       Maximum links to show per file (default: 20)
  --output-file <file>  Write comment to file instead of stdout
  --help, -h            Show this help message
Examples:
  node comment-generator.js reports/
  node comment-generator.js --max-links 10 --output-file comment.md reports/
`);
    process.exit(0);
  }
  // Parse arguments
  let reportsDir = '';
  const options = {
    includeSuccessMessage: true,
    includeStats: true,
    includeActionRequired: true,
    maxLinksPerFile: 20,
  };
  let outputFile = null;
  for (let i = 0; i < args.length; i++) {
    const arg = args[i];
    if (arg === '--no-success') {
      options.includeSuccessMessage = false;
    } else if (arg === '--no-stats') {
      options.includeStats = false;
    } else if (arg === '--no-action-required') {
      options.includeActionRequired = false;
    } else if (arg === '--max-links' && i + 1 < args.length) {
      // Use an explicit radix and reject non-numeric/negative values:
      // parseInt without validation yields NaN, and slice(0, NaN) would
      // silently hide every link in the report.
      const maxLinks = Number.parseInt(args[++i], 10);
      if (Number.isNaN(maxLinks) || maxLinks < 0) {
        console.error(`Error: invalid --max-links value: ${args[i]}`);
        process.exit(1);
      }
      options.maxLinksPerFile = maxLinks;
    } else if (arg === '--output-file' && i + 1 < args.length) {
      outputFile = args[++i];
    } else if (!arg.startsWith('--')) {
      reportsDir = arg;
    }
  }
  if (!reportsDir) {
    console.error('Error: reports directory is required');
    process.exit(1);
  }
  // Load reports and generate comment with cache stats
  const brokenLinks = loadBrokenLinkReports(reportsDir);
  options.reportsDir = reportsDir;
  const comment = generateComment(brokenLinks, options);
  if (outputFile) {
    fs.writeFileSync(outputFile, comment);
    console.log(`Comment written to ${outputFile}`);
  } else {
    console.log(comment);
  }
  // Exit with error code if there are broken links
  if (brokenLinks.length > 0) {
    process.exit(1);
  }
}
// Run CLI if this file is executed directly
// NOTE(review): comparing the resolved module path to process.argv[1] assumes
// the script is invoked via its real path (no symlink or omitted extension) —
// confirm this holds in the CI workflows that call it.
if (fileURLToPath(import.meta.url) === process.argv[1]) {
  main();
}
// Public API for programmatic use (e.g. tests and other scripts).
export {
  generateComment,
  loadBrokenLinkReports,
  normalizeBrokenLinks,
  groupLinksBySource,
};

View File

@ -0,0 +1,230 @@
#!/usr/bin/env node
/**
* Incremental Link Validator
* Combines link extraction and caching to validate only changed links
*/
const { extractLinksFromFile } = require('./link-extractor.cjs');
const CacheManager = require('./cache-manager.cjs');
const process = require('process');
/**
 * Incremental validator that only validates changed content
 */
class IncrementalValidator {
  /**
   * @param {Object} options - Passed through to CacheManager; also controls
   *   which link categories are validated.
   */
  constructor(options = {}) {
    this.cacheManager = new CacheManager(options);
    // Both flags default to true; only an explicit `false` disables them.
    this.validateExternal = options.validateExternal !== false;
    this.validateInternal = options.validateInternal !== false;
  }
  /**
   * Categorize files into cached (unchanged) versus needing validation.
   * @param {Array} filePaths - Array of file paths
   * @returns {Object} Strategy with `unchanged`, `changed`, `newLinks`, `total`
   */
  async getValidationStrategy(filePaths) {
    const strategy = {
      unchanged: [], // Files whose (path, hash) hit the cache — skip them
      changed: [], // Files that are new or modified — validate them
      newLinks: [], // De-duplicated URLs needing validation
      total: filePaths.length,
    };
    const pendingUrls = new Set();
    for (const filePath of filePaths) {
      try {
        const extraction = extractLinksFromFile(filePath);
        if (!extraction) {
          console.warn(`Could not extract links from ${filePath}`);
          continue;
        }
        const { fileHash, links } = extraction;
        // A cache hit means this exact file content was already validated.
        const cachedResults = await this.cacheManager.get(filePath, fileHash);
        if (cachedResults) {
          strategy.unchanged.push({
            filePath,
            fileHash,
            linkCount: links.length,
            cachedResults,
          });
          continue;
        }
        const toValidate = links.filter((link) => link.needsValidation);
        strategy.changed.push({
          filePath,
          fileHash,
          links: toValidate,
          extractionResult: extraction,
        });
        for (const link of toValidate) {
          pendingUrls.add(link.url);
        }
      } catch (error) {
        console.error(`Error processing ${filePath}: ${error.message}`);
        // Treat as changed so the file is still validated downstream.
        strategy.changed.push({ filePath, error: error.message });
      }
    }
    strategy.newLinks = [...pendingUrls];
    return strategy;
  }
  /**
   * Validate files using the incremental strategy.
   * @param {Array} filePaths - Files to validate
   * @returns {Object} Results with `validationStrategy`, `filesToValidate`,
   *   and `cacheStats`
   */
  async validateFiles(filePaths) {
    console.log(
      `📊 Analyzing ${filePaths.length} files for incremental validation...`
    );
    const strategy = await this.getValidationStrategy(filePaths);
    console.log(`${strategy.unchanged.length} files unchanged (cached)`);
    console.log(`🔄 ${strategy.changed.length} files need validation`);
    console.log(`🔗 ${strategy.newLinks.length} unique links to validate`);
    const hitRate =
      strategy.total > 0
        ? Math.round((strategy.unchanged.length / strategy.total) * 100)
        : 0;
    return {
      validationStrategy: strategy,
      filesToValidate: strategy.changed.map((item) => ({
        filePath: item.filePath,
        linkCount: item.links ? item.links.length : 0,
        fileHash: item.fileHash || 'unknown',
      })),
      cacheStats: {
        totalFiles: strategy.total,
        cacheHits: strategy.unchanged.length,
        cacheMisses: strategy.changed.length,
        hitRate,
      },
    };
  }
  /**
   * Store validation results in cache.
   * @param {string} filePath - File path
   * @param {string} fileHash - File hash
   * @param {Object} validationResults - Results to cache
   * @returns {Promise<boolean>} Success status
   */
  async cacheResults(filePath, fileHash, validationResults) {
    return await this.cacheManager.set(filePath, fileHash, validationResults);
  }
  /**
   * Clean up expired cache entries.
   * @returns {Promise<Object>} Cleanup statistics
   */
  async cleanupCache() {
    return await this.cacheManager.cleanup();
  }
}
/**
 * CLI usage: analyze files for incremental validation or clean up the cache.
 * Prints the analysis summary and the list of files Cypress should validate.
 */
async function main() {
  const args = process.argv.slice(2);
  if (args.length === 0 || args[0] === '--help') {
    console.log(`
Incremental Link Validator
Usage:
  node incremental-validator.cjs [files...]   Analyze files for validation
  node incremental-validator.cjs --cleanup    Clean up expired cache
  node incremental-validator.cjs --help       Show this help
Options:
  --no-external      Don't validate external links
  --no-internal      Don't validate internal links
  --local            Use local cache instead of GitHub Actions cache
  --cache-ttl=DAYS   Set cache TTL in days (default: 30)
Examples:
  node incremental-validator.cjs content/**/*.md
  node incremental-validator.cjs --cache-ttl=7 content/**/*.md
  node incremental-validator.cjs --cleanup
`);
    process.exit(0);
  }
  if (args[0] === '--cleanup') {
    const validator = new IncrementalValidator();
    const stats = await validator.cleanupCache();
    console.log(`🧹 Cleaned up ${stats.removed} expired cache entries`);
    if (stats.note) console.log(`   ${stats.note}`);
    return;
  }
  const options = {
    validateExternal: !args.includes('--no-external'),
    validateInternal: !args.includes('--no-internal'),
    useGitHubCache: !args.includes('--local'),
  };
  // Extract cache TTL option if provided
  const cacheTTLArg = args.find((arg) => arg.startsWith('--cache-ttl='));
  if (cacheTTLArg) {
    // Parse with an explicit radix and reject non-positive or non-numeric
    // values instead of silently storing NaN as the TTL.
    const ttlDays = Number.parseInt(cacheTTLArg.split('=')[1], 10);
    if (Number.isNaN(ttlDays) || ttlDays <= 0) {
      console.error(`Invalid --cache-ttl value: ${cacheTTLArg}`);
      process.exit(1);
    }
    options.cacheTTLDays = ttlDays;
  }
  const filePaths = args.filter((arg) => !arg.startsWith('--'));
  if (filePaths.length === 0) {
    console.error('No files specified for validation');
    process.exit(1);
  }
  const validator = new IncrementalValidator(options);
  const results = await validator.validateFiles(filePaths);
  console.log('\n📈 Validation Analysis Results:');
  console.log('================================');
  console.log(`📊 Cache hit rate: ${results.cacheStats.hitRate}%`);
  console.log(`📋 Files to validate: ${results.filesToValidate.length}`);
  if (results.filesToValidate.length > 0) {
    console.log('\n📝 Files needing validation:');
    results.filesToValidate.forEach((file) => {
      console.log(`   ${file.filePath} (${file.linkCount} links)`);
    });
    // Output files for Cypress to process
    console.log('\n🎯 Files for Cypress validation (one per line):');
    results.filesToValidate.forEach((file) => {
      console.log(file.filePath);
    });
  } else {
    console.log('\n✨ All files are cached - no validation needed!');
  }
}
// Primary export is the class itself; the named property mirrors the ESM
// build so consumers can destructure { IncrementalValidator } either way.
module.exports = IncrementalValidator;
module.exports.IncrementalValidator = IncrementalValidator;
// Run CLI if called directly
if (require.main === module) {
  main().catch(console.error);
}

229
.github/scripts/incremental-validator.js vendored Normal file
View File

@ -0,0 +1,229 @@
#!/usr/bin/env node
/**
* Incremental Link Validator
* Combines link extraction and caching to validate only changed links
*/
import { extractLinksFromFile } from './link-extractor.js';
import { CacheManager } from './cache-manager.js';
import process from 'process';
import { fileURLToPath } from 'url';
/**
 * Incremental validator that only validates changed content
 */
class IncrementalValidator {
  /**
   * @param {Object} options - Passed through to CacheManager; `validateExternal`
   *   and `validateInternal` default to true (only an explicit false disables).
   */
  constructor(options = {}) {
    this.cacheManager = new CacheManager(options);
    this.validateExternal = options.validateExternal !== false;
    this.validateInternal = options.validateInternal !== false;
  }
  /**
   * Get validation strategy for a list of files
   * @param {Array} filePaths - Array of file paths
   * @returns {Object} Validation strategy with files categorized
   */
  async getValidationStrategy(filePaths) {
    const strategy = {
      unchanged: [], // Files that haven't changed (skip validation)
      changed: [], // Files that changed (need full validation)
      newLinks: [], // New links across all files (need validation)
      total: filePaths.length,
    };
    const allNewLinks = new Set();
    for (const filePath of filePaths) {
      try {
        const extractionResult = extractLinksFromFile(filePath);
        if (!extractionResult) {
          console.warn(`Could not extract links from ${filePath}`);
          continue;
        }
        const { fileHash, links } = extractionResult;
        // Cache is keyed by (path, content hash): a hit means this exact
        // file content was already validated.
        const cachedResults = await this.cacheManager.get(filePath, fileHash);
        if (cachedResults) {
          // File unchanged, skip validation
          strategy.unchanged.push({
            filePath,
            fileHash,
            linkCount: links.length,
            cachedResults,
          });
        } else {
          // File changed or new, needs validation
          const needingValidation = links.filter(
            (link) => link.needsValidation
          );
          strategy.changed.push({
            filePath,
            fileHash,
            links: needingValidation,
            extractionResult,
          });
          // Collect all new links (de-duplicated) for batch validation
          needingValidation.forEach((link) => allNewLinks.add(link.url));
        }
      } catch (error) {
        console.error(`Error processing ${filePath}: ${error.message}`);
        // Treat as changed file to ensure validation
        strategy.changed.push({
          filePath,
          error: error.message,
        });
      }
    }
    strategy.newLinks = Array.from(allNewLinks);
    return strategy;
  }
  /**
   * Validate files using incremental strategy
   * @param {Array} filePaths - Files to validate
   * @returns {Object} Validation results. `cacheStats.totalFiles` and the
   *   per-file `fileHash` are included for parity with the CommonJS
   *   implementation (incremental-validator.cjs), which already reports them.
   */
  async validateFiles(filePaths) {
    console.log(
      `📊 Analyzing ${filePaths.length} files for incremental validation...`
    );
    const strategy = await this.getValidationStrategy(filePaths);
    console.log(`${strategy.unchanged.length} files unchanged (cached)`);
    console.log(`🔄 ${strategy.changed.length} files need validation`);
    console.log(`🔗 ${strategy.newLinks.length} unique links to validate`);
    const results = {
      validationStrategy: strategy,
      filesToValidate: strategy.changed.map((item) => ({
        filePath: item.filePath,
        linkCount: item.links ? item.links.length : 0,
        fileHash: item.fileHash || 'unknown',
      })),
      cacheStats: {
        totalFiles: strategy.total,
        cacheHits: strategy.unchanged.length,
        cacheMisses: strategy.changed.length,
        hitRate:
          strategy.total > 0
            ? Math.round((strategy.unchanged.length / strategy.total) * 100)
            : 0,
      },
    };
    return results;
  }
  /**
   * Store validation results in cache
   * @param {string} filePath - File path
   * @param {string} fileHash - File hash
   * @param {Object} validationResults - Results to cache
   * @returns {Promise<boolean>} Success status
   */
  async cacheResults(filePath, fileHash, validationResults) {
    return await this.cacheManager.set(filePath, fileHash, validationResults);
  }
  /**
   * Clean up expired cache entries
   * @returns {Promise<Object>} Cleanup statistics
   */
  async cleanupCache() {
    return await this.cacheManager.cleanup();
  }
}
/**
 * CLI usage: analyze files for incremental validation or clean up the cache.
 * Prints the analysis summary and the list of files Cypress should validate.
 */
async function main() {
  const args = process.argv.slice(2);
  if (args.length === 0 || args[0] === '--help') {
    console.log(`
Incremental Link Validator
Usage:
  node incremental-validator.js [files...]   Analyze files for validation
  node incremental-validator.js --cleanup    Clean up expired cache
  node incremental-validator.js --help       Show this help
Options:
  --no-external      Don't validate external links
  --no-internal      Don't validate internal links
  --local            Use local cache instead of GitHub Actions cache
  --cache-ttl=DAYS   Set cache TTL in days (default: 30)
Examples:
  node incremental-validator.js content/**/*.md
  node incremental-validator.js --cache-ttl=7 content/**/*.md
  node incremental-validator.js --cleanup
`);
    process.exit(0);
  }
  if (args[0] === '--cleanup') {
    const validator = new IncrementalValidator();
    const stats = await validator.cleanupCache();
    console.log(`🧹 Cleaned up ${stats.removed} expired cache entries`);
    if (stats.note) console.log(`   ${stats.note}`);
    return;
  }
  const options = {
    validateExternal: !args.includes('--no-external'),
    validateInternal: !args.includes('--no-internal'),
    useGitHubCache: !args.includes('--local'),
  };
  // Extract cache TTL option if provided
  const cacheTTLArg = args.find((arg) => arg.startsWith('--cache-ttl='));
  if (cacheTTLArg) {
    // Parse with an explicit radix and reject non-positive or non-numeric
    // values instead of silently storing NaN as the TTL.
    const ttlDays = Number.parseInt(cacheTTLArg.split('=')[1], 10);
    if (Number.isNaN(ttlDays) || ttlDays <= 0) {
      console.error(`Invalid --cache-ttl value: ${cacheTTLArg}`);
      process.exit(1);
    }
    options.cacheTTLDays = ttlDays;
  }
  const filePaths = args.filter((arg) => !arg.startsWith('--'));
  if (filePaths.length === 0) {
    console.error('No files specified for validation');
    process.exit(1);
  }
  const validator = new IncrementalValidator(options);
  const results = await validator.validateFiles(filePaths);
  console.log('\n📈 Validation Analysis Results:');
  console.log('================================');
  console.log(`📊 Cache hit rate: ${results.cacheStats.hitRate}%`);
  console.log(`📋 Files to validate: ${results.filesToValidate.length}`);
  if (results.filesToValidate.length > 0) {
    console.log('\n📝 Files needing validation:');
    results.filesToValidate.forEach((file) => {
      console.log(`   ${file.filePath} (${file.linkCount} links)`);
    });
    // Output files for Cypress to process
    console.log('\n🎯 Files for Cypress validation (one per line):');
    results.filesToValidate.forEach((file) => {
      console.log(file.filePath);
    });
  } else {
    console.log('\n✨ All files are cached - no validation needed!');
  }
}
// Default and named exports mirror the CommonJS build's export shape.
export default IncrementalValidator;
export { IncrementalValidator };
// Run CLI if called directly
// NOTE(review): comparing the resolved module path to process.argv[1] assumes
// invocation via the real file path (no symlink/extension mismatch) — confirm.
if (fileURLToPath(import.meta.url) === process.argv[1]) {
  main().catch(console.error);
}

477
.github/scripts/link-extractor.cjs vendored Normal file
View File

@ -0,0 +1,477 @@
#!/usr/bin/env node
/**
* Link Extractor for Documentation Files
* Extracts all links from markdown and HTML files with metadata for caching and incremental validation
*/
const fs = require('fs');
const crypto = require('crypto');
const matter = require('gray-matter');
const path = require('path');
const process = require('process');
/**
 * Extract links from markdown content
 * @param {string} content - File content
 * @param {string} filePath - Path to the file
 * @returns {Array} Array of link objects with line/column metadata
 */
function extractMarkdownLinks(content, filePath) {
  const links = [];
  // Reference-style definitions ([ref]: url), keyed case-insensitively.
  const referenceLinks = new Map();
  for (const def of content.matchAll(/^\s*\[([^\]]+)\]:\s*(.+)$/gm)) {
    referenceLinks.set(def[1].toLowerCase(), def[2].trim());
  }
  // Process each line: inline links, reference links, autolinks, bare URLs.
  content.split('\n').forEach((line, lineIndex) => {
    const lineNumber = lineIndex + 1;
    const context = line.trim();
    // Standard markdown links: [text](url)
    for (const m of line.matchAll(/\[([^\]]*)\]\(([^)]+)\)/g)) {
      const url = m[2].trim();
      links.push({
        url,
        text: m[1],
        type: 'markdown',
        line: lineNumber,
        column: m.index,
        context,
        hash: generateLinkHash(url, filePath, lineNumber),
      });
    }
    // Reference-style links: [text][ref] (empty ref falls back to text)
    for (const m of line.matchAll(/\[([^\]]*)\]\[([^\]]*)\]/g)) {
      const refKey = (m[2] || m[1]).toLowerCase();
      const url = referenceLinks.get(refKey);
      if (!url) {
        continue;
      }
      links.push({
        url,
        text: m[1],
        type: 'markdown-reference',
        line: lineNumber,
        column: m.index,
        context,
        reference: refKey,
        hash: generateLinkHash(url, filePath, lineNumber),
      });
    }
    // Autolinks: <https://example.com>
    for (const m of line.matchAll(/<(https?:\/\/[^>]+)>/g)) {
      links.push({
        url: m[1],
        text: m[1],
        type: 'autolink',
        line: lineNumber,
        column: m.index,
        context,
        hash: generateLinkHash(m[1], filePath, lineNumber),
      });
    }
    // Bare URLs: must follow start-of-line or whitespace, and stop at
    // whitespace or a closing ), ], } (avoids most false positives).
    for (const m of line.matchAll(
      /(?<start>^|[\s\n])(?<url>https?:\/\/[^\s\)\]\}]+)/g
    )) {
      const url = m.groups.url;
      const column = m.index + m[0].length - url.length;
      // Skip URLs already recorded by one of the passes above for this line
      // (same URL at nearly the same column).
      const alreadyCaptured = links.some(
        (known) =>
          known.line === lineNumber &&
          Math.abs(known.column - column) < 10 &&
          known.url === url
      );
      if (alreadyCaptured) {
        continue;
      }
      links.push({
        url,
        text: url,
        type: 'bare-url',
        line: lineNumber,
        column,
        context,
        hash: generateLinkHash(url, filePath, lineNumber),
      });
    }
  });
  return links;
}
/**
 * Extract links from HTML content
 * @param {string} content - File content
 * @param {string} filePath - Path to the file
 * @returns {Array} Array of link objects with metadata
 */
function extractHtmlLinks(content, filePath) {
  const links = [];
  const lines = content.split('\n');
  lines.forEach((line, lineIndex) => {
    const lineNumber = lineIndex + 1;
    let match;
    const htmlLinkRegex = /<a\s+[^>]*href\s*=\s*["']([^"']+)["'][^>]*>/gi;
    while ((match = htmlLinkRegex.exec(line)) !== null) {
      const url = match[1];
      const columnStart = match.index;
      // The regex matches only the opening <a> tag, so the link text must be
      // read from the remainder of the line (previously the text regex ran on
      // the opening tag itself and could never match, leaving text === url).
      // Fall back to the URL when the anchor body is empty or spans lines.
      const rest = line.slice(columnStart + match[0].length);
      const textMatch = rest.match(/^([^<]*)</) || rest.match(/^([^<]+)/);
      const linkText =
        textMatch && textMatch[1].trim() ? textMatch[1].trim() : url;
      links.push({
        url: url,
        text: linkText,
        type: 'html',
        line: lineNumber,
        column: columnStart,
        context: line.trim(),
        hash: generateLinkHash(url, filePath, lineNumber),
      });
    }
  });
  return links;
}
/**
 * Generate a unique hash for a link
 * @param {string} url - The URL
 * @param {string} filePath - File path
 * @param {number} line - Line number
 * @returns {string} First 16 hex chars of sha256("path:line:url")
 */
function generateLinkHash(url, filePath, line) {
  const digest = crypto
    .createHash('sha256')
    .update(`${filePath}:${line}:${url.trim()}`)
    .digest('hex');
  // 64 bits of hash is plenty to distinguish links within a repo.
  return digest.slice(0, 16);
}
/**
 * Generate a hash for file content
 * @param {string} content - File content
 * @returns {string} First 16 hex chars of the content's sha256 digest
 */
function generateFileHash(content) {
  const digest = crypto.createHash('sha256').update(content).digest('hex');
  return digest.slice(0, 16);
}
/**
 * Categorize link types for validation
 * @param {string} url - The URL to categorize
 * @returns {Object} Link category information: `category`, `needsValidation`,
 *   plus `protocol` for external links
 */
function categorizeLinkType(url) {
  const trimmedUrl = url.trim();
  // External links
  if (trimmedUrl.startsWith('http://') || trimmedUrl.startsWith('https://')) {
    return {
      category: 'external',
      protocol: trimmedUrl.startsWith('https://') ? 'https' : 'http',
      needsValidation: true,
    };
  }
  // Internal absolute links
  if (trimmedUrl.startsWith('/')) {
    return {
      category: 'internal-absolute',
      needsValidation: true,
    };
  }
  // Fragment/anchor links
  if (trimmedUrl.startsWith('#')) {
    return {
      category: 'fragment',
      needsValidation: true, // May need validation for internal page anchors
    };
  }
  // Special protocols (mailto:, tel:, ftp://, etc.). This must run before
  // the relative-link check: schemes such as mailto: and tel: contain no
  // "://" and were previously miscategorized as internal-relative links
  // that "needed validation".
  if (/^[a-z][a-z0-9+.-]*:/i.test(trimmedUrl)) {
    return {
      category: 'special-protocol',
      needsValidation: false,
    };
  }
  // Relative links
  if (
    trimmedUrl.startsWith('./') ||
    trimmedUrl.startsWith('../') ||
    !trimmedUrl.includes('://')
  ) {
    return {
      category: 'internal-relative',
      needsValidation: true,
    };
  }
  // Malformed URLs containing "://" but no valid scheme (e.g. "://x").
  if (trimmedUrl.includes('://') && !trimmedUrl.startsWith('http')) {
    return {
      category: 'special-protocol',
      needsValidation: false,
    };
  }
  return {
    category: 'unknown',
    needsValidation: true,
  };
}
/**
 * Extract all links from a file
 * @param {string} filePath - Path to the file (.md or .html)
 * @returns {Object|null} File analysis with links, stats, and content hash,
 *   or null when the file is missing, unsupported, or extraction fails
 */
function extractLinksFromFile(filePath) {
  try {
    if (!fs.existsSync(filePath)) {
      throw new Error(`File not found: ${filePath}`);
    }
    const content = fs.readFileSync(filePath, 'utf8');
    const extension = path.extname(filePath).toLowerCase();
    let rawLinks;
    let frontmatter = {};
    if (extension === '.md') {
      // Strip frontmatter first so YAML values are not scanned for links.
      let bodyContent = content;
      try {
        const parsed = matter(content);
        frontmatter = parsed.data || {};
        bodyContent = parsed.content;
      } catch (err) {
        console.warn(
          `Warning: Could not parse frontmatter in ${filePath}: ${err.message}`
        );
      }
      rawLinks = extractMarkdownLinks(bodyContent, filePath);
    } else if (extension === '.html') {
      rawLinks = extractHtmlLinks(content, filePath);
    } else {
      console.warn(`Warning: Unsupported file type for ${filePath}`);
      return null;
    }
    // Attach category information and the owning file to every link.
    const links = rawLinks.map((link) => ({
      ...link,
      ...categorizeLinkType(link.url),
      filePath,
    }));
    const stats = {
      totalLinks: links.length,
      externalLinks: links.filter((l) => l.category === 'external').length,
      internalLinks: links.filter((l) => l.category.startsWith('internal'))
        .length,
      fragmentLinks: links.filter((l) => l.category === 'fragment').length,
      linksNeedingValidation: links.filter((l) => l.needsValidation).length,
    };
    return {
      filePath,
      fileHash: generateFileHash(content),
      extension,
      frontmatter,
      links,
      stats,
      extractedAt: new Date().toISOString(),
    };
  } catch (error) {
    console.error(`Error extracting links from ${filePath}: ${error.message}`);
    return null;
  }
}
/**
 * Main function for CLI usage
 *
 * Parses argv, extracts links from each named file, optionally filters links
 * by category, and prints JSON, summary statistics, or a per-link listing.
 */
function main() {
  const args = process.argv.slice(2);
  if (args.length === 0) {
    console.error('Usage: node link-extractor.cjs <file1> [file2] [...]');
    console.error(' node link-extractor.cjs --help');
    process.exit(1);
  }
  if (args[0] === '--help') {
    console.log(`
Link Extractor for Documentation Files
Usage:
node link-extractor.cjs <file1> [file2] [...] Extract links from files
node link-extractor.cjs --help Show this help
Options:
--json Output results as JSON
--stats-only Show only statistics
--filter TYPE Filter links by category (external, internal-absolute, internal-relative, fragment)
Examples:
node link-extractor.cjs content/influxdb3/core/install.md
node link-extractor.cjs --json content/**/*.md
node link-extractor.cjs --stats-only --filter external content/influxdb3/**/*.md
`);
    process.exit(0);
  }
  // Output-mode flags; --filter takes its value from the following argument.
  const jsonOutput = args.includes('--json');
  const statsOnly = args.includes('--stats-only');
  const filterType = args.includes('--filter')
    ? args[args.indexOf('--filter') + 1]
    : null;
  // Positional args are file paths; also drop the filter value itself.
  const files = args.filter(
    (arg) => !arg.startsWith('--') && arg !== filterType
  );
  const results = [];
  for (const filePath of files) {
    const result = extractLinksFromFile(filePath);
    if (result) {
      // Apply filter if specified
      if (filterType) {
        result.links = result.links.filter(
          (link) => link.category === filterType
        );
        // Recalculate stats after filtering
        result.stats = {
          totalLinks: result.links.length,
          externalLinks: result.links.filter((l) => l.category === 'external')
            .length,
          internalLinks: result.links.filter((l) =>
            l.category.startsWith('internal')
          ).length,
          fragmentLinks: result.links.filter((l) => l.category === 'fragment')
            .length,
          linksNeedingValidation: result.links.filter((l) => l.needsValidation)
            .length,
        };
      }
      results.push(result);
    }
  }
  if (jsonOutput) {
    // Machine-readable output for downstream tooling.
    console.log(JSON.stringify(results, null, 2));
  } else if (statsOnly) {
    // Aggregate per-file statistics into repo-wide totals.
    console.log('\nLink Extraction Statistics:');
    console.log('==========================');
    let totalFiles = 0;
    let totalLinks = 0;
    let totalExternal = 0;
    let totalInternal = 0;
    let totalFragment = 0;
    let totalNeedingValidation = 0;
    results.forEach((result) => {
      totalFiles++;
      totalLinks += result.stats.totalLinks;
      totalExternal += result.stats.externalLinks;
      totalInternal += result.stats.internalLinks;
      totalFragment += result.stats.fragmentLinks;
      totalNeedingValidation += result.stats.linksNeedingValidation;
      console.log(
        `${result.filePath}: ${result.stats.totalLinks} links (${result.stats.linksNeedingValidation} need validation)`
      );
    });
    console.log('\nSummary:');
    console.log(` Total files: ${totalFiles}`);
    console.log(` Total links: ${totalLinks}`);
    console.log(` External links: ${totalExternal}`);
    console.log(` Internal links: ${totalInternal}`);
    console.log(` Fragment links: ${totalFragment}`);
    console.log(` Links needing validation: ${totalNeedingValidation}`);
  } else {
    // Default mode: detailed human-readable listing per file.
    results.forEach((result) => {
      console.log(`\nFile: ${result.filePath}`);
      console.log(`Hash: ${result.fileHash}`);
      console.log(`Links found: ${result.stats.totalLinks}`);
      console.log(
        `Links needing validation: ${result.stats.linksNeedingValidation}`
      );
      if (result.links.length > 0) {
        console.log('\nLinks:');
        result.links.forEach((link, index) => {
          console.log(` ${index + 1}. [${link.category}] ${link.url}`);
          console.log(` Line ${link.line}, Column ${link.column}`);
          console.log(` Text: "${link.text}"`);
          console.log(` Hash: ${link.hash}`);
          if (link.reference) {
            console.log(` Reference: ${link.reference}`);
          }
          console.log('');
        });
      }
    });
  }
}
// Export functions for use as a module
// (link extraction plus the hash/categorization helpers used by the
// incremental validator and tests).
module.exports = {
  extractLinksFromFile,
  extractMarkdownLinks,
  extractHtmlLinks,
  generateFileHash,
  generateLinkHash,
  categorizeLinkType,
};
// Run main function if called directly
if (require.main === module) {
  main();
}

478
.github/scripts/link-extractor.js vendored Normal file
View File

@ -0,0 +1,478 @@
#!/usr/bin/env node
/**
* Link Extractor for Documentation Files
* Extracts all links from markdown and HTML files with metadata for caching and incremental validation
*/
import fs from 'fs';
import crypto from 'crypto';
import matter from 'gray-matter';
import path from 'path';
import process from 'process';
import { fileURLToPath } from 'url';
/**
 * Extract links from markdown content
 * @param {string} content - File content
 * @param {string} filePath - Path to the file
 * @returns {Array} Array of link objects with line/column metadata
 */
function extractMarkdownLinks(content, filePath) {
  const links = [];
  // Reference-style definitions ([ref]: url), keyed case-insensitively.
  const referenceLinks = new Map();
  for (const def of content.matchAll(/^\s*\[([^\]]+)\]:\s*(.+)$/gm)) {
    referenceLinks.set(def[1].toLowerCase(), def[2].trim());
  }
  // Process each line: inline links, reference links, autolinks, bare URLs.
  content.split('\n').forEach((line, lineIndex) => {
    const lineNumber = lineIndex + 1;
    const context = line.trim();
    // Standard markdown links: [text](url)
    for (const m of line.matchAll(/\[([^\]]*)\]\(([^)]+)\)/g)) {
      const url = m[2].trim();
      links.push({
        url,
        text: m[1],
        type: 'markdown',
        line: lineNumber,
        column: m.index,
        context,
        hash: generateLinkHash(url, filePath, lineNumber),
      });
    }
    // Reference-style links: [text][ref] (empty ref falls back to text)
    for (const m of line.matchAll(/\[([^\]]*)\]\[([^\]]*)\]/g)) {
      const refKey = (m[2] || m[1]).toLowerCase();
      const url = referenceLinks.get(refKey);
      if (!url) {
        continue;
      }
      links.push({
        url,
        text: m[1],
        type: 'markdown-reference',
        line: lineNumber,
        column: m.index,
        context,
        reference: refKey,
        hash: generateLinkHash(url, filePath, lineNumber),
      });
    }
    // Autolinks: <https://example.com>
    for (const m of line.matchAll(/<(https?:\/\/[^>]+)>/g)) {
      links.push({
        url: m[1],
        text: m[1],
        type: 'autolink',
        line: lineNumber,
        column: m.index,
        context,
        hash: generateLinkHash(m[1], filePath, lineNumber),
      });
    }
    // Bare URLs: must follow start-of-line or whitespace, and stop at
    // whitespace or a closing ), ], } (avoids most false positives).
    for (const m of line.matchAll(
      /(?<start>^|[\s\n])(?<url>https?:\/\/[^\s\)\]\}]+)/g
    )) {
      const url = m.groups.url;
      const column = m.index + m[0].length - url.length;
      // Skip URLs already recorded by one of the passes above for this line
      // (same URL at nearly the same column).
      const alreadyCaptured = links.some(
        (known) =>
          known.line === lineNumber &&
          Math.abs(known.column - column) < 10 &&
          known.url === url
      );
      if (alreadyCaptured) {
        continue;
      }
      links.push({
        url,
        text: url,
        type: 'bare-url',
        line: lineNumber,
        column,
        context,
        hash: generateLinkHash(url, filePath, lineNumber),
      });
    }
  });
  return links;
}
/**
 * Extract links from HTML content
 *
 * Scans each line for `<a ... href="...">` opening tags and records the
 * href plus (when present on the same line) the anchor text.
 * @param {string} content - File content
 * @param {string} filePath - Path to the file
 * @returns {Array} Array of link objects with metadata
 */
function extractHtmlLinks(content, filePath) {
  const links = [];
  const lines = content.split('\n');
  lines.forEach((line, lineIndex) => {
    const lineNumber = lineIndex + 1;
    let match;
    // Matches only the opening <a> tag and captures the href value.
    const htmlLinkRegex = /<a\s+[^>]*href\s*=\s*["']([^"']+)["'][^>]*>/gi;
    while ((match = htmlLinkRegex.exec(line)) !== null) {
      const url = match[1];
      const columnStart = match.index;
      // Extract the anchor text from the remainder of the line, up to the
      // closing </a>. (Previously the text was searched for inside the
      // opening-tag match itself, which can never contain it, so linkText
      // always fell back to the URL.)
      const remainder = line.slice(htmlLinkRegex.lastIndex);
      const textMatch = remainder.match(/^([^<]*)<\/a>/i);
      const linkText =
        textMatch && textMatch[1].trim() ? textMatch[1].trim() : url;
      links.push({
        url: url,
        text: linkText,
        type: 'html',
        line: lineNumber,
        column: columnStart,
        context: line.trim(),
        hash: generateLinkHash(url, filePath, lineNumber),
      });
    }
  });
  return links;
}
/**
 * Generate a unique hash for a link
 *
 * The hash is derived from the file path, line number, and trimmed URL,
 * so the same URL on a different line (or in a different file) hashes
 * differently.
 * @param {string} url - The URL
 * @param {string} filePath - File path
 * @param {number} line - Line number
 * @returns {string} 16-character hex hash (truncated SHA-256)
 */
function generateLinkHash(url, filePath, line) {
  const hasher = crypto.createHash('sha256');
  hasher.update(`${filePath}:${line}:${url.trim()}`);
  return hasher.digest('hex').slice(0, 16);
}
/**
 * Generate a hash for file content
 * @param {string} content - File content
 * @returns {string} 16-character hex hash (truncated SHA-256)
 */
function generateFileHash(content) {
  const digest = crypto.createHash('sha256').update(content).digest('hex');
  return digest.slice(0, 16);
}
/**
 * Categorize link types for validation
 * @param {string} url - The URL to categorize
 * @returns {Object} Link category information: `category`,
 *   `needsValidation`, and (for external links) `protocol`
 */
function categorizeLinkType(url) {
  const trimmedUrl = url.trim();
  // External links
  if (trimmedUrl.startsWith('http://') || trimmedUrl.startsWith('https://')) {
    return {
      category: 'external',
      protocol: trimmedUrl.startsWith('https://') ? 'https' : 'http',
      needsValidation: true,
    };
  }
  // Special protocols (mailto:, tel:, ftp://, etc.)
  // Checked before the relative-link test so that scheme-only URLs such
  // as "mailto:user@example.com" (which contain ":" but not "://") are
  // not misclassified as internal-relative links.
  if (/^[a-zA-Z][a-zA-Z0-9+.-]*:/.test(trimmedUrl)) {
    return {
      category: 'special-protocol',
      needsValidation: false,
    };
  }
  // Internal absolute links
  if (trimmedUrl.startsWith('/')) {
    return {
      category: 'internal-absolute',
      needsValidation: true,
    };
  }
  // Relative links
  if (
    trimmedUrl.startsWith('./') ||
    trimmedUrl.startsWith('../') ||
    (!trimmedUrl.startsWith('#') && !trimmedUrl.includes('://'))
  ) {
    return {
      category: 'internal-relative',
      needsValidation: true,
    };
  }
  // Fragment/anchor links
  if (trimmedUrl.startsWith('#')) {
    return {
      category: 'fragment',
      needsValidation: true, // May need validation for internal page anchors
    };
  }
  // Anything else containing "://" (malformed or exotic scheme)
  if (trimmedUrl.includes('://') && !trimmedUrl.startsWith('http')) {
    return {
      category: 'special-protocol',
      needsValidation: false,
    };
  }
  return {
    category: 'unknown',
    needsValidation: true,
  };
}
/**
 * Extract all links from a file
 *
 * Reads the file, dispatches to the markdown or HTML extractor based on
 * the extension, enriches each link with its category, and returns the
 * links together with per-file statistics. Returns null for missing
 * files, unsupported extensions, or read errors.
 * @param {string} filePath - Path to the file
 * @returns {Object} File analysis with links and metadata
 */
function extractLinksFromFile(filePath) {
  try {
    if (!fs.existsSync(filePath)) {
      throw new Error(`File not found: ${filePath}`);
    }
    const content = fs.readFileSync(filePath, 'utf8');
    const fileHash = generateFileHash(content);
    const extension = path.extname(filePath).toLowerCase();
    let frontmatter = {};
    let links;
    if (extension === '.md') {
      // Strip frontmatter before scanning the markdown body for links.
      let bodyContent = content;
      try {
        const parsed = matter(content);
        frontmatter = parsed.data || {};
        bodyContent = parsed.content;
      } catch (err) {
        console.warn(
          `Warning: Could not parse frontmatter in ${filePath}: ${err.message}`
        );
      }
      links = extractMarkdownLinks(bodyContent, filePath);
    } else if (extension === '.html') {
      links = extractHtmlLinks(content, filePath);
    } else {
      console.warn(`Warning: Unsupported file type for ${filePath}`);
      return null;
    }
    // Attach category/validation metadata and the source file path.
    const enhancedLinks = links.map((link) => ({
      ...link,
      ...categorizeLinkType(link.url),
      filePath,
    }));
    const countWhere = (pred) => enhancedLinks.filter(pred).length;
    const stats = {
      totalLinks: enhancedLinks.length,
      externalLinks: countWhere((l) => l.category === 'external'),
      internalLinks: countWhere((l) => l.category.startsWith('internal')),
      fragmentLinks: countWhere((l) => l.category === 'fragment'),
      linksNeedingValidation: countWhere((l) => l.needsValidation),
    };
    return {
      filePath,
      fileHash,
      extension,
      frontmatter,
      links: enhancedLinks,
      stats,
      extractedAt: new Date().toISOString(),
    };
  } catch (error) {
    console.error(`Error extracting links from ${filePath}: ${error.message}`);
    return null;
  }
}
/**
 * Main function for CLI usage
 *
 * Parses process.argv, extracts links from each file argument, and
 * prints the results in one of three modes: a detailed listing
 * (default), machine-readable JSON (--json), or aggregate statistics
 * (--stats-only). An optional `--filter TYPE` restricts the reported
 * links to a single category. Exits 1 when no arguments are given.
 */
function main() {
  const args = process.argv.slice(2);
  if (args.length === 0) {
    console.error('Usage: node link-extractor.js <file1> [file2] [...]');
    console.error(' node link-extractor.js --help');
    process.exit(1);
  }
  if (args[0] === '--help') {
    console.log(`
Link Extractor for Documentation Files
Usage:
node link-extractor.js <file1> [file2] [...] Extract links from files
node link-extractor.js --help Show this help
Options:
--json Output results as JSON
--stats-only Show only statistics
--filter TYPE Filter links by category (external, internal-absolute, internal-relative, fragment)
Examples:
node link-extractor.js content/influxdb3/core/install.md
node link-extractor.js --json content/**/*.md
node link-extractor.js --stats-only --filter external content/influxdb3/**/*.md
`);
    process.exit(0);
  }
  // Flag parsing: --filter consumes the following argument as its value.
  const jsonOutput = args.includes('--json');
  const statsOnly = args.includes('--stats-only');
  const filterType = args.includes('--filter')
    ? args[args.indexOf('--filter') + 1]
    : null;
  // Everything that is not a flag (and not the --filter value) is a file.
  const files = args.filter(
    (arg) => !arg.startsWith('--') && arg !== filterType
  );
  const results = [];
  for (const filePath of files) {
    const result = extractLinksFromFile(filePath);
    if (result) {
      // Apply filter if specified
      if (filterType) {
        result.links = result.links.filter(
          (link) => link.category === filterType
        );
        // Recalculate stats after filtering
        result.stats = {
          totalLinks: result.links.length,
          externalLinks: result.links.filter((l) => l.category === 'external')
            .length,
          internalLinks: result.links.filter((l) =>
            l.category.startsWith('internal')
          ).length,
          fragmentLinks: result.links.filter((l) => l.category === 'fragment')
            .length,
          linksNeedingValidation: result.links.filter((l) => l.needsValidation)
            .length,
        };
      }
      results.push(result);
    }
  }
  if (jsonOutput) {
    console.log(JSON.stringify(results, null, 2));
  } else if (statsOnly) {
    // Aggregate counters across all processed files.
    console.log('\nLink Extraction Statistics:');
    console.log('==========================');
    let totalFiles = 0;
    let totalLinks = 0;
    let totalExternal = 0;
    let totalInternal = 0;
    let totalFragment = 0;
    let totalNeedingValidation = 0;
    results.forEach((result) => {
      totalFiles++;
      totalLinks += result.stats.totalLinks;
      totalExternal += result.stats.externalLinks;
      totalInternal += result.stats.internalLinks;
      totalFragment += result.stats.fragmentLinks;
      totalNeedingValidation += result.stats.linksNeedingValidation;
      console.log(
        `${result.filePath}: ${result.stats.totalLinks} links (${result.stats.linksNeedingValidation} need validation)`
      );
    });
    console.log('\nSummary:');
    console.log(` Total files: ${totalFiles}`);
    console.log(` Total links: ${totalLinks}`);
    console.log(` External links: ${totalExternal}`);
    console.log(` Internal links: ${totalInternal}`);
    console.log(` Fragment links: ${totalFragment}`);
    console.log(` Links needing validation: ${totalNeedingValidation}`);
  } else {
    // Default mode: per-file detailed listing of every extracted link.
    results.forEach((result) => {
      console.log(`\nFile: ${result.filePath}`);
      console.log(`Hash: ${result.fileHash}`);
      console.log(`Links found: ${result.stats.totalLinks}`);
      console.log(
        `Links needing validation: ${result.stats.linksNeedingValidation}`
      );
      if (result.links.length > 0) {
        console.log('\nLinks:');
        result.links.forEach((link, index) => {
          console.log(` ${index + 1}. [${link.category}] ${link.url}`);
          console.log(` Line ${link.line}, Column ${link.column}`);
          console.log(` Text: "${link.text}"`);
          console.log(` Hash: ${link.hash}`);
          if (link.reference) {
            console.log(` Reference: ${link.reference}`);
          }
          console.log('');
        });
      }
    });
  }
}
// Export functions for use as a module
export {
extractLinksFromFile,
extractMarkdownLinks,
extractHtmlLinks,
generateFileHash,
generateLinkHash,
categorizeLinkType,
};
// Run main function if called directly
if (fileURLToPath(import.meta.url) === process.argv[1]) {
main();
}

385
.github/scripts/matrix-generator.js vendored Normal file
View File

@ -0,0 +1,385 @@
/**
* Matrix Generator for Link Validation Workflows
* Replaces complex bash scripting with maintainable JavaScript
* Includes cache-aware optimization to skip validation of unchanged files
*/
import { spawn } from 'child_process';
import process from 'process';
import { fileURLToPath } from 'url'; // Used for main execution check at bottom of file
// Product configuration mapping file paths to products.
// Keys are content-path prefixes; each value carries the matrix key and
// the human-readable product name.
const PRODUCT_MAPPING = {
  'content/influxdb3/core': { key: 'influxdb3-core', name: 'InfluxDB 3 Core' },
  'content/influxdb3/enterprise': { key: 'influxdb3-enterprise', name: 'InfluxDB 3 Enterprise' },
  'content/influxdb3/cloud-dedicated': { key: 'influxdb3-cloud-dedicated', name: 'InfluxDB 3 Cloud Dedicated' },
  'content/influxdb3/cloud-serverless': { key: 'influxdb3-cloud-serverless', name: 'InfluxDB 3 Cloud Serverless' },
  'content/influxdb3/clustered': { key: 'influxdb3-clustered', name: 'InfluxDB 3 Clustered' },
  'content/influxdb3/explorer': { key: 'influxdb3-explorer', name: 'InfluxDB 3 Explorer' },
  'content/influxdb/v2': { key: 'influxdb-v2', name: 'InfluxDB v2' },
  'content/influxdb/cloud': { key: 'influxdb-cloud', name: 'InfluxDB Cloud' },
  'content/influxdb/v1': { key: 'influxdb-v1', name: 'InfluxDB v1' },
  'content/influxdb/enterprise_influxdb': { key: 'influxdb-enterprise-v1', name: 'InfluxDB Enterprise v1' },
  'content/telegraf': { key: 'telegraf', name: 'Telegraf' },
  'content/kapacitor': { key: 'kapacitor', name: 'Kapacitor' },
  'content/chronograf': { key: 'chronograf', name: 'Chronograf' },
  'content/flux': { key: 'flux', name: 'Flux' },
  'content/shared': { key: 'shared', name: 'Shared Content' },
  'api-docs': { key: 'api-docs', name: 'API Documentation' },
};
/**
 * Group files by product based on their path
 *
 * Every product key appears in the result, mapped to an array that is
 * empty when no input file matched its path prefix. Files matching no
 * known prefix are dropped.
 * @param {string[]} files - Array of file paths
 * @returns {Object} - Object with product keys and arrays of files
 */
function groupFilesByProduct(files) {
  // One (initially empty) bucket per known product.
  const buckets = {};
  for (const { key } of Object.values(PRODUCT_MAPPING)) {
    buckets[key] = [];
  }
  for (const file of files) {
    const entry = Object.entries(PRODUCT_MAPPING).find(([prefix]) =>
      file.startsWith(prefix + '/')
    );
    if (entry) {
      buckets[entry[1].key].push(file);
    } else if (file.startsWith('api-docs/')) {
      // Kept for parity with the original edge-case handling of api-docs
      // paths (the prefix match above already covers them).
      buckets['api-docs'].push(file);
    }
  }
  return buckets;
}
/**
 * Run incremental validation analysis
 *
 * Spawns `.github/scripts/incremental-validator.cjs` with the given
 * files and parses a JSON object from its stdout. This function never
 * rejects: on a non-zero exit code, unparseable output, or a spawn
 * failure it resolves with a fallback that marks every input file as
 * needing validation.
 * @param {string[]} files - Array of file paths to analyze
 * @returns {Promise<Object>} - Incremental validation results
 */
async function runIncrementalAnalysis(files) {
  return new Promise((resolve) => {
    const child = spawn(
      'node',
      ['.github/scripts/incremental-validator.cjs', ...files],
      {
        stdio: ['pipe', 'pipe', 'pipe'],
        env: process.env,
      }
    );
    // Buffer the child's output; it is only parsed after the process exits.
    let stdout = '';
    let stderr = '';
    child.stdout.on('data', (data) => {
      stdout += data.toString();
    });
    child.stderr.on('data', (data) => {
      stderr += data.toString();
    });
    child.on('close', (code) => {
      if (code === 0) {
        try {
          // Parse the JSON output from the validation script
          // NOTE(review): assumes the validator prints its JSON object on
          // a single line — pretty-printed (multi-line) JSON would fail
          // JSON.parse below and fall through to the validate-everything
          // fallback. Confirm the validator's output format.
          const lines = stdout.trim().split('\n');
          const jsonLine = lines.find((line) => line.startsWith('{'));
          if (jsonLine) {
            const results = JSON.parse(jsonLine);
            resolve(results);
          } else {
            resolve({ filesToValidate: files.map((f) => ({ filePath: f })) });
          }
        } catch (error) {
          console.warn(
            `Warning: Could not parse incremental validation results: ${error.message}`
          );
          resolve({ filesToValidate: files.map((f) => ({ filePath: f })) });
        }
      } else {
        console.warn(
          `Incremental validation failed with code ${code}: ${stderr}`
        );
        resolve({ filesToValidate: files.map((f) => ({ filePath: f })) });
      }
    });
    // Fires when the process itself cannot be started (e.g. node missing).
    child.on('error', (error) => {
      console.warn(`Incremental validation error: ${error.message}`);
      resolve({ filesToValidate: files.map((f) => ({ filePath: f })) });
    });
  });
}
/**
 * Generate matrix configuration for GitHub Actions with cache awareness
 *
 * Strategies: 'none' (no input), 'cache-hit' (all files cached),
 * 'parallel' (one matrix entry per product), or 'sequential' (single
 * job containing every file).
 * @param {string[]} changedFiles - Array of changed file paths
 * @param {Object} options - Configuration options
 * @param {number} [options.maxConcurrentJobs=5] - Cap on parallel matrix entries
 * @param {boolean} [options.forceSequential=false] - Always use a single job
 * @param {number} [options.minFilesForParallel=10] - File-count threshold for parallel mode
 * @param {boolean} [options.useCache=true] - Skip files the incremental cache reports unchanged
 * @returns {Promise<Object>} - Matrix configuration object
 */
async function generateMatrix(changedFiles, options = {}) {
  const {
    maxConcurrentJobs = 5,
    forceSequential = false,
    minFilesForParallel = 10,
    useCache = true,
  } = options;
  // No input files: nothing to validate.
  if (!changedFiles || changedFiles.length === 0) {
    return {
      strategy: 'none',
      hasChanges: false,
      matrix: { include: [] },
      cacheStats: { hitRate: 100, cacheHits: 0, cacheMisses: 0 },
    };
  }
  let filesToValidate = changedFiles;
  // Pessimistic default: every file is a cache miss until analysis
  // reports otherwise.
  let cacheStats = {
    hitRate: 0,
    cacheHits: 0,
    cacheMisses: changedFiles.length,
  };
  // Run incremental analysis if cache is enabled
  if (useCache) {
    try {
      console.log(
        `🔍 Running cache analysis for ${changedFiles.length} files...`
      );
      const analysisResults = await runIncrementalAnalysis(changedFiles);
      if (analysisResults.filesToValidate) {
        filesToValidate = analysisResults.filesToValidate.map(
          (f) => f.filePath
        );
        cacheStats = analysisResults.cacheStats || cacheStats;
        console.log(
          `📊 Cache analysis complete: ${cacheStats.hitRate}% hit rate`
        );
        console.log(
          `${cacheStats.cacheHits} files cached, ${cacheStats.cacheMisses} need validation`
        );
      }
    } catch (error) {
      // Defensive: runIncrementalAnalysis is written to resolve rather
      // than reject, so this path should be rare.
      console.warn(
        `Cache analysis failed: ${error.message}, proceeding without cache optimization`
      );
    }
  }
  // If no files need validation after cache analysis
  if (filesToValidate.length === 0) {
    return {
      strategy: 'cache-hit',
      hasChanges: false,
      matrix: { include: [] },
      cacheStats,
      message: '✨ All files are cached - no validation needed!',
    };
  }
  const productFiles = groupFilesByProduct(filesToValidate);
  const productsWithFiles = Object.entries(productFiles).filter(
    ([key, files]) => files.length > 0
  );
  // Determine strategy based on file count and configuration
  const totalFiles = filesToValidate.length;
  const shouldUseParallel =
    !forceSequential &&
    totalFiles >= minFilesForParallel &&
    productsWithFiles.length > 1;
  if (shouldUseParallel) {
    // Parallel strategy: create matrix with products
    const matrixIncludes = productsWithFiles.map(([productKey, files]) => {
      const product = Object.values(PRODUCT_MAPPING).find(
        (p) => p.key === productKey
      );
      return {
        product: productKey,
        name: product?.name || productKey,
        files: files.join(' '),
        cacheEnabled: useCache,
      };
    });
    // NOTE(review): slicing to maxConcurrentJobs silently DROPS any
    // products beyond the limit, so their files are never validated in
    // the parallel strategy — confirm whether overflow products should
    // instead be merged into the remaining jobs.
    return {
      strategy: 'parallel',
      hasChanges: true,
      matrix: { include: matrixIncludes.slice(0, maxConcurrentJobs) },
      cacheStats,
      originalFileCount: changedFiles.length,
      validationFileCount: filesToValidate.length,
    };
  } else {
    // Sequential strategy: single job with all files
    return {
      strategy: 'sequential',
      hasChanges: true,
      matrix: {
        include: [
          {
            product: 'all',
            name: 'All Files',
            files: filesToValidate.join(' '),
            cacheEnabled: useCache,
          },
        ],
      },
      cacheStats,
      originalFileCount: changedFiles.length,
      validationFileCount: filesToValidate.length,
    };
  }
}
/**
 * CLI interface for the matrix generator
 *
 * Parses argv into options and file paths, generates the matrix, and
 * prints it either as pretty JSON (--output-format json) or as GitHub
 * Actions `key=value` output lines (default). Exits 1 on failure.
 */
async function main() {
  const args = process.argv.slice(2);
  if (args.includes('--help') || args.includes('-h')) {
    console.log(`
Usage: node matrix-generator.js [options] <file1> <file2> ...
Options:
--max-concurrent <n> Maximum concurrent jobs (default: 5)
--force-sequential Force sequential execution
--min-files-parallel <n> Minimum files needed for parallel (default: 10)
--output-format <format> Output format: json, github (default: github)
--no-cache Disable cache-aware optimization
--help, -h Show this help message
Examples:
node matrix-generator.js content/influxdb3/core/file1.md content/influxdb/v2/file2.md
node matrix-generator.js --force-sequential content/shared/file.md
node matrix-generator.js --no-cache --output-format json *.md
`);
    process.exit(0);
  }
  // Parse options
  const options = {};
  const files = [];
  for (let i = 0; i < args.length; i++) {
    const arg = args[i];
    if (arg === '--max-concurrent' && i + 1 < args.length) {
      // Always pass an explicit radix: bare parseInt() can misinterpret
      // values with leading zeros or "0x" prefixes.
      options.maxConcurrentJobs = Number.parseInt(args[++i], 10);
    } else if (arg === '--force-sequential') {
      options.forceSequential = true;
    } else if (arg === '--min-files-parallel' && i + 1 < args.length) {
      options.minFilesForParallel = Number.parseInt(args[++i], 10);
    } else if (arg === '--output-format' && i + 1 < args.length) {
      options.outputFormat = args[++i];
    } else if (arg === '--no-cache') {
      options.useCache = false;
    } else if (!arg.startsWith('--')) {
      files.push(arg);
    }
  }
  try {
    const result = await generateMatrix(files, options);
    if (options.outputFormat === 'json') {
      console.log(JSON.stringify(result, null, 2));
    } else {
      // GitHub Actions format
      console.log(`strategy=${result.strategy}`);
      console.log(`has-changes=${result.hasChanges}`);
      console.log(`matrix=${JSON.stringify(result.matrix)}`);
      // Add cache statistics
      if (result.cacheStats) {
        console.log(`cache-hit-rate=${result.cacheStats.hitRate}`);
        console.log(`cache-hits=${result.cacheStats.cacheHits}`);
        console.log(`cache-misses=${result.cacheStats.cacheMisses}`);
      }
      if (result.originalFileCount !== undefined) {
        console.log(`original-file-count=${result.originalFileCount}`);
        console.log(`validation-file-count=${result.validationFileCount}`);
      }
      if (result.message) {
        console.log(`message=${result.message}`);
      }
    }
  } catch (error) {
    console.error(`Error generating matrix: ${error.message}`);
    process.exit(1);
  }
}
// Run CLI if this file is executed directly
if (fileURLToPath(import.meta.url) === process.argv[1]) {
main().catch(console.error);
}
export { generateMatrix, groupFilesByProduct, PRODUCT_MAPPING };

View File

@ -0,0 +1,24 @@
/**
* URL Transformation Utilities
* Shared logic for converting file paths to URL paths
* Used across documentation testing and build tools
*/
/**
 * Convert a content file path to its corresponding URL path
 * @param {string} filePath - File path starting with 'content/'
 * @returns {string} - URL path (starts with '/')
 */
function filePathToUrl(filePath) {
  // Strip the leading 'content/' directory. Anchored to the slash so
  // paths that merely begin with the word "content" (e.g.
  // 'content-old/x.md') are left intact.
  let url = filePath.replace(/^content\//, '/');
  // Section indexes map to their directory URL.
  url = url.replace(/\/_index\.(html|md)$/, '/');
  // Regular pages drop the extension and gain a trailing slash.
  url = url.replace(/\.md$/, '/');
  url = url.replace(/\.html$/, '/');
  if (!url.startsWith('/')) {
    url = '/' + url;
  }
  return url;
}
export { filePathToUrl };

View File

@ -0,0 +1,429 @@
name: Audit Documentation
on:
workflow_dispatch:
inputs:
version:
description: 'Version to audit (must exist in git tags, e.g., v3.0.0 or "local" for dev containers)'
required: false
default: 'local'
create_issue:
description: 'Create GitHub issue with audit results'
required: false
type: boolean
default: false
jobs:
# Audits the InfluxDB 3 Core CLI against the documentation, using a local
# Docker container when the `version` input is 'local'.
cli-3-core:
  name: Audit InfluxDB 3 Core CLI
  runs-on: ubuntu-latest
  steps:
    - uses: actions/checkout@v4
    - name: Set up Node.js
      uses: actions/setup-node@v4
      with:
        node-version: '18'
        cache: 'yarn'
    - name: Install dependencies
      run: yarn install --frozen-lockfile
    # NOTE(review): the `github.event_name == 'schedule'` branch is dead
    # code — this workflow's `on:` block only declares workflow_dispatch.
    - name: Set up Docker
      if: github.event.inputs.version == 'local' || github.event_name == 'schedule'
      run: |
        docker compose up -d influxdb3-core
        sleep 10 # Wait for containers to be ready
    - name: Run Core CLI audit
      run: |
        VERSION="${{ github.event.inputs.version || 'local' }}"
        node ./helper-scripts/influxdb3-monolith/audit-cli-documentation.js core $VERSION
    - name: Upload audit reports
      uses: actions/upload-artifact@v4
      with:
        name: cli-audit-3-core-${{ github.event.inputs.version || 'local' }}
        path: helper-scripts/output/cli-audit/
        retention-days: 30
# Audits the InfluxDB 3 Enterprise CLI against the documentation, using a
# local Docker container when the `version` input is 'local'.
cli-3-enterprise:
  name: Audit InfluxDB 3 Enterprise CLI
  runs-on: ubuntu-latest
  steps:
    - uses: actions/checkout@v4
    - name: Set up Node.js
      uses: actions/setup-node@v4
      with:
        node-version: '18'
        cache: 'yarn'
    - name: Install dependencies
      run: yarn install --frozen-lockfile
    # NOTE(review): the `github.event_name == 'schedule'` branch is dead
    # code — this workflow's `on:` block only declares workflow_dispatch.
    - name: Set up Docker
      if: github.event.inputs.version == 'local' || github.event_name == 'schedule'
      run: |
        docker compose up -d influxdb3-enterprise
        sleep 10 # Wait for containers to be ready
    - name: Run Enterprise CLI audit
      run: |
        VERSION="${{ github.event.inputs.version || 'local' }}"
        node ./helper-scripts/influxdb3-monolith/audit-cli-documentation.js enterprise $VERSION
    - name: Upload audit reports
      uses: actions/upload-artifact@v4
      with:
        name: cli-audit-3-enterprise-${{ github.event.inputs.version || 'local' }}
        path: helper-scripts/output/cli-audit/
        retention-days: 30
# Placeholder audit for the influxctl CLI; writes a stub report until the
# real audit script exists.
cli-3-influxctl:
  name: Audit InfluxDB 3 influxctl CLI
  runs-on: ubuntu-latest
  steps:
    - uses: actions/checkout@v4
    - name: Set up Node.js
      uses: actions/setup-node@v4
      with:
        node-version: '18'
        cache: 'yarn'
    - name: Install dependencies
      run: yarn install --frozen-lockfile
    - name: Run influxctl CLI audit
      run: |
        VERSION="${{ github.event.inputs.version || 'local' }}"
        echo "influxctl CLI audit not yet implemented"
        # TODO: Implement influxctl CLI audit
        # node ./helper-scripts/influxdb3-distributed/audit-influxctl-cli.js $VERSION
        # Create placeholder report
        mkdir -p helper-scripts/output/cli-audit
        # Unquoted heredoc delimiter so $VERSION and $(date) are expanded;
        # with << 'EOF' the report would contain them literally.
        cat > helper-scripts/output/cli-audit/influxctl-audit-$VERSION.md << EOF
        # influxctl CLI Audit Report
        **CLI:** influxctl
        **Version:** $VERSION
        **Date:** $(date)
        **Status:** Placeholder - audit not yet implemented
        ## TODO
        - Implement influxctl CLI help extraction
        - Compare against clustered and cloud-dedicated documentation
        - Generate patches for missing documentation
        EOF
    - name: Upload audit reports
      uses: actions/upload-artifact@v4
      with:
        name: cli-audit-3-influxctl-${{ github.event.inputs.version || 'local' }}
        path: helper-scripts/output/cli-audit/
        retention-days: 30
# Placeholder audit for the InfluxDB 3 Core API; writes a stub report
# until the real audit script exists.
api-3-core:
  name: Audit InfluxDB 3 Core API
  runs-on: ubuntu-latest
  steps:
    - uses: actions/checkout@v4
    - name: Run Core API audit
      run: |
        VERSION="${{ github.event.inputs.version || 'local' }}"
        echo "Core API audit not yet implemented"
        # TODO: Implement Core API audit
        # node ./helper-scripts/influxdb3-monolith/audit-api-documentation.js core $VERSION
        # Create placeholder report
        mkdir -p helper-scripts/output/api-audit
        # Unquoted heredoc delimiter so $VERSION and $(date) are expanded;
        # with << 'EOF' the report would contain them literally.
        cat > helper-scripts/output/api-audit/core-api-audit-$VERSION.md << EOF
        # InfluxDB 3 Core API Audit Report
        **API:** InfluxDB 3 Core
        **Version:** $VERSION
        **Date:** $(date)
        **Status:** Placeholder - audit not yet implemented
        ## TODO
        - Implement API endpoint discovery
        - Compare against OpenAPI specs
        - Validate documentation examples
        EOF
    - name: Upload audit reports
      uses: actions/upload-artifact@v4
      with:
        name: api-audit-3-core-${{ github.event.inputs.version || 'local' }}
        path: helper-scripts/output/api-audit/
        retention-days: 30
# Placeholder audit for the InfluxDB 3 Enterprise API; writes a stub
# report until the real audit script exists.
api-3-enterprise:
  name: Audit InfluxDB 3 Enterprise API
  runs-on: ubuntu-latest
  steps:
    - uses: actions/checkout@v4
    - name: Run Enterprise API audit
      run: |
        VERSION="${{ github.event.inputs.version || 'local' }}"
        echo "Enterprise API audit not yet implemented"
        # TODO: Implement Enterprise API audit
        # node ./helper-scripts/influxdb3-monolith/audit-api-documentation.js enterprise $VERSION
        # Create placeholder report
        mkdir -p helper-scripts/output/api-audit
        # Unquoted heredoc delimiter so $VERSION and $(date) are expanded;
        # with << 'EOF' the report would contain them literally.
        cat > helper-scripts/output/api-audit/enterprise-api-audit-$VERSION.md << EOF
        # InfluxDB 3 Enterprise API Audit Report
        **API:** InfluxDB 3 Enterprise
        **Version:** $VERSION
        **Date:** $(date)
        **Status:** Placeholder - audit not yet implemented
        ## TODO
        - Implement API endpoint discovery
        - Compare against OpenAPI specs
        - Validate documentation examples
        - Check enterprise-specific endpoints
        EOF
    - name: Upload audit reports
      uses: actions/upload-artifact@v4
      with:
        name: api-audit-3-enterprise-${{ github.event.inputs.version || 'local' }}
        path: helper-scripts/output/api-audit/
        retention-days: 30
# Placeholder audit for the InfluxDB 3 Cloud Dedicated API; writes a stub
# report until the real audit script exists.
api-3-cloud-dedicated:
  name: Audit InfluxDB 3 Cloud Dedicated API
  runs-on: ubuntu-latest
  steps:
    - uses: actions/checkout@v4
    - name: Run Cloud Dedicated API audit
      run: |
        VERSION="${{ github.event.inputs.version || 'local' }}"
        echo "Cloud Dedicated API audit not yet implemented"
        # TODO: Implement Cloud Dedicated API audit
        # node ./helper-scripts/influxdb3-distributed/audit-api-documentation.js cloud-dedicated $VERSION
        # Create placeholder report
        mkdir -p helper-scripts/output/api-audit
        # Unquoted heredoc delimiter so $VERSION and $(date) are expanded;
        # with << 'EOF' the report would contain them literally.
        cat > helper-scripts/output/api-audit/cloud-dedicated-api-audit-$VERSION.md << EOF
        # InfluxDB 3 Cloud Dedicated API Audit Report
        **API:** InfluxDB 3 Cloud Dedicated
        **Version:** $VERSION
        **Date:** $(date)
        **Status:** Placeholder - audit not yet implemented
        ## TODO
        - Implement management API audit
        - Implement data API audit
        - Compare against OpenAPI specs
        - Validate documentation examples
        EOF
    - name: Upload audit reports
      uses: actions/upload-artifact@v4
      with:
        name: api-audit-3-cloud-dedicated-${{ github.event.inputs.version || 'local' }}
        path: helper-scripts/output/api-audit/
        retention-days: 30
# Placeholder audit for the InfluxDB 3 Clustered API; writes a stub
# report until the real audit script exists.
api-3-clustered:
  name: Audit InfluxDB 3 Clustered API
  runs-on: ubuntu-latest
  steps:
    - uses: actions/checkout@v4
    - name: Run Clustered API audit
      run: |
        VERSION="${{ github.event.inputs.version || 'local' }}"
        echo "Clustered API audit not yet implemented"
        # TODO: Implement Clustered API audit
        # node ./helper-scripts/influxdb3-distributed/audit-api-documentation.js clustered $VERSION
        # Create placeholder report
        mkdir -p helper-scripts/output/api-audit
        # Unquoted heredoc delimiter so $VERSION and $(date) are expanded;
        # with << 'EOF' the report would contain them literally.
        cat > helper-scripts/output/api-audit/clustered-api-audit-$VERSION.md << EOF
        # InfluxDB 3 Clustered API Audit Report
        **API:** InfluxDB 3 Clustered
        **Version:** $VERSION
        **Date:** $(date)
        **Status:** Placeholder - audit not yet implemented
        ## TODO
        - Implement management API audit
        - Implement data API audit
        - Compare against OpenAPI specs
        - Validate documentation examples
        EOF
    - name: Upload audit reports
      uses: actions/upload-artifact@v4
      with:
        name: api-audit-3-clustered-${{ github.event.inputs.version || 'local' }}
        path: helper-scripts/output/api-audit/
        retention-days: 30
# Placeholder audit for the InfluxDB 3 Cloud Serverless API; writes a
# stub report until the real audit script exists.
api-3-cloud-serverless:
  name: Audit InfluxDB 3 Cloud Serverless API
  runs-on: ubuntu-latest
  steps:
    - uses: actions/checkout@v4
    - name: Run Cloud Serverless API audit
      run: |
        VERSION="${{ github.event.inputs.version || 'local' }}"
        echo "Cloud Serverless API audit not yet implemented"
        # TODO: Implement Cloud Serverless API audit
        # node ./helper-scripts/influxdb3-distributed/audit-api-documentation.js cloud-serverless $VERSION
        # Create placeholder report
        mkdir -p helper-scripts/output/api-audit
        # Unquoted heredoc delimiter so $VERSION and $(date) are expanded;
        # with << 'EOF' the report would contain them literally.
        cat > helper-scripts/output/api-audit/cloud-serverless-api-audit-$VERSION.md << EOF
        # InfluxDB 3 Cloud Serverless API Audit Report
        **API:** InfluxDB 3 Cloud Serverless
        **Version:** $VERSION
        **Date:** $(date)
        **Status:** Placeholder - audit not yet implemented
        ## TODO
        - Implement management API audit
        - Implement data API audit
        - Compare against OpenAPI specs
        - Validate documentation examples
        EOF
    - name: Upload audit reports
      uses: actions/upload-artifact@v4
      with:
        name: api-audit-3-cloud-serverless-${{ github.event.inputs.version || 'local' }}
        path: helper-scripts/output/api-audit/
        retention-days: 30
# Opens one GitHub issue per audit report that contains real findings.
# NOTE(review): the `github.event_name == 'schedule'` condition is dead
# code — this workflow's `on:` block only declares workflow_dispatch, so
# the job effectively runs only when create_issue=true.
create-audit-issues:
  name: Create Issues from Audit Results
  runs-on: ubuntu-latest
  needs: [
    cli-3-core,
    cli-3-enterprise,
    cli-3-influxctl,
    api-3-core,
    api-3-enterprise,
    api-3-cloud-dedicated,
    api-3-clustered,
    api-3-cloud-serverless
  ]
  if: always() && (github.event_name == 'schedule' || github.event.inputs.create_issue == 'true')
  steps:
    - uses: actions/checkout@v4
    - name: Download all audit reports
      uses: actions/download-artifact@v4
      with:
        path: audit-reports/
    - name: Create issues from audit results
      uses: actions/github-script@v7
      with:
        script: |
          const fs = require('fs');
          const path = require('path');
          // Find all audit report directories
          const reportDirs = fs.readdirSync('audit-reports');
          for (const reportDir of reportDirs) {
            const reportPath = path.join('audit-reports', reportDir);
            const files = fs.readdirSync(reportPath);
            for (const file of files) {
              if (file.endsWith('.md')) {
                const content = fs.readFileSync(path.join(reportPath, file), 'utf8');
                // Only create issues if there are actual problems (not placeholders)
                // NOTE(review): the leading space in ' Documented but not in CLI'
                // looks like a stripped emoji/marker — confirm it still matches
                // the text the audit scripts emit.
                const hasIssues = content.includes('⚠️ Missing from docs') ||
                  content.includes(' Documented but not in CLI') ||
                  content.includes('API endpoint mismatch');
                if (hasIssues) {
                  // Strip the version suffix (e.g. "-local", "-3.0.0") from the
                  // artifact directory name to label the audit type.
                  const auditType = reportDir.replace(/-(local|\d+\.\d+\.\d+)$/, '');
                  await github.rest.issues.create({
                    owner: context.repo.owner,
                    repo: context.repo.repo,
                    title: `Documentation Audit Issues - ${auditType}`,
                    body: `## Audit Results\n\n${content}`,
                    labels: ['documentation', 'audit', auditType.includes('cli') ? 'cli-audit' : 'api-audit']
                  });
                  console.log(`Created issue for ${auditType}`);
                }
              }
            }
          }
# Concatenates every downloaded audit report into a single summary.md
# artifact; runs even when individual audit jobs fail.
audit-summary:
  name: Generate Summary Report
  runs-on: ubuntu-latest
  needs: [
    cli-3-core,
    cli-3-enterprise,
    cli-3-influxctl,
    api-3-core,
    api-3-enterprise,
    api-3-cloud-dedicated,
    api-3-clustered,
    api-3-cloud-serverless
  ]
  if: always()
  steps:
    - uses: actions/checkout@v4
    - name: Download all artifacts
      uses: actions/download-artifact@v4
      with:
        path: audit-artifacts/
    - name: Generate summary
      run: |
        echo "# Documentation Audit Summary" > summary.md
        echo "Date: $(date)" >> summary.md
        echo "Version: ${{ github.event.inputs.version || 'local' }}" >> summary.md
        echo "" >> summary.md
        # Add results from each audit type
        for dir in audit-artifacts/*/; do
          if [ -d "$dir" ]; then
            echo "## $(basename "$dir")" >> summary.md
            # Append every report in the directory. The previous
            # `[ -f "$dir"/*.md ]` test breaks with "binary operator
            # expected" when a directory holds more than one .md file,
            # so iterate over the glob and test each file instead.
            for report in "$dir"*.md; do
              if [ -f "$report" ]; then
                cat "$report" >> summary.md
              fi
            done
            echo "" >> summary.md
          fi
        done
    - name: Upload summary
      uses: actions/upload-artifact@v4
      with:
        name: audit-summary
        path: summary.md
        retention-days: 30

503
.github/workflows/influxdb3-release.yml vendored Normal file
View File

@ -0,0 +1,503 @@
name: InfluxDB 3 Release Documentation
on:
workflow_dispatch:
inputs:
product:
description: 'Product being released'
required: true
type: choice
options:
- core
- enterprise
- clustered
- cloud-dedicated
- cloud-serverless
version:
description: 'Release tag name (must exist in git tags, e.g., v3.0.0 or "local" for dev)'
required: true
type: string
previous_version:
description: 'Previous release tag name (must exist in git tags, e.g., v2.9.0)'
required: true
type: string
dry_run:
description: 'Dry run (do not create PRs or issues)'
required: false
type: boolean
default: true
jobs:
generate-release-notes-core-enterprise:
name: Generate Release Notes (Core/Enterprise)
runs-on: ubuntu-latest
if: contains(fromJSON('["core", "enterprise"]'), github.event.inputs.product)
outputs:
generated: ${{ steps.generate.outputs.generated }}
steps:
- uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '18'
cache: 'yarn'
- name: Install dependencies
run: yarn install --frozen-lockfile
- name: Generate release notes
id: generate
run: |
echo "Generating Core/Enterprise release notes for ${{ github.event.inputs.product }} v${{ github.event.inputs.version }}"
# Create output directory
mkdir -p helper-scripts/output/release-notes
# Note: This generates placeholder release notes since the actual repositories
# (influxdb and influxdb_pro) are not available in the GitHub Actions environment.
# To generate actual release notes, the script would need to be run locally with:
# node ./helper-scripts/common/generate-release-notes.js \
# --config ./helper-scripts/common/config/influxdb3-core-enterprise.json \
# ${{ github.event.inputs.previous_version }} \
# ${{ github.event.inputs.version }}
# Create structured placeholder that matches the expected format
cat > helper-scripts/output/release-notes/release-notes-${{ github.event.inputs.product }}-${{ github.event.inputs.version }}.md << EOF
> [!Note]
> #### InfluxDB 3 Core and Enterprise relationship
>
> InfluxDB 3 Enterprise is a superset of InfluxDB 3 Core.
> All updates to Core are automatically included in Enterprise.
> The Enterprise sections below only list updates exclusive to Enterprise.
## ${{ github.event.inputs.version }} {date="$(date +'%Y-%m-%d')"}
### Core
#### Features
- TODO: Add Core features for ${{ github.event.inputs.product }} ${{ github.event.inputs.version }}
#### Bug Fixes
- TODO: Add Core bug fixes for ${{ github.event.inputs.product }} ${{ github.event.inputs.version }}
### Enterprise
All Core updates are included in Enterprise. Additional Enterprise-specific features and fixes:
#### Features
- TODO: Add Enterprise-specific features for ${{ github.event.inputs.version }}
#### Bug Fixes
- TODO: Add Enterprise-specific bug fixes for ${{ github.event.inputs.version }}
EOF
echo "generated=true" >> $GITHUB_OUTPUT
- name: Upload release notes
uses: actions/upload-artifact@v4
with:
name: release-notes-${{ github.event.inputs.product }}-${{ github.event.inputs.version }}
path: helper-scripts/output/release-notes/
retention-days: 30
# generate-release-notes-distributed:
# name: Generate Release Notes (Distributed)
# runs-on: ubuntu-latest
# if: contains(fromJSON('["clustered", "cloud-dedicated", "cloud-serverless"]'), github.event.inputs.product)
# outputs:
# generated: ${{ steps.generate.outputs.generated }}
# steps:
# - uses: actions/checkout@v4
# - name: Set up Node.js
# uses: actions/setup-node@v4
# with:
# node-version: '18'
# cache: 'yarn'
# - name: Install dependencies
# run: yarn install --frozen-lockfile
# - name: Generate release notes
# id: generate
# run: |
# echo "Generating distributed product release notes for ${{ github.event.inputs.product }} v${{ github.event.inputs.version }}"
# # Create output directory
# mkdir -p helper-scripts/output/release-notes
# # Note: This generates placeholder release notes since the actual repositories
# # for distributed products are not available in the GitHub Actions environment.
# # To generate actual release notes, the script would need to be run locally with:
# # node ./helper-scripts/common/generate-release-notes.js \
# # --config ./helper-scripts/common/config/influxdb3-clustered.json \
# # ${{ github.event.inputs.previous_version }} \
# # ${{ github.event.inputs.version }}
# # Create structured placeholder for distributed products
# cat > helper-scripts/output/release-notes/release-notes-${{ github.event.inputs.product }}-${{ github.event.inputs.version }}.md << EOF
# ## ${{ github.event.inputs.version }} {date="$(date +'%Y-%m-%d')"}
# ### Features
# - TODO: Add features for ${{ github.event.inputs.product }} ${{ github.event.inputs.version }}
# ### Bug Fixes
# - TODO: Add bug fixes for ${{ github.event.inputs.product }} ${{ github.event.inputs.version }}
# ### Performance Improvements
# - TODO: Add performance improvements for ${{ github.event.inputs.product }} ${{ github.event.inputs.version }}
# EOF
# echo "generated=true" >> $GITHUB_OUTPUT
# - name: Upload release notes
# uses: actions/upload-artifact@v4
# with:
# name: release-notes-${{ github.event.inputs.product }}-${{ github.event.inputs.version }}
# path: helper-scripts/output/release-notes/
# retention-days: 30
audit-cli-documentation:
name: Audit CLI Documentation
needs: generate-release-notes-core-enterprise
runs-on: ubuntu-latest
if: needs.generate-release-notes-core-enterprise.outputs.generated == 'true' && contains(fromJSON('["core", "enterprise"]'), github.event.inputs.product)
steps:
- uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '18'
cache: 'yarn'
- name: Install dependencies
run: yarn install --frozen-lockfile
- name: Pull Docker images for version
run: |
VERSION="${{ github.event.inputs.version }}"
PRODUCT="${{ github.event.inputs.product }}"
if [ "$PRODUCT" == "both" ]; then
docker pull influxdb:${VERSION}-core || true
docker pull influxdb:${VERSION}-enterprise || true
else
docker pull influxdb:${VERSION}-${PRODUCT} || true
fi
- name: Run CLI audit
run: |
PRODUCT="${{ github.event.inputs.product }}"
VERSION="${{ github.event.inputs.version }}"
node ./helper-scripts/influxdb3-monolith/audit-cli-documentation.js $PRODUCT $VERSION
- name: Upload CLI audit reports
uses: actions/upload-artifact@v4
with:
name: cli-audit-release-${{ github.event.inputs.product }}-${{ github.event.inputs.version }}
path: helper-scripts/output/cli-audit/
retention-days: 90
# audit-distributed-documentation:
# name: Audit Distributed Products Documentation
# needs: generate-release-notes-distributed
# runs-on: ubuntu-latest
# if: needs.generate-release-notes-distributed.outputs.generated == 'true' && contains(fromJSON('["clustered", "cloud-dedicated", "cloud-serverless"]'), github.event.inputs.product)
# steps:
# - uses: actions/checkout@v4
# - name: Set up Node.js
# uses: actions/setup-node@v4
# with:
# node-version: '18'
# cache: 'yarn'
# - name: Install dependencies
# run: yarn install --frozen-lockfile
# - name: Run distributed products audit
# run: |
# PRODUCT="${{ github.event.inputs.product }}"
# VERSION="${{ github.event.inputs.version }}"
# echo "Auditing distributed product: $PRODUCT v$VERSION"
# # TODO: Implement distributed products audit for release
# # This would audit API docs, deployment guides, configuration references
# # node ./helper-scripts/influxdb3-distributed/audit-documentation.js $PRODUCT $VERSION
# # For now, create placeholder report
# mkdir -p helper-scripts/output/distributed-audit
# cat > helper-scripts/output/distributed-audit/release-audit-$PRODUCT-$VERSION.md << 'EOF'
# # Release Audit Report - Distributed Products
# **Product:** $PRODUCT
# **Version:** $VERSION
# **Date:** $(date)
# **Status:** Placeholder - audit not yet implemented
# ## Areas to Audit
# - API documentation completeness
# - Deployment guide accuracy
# - Configuration reference updates
# - Integration guide updates
# - Version-specific feature documentation
# ## TODO
# - Implement API documentation audit
# - Implement deployment guide audit
# - Implement configuration reference audit
# - Implement integration guide audit
# EOF
# - name: Upload distributed audit reports
# uses: actions/upload-artifact@v4
# with:
# name: distributed-audit-release-${{ github.event.inputs.product }}-${{ github.event.inputs.version }}
# path: helper-scripts/output/distributed-audit/
# retention-days: 90
create-documentation-pr:
name: Create Documentation PR
needs: [generate-release-notes-core-enterprise, audit-cli-documentation]
runs-on: ubuntu-latest
if: github.event.inputs.dry_run != 'true' && always() && (needs.generate-release-notes-core-enterprise.result == 'success')
steps:
- uses: actions/checkout@v4
- name: Download artifacts
uses: actions/download-artifact@v4
with:
path: artifacts/
- name: Create release branch
run: |
BRANCH="release-docs-${{ github.event.inputs.product }}-${{ github.event.inputs.version }}"
git checkout -b $BRANCH
echo "BRANCH=$BRANCH" >> $GITHUB_ENV
- name: Copy release notes to docs
run: |
# Download the generated release notes artifact
PRODUCT="${{ github.event.inputs.product }}"
VERSION="${{ github.event.inputs.version }}"
# Determine the target documentation file based on product
case "$PRODUCT" in
"core"|"enterprise")
TARGET_FILE="content/shared/v3-core-enterprise-release-notes/_index.md"
SOURCE_FILE="artifacts/release-notes-${PRODUCT}-${VERSION}/release-notes-${PRODUCT}-${VERSION}.md"
;;
"clustered")
TARGET_FILE="content/influxdb3/clustered/reference/release-notes/_index.md"
SOURCE_FILE="artifacts/release-notes-${PRODUCT}-${VERSION}/release-notes-${PRODUCT}-${VERSION}.md"
;;
"cloud-dedicated")
TARGET_FILE="content/influxdb3/cloud-dedicated/reference/release-notes/_index.md"
SOURCE_FILE="artifacts/release-notes-${PRODUCT}-${VERSION}/release-notes-${PRODUCT}-${VERSION}.md"
;;
"cloud-serverless")
TARGET_FILE="content/influxdb3/cloud-serverless/reference/release-notes/_index.md"
SOURCE_FILE="artifacts/release-notes-${PRODUCT}-${VERSION}/release-notes-${PRODUCT}-${VERSION}.md"
;;
*)
echo "Unknown product: $PRODUCT"
exit 1
;;
esac
# Check if source file exists
if [ -f "$SOURCE_FILE" ]; then
echo "Copying release notes from $SOURCE_FILE to $TARGET_FILE"
# For Core/Enterprise, prepend to existing file (new releases go at the top)
if [ "$PRODUCT" = "core" ] || [ "$PRODUCT" = "enterprise" ]; then
# Create temporary file with new content + existing content
cp "$SOURCE_FILE" temp_release_notes.md
echo "" >> temp_release_notes.md
cat "$TARGET_FILE" >> temp_release_notes.md
mv temp_release_notes.md "$TARGET_FILE"
else
# For other products, replace the file
cp "$SOURCE_FILE" "$TARGET_FILE"
fi
echo "Release notes successfully copied to documentation"
else
echo "Warning: Release notes file not found at $SOURCE_FILE"
echo "Available files in artifacts:"
find artifacts/ -type f -name "*.md" || echo "No markdown files found"
fi
- name: Create Pull Request
uses: peter-evans/create-pull-request@v5
with:
token: ${{ secrets.GITHUB_TOKEN }}
branch: ${{ env.BRANCH }}
title: "docs: Release documentation for ${{ github.event.inputs.product }} v${{ github.event.inputs.version }}"
body: |
## Release Documentation Update
This PR contains documentation updates for **${{ github.event.inputs.product }} v${{ github.event.inputs.version }}**
### Included Updates:
- [ ] Release notes
- [ ] Version updates
- [ ] CLI documentation audit results
### Artifacts:
- [Release Notes](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})
- [CLI Audit Report](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})
### Manual Review Needed:
Please review the CLI audit report for any missing or outdated documentation that needs to be updated.
---
*This PR was automatically generated by the release workflow.*
labels: |
documentation
release
${{ github.event.inputs.product }}
draft: true
create-audit-issue:
name: Create Audit Issue
needs: [audit-cli-documentation]
runs-on: ubuntu-latest
if: github.event.inputs.dry_run != 'true' && always() && (needs.audit-cli-documentation.result == 'success')
steps:
- uses: actions/checkout@v4
- name: Download audit reports
uses: actions/download-artifact@v4
with:
path: audit-reports/
- name: Create issue from audit
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
const path = require('path');
const product = '${{ github.event.inputs.product }}';
const version = '${{ github.event.inputs.version }}';
// Read the first file in `dirPath` whose name contains `marker`,
// or return null when the directory or file is absent.
function loadReport(dirPath, marker) {
  if (!fs.existsSync(dirPath)) {
    return null;
  }
  const fileName = fs.readdirSync(dirPath).find((name) => name.includes(marker));
  if (!fileName) {
    return null;
  }
  return fs.readFileSync(path.join(dirPath, fileName), 'utf8');
}
const auditReports = [];
// CLI audit report: only actionable when the audit flagged discrepancies.
const cliReport = loadReport(
  `audit-reports/cli-audit-release-${product}-${version}`,
  'documentation-audit'
);
if (cliReport !== null) {
  const flagged =
    cliReport.includes('⚠️ Missing from docs') ||
    cliReport.includes(' Documented but not in CLI');
  if (flagged) {
    auditReports.push({ type: 'CLI', content: cliReport });
  }
}
// Distributed products audit report: always included for now, since the
// underlying audit output is still a placeholder.
const distributedReport = loadReport(
  `audit-reports/distributed-audit-release-${product}-${version}`,
  'release-audit'
);
if (distributedReport !== null) {
  auditReports.push({ type: 'Distributed Products', content: distributedReport });
}
if (auditReports.length > 0) {
  // Assemble one issue body that aggregates every audit section.
  const bodyLines = [
    '## Release Documentation Audit Results',
    '',
    `The following documentation issues were found during the release of **${product} v${version}**:`,
    ''
  ];
  for (const report of auditReports) {
    bodyLines.push(`### ${report.type} Audit`, '', report.content, '');
  }
  bodyLines.push(
    '### Action Items:',
    '- [ ] Review and update documentation for missing or outdated content',
    '- [ ] Verify all examples work with the new version',
    '- [ ] Update any version-specific content',
    '- [ ] Remove documentation for deprecated features',
    '',
    '---',
    '*This issue was automatically generated during the release process.*'
  );
  await github.rest.issues.create({
    owner: context.repo.owner,
    repo: context.repo.repo,
    title: `Documentation Updates Needed - ${product} v${version}`,
    body: bodyLines.join('\n'),
    labels: ['documentation', 'release', product, 'audit']
  });
  console.log('Created issue for documentation updates');
} else {
  console.log('No documentation issues found - skipping issue creation');
}
influxdb3-monolith-release-summary:
name: Release Summary
needs: [generate-release-notes-core-enterprise, audit-cli-documentation, create-documentation-pr, create-audit-issue]
runs-on: ubuntu-latest
if: always()
steps:
- name: Generate summary
run: |
echo "# Release Documentation Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "## Release Information" >> $GITHUB_STEP_SUMMARY
echo "- **Product**: ${{ github.event.inputs.product }}" >> $GITHUB_STEP_SUMMARY
echo "- **Version**: ${{ github.event.inputs.version }}" >> $GITHUB_STEP_SUMMARY
echo "- **Previous Version**: ${{ github.event.inputs.previous_version }}" >> $GITHUB_STEP_SUMMARY
echo "- **Dry Run**: ${{ github.event.inputs.dry_run }}" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "## Workflow Results" >> $GITHUB_STEP_SUMMARY
echo "| Step | Status |" >> $GITHUB_STEP_SUMMARY
echo "|------|--------|" >> $GITHUB_STEP_SUMMARY
echo "| Generate Release Notes (Core/Enterprise) | ${{ needs.generate-release-notes-core-enterprise.result }} |" >> $GITHUB_STEP_SUMMARY
echo "| CLI Documentation Audit | ${{ needs.audit-cli-documentation.result }} |" >> $GITHUB_STEP_SUMMARY
echo "| Create Documentation PR | ${{ needs.create-documentation-pr.result }} |" >> $GITHUB_STEP_SUMMARY
echo "| Create Audit Issue | ${{ needs.create-audit-issue.result }} |" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
if [ "${{ github.event.inputs.dry_run }}" == "true" ]; then
echo "**Note**: This was a dry run. No PRs or issues were created." >> $GITHUB_STEP_SUMMARY
fi

241
.github/workflows/pr-link-check.yml vendored Normal file
View File

@ -0,0 +1,241 @@
name: Link Check PR Changes
on:
pull_request:
paths:
- 'content/**/*.md'
- 'data/**/*.yml'
- 'layouts/**/*.html'
types: [opened, synchronize, reopened]
jobs:
link-check:
name: Check links in affected files
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Detect content changes
id: detect
run: |
echo "🔍 Detecting changes between ${{ github.base_ref }} and ${{ github.sha }}"
# For PRs, use the GitHub Files API to get changed files
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
  echo "Using GitHub API to detect PR changes..."
  # Request 100 files per page: the API default of 30 silently truncates
  # larger PRs. NOTE(review): PRs with >100 changed files would still
  # need pagination (follow the Link header).
  curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
    "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.number }}/files?per_page=100" \
    | jq -r '.[].filename' > all_changed_files.txt
else
  echo "Using git diff to detect changes..."
  git diff --name-only ${{ github.event.before }}..${{ github.sha }} > all_changed_files.txt
fi
# Filter for content markdown files
CHANGED_FILES=$(grep '^content/.*\.md$' all_changed_files.txt || true)
echo "📁 All changed files:"
cat all_changed_files.txt
echo ""
echo "📝 Content markdown files:"
echo "$CHANGED_FILES"
if [[ -n "$CHANGED_FILES" ]]; then
  echo "✅ Found $(echo "$CHANGED_FILES" | wc -l) changed content file(s)"
  echo "has-changes=true" >> $GITHUB_OUTPUT
  echo "changed-content<<EOF" >> $GITHUB_OUTPUT
  echo "$CHANGED_FILES" >> $GITHUB_OUTPUT
  echo "EOF" >> $GITHUB_OUTPUT
  # Check if any shared content files were modified
  SHARED_CHANGES=$(echo "$CHANGED_FILES" | grep '^content/shared/' || true)
  if [[ -n "$SHARED_CHANGES" ]]; then
    echo "has-shared-content=true" >> $GITHUB_OUTPUT
    echo "🔄 Detected shared content changes: $SHARED_CHANGES"
  else
    echo "has-shared-content=false" >> $GITHUB_OUTPUT
  fi
else
  echo "❌ No content changes detected"
  echo "has-changes=false" >> $GITHUB_OUTPUT
  echo "has-shared-content=false" >> $GITHUB_OUTPUT
fi
- name: Skip if no content changes
if: steps.detect.outputs.has-changes == 'false'
run: |
echo "No content changes detected in this PR - skipping link check"
echo "✅ **No content changes detected** - link check skipped" >> $GITHUB_STEP_SUMMARY
- name: Setup Node.js
if: steps.detect.outputs.has-changes == 'true'
uses: actions/setup-node@v4
with:
node-version: '20'
cache: 'yarn'
- name: Install dependencies
if: steps.detect.outputs.has-changes == 'true'
run: yarn install --frozen-lockfile
- name: Build Hugo site
if: steps.detect.outputs.has-changes == 'true'
run: npx hugo --minify
- name: Download link-checker binary
if: steps.detect.outputs.has-changes == 'true'
run: |
echo "Downloading link-checker binary from docs-v2 releases..."
# Download from docs-v2's own releases (always accessible)
curl -L -H "Accept: application/vnd.github+json" \
-H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
-o link-checker-info.json \
"https://api.github.com/repos/influxdata/docs-v2/releases/tags/link-checker-v1.2.2"
# Extract download URL for linux binary
DOWNLOAD_URL=$(jq -r '.assets[] | select(.name | test("link-checker.*linux")) | .url' link-checker-info.json)
if [[ "$DOWNLOAD_URL" == "null" || -z "$DOWNLOAD_URL" ]]; then
echo "❌ No linux binary found in release"
echo "Available assets:"
jq -r '.assets[].name' link-checker-info.json
exit 1
fi
echo "📥 Downloading: $DOWNLOAD_URL"
curl -L -H "Accept: application/octet-stream" \
-H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
-o link-checker "$DOWNLOAD_URL"
chmod +x link-checker
./link-checker --version
- name: Verify link checker config exists
if: steps.detect.outputs.has-changes == 'true'
run: |
if [[ ! -f .ci/link-checker/production.lycherc.toml ]]; then
echo "❌ Configuration file .ci/link-checker/production.lycherc.toml not found"
echo "Please copy production.lycherc.toml from docs-tooling/link-checker/"
exit 1
fi
echo "✅ Using configuration: .ci/link-checker/production.lycherc.toml"
- name: Map changed content to public files
if: steps.detect.outputs.has-changes == 'true'
id: mapping
run: |
echo "Mapping changed content files to public HTML files..."
# Create temporary file with changed content files
echo "${{ steps.detect.outputs.changed-content }}" > changed-files.txt
# Map content files to public files
PUBLIC_FILES=$(cat changed-files.txt | xargs -r ./link-checker map --existing-only)
if [[ -n "$PUBLIC_FILES" ]]; then
echo "Found affected public files:"
echo "$PUBLIC_FILES"
echo "public-files<<EOF" >> $GITHUB_OUTPUT
echo "$PUBLIC_FILES" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
# Count files for summary
FILE_COUNT=$(echo "$PUBLIC_FILES" | wc -l)
echo "file-count=$FILE_COUNT" >> $GITHUB_OUTPUT
else
echo "No public files found to check"
echo "public-files=" >> $GITHUB_OUTPUT
echo "file-count=0" >> $GITHUB_OUTPUT
fi
- name: Run link checker
if: steps.detect.outputs.has-changes == 'true' && steps.mapping.outputs.public-files != ''
id: link-check
run: |
echo "Checking links in ${{ steps.mapping.outputs.file-count }} affected files..."
# Create temporary file with public files list
echo "${{ steps.mapping.outputs.public-files }}" > public-files.txt
# Run link checker with detailed JSON output
set +e # Don't fail immediately on error
cat public-files.txt | xargs -r ./link-checker check \
--config .ci/link-checker/production.lycherc.toml \
--format json \
--output link-check-results.json
EXIT_CODE=$?
if [[ -f link-check-results.json ]]; then
# Parse results
BROKEN_COUNT=$(jq -r '.summary.broken_count // 0' link-check-results.json)
TOTAL_COUNT=$(jq -r '.summary.total_checked // 0' link-check-results.json)
SUCCESS_RATE=$(jq -r '.summary.success_rate // 0' link-check-results.json)
echo "broken-count=$BROKEN_COUNT" >> $GITHUB_OUTPUT
echo "total-count=$TOTAL_COUNT" >> $GITHUB_OUTPUT
echo "success-rate=$SUCCESS_RATE" >> $GITHUB_OUTPUT
if [[ $BROKEN_COUNT -gt 0 ]]; then
echo "❌ Found $BROKEN_COUNT broken links out of $TOTAL_COUNT total links"
echo "check-result=failed" >> $GITHUB_OUTPUT
else
echo "✅ All $TOTAL_COUNT links are valid"
echo "check-result=passed" >> $GITHUB_OUTPUT
fi
else
echo "❌ Link check failed to generate results"
echo "check-result=error" >> $GITHUB_OUTPUT
fi
exit $EXIT_CODE
- name: Process and report results
if: always() && steps.detect.outputs.has-changes == 'true' && steps.mapping.outputs.public-files != ''
run: |
if [[ -f link-check-results.json ]]; then
# Create detailed error annotations for broken links
if [[ "${{ steps.link-check.outputs.check-result }}" == "failed" ]]; then
echo "Creating error annotations for broken links..."
jq -r '.broken_links[]? |
"::error file=\(.file // "unknown"),line=\(.line // 1)::Broken link: \(.url) - \(.error // "Unknown error")"' \
link-check-results.json || true
fi
# Generate summary comment
cat >> $GITHUB_STEP_SUMMARY << 'EOF'
## Link Check Results
**Files Checked:** ${{ steps.mapping.outputs.file-count }}
**Total Links:** ${{ steps.link-check.outputs.total-count }}
**Broken Links:** ${{ steps.link-check.outputs.broken-count }}
**Success Rate:** ${{ steps.link-check.outputs.success-rate }}%
EOF
if [[ "${{ steps.link-check.outputs.check-result }}" == "failed" ]]; then
echo "❌ **Link check failed** - see annotations above for details" >> $GITHUB_STEP_SUMMARY
else
echo "✅ **All links are valid**" >> $GITHUB_STEP_SUMMARY
fi
else
echo "⚠️ **Link check could not complete** - no results file generated" >> $GITHUB_STEP_SUMMARY
fi
- name: Upload detailed results
if: always() && steps.detect.outputs.has-changes == 'true' && steps.mapping.outputs.public-files != ''
uses: actions/upload-artifact@v4
with:
name: link-check-results
path: |
link-check-results.json
changed-files.txt
public-files.txt
retention-days: 30

107
.github/workflows/prepare-release.yml vendored Normal file
View File

@ -0,0 +1,107 @@
name: Prepare Documentation Release
on:
workflow_dispatch:
inputs:
product:
description: 'Product to release'
required: true
type: choice
options:
- core
- enterprise
- cloud-serverless
- cloud-dedicated
version:
description: 'Version number (e.g., 3.2.1)'
required: true
release_type:
description: 'Release type'
required: true
type: choice
options:
- major
- minor
- patch
- hotfix
jobs:
prepare-release:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Create release branch
run: |
git checkout -b docs-release-v${{ inputs.version }}
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '18'
cache: 'yarn'
- name: Install dependencies
run: yarn install --frozen-lockfile
- name: Generate release notes
run: |
# Note: This workflow assumes release notes are generated manually or from tagged releases
# For Core/Enterprise products, the script needs repository access which would require
# checking out the influxdb and influxdb_pro repositories first
echo "Warning: Release notes generation requires access to InfluxDB source repositories"
echo "For now, creating a placeholder file that should be replaced with actual release notes"
# Create output directory
mkdir -p helper-scripts/output/release-notes
# Create placeholder release notes file
cat > helper-scripts/output/release-notes/release-notes-v${{ inputs.version }}.md << EOF
## v${{ inputs.version }} {date="$(date +'%Y-%m-%d')"}
### Features
- TODO: Add features for ${{ inputs.product }} v${{ inputs.version }}
### Bug Fixes
- TODO: Add bug fixes for ${{ inputs.product }} v${{ inputs.version }}
<!--
Note: This is a placeholder file generated by the workflow.
To generate actual release notes with commit history, run:
For Core/Enterprise:
node ./helper-scripts/common/generate-release-notes.js \\
--config ./helper-scripts/common/config/influxdb3-core-enterprise.json \\
v$(echo "${{ inputs.version }}" | sed 's/^v//') \\
v${{ inputs.version }}
For other products:
node ./helper-scripts/common/generate-release-notes.js \\
--config ./helper-scripts/common/config/[product-config].json \\
v$(echo "${{ inputs.version }}" | sed 's/^v//') \\
v${{ inputs.version }}
-->
EOF
- name: Update product versions
run: |
# Script to update data/products.yml
./helper-scripts/common/update-product-version.sh \
--product ${{ inputs.product }} \
--version ${{ inputs.version }}
- name: Create release checklist issue
uses: actions/github-script@v7
with:
script: |
const checklist = require('./.github/scripts/release-checklist.js');
await checklist.createIssue({
github,
context,
product: '${{ inputs.product }}',
version: '${{ inputs.version }}',
releaseType: '${{ inputs.release_type }}'
})

View File

@ -0,0 +1,68 @@
name: Sync Link Checker Binary from docs-tooling
on:
workflow_dispatch:
inputs:
version:
description: 'Link checker version to sync (e.g., v1.2.2)'
required: true
type: string
jobs:
sync-binary:
name: Sync link-checker binary from docs-tooling
runs-on: ubuntu-latest
steps:
- name: Download binary from docs-tooling release
run: |
echo "Downloading link-checker ${{ inputs.version }} from docs-tooling..."
# Download binary from docs-tooling release
curl -L -H "Accept: application/octet-stream" \
-H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
-o link-checker-linux-x86_64 \
"https://github.com/influxdata/docs-tooling/releases/download/link-checker-${{ inputs.version }}/link-checker-linux-x86_64"
# Download checksums
curl -L -H "Accept: application/octet-stream" \
-H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
-o checksums.txt \
"https://github.com/influxdata/docs-tooling/releases/download/link-checker-${{ inputs.version }}/checksums.txt"
# Verify downloads
ls -la link-checker-linux-x86_64 checksums.txt
- name: Create docs-v2 release
run: |
echo "Creating link-checker-${{ inputs.version }} release in docs-v2..."
gh release create \
--title "Link Checker Binary ${{ inputs.version }}" \
--notes "Link validation tooling binary for docs-v2 GitHub Actions workflows.
This binary is distributed from the docs-tooling repository release link-checker-${{ inputs.version }}.
### Usage in GitHub Actions
The binary is automatically downloaded by docs-v2 workflows for link validation.
### Manual Usage
\`\`\`bash
# Download and make executable
curl -L -o link-checker https://github.com/influxdata/docs-v2/releases/download/link-checker-${{ inputs.version }}/link-checker-linux-x86_64
chmod +x link-checker
# Verify installation
./link-checker --version
\`\`\`
### Changes in ${{ inputs.version }}
See the [docs-tooling release](https://github.com/influxdata/docs-tooling/releases/tag/link-checker-${{ inputs.version }}) for detailed changelog." \
link-checker-${{ inputs.version }} \
link-checker-linux-x86_64 \
checksums.txt
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@ -0,0 +1,61 @@
name: Trigger Documentation Update on Release
on:
# Can be triggered by external workflows using repository_dispatch
repository_dispatch:
types: [influxdb3-release]
# Can also be triggered via GitHub API
# Example:
# curl -X POST \
# -H "Authorization: token $GITHUB_TOKEN" \
# -H "Accept: application/vnd.github.v3+json" \
# https://api.github.com/repos/influxdata/docs-v2/dispatches \
# -d '{"event_type":"influxdb3-release","client_payload":{"product":"core","version":"3.0.0","previous_version":"2.9.0"}}'
jobs:
trigger-release-workflow:
name: Trigger Release Documentation
runs-on: ubuntu-latest
steps:
- name: Validate payload
run: |
if [ -z "${{ github.event.client_payload.product }}" ]; then
echo "Error: product is required in client_payload"
exit 1
fi
if [ -z "${{ github.event.client_payload.version }}" ]; then
echo "Error: version is required in client_payload"
exit 1
fi
if [ -z "${{ github.event.client_payload.previous_version }}" ]; then
echo "Error: previous_version is required in client_payload"
exit 1
fi
echo "Received release notification:"
echo "Product: ${{ github.event.client_payload.product }}"
echo "Version: ${{ github.event.client_payload.version }}"
echo "Previous Version: ${{ github.event.client_payload.previous_version }}"
- name: Trigger release documentation workflow
uses: actions/github-script@v7
with:
script: |
await github.rest.actions.createWorkflowDispatch({
owner: context.repo.owner,
repo: context.repo.repo,
workflow_id: 'influxdb3-release.yml',
ref: 'master',
inputs: {
product: '${{ github.event.client_payload.product }}',
version: '${{ github.event.client_payload.version }}',
previous_version: '${{ github.event.client_payload.previous_version }}',
dry_run: '${{ github.event.client_payload.dry_run || 'false' }}'
}
});
console.log('Successfully triggered release documentation workflow');

26
.gitignore vendored
View File

@ -3,16 +3,38 @@
public
.*.swp
node_modules
package-lock.json
.config*
**/.env*
*.log
/resources
.hugo_build.lock
# Content generation
/content/influxdb*/**/api/**/*.html
!api-docs/**/.config.yml
/api-docs/redoc-static.html*
/helper-scripts/output/*
/telegraf-build
!telegraf-build/templates
!telegraf-build/scripts
!telegraf-build/README.md
# CI/CD tool files
/cypress/downloads/*
/cypress/screenshots/*
/cypress/videos/*
.lycheecache
test-results.xml
/influxdb3cli-build-scripts/content
tmp
# IDE files
.vscode/*
!.vscode/launch.json
.idea
**/config.toml
# User context files for AI assistant tools
.context/*
!.context/README.md

57
.husky/_/pre-push.old Executable file
View File

@ -0,0 +1,57 @@
#!/bin/sh
# Lefthook runner shim for the Git "pre-push" hook.
# Resolves a usable lefthook executable from (in order): the LEFTHOOK_BIN
# override, the PATH, platform-specific npm packages under node_modules,
# Bundler/Yarn/pnpm wrappers, or the Swift/Mint plugin, then forwards the
# hook invocation to it.

# LEFTHOOK_VERBOSE=1 (or "true") traces every shell command for debugging.
# NOTE: uses two [ ] tests joined with || instead of the obsolescent
# POSIX `test ... -o ...` form.
if [ "$LEFTHOOK_VERBOSE" = "1" ] || [ "$LEFTHOOK_VERBOSE" = "true" ]; then
  set -x
fi

# LEFTHOOK=0 is the documented kill switch: skip the hook entirely.
if [ "$LEFTHOOK" = "0" ]; then
  exit 0
fi

# Probe candidate lefthook locations and run the first one that exists.
call_lefthook()
{
  if test -n "$LEFTHOOK_BIN"
  then
    # Explicit binary path supplied via the environment.
    "$LEFTHOOK_BIN" "$@"
  elif lefthook -h >/dev/null 2>&1
  then
    # lefthook is available on the PATH.
    lefthook "$@"
  else
    dir="$(git rev-parse --show-toplevel)"
    # Map uname output to the os/arch suffixes used by npm platform packages.
    osArch=$(uname | tr '[:upper:]' '[:lower:]')
    cpuArch=$(uname -m | sed 's/aarch64/arm64/;s/x86_64/x64/')
    if test -f "$dir/node_modules/lefthook-${osArch}-${cpuArch}/bin/lefthook"
    then
      "$dir/node_modules/lefthook-${osArch}-${cpuArch}/bin/lefthook" "$@"
    elif test -f "$dir/node_modules/@evilmartians/lefthook/bin/lefthook-${osArch}-${cpuArch}/lefthook"
    then
      "$dir/node_modules/@evilmartians/lefthook/bin/lefthook-${osArch}-${cpuArch}/lefthook" "$@"
    elif test -f "$dir/node_modules/@evilmartians/lefthook-installer/bin/lefthook"
    then
      "$dir/node_modules/@evilmartians/lefthook-installer/bin/lefthook" "$@"
    elif test -f "$dir/node_modules/lefthook/bin/index.js"
    then
      "$dir/node_modules/lefthook/bin/index.js" "$@"
    elif bundle exec lefthook -h >/dev/null 2>&1
    then
      bundle exec lefthook "$@"
    elif yarn lefthook -h >/dev/null 2>&1
    then
      yarn lefthook "$@"
    elif pnpm lefthook -h >/dev/null 2>&1
    then
      pnpm lefthook "$@"
    elif swift package plugin lefthook >/dev/null 2>&1
    then
      swift package --disable-sandbox plugin lefthook "$@"
    elif command -v mint >/dev/null 2>&1
    then
      mint run csjones/lefthook-plugin "$@"
    else
      # Report to stderr; the shim still exits 0 so a missing lefthook
      # install does not block the push.
      echo "Can't find lefthook in PATH" >&2
    fi
  fi
}

call_lefthook run "pre-push" "$@"

57
.husky/_/serve Executable file
View File

@ -0,0 +1,57 @@
#!/bin/sh
# Lefthook runner shim for the "serve" task.
# Resolves a usable lefthook executable from (in order): the LEFTHOOK_BIN
# override, the PATH, platform-specific npm packages under node_modules,
# Bundler/Yarn/pnpm wrappers, or the Swift/Mint plugin, then forwards the
# "serve" invocation to it.

# LEFTHOOK_VERBOSE=1 (or "true") traces every shell command for debugging.
if [ "$LEFTHOOK_VERBOSE" = "1" -o "$LEFTHOOK_VERBOSE" = "true" ]; then
set -x
fi
# LEFTHOOK=0 is the documented kill switch: skip the hook entirely.
if [ "$LEFTHOOK" = "0" ]; then
exit 0
fi
# Probe candidate lefthook locations and run the first one that exists.
# The elif chain order encodes the preference: env override, PATH,
# node_modules binaries, package-manager wrappers, Swift/Mint plugin.
call_lefthook()
{
if test -n "$LEFTHOOK_BIN"
then
# Explicit binary path supplied via the environment.
"$LEFTHOOK_BIN" "$@"
elif lefthook -h >/dev/null 2>&1
then
# lefthook is available on the PATH.
lefthook "$@"
else
dir="$(git rev-parse --show-toplevel)"
# Map uname output to the os/arch suffixes used by npm platform packages.
osArch=$(uname | tr '[:upper:]' '[:lower:]')
cpuArch=$(uname -m | sed 's/aarch64/arm64/;s/x86_64/x64/')
if test -f "$dir/node_modules/lefthook-${osArch}-${cpuArch}/bin/lefthook"
then
"$dir/node_modules/lefthook-${osArch}-${cpuArch}/bin/lefthook" "$@"
elif test -f "$dir/node_modules/@evilmartians/lefthook/bin/lefthook-${osArch}-${cpuArch}/lefthook"
then
"$dir/node_modules/@evilmartians/lefthook/bin/lefthook-${osArch}-${cpuArch}/lefthook" "$@"
elif test -f "$dir/node_modules/@evilmartians/lefthook-installer/bin/lefthook"
then
"$dir/node_modules/@evilmartians/lefthook-installer/bin/lefthook" "$@"
elif test -f "$dir/node_modules/lefthook/bin/index.js"
then
"$dir/node_modules/lefthook/bin/index.js" "$@"
elif bundle exec lefthook -h >/dev/null 2>&1
then
bundle exec lefthook "$@"
elif yarn lefthook -h >/dev/null 2>&1
then
yarn lefthook "$@"
elif pnpm lefthook -h >/dev/null 2>&1
then
pnpm lefthook "$@"
elif swift package plugin lefthook >/dev/null 2>&1
then
swift package --disable-sandbox plugin lefthook "$@"
elif command -v mint >/dev/null 2>&1
then
mint run csjones/lefthook-plugin "$@"
else
# No installation found; report and fall through (exit status 0).
echo "Can't find lefthook in PATH"
fi
fi
}
call_lefthook run "serve" "$@"

1
.nvmrc Normal file
View File

@ -0,0 +1 @@
v23.10.0

View File

@ -3,3 +3,4 @@
**/.svn
**/.hg
**/node_modules
assets/jsconfig.json

47
.vscode/launch.json vendored Normal file
View File

@ -0,0 +1,47 @@
{
"version": "0.2.0",
"configurations": [
{
"name": "Debug JS (debug-helpers)",
"type": "chrome",
"request": "launch",
"url": "http://localhost:1313",
"webRoot": "${workspaceFolder}",
"skipFiles": [
"<node_internals>/**"
],
"sourceMaps": false,
"trace": true,
"smartStep": false
},
{
"name": "Debug JS (source maps)",
"type": "chrome",
"request": "launch",
"url": "http://localhost:1313",
"webRoot": "${workspaceFolder}",
"sourceMaps": true,
"sourceMapPathOverrides": {
"*": "${webRoot}/assets/js/*",
"main.js": "${webRoot}/assets/js/main.js",
"page-context.js": "${webRoot}/assets/js/page-context.js",
"ask-ai-trigger.js": "${webRoot}/assets/js/ask-ai-trigger.js",
"ask-ai.js": "${webRoot}/assets/js/ask-ai.js",
"utils/*": "${webRoot}/assets/js/utils/*",
"services/*": "${webRoot}/assets/js/services/*"
},
"skipFiles": [
"<node_internals>/**",
"node_modules/**",
"chrome-extension://**"
],
"trace": true,
"smartStep": true,
"disableNetworkCache": true,
"userDataDir": "${workspaceFolder}/.vscode/chrome-user-data",
"runtimeArgs": [
"--disable-features=VizDisplayCompositor"
]
    }
  ]
}

26
.vscode/settings.json vendored
View File

@ -1,12 +1,12 @@
{
"commentAnchors.tags.anchors":
{ "SOURCE": {
"scope": "file",
"behavior": "link",
"iconColor": "#FF0000",
"highlightColor": "#FF0000",
"style": "bold"
}},
"commentAnchors.tags.anchors":
{ "SOURCE": {
"scope": "file",
"behavior": "link",
"iconColor": "#FF0000",
"highlightColor": "#FF0000",
"style": "bold"
}},
"commentAnchors.workspace.matchFiles": "**/*.{md,ini,json,yaml,yml}",
"commentAnchors.workspace.enabled": true,
"yaml.schemas": {
@ -14,11 +14,7 @@
},
"vale.valeCLI.config": "${workspaceFolder}/.vale.ini",
"vale.valeCLI.minAlertLevel": "warning",
"github.copilot.chat.codeGeneration.useInstructionFiles": true,
"github.copilot.chat.codeGeneration.instructionFiles": [
{
"path": "${workspaceFolder}/.github/copilot-instructions.md",
"enabled": true
}
],
"cSpell.words": [
"influxctl"
]
}

38
CLAUDE.md Normal file
View File

@ -0,0 +1,38 @@
# Instructions for InfluxData Documentation
## Purpose and scope
Claude should help document InfluxData products by creating clear, accurate technical content with proper code examples, frontmatter, and formatting.
## Project overview
See @README.md
## Available NPM commands
@package.json
## Instructions for contributing
See @.github/copilot-instructions.md for style guidelines and
product-specific documentation paths and URLs managed in this project.
See @.github/instructions/contributing.instructions.md for essential InfluxData
documentation contributing guidelines, such as style and
formatting, and commonly used shortcodes.
See @TESTING.md for comprehensive testing information, including code block
testing, link validation, style linting, and advanced testing procedures.
See @.github/instructions/shortcodes-reference.instructions.md for detailed
information about shortcodes used in this project.
See @.github/instructions/frontmatter-reference.instructions.md for detailed
information about frontmatter used in this project.
See @.github/instructions/influxdb3-code-placeholders.instructions.md for using
placeholders in code samples and CLI commands.
See @api-docs/README.md for information about the API reference documentation, how to
generate it, and how to contribute to it.

File diff suppressed because it is too large Load Diff

View File

@ -1,60 +0,0 @@
Doc is a public custom GPT for OpenAI ChatGPT used to help write and style InfluxData and InfluxDB documentation.
## Introduction
You write technical software documentation for InfluxData. The public web site is https://docs.influxdata.com and the source repository is https://github.com/influxdata/docs-v2.
Documentation provides step-by-step guides and reference documentation for InfluxDB and associated clients (CLIs, client libraries (SDKs), and Telegraf (https://docs.influxdata.com/telegraf/v1/)), and the legacy v1 components Kapacitor and Chronograf.
## Instructions
When a user asks a question and doesn't include a product from the list below, ask them which product in the list they are using, along with the version and query language:
InfluxDB OSS 1.x (AKA "OSS v1")
- Documentation: https://docs.influxdata.com/influxdb/v1/
- Query languages: v1.8+ supports InfluxQL and Flux
- Clients: Telegraf, influx CLI, v1 client libraries
InfluxDB Enterprise (AKA "v1 Enterprise")
- Documentation: https://docs.influxdata.com/enterprise_influxdb/v1/
- Query languages: v1.8+ supports InfluxQL and Flux
- Clients: Telegraf, influx CLI, v1 client libraries
InfluxDB OSS 2.x (AKA "OSS v2", "OSS (TSM)")
- Documentation: https://docs.influxdata.com/influxdb/v2/
- Query languages: InfluxQL and Flux
- Clients: Telegraf, influx CLI, v2 client libraries
InfluxDB Cloud (TSM) (AKA "Cloud 2")
- Documentation: https://docs.influxdata.com/influxdb/cloud/
- Query languages: InfluxQL and Flux
- Clients: Telegraf, influx CLI, v2 client libraries
InfluxDB 3 Clustered (AKA "Clustered", "v3 Clustered")
- Documentation: https://docs.influxdata.com/influxdb3/clustered/
- Query languages: SQL and InfluxQL
- Clients: Telegraf, influxctl CLI, `influxdb3-` (v3) client libraries
InfluxDB 3 Cloud Dedicated (AKA "Cloud Dedicated", "v3 Cloud Dedicated", "Dedicated", "CST (Cloud single-tenant)")
- Documentation: https://docs.influxdata.com/influxdb3/cloud-dedicated/
- Query languages: SQL and InfluxQL
- Clients: Telegraf, influxctl CLI, `influxdb3-` (v3) client libraries
InfluxDB 3 Cloud Serverless (AKA "Cloud Serverless", "v3 Cloud", "Serverless", "Cloud multi-tenant")
- Documentation: https://docs.influxdata.com/influxdb3/cloud-serverless/
- Query languages: SQL and InfluxQL
- Clients: Telegraf, influx CLI, `influxdb3-` (v3) client libraries
InfluxDB 3 Core (AKA "Core", "InfluxDB 3 OSS", "v3 Core", "v3 free")
- Documentation: https://docs.influxdata.com/influxdb3/core/
- Query languages: SQL and InfluxQL
- Clients: Telegraf, influxdb3 CLI, `influxdb3-` (v3) client libraries
InfluxDB 3 Enterprise (AKA "Enterprise", "v3 Enterprise")
- Documentation: https://docs.influxdata.com/influxdb3/enterprise/
- Query languages: SQL and InfluxQL
- Clients: Telegraf, influxdb3 CLI, `influxdb3-` (v3) client libraries
If I ask about a REST API or SDK (client library) and don't specify a product, ask which product.
For API client libraries, refer to the documentation and to the source repositories in https://github.com/InfluxCommunity for the version-specific client library.
When writing documentation, always use Google Developer Documentation style guidelines and Markdown format.
If writing REST API reference documentation follow YouTube Data API style and Google Developer Documentation style guidelines.
The project uses the Hugo static site generator to build the documentation.
The site uses JavaScript and jQuery.
For information about linting, tests (using pytests for codeblocks), shortcode <shortcode_name>, refer to https://github.com/influxdata/docs-v2/blob/master/README.md and https://github.com/influxdata/docs-v2/blob/master/CONTRIBUTING.md.
If something in CONTRIBUTING.md needs clarification, then give me the suggested revision for CONTRIBUTING.md in Markdown.
The community forum is https://community.influxdata.com/ and should not be used as a primary source of information, but might contain useful suggestions or solutions to specific problems from users.

View File

@ -3,12 +3,13 @@ FROM golang:latest
### Install InfluxDB clients for testing
# Install InfluxDB keys to verify client installs.
# Follow the install instructions (https://docs.influxdata.com/telegraf/v1/install/?t=curl), except for sudo (which isn't available in Docker).
# influxdata-archive_compat.key GPG fingerprint:
# 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E
ADD https://repos.influxdata.com/influxdata-archive_compat.key ./influxdata-archive_compat.key
RUN echo '393e8779c89ac8d958f81f942f9ad7fb82a25e133faddaf92e15b16e6ac9ce4c influxdata-archive_compat.key' | sha256sum -c && cat influxdata-archive_compat.key | gpg --dearmor | tee /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg > /dev/null
# influxdata-archive.key GPG fingerprint:
# Primary key fingerprint: 24C9 75CB A61A 024E E1B6 3178 7C3D 5715 9FC2 F927
# Subkey fingerprint: 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E
ADD https://repos.influxdata.com/influxdata-archive.key ./influxdata-archive.key
RUN gpg --no-default-keyring --homedir $(mktemp -d) --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null
RUN echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' | tee /etc/apt/sources.list.d/influxdata.list
RUN echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | tee /etc/apt/sources.list.d/influxdata.list
# Vault is used for testing InfluxDB 2.0 Secrets
# Fetch vault package information from HashiCorp repository
@ -100,4 +101,4 @@ ENTRYPOINT [ "pytest" ]
# Specify command arguments:
# --env-file to pass environment variables to the test suite.
# the test directory to run the test suite.
CMD [ "--codeblocks", "" ]
CMD [ "--codeblocks", "" ]

View File

@ -11,6 +11,10 @@ This repository contains the InfluxDB 2.x documentation published at [docs.influ
We welcome and encourage community contributions.
For information about contributing to the InfluxData documentation, see [Contribution guidelines](CONTRIBUTING.md).
## Testing
For information about testing the documentation, including code block testing, link validation, and style linting, see [Testing guide](TESTING.md).
## Reporting a Vulnerability
InfluxData takes security and our users' trust very seriously.

519
TESTING.md Normal file
View File

@ -0,0 +1,519 @@
# Testing Guide for InfluxData Documentation
This guide covers all testing procedures for the InfluxData documentation, including code block testing, link validation, and style linting.
## Quick Start
1. **Prerequisites**: Install [Node.js](https://nodejs.org/en), [Yarn](https://yarnpkg.com/getting-started/install), and [Docker](https://docs.docker.com/get-docker/)
2. **Install dependencies**: Run `yarn` to install all dependencies
3. **Build test environment**: Run `docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest .`
4. **Run tests**: Use any of the test commands below
## Test Types Overview
| Test Type | Purpose | Command |
|-----------|---------|---------|
| **Code blocks** | Validate shell/Python code examples | `yarn test:codeblocks:all` |
| **Link validation** | Check internal/external links | `yarn test:links` |
| **Style linting** | Enforce writing standards | `docker compose run -T vale` |
| **E2E tests** | UI and functionality testing | `yarn test:e2e` |
## Code Block Testing
Code block testing validates that shell commands and Python scripts in documentation work correctly using [pytest-codeblocks](https://github.com/nschloe/pytest-codeblocks/tree/main).
### Basic Usage
```bash
# Test all code blocks
yarn test:codeblocks:all
# Test specific products
yarn test:codeblocks:cloud
yarn test:codeblocks:v2
yarn test:codeblocks:telegraf
```
### Setup and Configuration
#### 1. Set executable permissions on test scripts
```sh
chmod +x ./test/src/*.sh
```
#### 2. Create test credentials
Create databases, buckets, and tokens for the product(s) you're testing.
If you don't have access to a Clustered instance, you can use your Cloud Dedicated instance for testing in most cases.
#### 3. Configure environment variables
Copy the `./test/env.test.example` file into each product directory and rename as `.env.test`:
```sh
# Example locations
./content/influxdb/cloud-dedicated/.env.test
./content/influxdb3/clustered/.env.test
```
Inside each product's `.env.test` file, assign your InfluxDB credentials:
- Include the usual `INFLUX_` environment variables
- For `cloud-dedicated/.env.test` and `clustered/.env.test`, also define:
- `ACCOUNT_ID`, `CLUSTER_ID`: Found in your `influxctl config.toml`
- `MANAGEMENT_TOKEN`: Generate with `influxctl management create`
See `./test/src/prepare-content.sh` for the full list of variables you may need.
#### 4. Configure influxctl commands
For influxctl commands to run in tests, move or copy your `config.toml` file to the `./test` directory.
> [!Warning]
> - The database you configure in `.env.test` and any written data may be deleted during test runs
> - Don't add your `.env.test` files to Git. Git is configured to ignore `.env*` files to prevent accidentally committing credentials
### Writing Testable Code Blocks
#### Basic Example
```python
print("Hello, world!")
```
<!--pytest-codeblocks:expected-output-->
```
Hello, world!
```
#### Interactive Commands
For commands that require TTY interaction (like `influxctl` authentication), wrap the command in a subshell and redirect output:
```sh
# Test the preceding command outside of the code block.
# influxctl authentication requires TTY interaction--
# output the auth URL to a file that the host can open.
script -c "influxctl user list " \
/dev/null > /shared/urls.txt
```
To hide test blocks from users, wrap them in HTML comments. pytest-codeblocks will still collect and run them.
#### Skipping Tests
pytest-codeblocks has features for skipping tests and marking blocks as failed. See the [pytest-codeblocks README](https://github.com/nschloe/pytest-codeblocks/tree/main) for details.
### Troubleshooting
#### "Pytest collected 0 items"
Potential causes:
- Check test discovery options in `pytest.ini`
- Use `python` (not `py`) for Python code block language identifiers:
```python
# This works
```
vs
```py
# This is ignored
```
## Link Validation with Link-Checker
Link validation uses the `link-checker` tool to validate internal and external links in documentation files.
### Basic Usage
#### Installation
**Option 1: Build from source (macOS/local development)**
For local development on macOS, build the link-checker from source:
```bash
# Clone and build link-checker
git clone https://github.com/influxdata/docs-tooling.git
cd docs-tooling/link-checker
cargo build --release
# Copy binary to your PATH or use directly
cp target/release/link-checker /usr/local/bin/
# OR use directly: ./target/release/link-checker
```
**Option 2: Download pre-built binary (GitHub Actions/Linux)**
The link-checker binary is distributed via docs-v2 releases for reliable access from GitHub Actions workflows:
```bash
# Download Linux binary from docs-v2 releases
curl -L -o link-checker \
https://github.com/influxdata/docs-v2/releases/download/link-checker-v1.0.0/link-checker-linux-x86_64
chmod +x link-checker
# Verify installation
./link-checker --version
```
> [!Note]
> Pre-built binaries are currently Linux x86_64 only. For macOS development, use Option 1 to build from source.
```bash
# Clone and build link-checker
git clone https://github.com/influxdata/docs-tooling.git
cd docs-tooling/link-checker
cargo build --release
# Copy binary to your PATH or use directly
cp target/release/link-checker /usr/local/bin/
```
#### Binary Release Process
**For maintainers:** To create a new link-checker release in docs-v2:
1. **Create release in docs-tooling** (builds and releases binary automatically):
```bash
cd docs-tooling
git tag link-checker-v1.2.x
git push origin link-checker-v1.2.x
```
2. **Manually distribute to docs-v2** (required due to private repository access):
```bash
# Download binary from docs-tooling release
curl -L -H "Authorization: Bearer $(gh auth token)" \
-o link-checker-linux-x86_64 \
"https://github.com/influxdata/docs-tooling/releases/download/link-checker-v1.2.x/link-checker-linux-x86_64"
curl -L -H "Authorization: Bearer $(gh auth token)" \
-o checksums.txt \
"https://github.com/influxdata/docs-tooling/releases/download/link-checker-v1.2.x/checksums.txt"
# Create docs-v2 release
gh release create \
--repo influxdata/docs-v2 \
--title "Link Checker Binary v1.2.x" \
--notes "Link validation tooling binary for docs-v2 GitHub Actions workflows." \
link-checker-v1.2.x \
link-checker-linux-x86_64 \
checksums.txt
```
3. **Update workflow reference** (if needed):
```bash
# Update .github/workflows/pr-link-check.yml line 98 to use new version
sed -i 's/link-checker-v[0-9.]*/link-checker-v1.2.x/' .github/workflows/pr-link-check.yml
```
> [!Note]
> The manual distribution is required because docs-tooling is a private repository and the default GitHub token doesn't have cross-repository access for private repos.
#### Core Commands
```bash
# Map content files to public HTML files
link-checker map content/path/to/file.md
# Check links in HTML files
link-checker check public/path/to/file.html
# Generate configuration file
link-checker config
```
### Link Resolution Behavior
The link-checker automatically handles relative link resolution based on the input type:
**Local Files → Local Resolution**
```bash
# When checking local files, relative links resolve to the local filesystem
link-checker check public/influxdb3/core/admin/scale-cluster/index.html
# Relative link /influxdb3/clustered/tags/kubernetes/ becomes:
# → /path/to/public/influxdb3/clustered/tags/kubernetes/index.html
```
**URLs → Production Resolution**
```bash
# When checking URLs, relative links resolve to the production site
link-checker check https://docs.influxdata.com/influxdb3/core/admin/scale-cluster/
# Relative link /influxdb3/clustered/tags/kubernetes/ becomes:
# → https://docs.influxdata.com/influxdb3/clustered/tags/kubernetes/
```
**Why This Matters**
- **Testing new content**: Tag pages generated locally will be found when testing local files
- **Production validation**: Production URLs validate against the live site
- **No false positives**: New content won't appear broken when testing locally before deployment
### Content Mapping Workflows
#### Scenario 1: Map and check InfluxDB 3 Core content
```bash
# Map Markdown files to HTML
link-checker map content/influxdb3/core/get-started/
# Check links in mapped HTML files
link-checker check public/influxdb3/core/get-started/
```
#### Scenario 2: Map and check shared CLI content
```bash
# Map shared content files
link-checker map content/shared/influxdb3-cli/
# Check the mapped output files
# (link-checker map outputs the HTML file paths)
link-checker map content/shared/influxdb3-cli/ | \
xargs link-checker check
```
#### Scenario 3: Direct HTML checking
```bash
# Check HTML files directly without mapping
link-checker check public/influxdb3/core/get-started/
```
#### Combined workflow for changed files
```bash
# Check only files changed in the last commit
git diff --name-only HEAD~1 HEAD | grep '\.md$' | \
xargs link-checker map | \
xargs link-checker check
```
### Configuration Options
#### Local usage (default configuration)
```bash
# Uses default settings or test.lycherc.toml if present
link-checker check public/influxdb3/core/get-started/
```
#### Production usage (GitHub Actions)
```bash
# Use production configuration with comprehensive exclusions
link-checker check \
--config .ci/link-checker/production.lycherc.toml \
public/influxdb3/core/get-started/
```
### GitHub Actions Integration
**Automated Integration (docs-v2)**
The docs-v2 repository includes automated link checking for pull requests:
- **Trigger**: Runs automatically on PRs that modify content files
- **Binary distribution**: Downloads latest pre-built binary from docs-v2 releases
- **Smart detection**: Only checks files affected by PR changes
- **Production config**: Uses optimized settings with exclusions for GitHub, social media, etc.
- **Results reporting**: Broken links reported as GitHub annotations with detailed summaries
The workflow automatically:
1. Detects content changes in PRs using GitHub Files API
2. Downloads latest link-checker binary from docs-v2 releases
3. Builds Hugo site and maps changed content to public HTML files
4. Runs link checking with production configuration
5. Reports results with annotations and step summaries
**Manual Integration (other repositories)**
For other repositories, you can integrate link checking manually:
```yaml
name: Link Check
on:
pull_request:
paths:
- 'content/**/*.md'
jobs:
link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
      - name: Download link-checker
        run: |
          curl -L -o link-checker \
            https://github.com/influxdata/docs-tooling/releases/latest/download/link-checker-linux-x86_64
          chmod +x link-checker
- name: Build Hugo site
run: |
npm install
npx hugo --minify
- name: Check changed files
run: |
git diff --name-only origin/main HEAD | \
grep '\.md$' | \
xargs ./link-checker map | \
xargs ./link-checker check \
--config .ci/link-checker/production.lycherc.toml
```
## Style Linting (Vale)
Style linting uses [Vale](https://vale.sh/) to enforce documentation writing standards, branding guidelines, and vocabulary consistency.
### Basic Usage
```bash
# Basic linting with Docker
docker compose run -T vale --config=content/influxdb/cloud-dedicated/.vale.ini --minAlertLevel=error content/influxdb/cloud-dedicated/write-data/**/*.md
```
### VS Code Integration
1. Install the [Vale VSCode](https://marketplace.visualstudio.com/items?itemName=ChrisChinchilla.vale-vscode) extension
2. Set the `Vale:Vale CLI:Path` setting to `${workspaceFolder}/node_modules/.bin/vale`
### Alert Levels
Vale can raise different alert levels:
- **Error**: Problems that can cause content to render incorrectly, violations of branding guidelines, rejected vocabulary terms
- **Warning**: General style guide rules and best practices
- **Suggestion**: Style preferences that may require refactoring or updates to an exceptions list
### Configuration
- **Styles**: `.ci/vale/styles/` contains configuration for the custom `InfluxDataDocs` style
- **Vocabulary**: Add accepted/rejected terms to `.ci/vale/styles/config/vocabularies`
- **Product-specific**: Configure per-product styles like `content/influxdb/cloud-dedicated/.vale.ini`
For more configuration details, see [Vale configuration](https://vale.sh/docs/topics/config).
## Pre-commit Hooks
docs-v2 uses [Lefthook](https://github.com/evilmartians/lefthook) to manage Git hooks that run automatically during pre-commit and pre-push.
### What Runs Automatically
When you run `git commit`, Git runs:
- **Vale**: Style linting (if configured)
- **Prettier**: Code formatting
- **Cypress**: Link validation tests
- **Pytest**: Code block tests
### Skipping Pre-commit Hooks
We strongly recommend running linting and tests, but you can skip them:
```sh
# Skip with --no-verify flag
git commit -m "<COMMIT_MESSAGE>" --no-verify
# Skip with environment variable
LEFTHOOK=0 git commit
```
## Advanced Testing
### E2E Testing
```bash
# Run all E2E tests
yarn test:e2e
# Run specific E2E specs
node cypress/support/run-e2e-specs.js --spec "cypress/e2e/content/article-links.cy.js"
```
### JavaScript Testing and Debugging
For JavaScript code in the documentation UI (`assets/js`):
#### Using Source Maps and Chrome DevTools
1. In VS Code, select Run > Start Debugging
2. Select the "Debug JS (source maps)" configuration
3. Set breakpoints in the `assets/js/ns-hugo-imp:` namespace
#### Using Debug Helpers
1. Import debug helpers in your JavaScript module:
```js
import { debugLog, debugBreak, debugInspect } from './utils/debug-helpers.js';
```
2. Insert debug statements:
```js
const data = debugInspect(someData, 'Data');
debugLog('Processing data', 'myFunction');
debugBreak(); // Add breakpoint
```
3. Start Hugo: `yarn hugo server`
4. In VS Code, select "Debug JS (debug-helpers)" configuration
Remember to remove debug statements before committing.
## Docker Compose Services
Available test services:
```bash
# All code block tests
docker compose --profile test up
# Individual product tests
docker compose run --rm cloud-pytest
docker compose run --rm v2-pytest
docker compose run --rm telegraf-pytest
# Stop monitoring services
yarn test:codeblocks:stop-monitors
```
## Testing Best Practices
### Code Block Examples
- Always test code examples before committing
- Use realistic data and examples that users would encounter
- Include proper error handling in examples
- Format code to fit within 80 characters
- Use long options in command-line examples (`--option` vs `-o`)
### Link Validation
- Test links regularly, especially after content restructuring
- Use appropriate cache TTL settings for your validation needs
- Monitor cache hit rates to optimize performance
- Clean up expired cache entries periodically
### Style Guidelines
- Run Vale regularly to catch style issues early
- Add accepted terms to vocabulary files rather than ignoring errors
- Configure product-specific styles for branding consistency
- Review suggestions periodically for content improvement opportunities
## Related Files
- **Configuration**: `pytest.ini`, `cypress.config.js`, `lefthook.yml`
- **Docker**: `compose.yaml`, `Dockerfile.pytest`
- **Scripts**: `.github/scripts/` directory
- **Test data**: `./test/` directory
- **Vale config**: `.ci/vale/styles/`
## Getting Help
- **GitHub Issues**: [docs-v2 issues](https://github.com/influxdata/docs-v2/issues)
- **Good first issues**: [good-first-issue label](https://github.com/influxdata/docs-v2/issues?q=is%3Aissue+is%3Aopen+label%3Agood-first-issue)
- **InfluxData CLA**: [Sign here](https://www.influxdata.com/legal/cla/) for substantial contributions

View File

@ -1,5 +1,5 @@
plugins:
- './../openapi/plugins/docs-plugin.js'
- './../openapi/plugins/docs-plugin.cjs'
extends:
- recommended
- docs/all

View File

@ -62,7 +62,7 @@ function showHelp {
subcommand=$1
case "$subcommand" in
cloud-dedicated-v2|cloud-dedicated-management|cloud-serverless-v2|clustered-v2|cloud-v2|v2|v1-compat|core-v3|enterprise-v3|all)
cloud-dedicated-v2|cloud-dedicated-management|cloud-serverless-v2|clustered-management|clustered-v2|cloud-v2|v2|v1-compat|core-v3|enterprise-v3|all)
product=$1
shift
@ -187,6 +187,22 @@ function updateCloudServerlessV2 {
postProcess $outFile 'influxdb3/cloud-serverless/.config.yml' v2@2
}
function updateClusteredManagement {
outFile="influxdb3/clustered/management/openapi.yml"
if [[ -z "$baseUrl" ]];
then
echo "Using existing $outFile"
else
# Clone influxdata/granite and fetch the latest openapi.yaml file.
echo "Fetching the latest openapi.yaml file from influxdata/granite"
tmp_dir=$(mktemp -d)
git clone --depth 1 --branch main https://github.com/influxdata/granite.git "$tmp_dir"
cp "$tmp_dir/openapi.yaml" "$outFile"
rm -rf "$tmp_dir"
fi
postProcess $outFile 'influxdb3/clustered/.config.yml' management@0
}
function updateClusteredV2 {
outFile="influxdb3/clustered/v2/ref.yml"
if [[ -z "$baseUrl" ]];
@ -278,6 +294,9 @@ then
elif [ "$product" = "cloud-serverless-v2" ];
then
updateCloudServerlessV2
elif [ "$product" = "clustered-management" ];
then
updateClusteredManagement
elif [ "$product" = "clustered-v2" ];
then
updateClusteredV2
@ -305,6 +324,6 @@ then
updateOSSV2
updateV1Compat
else
echo "Provide a product argument: cloud-v2, cloud-serverless-v2, cloud-dedicated-v2, cloud-dedicated-management, clustered-v2, core-v3, enterprise-v3, v2, v1-compat, or all."
echo "Provide a product argument: cloud-v2, cloud-serverless-v2, cloud-dedicated-v2, cloud-dedicated-management, clustered-management, clustered-v2, core-v3, enterprise-v3, v2, v1-compat, or all."
showHelp
fi

View File

@ -1,5 +1,5 @@
plugins:
- '../../openapi/plugins/docs-plugin.js'
- '../../openapi/plugins/docs-plugin.cjs'
extends:
- recommended
- docs/all

View File

@ -13731,7 +13731,7 @@ components:
Default is [`RFC3339` date/time format](/influxdb/cloud/reference/glossary/#rfc3339-timestamp).
To include nanoseconds in timestamps, use `RFC3339Nano`.
#### Example formatted date/time values
### Example formatted date/time values
| Format | Value |
|:------------|:----------------------------|

View File

@ -1,5 +1,5 @@
plugins:
- '../../openapi/plugins/docs-plugin.js'
- '../../openapi/plugins/docs-plugin.cjs'
extends:
- recommended
- docs/all
@ -10,7 +10,5 @@ apis:
root: v2/ref.yml
x-influxdata-docs-aliases:
- /influxdb/v2/api/
v1-compatibility@2:
root: v1-compatibility/swaggerV1Compat.yml
x-influxdata-docs-aliases:
- /influxdb/v2/api/v1-compatibility/
- /influxdb/v2/api/v1/

View File

@ -6,5 +6,6 @@
- Headers
- Pagination
- Response codes
- Compatibility endpoints
- name: All endpoints
tags: []

View File

@ -58,6 +58,7 @@ tags:
- [Manage API tokens](/influxdb/v2/security/tokens/)
- [Assign a token to a specific user](/influxdb/v2/security/tokens/create-token/)
name: Authorizations (API tokens)
- name: Authorizations (v1-compatible)
- name: Backup
- description: |
Store your data in InfluxDB [buckets](/influxdb/v2/reference/glossary/#bucket).
@ -88,6 +89,15 @@ tags:
| `orgID` | 16-byte string | The organization ID ([find your organization](/influxdb/v2/organizations/view-orgs/). |
name: Common parameters
x-traitTag: true
- name: Compatibility endpoints
description: |
InfluxDB v2 provides a v1-compatible API for backward compatibility with InfluxDB 1.x clients and integrations.
Use these endpoints with InfluxDB 1.x client libraries and third-party integrations such as Grafana, Telegraf, and other tools designed for InfluxDB 1.x. The compatibility layer maps InfluxDB 1.x concepts (databases, retention policies) to InfluxDB v2 resources (buckets, organizations) through database and retention policy (DBRP) mappings.
- [Write data (v1-compatible)](#tag/Write-data-(v1-compatible))
- [Query data using InfluxQL (v1-compatible)](#tag/Query-data-(v1-compatible))
- [Manage v1-compatible users and permissions](#tag/Authorizations-(v1-compatible))
- name: Config
- name: Dashboards
- name: Data I/O endpoints
@ -99,7 +109,7 @@ tags:
databases and retention policies are mapped to buckets using the
database and retention policy (DBRP) mapping service.
The DBRP mapping service uses the database and retention policy
specified in 1.x compatibility API requests to route operations to a bucket.
specified in v1 compatibility API requests to route operations to a bucket.
### Related guides
@ -139,9 +149,6 @@ tags:
x-traitTag: true
- name: Health
- name: Labels
- name: Legacy Authorizations
- name: Legacy Query
- name: Legacy Write
- name: Metrics
- name: NotificationEndpoints
- name: NotificationRules
@ -194,6 +201,7 @@ tags:
- description: |
Retrieve data, analyze queries, and get query suggestions.
name: Query
- name: Query data (v1-compatible)
- description: |
See the [**API Quick Start**](/influxdb/v2/api-guide/api_intro/)
to get up and running authenticating with tokens, writing to buckets, and querying data.
@ -218,11 +226,11 @@ tags:
|:-----------:|:------------------------ |:--------------------- |
| `200` | Success | |
| `204` | Success. No content | InfluxDB doesn't return data for the request. |
| `400` | Bad request | May indicate one of the following: <ul><li>Line protocol is malformed. The response body contains the first malformed line in the data and indicates what was expected. For partial writes, the number of points written and the number of points rejected are also included. For more information, check the `rejected_points` measurement in your `_monitoring` bucket.</li><li>`Authorization` header is missing or malformed or the API token doesn't have permission for the operation.</li></ul> |
| `400` | Bad request | May indicate one of the following:<ul><li>the request body is malformed</li><li>`Authorization` header is missing or malformed</li><li>the API token doesn't have permission for the operation.</li></ul> |
| `401` | Unauthorized | May indicate one of the following: <ul><li>`Authorization: Token` header is missing or malformed</li><li>API token value is missing from the header</li><li>API token doesn't have permission. For more information about token types and permissions, see [Manage API tokens](/influxdb/v2/security/tokens/)</li></ul> |
| `404` | Not found | Requested resource was not found. `message` in the response body provides details about the requested resource. |
| `413` | Request entity too large | Request payload exceeds the size limit. |
| `422` | Unprocessable entity | Request data is invalid. `code` and `message` in the response body provide details about the problem. |
| `422` | Unprocessable entity | Request data is invalid. The request was well-formed, but couldn't complete due to semantic errors--for example, some or all points in a write request were rejected due to a schema or retention policy violation. The response body provides details about the problem. For more information about rejected points, see how to [Troubleshoot issues writing data](/influxdb/v2/write-data/troubleshoot/)|
| `429` | Too many requests | API token is temporarily over the request quota. The `Retry-After` header describes when to try the request again. |
| `500` | Internal server error | |
| `503` | Service unavailable | Server is temporarily unavailable to process the request. The `Retry-After` header describes when to try the request again. |
@ -314,6 +322,7 @@ tags:
- description: |
Write time series data to [buckets](/influxdb/v2/reference/glossary/#bucket).
name: Write
- name: Write data (v1-compatible)
paths:
/api/v2:
get:
@ -12752,6 +12761,12 @@ paths:
- Returns this error only if the [Go (golang) `ioutil.ReadAll()`](https://pkg.go.dev/io/ioutil#ReadAll) function raises an error.
- Returns `Content-Type: application/json` for this error.
'422':
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
description: The request was well-formed, but some or all of the points were rejected due to semantic errors--for example, schema conflicts or retention policy violations. Error message contains details for one or more rejected points.
'429':
description: |
Too many requests.
@ -12863,7 +12878,7 @@ paths:
description: Unexpected error
summary: List all legacy authorizations
tags:
- Legacy Authorizations
- Authorizations (v1-compatible)
post:
description: |
Creates a legacy authorization and returns the legacy authorization.
@ -12926,7 +12941,7 @@ paths:
description: Unexpected error
summary: Create a legacy authorization
tags:
- Legacy Authorizations
- Authorizations (v1-compatible)
servers:
- url: /private
/legacy/authorizations/{authID}:
@ -12948,7 +12963,7 @@ paths:
description: Unexpected error
summary: Delete a legacy authorization
tags:
- Legacy Authorizations
- Authorizations (v1-compatible)
get:
operationId: GetLegacyAuthorizationsID
parameters:
@ -12971,7 +12986,7 @@ paths:
description: Unexpected error
summary: Retrieve a legacy authorization
tags:
- Legacy Authorizations
- Authorizations (v1-compatible)
patch:
operationId: PatchLegacyAuthorizationsID
parameters:
@ -13001,7 +13016,7 @@ paths:
description: Unexpected error
summary: Update a legacy authorization to be active or inactive
tags:
- Legacy Authorizations
- Authorizations (v1-compatible)
servers:
- url: /private
/legacy/authorizations/{authID}/password:
@ -13034,94 +13049,29 @@ paths:
description: Unexpected error
summary: Set a legacy authorization password
tags:
- Legacy Authorizations
- Authorizations (v1-compatible)
servers:
- url: /private
/query:
get:
description: Queries InfluxDB using InfluxQL.
summary: Execute InfluxQL query (v1-compatible)
description: |
Executes an InfluxQL query to retrieve data from the specified database.
This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana.
Use query parameters to specify the database and the InfluxQL query.
operationId: GetLegacyQuery
parameters:
- $ref: '#/components/parameters/TraceSpan'
- in: header
name: Accept
schema:
default: application/json
description: |
Media type that the client can understand.
**Note**: With `application/csv`, query results include [**unix timestamps**](/influxdb/v2/reference/glossary/#unix-timestamp) instead of [RFC3339 timestamps](/influxdb/v2/reference/glossary/#rfc3339-timestamp).
enum:
- application/json
- application/csv
- text/csv
- application/x-msgpack
type: string
- description: The content encoding (usually a compression algorithm) that the client can understand.
in: header
name: Accept-Encoding
schema:
default: identity
description: The content coding. Use `gzip` for compressed data or `identity` for unmodified, uncompressed data.
enum:
- gzip
- identity
type: string
- in: header
name: Content-Type
schema:
enum:
- application/json
type: string
- description: The InfluxDB 1.x username to authenticate the request.
in: query
name: u
schema:
type: string
- description: The InfluxDB 1.x password to authenticate the request.
in: query
name: p
schema:
type: string
- description: |
The database to query data from.
This is mapped to an InfluxDB [bucket](/influxdb/v2/reference/glossary/#bucket).
For more information, see [Database and retention policy mapping](/influxdb/v2/api/influxdb-1x/dbrp/).
in: query
name: db
required: true
schema:
type: string
- description: |
The retention policy to query data from.
This is mapped to an InfluxDB [bucket](/influxdb/v2/reference/glossary/#bucket).
For more information, see [Database and retention policy mapping](/influxdb/v2/api/influxdb-1x/dbrp/).
in: query
name: rp
schema:
type: string
- description: The InfluxQL query to execute. To execute multiple queries, delimit queries with a semicolon (`;`).
in: query
name: q
required: true
schema:
type: string
- description: |
A unix timestamp precision.
Formats timestamps as [unix (epoch) timestamps](/influxdb/v2/reference/glossary/#unix-timestamp) with the specified precision
instead of [RFC3339 timestamps](/influxdb/v2/reference/glossary/#rfc3339-timestamp) with nanosecond precision.
in: query
name: epoch
schema:
enum:
- ns
- u
- µ
- ms
- s
- m
- h
type: string
- $ref: '#/components/parameters/AuthV1Username'
- $ref: '#/components/parameters/AuthV1Password'
- $ref: '#/components/parameters/Accept'
- $ref: '#/components/parameters/AcceptEncoding'
- $ref: '#/components/parameters/Content-Type'
- $ref: '#/components/parameters/V1Database'
- $ref: '#/components/parameters/V1RetentionPolicy'
- $ref: '#/components/parameters/V1Epoch'
- $ref: '#/components/parameters/V1Query'
responses:
'200':
content:
@ -13185,9 +13135,85 @@ paths:
schema:
$ref: '#/components/schemas/Error'
description: Error processing query
summary: Query with the 1.x compatibility API
tags:
- Legacy Query
- Query data (v1-compatible)
post:
operationId: PostQueryV1
summary: Execute InfluxQL query (v1-compatible)
description: |
Executes an InfluxQL query to retrieve data from the specified database.
This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana.
Use query parameters to specify the database and the InfluxQL query.
tags:
- Query data (v1-compatible)
requestBody:
description: InfluxQL query to execute.
content:
text/plain:
schema:
type: string
parameters:
- $ref: '#/components/parameters/TraceSpan'
- $ref: '#/components/parameters/AuthV1Username'
- $ref: '#/components/parameters/AuthV1Password'
- $ref: '#/components/parameters/Accept'
- $ref: '#/components/parameters/AcceptEncoding'
- $ref: '#/components/parameters/Content-Type'
- $ref: '#/components/parameters/V1Database'
- $ref: '#/components/parameters/V1RetentionPolicy'
- $ref: '#/components/parameters/V1Epoch'
responses:
'200':
description: Query results
headers:
Content-Encoding:
description: The Content-Encoding entity header is used to compress the media-type. When present, its value indicates which encodings were applied to the entity-body
schema:
type: string
description: Specifies that the response in the body is encoded with gzip or not encoded with identity.
default: identity
enum:
- gzip
- identity
Trace-Id:
description: The Trace-Id header reports the request's trace ID, if one was generated.
schema:
type: string
description: Specifies the request's trace ID.
content:
application/csv:
schema:
$ref: '#/components/schemas/InfluxqlCsvResponse'
application/json:
schema:
$ref: '#/components/schemas/InfluxqlJsonResponse'
text/csv:
schema:
$ref: '#/components/schemas/InfluxqlCsvResponse'
examples:
influxql-chunk_size_2:
value: |
{"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:55Z",90,"1"],["2016-05-19T18:37:56Z",90,"1"]],"partial":true}],"partial":true}]}
{"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:57Z",90,"1"],["2016-05-19T18:37:58Z",90,"1"]]}]}]}
application/x-msgpack:
schema:
type: string
format: binary
'429':
description: Token is temporarily over quota. The Retry-After header describes when to try the read again.
headers:
Retry-After:
description: A non-negative decimal integer indicating the seconds to delay after the response is received.
schema:
type: integer
format: int32
default:
description: Error processing query
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
/write:
post:
operationId: PostLegacyWrite
@ -13244,7 +13270,7 @@ paths:
application/json:
schema:
$ref: '#/components/schemas/LineProtocolError'
description: Line protocol poorly formed and no points were written. Response can be used to determine the first malformed line in the body line-protocol. All data in body was rejected and not written.
description: Line protocol is poorly formed and no points were written. Response can be used to determine the first malformed line in the body line-protocol.
'401':
content:
application/json:
@ -13256,13 +13282,19 @@ paths:
application/json:
schema:
$ref: '#/components/schemas/Error'
description: No token was sent and they are required.
description: The request didn't provide an authorization token.
'413':
content:
application/json:
schema:
$ref: '#/components/schemas/LineProtocolLengthError'
description: Write has been rejected because the payload is too large. Error message returns max size supported. All data in body was rejected and not written.
'422':
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
description: The request was well-formed, but some points were rejected due to semantic errors--for example, schema conflicts or retention policy violations. Error message contains details for one or more rejected points.
'429':
description: Token is temporarily over quota. The Retry-After header describes when to try the write again.
headers:
@ -13285,9 +13317,31 @@ paths:
schema:
$ref: '#/components/schemas/Error'
description: Internal server error
summary: Write time series data into InfluxDB in a V1-compatible format
summary: Write data using a v1-compatible request
description: |
Writes data in [line protocol](/influxdb/v2/reference/syntax/line-protocol/) syntax to the specified bucket using a v1-compatible request.
This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools.
Use query parameters to specify options for writing data.
#### InfluxDB Cloud
- Validates and queues the request.
- Handles the write asynchronously - the write might not have completed yet.
- Returns a `Retry-After` header that describes when to try the write again.
#### InfluxDB OSS v2
- Validates the request and handles the write synchronously.
- If all points were written successfully, responds with an HTTP `2xx` status code.
- If any points were rejected, responds with an HTTP `4xx` status code and details about the problem.
#### Related guides
- [Write data with the InfluxDB API](/influxdb/v2/write-data/developer-tools/api)
tags:
- Legacy Write
- Write data (v1-compatible)
components:
examples:
AuthorizationPostRequest:
@ -13392,6 +13446,96 @@ components:
required: false
schema:
type: string
Accept:
in: header
name: Accept
schema:
default: application/json
description: |
Media type that the client can understand.
**Note**: With `application/csv`, query results include [**unix timestamps**](/influxdb/v2/reference/glossary/#unix-timestamp) instead of [RFC3339 timestamps](/influxdb/v2/reference/glossary/#rfc3339-timestamp).
enum:
- application/json
- application/csv
- text/csv
- application/x-msgpack
type: string
AcceptEncoding:
description: The content encoding (usually a compression algorithm) that the client can understand.
in: header
name: Accept-Encoding
schema:
default: identity
description: The content coding. Use `gzip` for compressed data or `identity` for unmodified, uncompressed data.
enum:
- gzip
- identity
type: string
Content-Type:
in: header
name: Content-Type
schema:
enum:
- application/json
type: string
AuthV1Username:
description: |
The InfluxDB 1.x username to authenticate the request.
If you provide an API token as the password, `u` is required, but can be any value.
in: query
name: u
schema:
type: string
AuthV1Password:
description: The InfluxDB 1.x password to authenticate the request.
in: query
name: p
schema:
type: string
V1Database:
description: |
The database to query data from.
This is mapped to an InfluxDB [bucket](/influxdb/v2/reference/glossary/#bucket).
For more information, see [Database and retention policy mapping](/influxdb/v2/api/influxdb-1x/dbrp/).
in: query
name: db
required: true
schema:
type: string
V1RetentionPolicy:
description: |
The retention policy to query data from.
This is mapped to an InfluxDB [bucket](/influxdb/v2/reference/glossary/#bucket).
For more information, see [Database and retention policy mapping](/influxdb/v2/api/influxdb-1x/dbrp/).
in: query
name: rp
schema:
type: string
V1Query:
description: The InfluxQL query to execute. To execute multiple queries, delimit queries with a semicolon (`;`).
in: query
name: q
required: true
schema:
type: string
V1Epoch:
description: |
A unix timestamp precision.
Formats timestamps as [unix (epoch) timestamps](/influxdb/v2/reference/glossary/#unix-timestamp) with the specified precision
instead of [RFC3339 timestamps](/influxdb/v2/reference/glossary/#rfc3339-timestamp) with nanosecond precision.
in: query
name: epoch
schema:
enum:
- ns
- u
- µ
- ms
- s
- m
- h
type: string
responses:
AuthorizationError:
content:
@ -14713,7 +14857,7 @@ components:
Default is [`RFC3339` date/time format](/influxdb/v2/reference/glossary/#rfc3339-timestamp).
To include nanoseconds in timestamps, use `RFC3339Nano`.
#### Example formatted date/time values
### Example formatted date/time values
| Format | Value |
|:------------|:----------------------------|
@ -20038,13 +20182,16 @@ x-tagGroups:
- Headers
- Pagination
- Response codes
- Compatibility endpoints
- name: All endpoints
tags:
- Authorizations (API tokens)
- Authorizations (v1-compatible)
- Backup
- Buckets
- Cells
- Checks
- Compatibility endpoints
- Config
- Dashboards
- DBRPs
@ -20052,15 +20199,13 @@ x-tagGroups:
- Delete
- Health
- Labels
- Legacy Authorizations
- Legacy Query
- Legacy Write
- Metrics
- NotificationEndpoints
- NotificationRules
- Organizations
- Ping
- Query
- Query data (v1-compatible)
- Ready
- RemoteConnections
- Replications
@ -20082,3 +20227,4 @@ x-tagGroups:
- Variables
- Views
- Write
- Write data (v1-compatible)

View File

@ -1,5 +1,5 @@
plugins:
- '../../openapi/plugins/docs-plugin.js'
- '../../openapi/plugins/docs-plugin.cjs'
extends:
- recommended
- docs/all

View File

@ -1,6 +1,6 @@
- name: Using the Management API
tags:
- Authentication
- Examples
- Quickstart
- name: All endpoints
tags: []

View File

@ -7,10 +7,10 @@ info:
This documentation is generated from the
InfluxDB OpenAPI specification.
version: ''
license:
name: MIT
url: https://opensource.org/licenses/MIT
version: ''
contact:
name: InfluxData
url: https://www.influxdata.com
@ -31,7 +31,7 @@ tags:
- name: Authentication
x-traitTag: true
description: |
The InfluxDB Management API endpoints require the following credentials:
With InfluxDB 3 Cloud Dedicated, the InfluxDB Management API endpoints require the following credentials:
- `ACCOUNT_ID`: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the cluster belongs to. To view account ID and cluster ID, [list cluster details](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json).
- `CLUSTER_ID`: The ID of the [cluster](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that you want to manage. To view account ID and cluster ID, [list cluster details](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json).
@ -40,11 +40,12 @@ tags:
See how to [create a management token](/influxdb3/cloud-dedicated/admin/tokens/management/).
By default, management tokens in InfluxDB 3 are short-lived tokens issued by an OAuth2 identity provider that grant a specific user administrative access to your InfluxDB cluster. However, for automation purposes, you can manually create management tokens that authenticate directly with your InfluxDB cluster and do not require human interaction with your identity provider.
<!-- ReDoc-Inject: <security-definitions> -->
- name: Database tokens
description: Manage database read/write tokens for a cluster
- name: Databases
description: Manage databases for a cluster
- name: Example
- name: Quickstart
x-traitTag: true
description: |
The following example script shows how to use `curl` to make database and token management requests:
@ -397,6 +398,26 @@ paths:
post:
operationId: CreateClusterDatabase
summary: Create a database
description: |
Create a database for a cluster.
The database name must be unique within the cluster.
**Default maximum number of columns**: 250
**Default maximum number of tables**: 500
The retention period is specified in nanoseconds. For example, to set a retention period of 1 hour, use `3600000000000`.
InfluxDB Cloud Dedicated lets you define a [custom partitioning](/influxdb3/cloud-dedicated/admin/custom-partitions/) strategy for each database and table.
A _partition_ is a logical grouping of data stored in [Apache Parquet](https://parquet.apache.org/).
By default, data is partitioned by day,
but, depending on your schema and workload, customizing the partitioning
strategy can improve query performance.
To use custom partitioning, you define a [partition template](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/).
If a table doesn't have a custom partition template, it inherits the database's template.
The partition template is set at the time of database creation and cannot be changed later.
For more information, see [Custom partitions](/influxdb3/cloud-dedicated/admin/custom-partitions/).
tags:
- Databases
parameters:
@ -609,7 +630,7 @@ paths:
maxTables: 300
maxColumnsPerTable: 150
retentionPeriod: 600000000000
maxTablsOnly:
maxTablesOnly:
summary: Update Max Tables Only
value:
maxTables: 300
@ -660,7 +681,7 @@ paths:
maxTables: 300
maxColumnsPerTable: 150
retentionPeriod: 600000000000
maxTablsOnly:
maxTablesOnly:
summary: Update Max Tables Only
value:
accountId: 11111111-1111-4111-8111-111111111111
@ -779,6 +800,18 @@ paths:
post:
operationId: CreateClusterDatabaseTable
summary: Create a database table
description: |
Create a table. The database must already exist. With InfluxDB Cloud Dedicated, tables and measurements are synonymous.
Typically, tables are created automatically on write using the measurement name
specified in line protocol written to InfluxDB.
However, to apply a [custom partition template](/influxdb3/cloud-dedicated/admin/custom-partitions/)
to a table, you must manually [create the table with custom partitioning](/influxdb3/cloud-dedicated/admin/tables/#create-a-table-with-custom-partitioning) before you write any data to it.
Partitioning defaults to `%Y-%m-%d` (daily).
When a partition template is applied to a database, it becomes the default template
for all tables in that database, but can be overridden when creating a
table.
tags:
- Tables
parameters:
@ -942,6 +975,10 @@ paths:
$ref: '#/components/schemas/DatabaseTokenPermissions'
createdAt:
$ref: '#/components/schemas/DatabaseTokenCreatedAt'
expiresAt:
$ref: '#/components/schemas/DatabaseTokenExpiresAt'
revokedAt:
$ref: '#/components/schemas/DatabaseTokenRevokedAt'
required:
- accountId
- clusterId
@ -1045,6 +1082,8 @@ paths:
$ref: '#/components/schemas/DatabaseTokenDescription'
permissions:
$ref: '#/components/schemas/DatabaseTokenPermissions'
expiresAt:
$ref: '#/components/schemas/DatabaseTokenExpiresAt'
required:
- description
examples:
@ -1094,6 +1133,10 @@ paths:
$ref: '#/components/schemas/DatabaseTokenCreatedAt'
accessToken:
$ref: '#/components/schemas/DatabaseTokenAccessToken'
expiresAt:
$ref: '#/components/schemas/DatabaseTokenExpiresAt'
revokedAt:
$ref: '#/components/schemas/DatabaseTokenRevokedAt'
required:
- accountId
- clusterId
@ -1185,6 +1228,14 @@ paths:
get:
operationId: GetDatabaseToken
summary: Get a database token
description: |
Retrieve metadata details for a [database token](/influxdb3/cloud-dedicated/admin/tokens/database/).
#### Store secure tokens in a secret store
We recommend storing database tokens in a **secure secret store**.
Token strings are viewable _only_ on token creation and aren't stored by InfluxDB; you can't recover a lost token.
tags:
- Database tokens
parameters:
@ -1229,6 +1280,10 @@ paths:
$ref: '#/components/schemas/DatabaseTokenPermissions'
createdAt:
$ref: '#/components/schemas/DatabaseTokenCreatedAt'
expiresAt:
$ref: '#/components/schemas/DatabaseTokenExpiresAt'
revokedAt:
$ref: '#/components/schemas/DatabaseTokenRevokedAt'
required:
- accountId
- clusterId
@ -1299,6 +1354,8 @@ paths:
patch:
operationId: UpdateDatabaseToken
summary: Update a database token
description: |
Update the description and permissions of a [database token](/influxdb3/cloud-dedicated/admin/tokens/database/).
tags:
- Database tokens
parameters:
@ -1317,7 +1374,6 @@ paths:
- name: tokenId
in: path
description: The ID of the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) to update
required: true
schema:
$ref: '#/components/schemas/UuidV4'
requestBody:
@ -1385,6 +1441,10 @@ paths:
$ref: '#/components/schemas/DatabaseTokenPermissions'
createdAt:
$ref: '#/components/schemas/DatabaseTokenCreatedAt'
expiresAt:
$ref: '#/components/schemas/DatabaseTokenExpiresAt'
revokedAt:
$ref: '#/components/schemas/DatabaseTokenRevokedAt'
required:
- accountId
- clusterId
@ -1625,9 +1685,9 @@ components:
description: |
A template for [partitioning](/influxdb3/cloud-dedicated/admin/custom-partitions/) a cluster database.
Each template part is evaluated in sequence, concatinating the final
partition key from the output of each part, delimited by the partition
key delimiter `|`.
Each partition template part is evaluated in sequence.
The outputs from each part are concatenated with the
`|` delimiter to form the final partition key.
For example, using the partition template below:
@ -1834,6 +1894,18 @@ components:
examples:
- '2023-12-21T17:32:28.000Z'
- '2024-03-02T04:20:19.000Z'
DatabaseTokenExpiresAt:
description: |
The date and time that the database token expires, if applicable.
Uses RFC3339 format.
$ref: '#/components/schemas/DateTimeRfc3339'
DatabaseTokenRevokedAt:
description: |
The date and time that the database token was revoked, if applicable.
Uses RFC3339 format.
$ref: '#/components/schemas/DateTimeRfc3339'
DatabaseTokenAccessToken:
description: |
The access token that can be used to authenticate query and write requests to the cluster
@ -1944,7 +2016,7 @@ x-tagGroups:
- name: Using the Management API
tags:
- Authentication
- Examples
- Quickstart
- name: All endpoints
tags:
- Database tokens

View File

@ -66,7 +66,22 @@ paths:
schema:
type: string
required: true
description: Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy.
description: |
The database to write to.
**Database targeting:** In Cloud Dedicated, databases can be named using the `database_name/retention_policy_name` convention for InfluxQL compatibility. Cloud Dedicated does not use DBRP mappings. The `db` and `rp` parameters are used to construct the target database name following this naming convention.
**Auto-creation behavior:** Cloud Dedicated requires databases to be created before writing data. The v1 `/write` API does not automatically create databases. If the specified
database does not exist, the write request will fail.
**Authentication:** Requires a valid API token with _write_ permissions for the target database.
### Related
- [Write data to InfluxDB Cloud Dedicated](/influxdb3/cloud-dedicated/write-data/)
- [Manage databases in InfluxDB Cloud Dedicated](/influxdb3/cloud-dedicated/admin/databases/)
- [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention)
- [InfluxQL data retention policy mapping differences](/influxdb3/cloud-serverless/guides/prototype-evaluation/#influxql-data-retention-policy-mapping-differences)
- in: query
name: rp
schema:
@ -137,6 +152,160 @@ paths:
schema:
$ref: '#/components/schemas/Error'
/query:
get:
operationId: GetQueryV1
tags:
- Query
summary: Query using the InfluxDB v1 HTTP API
parameters:
- $ref: '#/components/parameters/TraceSpan'
- $ref: '#/components/parameters/AuthUserV1'
- $ref: '#/components/parameters/AuthPassV1'
- in: header
name: Accept
schema:
type: string
description: Specifies how query results should be encoded in the response. **Note:** With `application/csv`, query results include epoch timestamps instead of RFC3339 timestamps.
default: application/json
enum:
- application/json
- application/csv
- text/csv
- application/x-msgpack
- in: header
name: Accept-Encoding
description: The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand.
schema:
type: string
description: Specifies that the query response in the body should be encoded with gzip or not encoded with identity.
default: identity
enum:
- gzip
- identity
- in: query
name: chunked
description: |
If true, the response is divided into chunks of size `chunk_size`.
schema:
type: boolean
default: false
- in: query
name: chunk_size
description: |
The number of records that will go into a chunk.
This parameter is only used if `chunked=true`.
schema:
type: integer
default: 10000
- in: query
name: db
schema:
type: string
required: true
description: The database to query from.
- in: query
name: pretty
description: |
If true, the JSON response is formatted in a human-readable format.
schema:
type: boolean
default: false
- in: query
name: q
description: Defines the InfluxQL query to run.
required: true
schema:
type: string
- in: query
name: rp
schema:
type: string
description: |
The retention policy name for InfluxQL compatibility.
Optional parameter that, when combined with the `db` parameter, forms the complete database name to query. In InfluxDB Cloud Dedicated, databases can be named using the
`database_name/retention_policy_name` convention for InfluxQL compatibility.
When a request specifies both `db` and `rp`, Cloud Dedicated combines them as `db/rp` to target the database--for example:
- If `db=mydb` and `rp=autogen`, the query targets the database named `mydb/autogen`
- If only `db=mydb` is provided (no `rp`), the query targets the database named `mydb`
Unlike InfluxDB v1 and Cloud Serverless, Cloud Dedicated does not use DBRP mappings or separate retention policy objects. This parameter exists solely for v1 API
compatibility and database naming conventions.
_Note: The retention policy name does not control data retention in Cloud Dedicated. Data retention is determined by the database's **retention period** setting._
### Related
- [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention)
- [InfluxQL data retention policy mapping differences](/influxdb3/cloud-serverless/guides/prototype-evaluation/#influxql-data-retention-policy-mapping-differences)
- name: epoch
description: |
Formats timestamps as unix (epoch) timestamps with the specified precision
instead of RFC3339 timestamps with nanosecond precision.
in: query
schema:
type: string
enum:
- h
- m
- s
- ms
- u
- µ
- ns
responses:
'200':
description: Query results
headers:
Content-Encoding:
description: The Content-Encoding entity header is used to compress the media-type. When present, its value indicates which encodings were applied to the entity-body
schema:
type: string
description: Specifies that the response in the body is encoded with gzip or not encoded with identity.
default: identity
enum:
- gzip
- identity
Trace-Id:
description: The Trace-Id header reports the request's trace ID, if one was generated.
schema:
type: string
description: Specifies the request's trace ID.
content:
application/csv:
schema:
$ref: '#/components/schemas/InfluxQLCSVResponse'
text/csv:
schema:
$ref: '#/components/schemas/InfluxQLCSVResponse'
application/json:
schema:
$ref: '#/components/schemas/InfluxQLResponse'
examples:
influxql-chunk_size_2:
value: |
{"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:55Z",90,"1"],["2016-05-19T18:37:56Z",90,"1"]],"partial":true}],"partial":true}]}
{"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:57Z",90,"1"],["2016-05-19T18:37:58Z",90,"1"]]}]}]}
application/x-msgpack:
schema:
type: string
format: binary
'429':
description: Token is temporarily over quota. The Retry-After header describes when to try the read again.
headers:
Retry-After:
description: A non-negative decimal integer indicating the seconds to delay after the response is received.
schema:
type: integer
format: int32
default:
description: Error processing query
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
post:
operationId: PostQueryV1
tags:
@ -148,6 +317,83 @@ paths:
text/plain:
schema:
type: string
application/json:
schema:
type: object
properties:
db:
type: string
description: |
The database name for InfluxQL queries.
Required parameter that specifies the database to query.
In InfluxDB Cloud Dedicated, this can be either:
- A simple database name (for example, `mydb`)
- The database portion of a `database_name/retention_policy_name` naming convention (used together with the `rp` parameter)
When used alone, `db` specifies the complete database name to query. When used with the `rp` parameter, they combine to form the full database name as `db/rp`--for example, if `db=mydb` and `rp=autogen`, the query targets the database named `mydb/autogen`.
Unlike InfluxDB Cloud Serverless, Cloud Dedicated does not use DBRP mappings. The database name directly corresponds to an existing database in your Cloud Dedicated cluster.
Examples:
- `db=mydb` - queries the database named `mydb`
- `db=mydb` with `rp=autogen` - queries the database named `mydb/autogen`
_Note: The specified database must exist in your Cloud Dedicated cluster. Queries will fail if the database does not exist._
### Related
- [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention)
- [Migrate data from InfluxDB 1.x to Cloud Dedicated](/influxdb3/cloud-dedicated/guides/migrate-data/migrate-1x-to-cloud-dedicated/)
- [InfluxQL data retention policy mapping differences between InfluxDB Cloud Dedicated and Cloud Serverless](/influxdb3/cloud-serverless/guides/prototype-evaluation/#influxql-data-retention-policy-mapping-differences)
rp:
description: |
The retention policy name for InfluxQL compatibility
Optional parameter that, when combined with the db parameter, forms the complete database name to query. In InfluxDB Cloud Dedicated, databases can be named using the
`database_name/retention_policy_name` convention for InfluxQL compatibility.
When a request specifies both `db` and `rp`, Cloud Dedicated combines them as `db/rp` to target the database--for example:
- If `db=mydb` and `rp=autogen`, the query targets the database named `mydb/autogen`
- If only `db=mydb` is provided (no `rp`), the query targets the database named `mydb`
Unlike InfluxDB v1 and Cloud Serverless, Cloud Dedicated does not use DBRP mappings or separate retention policy objects. This parameter exists solely for v1 API
compatibility and database naming conventions.
_Note: The retention policy name does not control data retention in Cloud Dedicated. Data retention is determined by the database's **retention period** setting._
### Related
- [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention)
- [Migrate data from InfluxDB 1.x to Cloud Dedicated](/influxdb3/cloud-dedicated/guides/migrate-data/migrate-1x-to-cloud-dedicated/)
- [InfluxQL data retention policy mapping differences](/influxdb3/cloud-serverless/guides/prototype-evaluation/#influxql-data-retention-policy-mapping-differences)
type: string
q:
description: Defines the InfluxQL query to run.
type: string
chunked:
description: |
If true, the response is divided into chunks of size `chunk_size`.
type: boolean
chunk_size:
description: |
The number of records that will go into a chunk.
This parameter is only used if `chunked=true`.
type: integer
default: 10000
epoch:
description: |
A unix timestamp precision.
type: string
enum:
- h
- m
- s
- ms
- u
- µ
- ns
parameters:
- $ref: '#/components/parameters/TraceSpan'
- $ref: '#/components/parameters/AuthUserV1'
@ -184,7 +430,7 @@ paths:
schema:
type: string
required: true
description: Bucket to query.
description: Database to query.
- in: query
name: rp
schema:

View File

@ -63,12 +63,14 @@ tags:
name: API compatibility
x-traitTag: true
- description: |
Use one of the following schemes to authenticate to the InfluxDB API:
Depending on your workflow, use one of the following schemes to authenticate to the InfluxDB 3 API:
- [Bearer authentication](#section/Authentication/BearerAuthentication)
- [Token authentication](#section/Authentication/TokenAuthentication)
- [Basic authentication](#section/Authentication/BasicAuthentication)
- [Querystring authentication](#section/Authentication/QuerystringAuthentication)
| Authentication scheme | Works with |
|:-------------------|:-----------|
| [Bearer authentication](#section/Authentication/BearerAuthentication) | All endpoints |
| [Token authentication](#section/Authentication/TokenAuthentication) | v1, v2 endpoints |
| [Basic authentication](#section/Authentication/BasicAuthentication) | v1 endpoints |
| [Querystring authentication](#section/Authentication/QuerystringAuthentication) | v1 endpoints |
<!-- ReDoc-Inject: <security-definitions> -->
name: Authentication
x-traitTag: true
@ -1097,7 +1099,7 @@ components:
Default is [`RFC3339` date/time format](/influxdb3/cloud-dedicated/reference/glossary/#rfc3339-timestamp).
To include nanoseconds in timestamps, use `RFC3339Nano`.
#### Example formatted date/time values
### Example formatted date/time values
| Format | Value |
|:------------|:----------------------------|
@ -1978,61 +1980,45 @@ components:
type: string
securitySchemes:
BasicAuthentication:
type: http
scheme: basic
description: |
### Basic authentication scheme
Use the `Authorization` header with the `Basic` scheme to authenticate v1 API requests.
Use the `Authorization` header with the `Basic` scheme to authenticate v1 API `/write` and `/query` requests.
When authenticating requests, InfluxDB 3 Cloud Dedicated checks that the `password` part of the decoded credential is an authorized [database token](/influxdb3/cloud-dedicated/admin/tokens/).
InfluxDB 3 Cloud Dedicated ignores the `username` part of the decoded credential.
Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints in InfluxDB 3.
### Syntax
```http
Authorization: Basic <base64-encoded [USERNAME]:DATABASE_TOKEN>
```
Replace the following:
- **`[USERNAME]`**: an optional string value (ignored by InfluxDB 3 Cloud Dedicated).
- **`DATABASE_TOKEN`**: a [database token](/influxdb3/cloud-dedicated/admin/tokens/).
- Encode the `[USERNAME]:DATABASE_TOKEN` credential using base64 encoding, and then append the encoded string to the `Authorization: Basic` header.
When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an authorized token
and ignores the `username` part of the decoded credential.
### Example
The following example shows how to use cURL with the `Basic` authentication scheme and a [database token](/influxdb3/cloud-dedicated/admin/tokens/):
```sh
#######################################
# Use Basic authentication with a database token
# to query the InfluxDB v1 HTTP API
#######################################
# Use the --user option with `--user username:DATABASE_TOKEN` syntax
#######################################
curl --get "http://cluster-id.a.influxdb.io/query" \
```bash
curl "https://cluster-id.a.influxdb.io/write?db=DATABASE_NAME&precision=s" \
--user "":"DATABASE_TOKEN" \
--data-urlencode "db=DATABASE_NAME" \
--data-urlencode "q=SELECT * FROM MEASUREMENT"
--header "Content-type: text/plain; charset=utf-8" \
--data-binary 'home,room=kitchen temp=72 1641024000'
```
Replace the following:
- **`DATABASE_NAME`**: your InfluxDB 3 Cloud Dedicated database
- **`DATABASE_TOKEN`**: a [database token](/influxdb3/cloud-dedicated/admin/tokens/) with sufficient permissions to the database
scheme: basic
type: http
- **`DATABASE_TOKEN`**: a database token with sufficient permissions to the database
#### Related guides
- [Authenticate v1 API requests](/influxdb3/cloud-dedicated/guides/api-compatibility/v1/)
- [Manage tokens](/influxdb3/cloud-dedicated/admin/tokens/)
QuerystringAuthentication:
type: apiKey
in: query
name: u=&p=
description: |
Use the Querystring authentication
scheme with InfluxDB 1.x API parameters to provide credentials through the query string.
Use InfluxDB 1.x API parameters to provide credentials through the query string for v1 API requests.
### Query string authentication
Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints.
In the URL, pass the `p` query parameter to authenticate `/write` and `/query` requests.
When authenticating requests, InfluxDB 3 Cloud Dedicated checks that `p` (_password_) is an authorized database token and ignores the `u` (_username_) parameter.
When authenticating requests, InfluxDB 3 checks that the `p` (_password_) query parameter is an authorized token
and ignores the `u` (_username_) query parameter.
### Syntax
@ -2041,11 +2027,20 @@ components:
https://cluster-id.a.influxdb.io/write/?[u=any]&p=DATABASE_TOKEN
```
### Example
### Examples
The following example shows how to use cURL with query string authentication and a [database token](/influxdb3/cloud-dedicated/admin/tokens/).
```bash
curl "https://cluster-id.a.influxdb.io/write?db=DATABASE_NAME&precision=s&p=DATABASE_TOKEN" \
--header "Content-type: text/plain; charset=utf-8" \
--data-binary 'home,room=kitchen temp=72 1641024000'
```
```sh
Replace the following:
- **`DATABASE_NAME`**: your InfluxDB 3 Cloud Dedicated database
- **`DATABASE_TOKEN`**: a database token with sufficient permissions to the database
```bash
#######################################
# Use an InfluxDB 1.x compatible username and password
# to query the InfluxDB v1 HTTP API
@ -2062,16 +2057,23 @@ components:
Replace the following:
- **`DATABASE_NAME`**: your InfluxDB 3 Cloud Dedicated database
- **`DATABASE_TOKEN`**: a [database token](/influxdb3/cloud-dedicated/admin/tokens/) with sufficient permissions to the database
- **`DATABASE_NAME`**: the database to query
- **`DATABASE_TOKEN`**: a database token with sufficient permissions to the database
#### Related guides
- [Authenticate v1 API requests](/influxdb3/cloud-dedicated/guides/api-compatibility/v1/)
- [Manage tokens](/influxdb3/cloud-dedicated/admin/tokens/)
BearerAuthentication:
type: http
scheme: bearer
bearerFormat: JWT
description: |
Use the OAuth Bearer authentication
scheme to authenticate to the InfluxDB API.
Use the OAuth Bearer authentication
scheme to provide an authorization token to InfluxDB 3.
Bearer authentication works with all endpoints.
In your API requests, send an `Authorization` header.
For the header value, provide the word `Bearer` followed by a space and a database token.
@ -2080,29 +2082,20 @@ components:
### Syntax
```http
Authorization: Bearer INFLUX_TOKEN
Authorization: Bearer DATABASE_TOKEN
```
### Example
```sh
########################################################
# Use the Bearer token authentication scheme with /api/v2/write
# to write data.
########################################################
curl --request post "https://cluster-id.a.influxdb.io/api/v2/write?bucket=DATABASE_NAME&precision=s" \
--header "Authorization: Bearer DATABASE_TOKEN" \
--data-binary 'home,room=kitchen temp=72 1463683075'
```bash
curl https://cluster-id.a.influxdb.io/api/v3/query_influxql \
--header "Authorization: Bearer DATABASE_TOKEN"
```
For examples and more information, see the following:
- [Authenticate API requests](/influxdb3/cloud-dedicated/primers/api/v2/#authenticate-api-requests)
- [Manage tokens](/influxdb3/cloud-dedicated/admin/tokens/)
TokenAuthentication:
description: |
Use the Token authentication
scheme to authenticate to the InfluxDB API.
Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB 3.
The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB 3.
In your API requests, send an `Authorization` header.
For the header value, provide the word `Token` followed by a space and a database token.
@ -2111,7 +2104,7 @@ components:
### Syntax
```http
Authorization: Token INFLUX_API_TOKEN
Authorization: Token DATABASE_TOKEN
```
### Example
@ -2129,7 +2122,6 @@ components:
### Related guides
- [Authenticate API requests](/influxdb3/cloud-dedicated/primers/api/v2/#authenticate-api-requests)
- [Manage tokens](/influxdb3/cloud-dedicated/admin/tokens/)
in: header
name: Authorization

View File

@ -1,5 +1,5 @@
plugins:
- '../../openapi/plugins/docs-plugin.js'
- '../../openapi/plugins/docs-plugin.cjs'
extends:
- recommended
- docs/all

View File

@ -65,7 +65,7 @@ paths:
schema:
type: string
required: true
description: Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy.
description: Database to write to. If none exists, InfluxDB creates a database with a default 3-day retention policy.
- in: query
name: rp
schema:
@ -136,6 +136,188 @@ paths:
schema:
$ref: '#/components/schemas/Error'
/query:
get:
operationId: GetQueryV1
tags:
- Query
summary: Query using the InfluxDB v1 HTTP API
parameters:
- $ref: '#/components/parameters/TraceSpan'
- $ref: '#/components/parameters/AuthUserV1'
- $ref: '#/components/parameters/AuthPassV1'
- in: header
name: Accept
schema:
type: string
description: Specifies how query results should be encoded in the response. **Note:** With `application/csv`, query results include epoch timestamps instead of RFC3339 timestamps.
default: application/json
enum:
- application/json
- application/csv
- text/csv
- application/x-msgpack
- in: header
name: Accept-Encoding
description: The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand.
schema:
type: string
description: Specifies that the query response in the body should be encoded with gzip or not encoded with identity.
default: identity
enum:
- gzip
- identity
- in: query
name: chunked
description: |
If true, the response is divided into chunks of size `chunk_size`.
schema:
type: boolean
default: false
- in: query
name: chunk_size
description: |
The number of records that will go into a chunk.
This parameter is only used if `chunked=true`.
schema:
type: integer
default: 10000
- in: query
name: db
schema:
type: string
required: true
description: |
The database name for InfluxQL queries
Required parameter that specifies the database to query via DBRP (Database Retention Policy) mapping. In Cloud Serverless, this parameter is used together with DBRP
mappings to identify which bucket to query.
The `db` parameter (optionally combined with `rp`) must have an existing DBRP mapping that points to a bucket. Without a valid DBRP mapping, queries will fail with an
authorization error.
**DBRP mapping requirements:**
- A DBRP mapping must exist before querying
- Mappings can be created automatically when writing data with the v1 API (if your token has permissions)
- Mappings can be created manually using the InfluxDB CLI or API
### Examples
- `db=mydb` - uses the default DBRP mapping for `mydb`
- `db=mydb` with `rp=weekly` - uses the DBRP mapping for `mydb/weekly`
_Note: Unlike the v1 `/write` endpoint which can auto-create buckets and mappings, the `/query` endpoint requires pre-existing DBRP mappings. The actual data is stored in and
queried from the bucket that the DBRP mapping points to._
### Related
- [Use the InfluxDB v1 query API and InfluxQL in Cloud Serverless](/influxdb3/cloud-serverless/query-data/execute-queries/v1-http/)
- [Map v1 databases and retention policies to buckets in Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v1/#map-v1-databases-and-retention-policies-to-buckets)
- [Migrate from InfluxDB 1.x to Cloud Serverless](/influxdb3/cloud-serverless/guides/migrate-data/migrate-1x-to-serverless/)
- in: query
name: pretty
description: |
If true, the JSON response is formatted in a human-readable format.
schema:
type: boolean
default: false
- in: query
name: q
description: Defines the InfluxQL query to run.
required: true
schema:
type: string
- in: query
name: rp
schema:
type: string
description: |
The retention policy name for InfluxQL queries
Optional parameter that specifies the retention policy to use when querying data with InfluxQL. In Cloud Serverless, this parameter works with DBRP (Database Retention
Policy) mappings to identify the target bucket.
When provided together with the `db` parameter, Cloud Serverless uses the DBRP mapping to determine which bucket to query. The combination of `db` and `rp` must have an
existing DBRP mapping that points to a bucket. If no `rp` is specified, Cloud Serverless uses the default retention policy mapping for the database.
Requirements: A DBRP mapping must exist for the db/rp combination before you can query data. DBRP mappings can be created:
- Automatically when writing data with the v1 API (if your token has sufficient permissions)
- Manually using the InfluxDB CLI or API
Example: If `db=mydb` and `rp=weekly`, the query uses the DBRP mapping for `mydb/weekly` to determine which bucket to query.
_Note: The retention policy name is used only for DBRP mapping. Actual data retention is controlled by the target bucket's retention period setting, not by the retention
policy name._
### Related
- [Use the InfluxDB v1 query API and InfluxQL in Cloud Serverless](/influxdb3/cloud-serverless/query-data/execute-queries/v1-http/)
- [Map v1 databases and retention policies to buckets in Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v1/#map-v1-databases-and-retention-policies-to-buckets)
- [Migrate from InfluxDB 1.x to Cloud Serverless](/influxdb3/cloud-serverless/guides/migrate-data/migrate-1x-to-serverless/)
- name: epoch
description: |
Formats timestamps as unix (epoch) timestamps with the specified precision
instead of RFC3339 timestamps with nanosecond precision.
in: query
schema:
type: string
enum:
- h
- m
- s
- ms
- u
- µ
- ns
responses:
'200':
description: Query results
headers:
Content-Encoding:
description: The Content-Encoding header indicates which encodings (usually compression algorithms) were applied to the response body.
schema:
type: string
description: Specifies that the response in the body is encoded with gzip or not encoded with identity.
default: identity
enum:
- gzip
- identity
Trace-Id:
description: The Trace-Id header reports the request's trace ID, if one was generated.
schema:
type: string
description: Specifies the request's trace ID.
content:
application/csv:
schema:
$ref: '#/components/schemas/InfluxQLCSVResponse'
text/csv:
schema:
$ref: '#/components/schemas/InfluxQLCSVResponse'
application/json:
schema:
$ref: '#/components/schemas/InfluxQLResponse'
examples:
influxql-chunk_size_2:
value: |
{"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:55Z",90,"1"],["2016-05-19T18:37:56Z",90,"1"]],"partial":true}],"partial":true}]}
{"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:57Z",90,"1"],["2016-05-19T18:37:58Z",90,"1"]]}]}]}
application/x-msgpack:
schema:
type: string
format: binary
'429':
description: Token is temporarily over quota. The Retry-After header describes when to try the read again.
headers:
Retry-After:
description: A non-negative decimal integer indicating the seconds to delay after the response is received.
schema:
type: integer
format: int32
default:
description: Error processing query
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
post:
operationId: PostQueryV1
tags:
@ -147,6 +329,87 @@ paths:
text/plain:
schema:
type: string
application/json:
schema:
type: object
properties:
db:
type: string
description: |
The database name for InfluxQL queries
Required parameter that specifies the database to query via DBRP (Database Retention Policy) mapping. In Cloud Serverless, this parameter is used together with DBRP
mappings to identify which bucket to query.
The `db` parameter (optionally combined with `rp`) must have an existing DBRP mapping that points to a bucket. Without a valid DBRP mapping, queries will fail with an
authorization error.
**DBRP mapping requirements:**
- A DBRP mapping must exist before querying
- Mappings can be created automatically when writing data with the v1 API (if your token has permissions)
- Mappings can be created manually using the InfluxDB CLI or API
### Examples
- `db=mydb` - uses the default DBRP mapping for `mydb`
- `db=mydb` with `rp=weekly` - uses the DBRP mapping for `mydb/weekly`
_Note: Unlike the v1 `/write` endpoint which can auto-create buckets and mappings, the `/query` endpoint requires pre-existing DBRP mappings. The actual data is stored in and
queried from the bucket that the DBRP mapping points to._
### Related
- [Execute InfluxQL queries using the v1 API](/influxdb3/cloud-serverless/query-data/execute-queries/influxql/api/v1-http/)
- [Map v1 databases and retention policies to buckets in Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v1/#map-v1-databases-and-retention-policies-to-buckets)
- [Manage DBRP mappings in Cloud Serverless](/influxdb3/cloud-serverless/admin/dbrp/)
rp:
description: |
The retention policy name for InfluxQL queries
Optional parameter that specifies the retention policy to use when querying data with InfluxQL. In Cloud Serverless, this parameter works with DBRP (Database Retention
Policy) mappings to identify the target bucket.
When provided together with the `db` parameter, Cloud Serverless uses the DBRP mapping to determine which bucket to query. The combination of `db` and `rp` must have an
existing DBRP mapping that points to a bucket. If no `rp` is specified, Cloud Serverless uses the default retention policy mapping for the database.
Requirements: A DBRP mapping must exist for the db/rp combination before you can query data. DBRP mappings can be created:
- Automatically when writing data with the v1 API (if your token has sufficient permissions)
- Manually using the InfluxDB CLI or API
Example: If `db=mydb` and `rp=weekly`, the query uses the DBRP mapping for `mydb/weekly` to determine which bucket to query.
_Note: The retention policy name is used only for DBRP mapping. Actual data retention is controlled by the target bucket's retention period setting, not by the retention policy name._
### Related
- [Execute InfluxQL queries using the v1 API](/influxdb3/cloud-serverless/query-data/execute-queries/influxql/api/v1-http/)
- [Map v1 databases and retention policies to buckets in Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v1/#map-v1-databases-and-retention-policies-to-buckets)
- [Manage DBRP mappings in Cloud Serverless](/influxdb3/cloud-serverless/admin/dbrp/)
type: string
q:
description: Defines the InfluxQL query to run.
type: string
chunked:
description: |
If true, the response is divided into chunks of size `chunk_size`.
type: boolean
chunk_size:
description: |
The number of records that will go into a chunk.
This parameter is only used if `chunked=true`.
type: integer
default: 10000
epoch:
description: |
A unix timestamp precision.
type: string
enum:
- h
- m
- s
- ms
- u
- µ
- ns
parameters:
- $ref: '#/components/parameters/TraceSpan'
- $ref: '#/components/parameters/AuthUserV1'

View File

@ -9414,7 +9414,7 @@ components:
Default is [`RFC3339` date/time format](/influxdb3/cloud-serverless/reference/glossary/#rfc3339-timestamp).
To include nanoseconds in timestamps, use `RFC3339Nano`.
#### Example formatted date/time values
### Example formatted date/time values
| Format | Value |
|:------------|:----------------------------|

View File

@ -1,11 +1,13 @@
plugins:
- '../../openapi/plugins/docs-plugin.js'
- '../../openapi/plugins/docs-plugin.cjs'
extends:
- recommended
- docs/all
x-influxdata-product-name: InfluxDB 3 Clustered
apis:
management@0:
root: management/openapi.yml
v2@2:
root: v2/ref.yml
x-influxdata-docs-aliases:

View File

@ -0,0 +1,15 @@
title: InfluxDB 3 Clustered Management API
x-influxdata-short-title: Management API
description: |
The Management API for InfluxDB 3 Clustered provides a programmatic interface for managing an InfluxDB 3 cluster.
The Management API lets you integrate functions such as creating and managing databases, permissions, and tokens into your workflow or application.
This documentation is generated from the
InfluxDB 3 Clustered Management API OpenAPI specification.
license:
name: MIT
url: 'https://opensource.org/licenses/MIT'
contact:
name: InfluxData
url: https://www.influxdata.com
email: support@influxdata.com

View File

@ -0,0 +1,8 @@
- url: 'https://{baseurl}/api/v0'
description: InfluxDB 3 Clustered Management API URL
variables:
baseurl:
enum:
- 'console.influxdata.com'
default: 'console.influxdata.com'
description: InfluxDB 3 Clustered Console URL

View File

@ -0,0 +1,6 @@
- name: Using the Management API
tags:
- Authentication
- Quickstart
- name: All endpoints
tags: []

File diff suppressed because it is too large Load Diff

View File

@ -65,7 +65,23 @@ paths:
schema:
type: string
required: true
description: Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy.
description: |
The database to write to.
**Database targeting:** In InfluxDB Clustered, databases can be named using the `database_name/retention_policy_name` convention for InfluxQL compatibility. InfluxDB Clustered does not use DBRP mappings. The `db` and `rp` parameters are used to construct the target database name following this naming convention.
**Auto-creation behavior:** InfluxDB Clustered requires databases to be created before writing data. The v1 `/write` API does not automatically create databases. If the specified
database does not exist, the write request will fail.
Authentication: Requires a valid API token with _write_ permissions for the target database.
### Related
- [Write data to InfluxDB Clustered](/influxdb3/clustered/write-data/)
- [Use the InfluxDB v1 API with InfluxDB Clustered](/influxdb3/clustered/guides/api-compatibility/v1/)
- [Manage databases in InfluxDB Clustered](/influxdb3/clustered/admin/databases/)
- [InfluxQL DBRP naming convention in InfluxDB Clustered](/influxdb3/clustered/admin/databases/create/#influxql-dbrp-naming-convention)
- [Migrate data from InfluxDB v1 to InfluxDB Clustered](/influxdb3/clustered/guides/migrate-data/migrate-1x-to-clustered/)
- in: query
name: rp
schema:
@ -136,6 +152,141 @@ paths:
schema:
$ref: '#/components/schemas/Error'
/query:
get:
operationId: GetQueryV1
tags:
- Query
summary: Query using the InfluxDB v1 HTTP API
parameters:
- $ref: '#/components/parameters/TraceSpan'
- $ref: '#/components/parameters/AuthUserV1'
- $ref: '#/components/parameters/AuthPassV1'
- in: header
name: Accept
schema:
type: string
description: Specifies how query results should be encoded in the response. **Note:** With `application/csv`, query results include epoch timestamps instead of RFC3339 timestamps.
default: application/json
enum:
- application/json
- application/csv
- text/csv
- application/x-msgpack
- in: header
name: Accept-Encoding
description: The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand.
schema:
type: string
description: Specifies that the query response in the body should be encoded with gzip or not encoded with identity.
default: identity
enum:
- gzip
- identity
- in: query
name: chunked
description: |
If true, the response is divided into chunks of size `chunk_size`.
schema:
type: boolean
default: false
- in: query
name: chunk_size
description: |
The number of records that will go into a chunk.
This parameter is only used if `chunked=true`.
schema:
type: integer
default: 10000
- in: query
name: db
schema:
type: string
required: true
description: The database to query from.
- in: query
name: pretty
description: |
If true, the JSON response is formatted in a human-readable format.
schema:
type: boolean
default: false
- in: query
name: q
description: Defines the InfluxQL query to run.
required: true
schema:
type: string
- in: query
name: rp
schema:
type: string
description: Retention policy name.
- name: epoch
description: |
Formats timestamps as unix (epoch) timestamps with the specified precision
instead of RFC3339 timestamps with nanosecond precision.
in: query
schema:
type: string
enum:
- h
- m
- s
- ms
- u
- µ
- ns
responses:
'200':
description: Query results
headers:
Content-Encoding:
description: The Content-Encoding header indicates which encodings (usually compression algorithms) were applied to the response body.
schema:
type: string
description: Specifies that the response in the body is encoded with gzip or not encoded with identity.
default: identity
enum:
- gzip
- identity
Trace-Id:
description: The Trace-Id header reports the request's trace ID, if one was generated.
schema:
type: string
description: Specifies the request's trace ID.
content:
application/csv:
schema:
$ref: '#/components/schemas/InfluxQLCSVResponse'
text/csv:
schema:
$ref: '#/components/schemas/InfluxQLCSVResponse'
application/json:
schema:
$ref: '#/components/schemas/InfluxQLResponse'
examples:
influxql-chunk_size_2:
value: |
{"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:55Z",90,"1"],["2016-05-19T18:37:56Z",90,"1"]],"partial":true}],"partial":true}]}
{"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:57Z",90,"1"],["2016-05-19T18:37:58Z",90,"1"]]}]}]}
application/x-msgpack:
schema:
type: string
format: binary
'429':
description: Token is temporarily over quota. The Retry-After header describes when to try the read again.
headers:
Retry-After:
description: A non-negative decimal integer indicating the seconds to delay after the response is received.
schema:
type: integer
format: int32
default:
description: Error processing query
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
post:
operationId: PostQueryV1
tags:
@ -147,6 +298,64 @@ paths:
text/plain:
schema:
type: string
application/json:
schema:
type: object
properties:
db:
type: string
description: Database to query.
rp:
description: |
The retention policy name for InfluxQL compatibility
Optional parameter that, when combined with the db parameter, forms the complete database name to query. In InfluxDB Clustered, databases can be named using the
`database_name/retention_policy_name` convention for InfluxQL compatibility.
When a request specifies both `db` and `rp`, InfluxDB Clustered combines them as `db/rp` to target the database--for example:
- If `db=mydb` and `rp=autogen`, the query targets the database named `mydb/autogen`
- If only `db=mydb` is provided (no `rp`), the query targets the database named `mydb`
Unlike InfluxDB v1 and Cloud Serverless, InfluxDB Clustered does not use DBRP mappings or separate retention policy objects. This parameter exists solely for v1 API
compatibility and database naming conventions.
Note: The retention policy name does not control data retention in InfluxDB Clustered. Data retention is determined by the database's _retention period_ setting.
### Related
- [Use the v1 query API and InfluxQL to query data in InfluxDB Clustered](/influxdb3/clustered/query-data/execute-queries/influxdb-v1-api/)
- [Use the InfluxDB v1 API with InfluxDB Clustered](/influxdb3/clustered/guides/api-compatibility/v1/)
- [Manage databases in InfluxDB Clustered](/influxdb3/clustered/admin/databases/)
- [InfluxQL DBRP naming convention in InfluxDB Clustered](/influxdb3/clustered/admin/databases/create/#influxql-dbrp-naming-convention)
- [Migrate data from InfluxDB v1 to InfluxDB Clustered](/influxdb3/clustered/guides/migrate-data/migrate-1x-to-clustered/)
type: string
q:
description: |
Defines the InfluxQL query to run.
type: string
chunked:
description: |
If true, the response is divided into chunks of size `chunk_size`.
type: boolean
chunk_size:
description: |
The number of records that will go into a chunk.
This parameter is only used if `chunked=true`.
type: integer
default: 10000
epoch:
description: |
A unix timestamp precision.
type: string
enum:
- h
- m
- s
- ms
- u
- µ
- ns
parameters:
- $ref: '#/components/parameters/TraceSpan'
- $ref: '#/components/parameters/AuthUserV1'

View File

@ -63,12 +63,14 @@ tags:
name: API compatibility
x-traitTag: true
- description: |
Use one of the following schemes to authenticate to the InfluxDB API:
Depending on your workflow, use one of the following schemes to authenticate to the InfluxDB 3 API:
- [Bearer authentication](#section/Authentication/BearerAuthentication)
- [Token authentication](#section/Authentication/TokenAuthentication)
- [Basic authentication](#section/Authentication/BasicAuthentication)
- [Querystring authentication](#section/Authentication/QuerystringAuthentication)
| Authentication scheme | Works with |
|:-------------------|:-----------|
| [Bearer authentication](#section/Authentication/BearerAuthentication) | All endpoints |
| [Token authentication](#section/Authentication/TokenAuthentication) | v1, v2 endpoints |
| [Basic authentication](#section/Authentication/BasicAuthentication) | v1 endpoints |
| [Querystring authentication](#section/Authentication/QuerystringAuthentication) | v1 endpoints |
<!-- ReDoc-Inject: <security-definitions> -->
name: Authentication
x-traitTag: true
@ -1074,7 +1076,7 @@ components:
Default is [`RFC3339` date/time format](/influxdb3/clustered/reference/glossary/#rfc3339-timestamp).
To include nanoseconds in timestamps, use `RFC3339Nano`.
#### Example formatted date/time values
### Example formatted date/time values
| Format | Value |
|:------------|:----------------------------|
@ -1955,12 +1957,15 @@ components:
type: string
securitySchemes:
BasicAuthentication:
type: http
scheme: basic
description: |
### Basic authentication scheme
Use the `Authorization` header with the `Basic` scheme to authenticate v1 API requests.
Use the `Authorization` header with the `Basic` scheme to authenticate v1 API `/write` and `/query` requests.
When authenticating requests, InfluxDB 3 Clustered checks that the `password` part of the decoded credential is an authorized [database token](/influxdb3/clustered/admin/tokens/#database-tokens).
InfluxDB 3 Clustered ignores the `username` part of the decoded credential.
Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints in InfluxDB 3.
When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an authorized token
and ignores the `username` part of the decoded credential.
### Syntax
@ -1968,61 +1973,57 @@ components:
Authorization: Basic <base64-encoded [USERNAME]:DATABASE_TOKEN>
```
Replace the following:
- **`[USERNAME]`**: an optional string value (ignored by InfluxDB 3 Clustered).
- **`DATABASE_TOKEN`**: a [database token](/influxdb3/clustered/admin/tokens/#database-tokens).
- Encode the `[USERNAME]:DATABASE_TOKEN` credential using base64 encoding, and then append the encoded string to the `Authorization: Basic` header.
### Example
The following example shows how to use cURL with the `Basic` authentication scheme and a [database token](/influxdb3/clustered/admin/tokens/#database-tokens):
```sh
#######################################
# Use Basic authentication with a database token
# to query the InfluxDB v1 HTTP API
#######################################
# Use the --user option with `--user username:DATABASE_TOKEN` syntax
#######################################
curl --get "http://cluster-id.a.influxdb.io/query" \
```bash
curl "http://cluster-host.com/write?db=DATABASE_NAME&precision=s" \
--user "":"DATABASE_TOKEN" \
--data-urlencode "db=DATABASE_NAME" \
--data-urlencode "q=SELECT * FROM MEASUREMENT"
--header "Content-type: text/plain; charset=utf-8" \
--data-binary 'home,room=kitchen temp=72 1641024000'
```
Replace the following:
- **`DATABASE_NAME`**: your InfluxDB 3 Clustered database
- **`DATABASE_TOKEN`**: a [database token](/influxdb3/clustered/admin/tokens/#database-tokens) with sufficient permissions to the database
scheme: basic
type: http
- **`DATABASE_TOKEN`**: a database token with sufficient permissions to the database
#### Related guides
- [Authenticate v1 API requests](/influxdb3/clustered/guides/api-compatibility/v1/)
- [Manage tokens](/influxdb3/clustered/admin/tokens/)
QuerystringAuthentication:
type: apiKey
in: query
name: u=&p=
description: |
Use the Querystring authentication
scheme with InfluxDB 1.x API parameters to provide credentials through the query string.
Use InfluxDB 1.x API parameters to provide credentials through the query string for v1 API requests.
### Query string authentication
Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints.
In the URL, pass the `p` query parameter to authenticate `/write` and `/query` requests.
When authenticating requests, InfluxDB 3 Clustered checks that `p` (_password_) is an authorized database token and ignores the `u` (_username_) parameter.
When authenticating requests, InfluxDB 3 checks that the `p` (_password_) query parameter is an authorized token
and ignores the `u` (_username_) query parameter.
### Syntax
```http
https://cluster-id.a.influxdb.io/query/?[u=any]&p=DATABASE_TOKEN
https://cluster-id.a.influxdb.io/write/?[u=any]&p=DATABASE_TOKEN
https://cluster-host.com/query/?[u=any]&p=DATABASE_TOKEN
https://cluster-host.com/write/?[u=any]&p=DATABASE_TOKEN
```
### Example
### Examples
The following example shows how to use cURL with query string authentication and a [database token](/influxdb3/clustered/admin/tokens/#database-tokens).
```bash
curl "http://cluster-host.com/write?db=DATABASE_NAME&precision=s&p=DATABASE_TOKEN" \
--header "Content-type: text/plain; charset=utf-8" \
--data-binary 'home,room=kitchen temp=72 1641024000'
```
```sh
Replace the following:
- **`DATABASE_NAME`**: your InfluxDB 3 Clustered database
- **`DATABASE_TOKEN`**: a database token with sufficient permissions to the database
```bash
#######################################
# Use an InfluxDB 1.x compatible username and password
# to query the InfluxDB v1 HTTP API
@ -2031,7 +2032,7 @@ components:
# ?p=DATABASE_TOKEN
#######################################
curl --get "https://cluster-id.a.influxdb.io/query" \
curl --get "https://cluster-host.com/query" \
--data-urlencode "p=DATABASE_TOKEN" \
--data-urlencode "db=DATABASE_NAME" \
--data-urlencode "q=SELECT * FROM MEASUREMENT"
@ -2039,16 +2040,23 @@ components:
Replace the following:
- **`DATABASE_NAME`**: your InfluxDB 3 Clustered database
- **`DATABASE_TOKEN`**: a [database token](/influxdb3/clustered/admin/tokens/#database-tokens) with sufficient permissions to the database
- **`DATABASE_NAME`**: the database to query
- **`DATABASE_TOKEN`**: a database token with sufficient permissions to the database
#### Related guides
- [Authenticate v1 API requests](/influxdb3/clustered/guides/api-compatibility/v1/)
- [Manage tokens](/influxdb3/clustered/admin/tokens/)
BearerAuthentication:
type: http
scheme: bearer
bearerFormat: JWT
description: |
Use the OAuth Bearer authentication
scheme to authenticate to the InfluxDB API.
Use the OAuth Bearer authentication
scheme to provide an authorization token to InfluxDB 3.
Bearer authentication works with all endpoints.
In your API requests, send an `Authorization` header.
For the header value, provide the word `Bearer` followed by a space and a database token.
@ -2057,29 +2065,20 @@ components:
### Syntax
```http
Authorization: Bearer INFLUX_TOKEN
Authorization: Bearer DATABASE_TOKEN
```
### Example
```sh
########################################################
# Use the Bearer token authentication scheme with /api/v2/write
# to write data.
########################################################
curl --request post "https://cluster-id.a.influxdb.io/api/v2/write?bucket=DATABASE_NAME&precision=s" \
--header "Authorization: Bearer DATABASE_TOKEN" \
--data-binary 'home,room=kitchen temp=72 1463683075'
```bash
curl http://cluster-host.com/api/v3/query_influxql \
--header "Authorization: Bearer DATABASE_TOKEN"
```
For examples and more information, see the following:
- [Authenticate API requests](/influxdb3/clustered/primers/api/v2/#authenticate-api-requests)
- [Manage tokens](/influxdb3/clustered/admin/tokens/)
TokenAuthentication:
description: |
Use the Token authentication
scheme to authenticate to the InfluxDB API.
Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB 3.
The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB 3.
In your API requests, send an `Authorization` header.
For the header value, provide the word `Token` followed by a space and a database token.
@ -2088,7 +2087,7 @@ components:
### Syntax
```http
Authorization: Token INFLUX_API_TOKEN
Authorization: Token DATABASE_TOKEN
```
### Example
@ -2099,14 +2098,13 @@ components:
# to write data.
########################################################
curl --request post "https://cluster-id.a.influxdb.io/api/v2/write?bucket=DATABASE_NAME&precision=s" \
curl --request post "https://cluster-host.com/api/v2/write?bucket=DATABASE_NAME&precision=s" \
--header "Authorization: Token DATABASE_TOKEN" \
--data-binary 'home,room=kitchen temp=72 1463683075'
```
### Related guides
- [Authenticate API requests](/influxdb3/clustered/primers/api/v2/#authenticate-api-requests)
- [Manage tokens](/influxdb3/clustered/admin/tokens/)
in: header
name: Authorization

View File

@ -1,5 +1,5 @@
plugins:
- '../../openapi/plugins/docs-plugin.js'
- '../../openapi/plugins/docs-plugin.cjs'
extends:
- recommended
- docs/all
@ -10,3 +10,7 @@ apis:
root: v3/ref.yml
x-influxdata-docs-aliases:
- /influxdb3/core/api/
- /influxdb3/core/api/v1/
- /influxdb3/core/api/v2/
- /influxdb3/core/api/v1-compatibility
- /influxdb3/core/api/v2-compatibility

View File

@ -2,11 +2,14 @@
tags:
- Quick start
- Authentication
- Cache data
- Common parameters
- Response codes
- Compatibility endpoints
- Data I/O
- Databases
- Database
- Processing engine
- Server information
- Tables
- Table
- Token
- Query data
- Write data

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +1,5 @@
plugins:
- '../../openapi/plugins/docs-plugin.js'
- '../../openapi/plugins/docs-plugin.cjs'
extends:
- recommended
- docs/all
@ -10,3 +10,7 @@ apis:
root: v3/ref.yml
x-influxdata-docs-aliases:
- /influxdb3/enterprise/api/
- /influxdb3/enterprise/api/v1/
- /influxdb3/enterprise/v2/
- /influxdb3/enterprise/v1-compatibility
- /influxdb3/enterprise/v2-compatibility

View File

@ -2,11 +2,14 @@
tags:
- Quick start
- Authentication
- Cache data
- Common parameters
- Response codes
- Compatibility endpoints
- Data I/O
- Databases
- Database
- Processing engine
- Server information
- Tables
- Table
- Token
- Query data
- Write data

File diff suppressed because it is too large Load Diff

View File

@ -1,6 +1,6 @@
module.exports = SetTagGroups;
const { collect, getName, sortName, isPresent } = require('../../helpers/content-helper.js')
const { collect, getName, sortName, isPresent } = require('../../helpers/content-helper.cjs')
/**
* Returns an object that defines handler functions for:
* - Operation nodes

View File

@ -1,25 +0,0 @@
module.exports = SetTags;

const { tags } = require('../../../content/content');

/**
 * Redocly decorator that replaces the root-level `tags` list of an
 * OpenAPI definition with the custom tags supplied by the docs content
 * module. If the content module provides no tags, the definition is
 * left untouched.
 */
/** @type {import('@redocly/openapi-cli').OasDecorator} */
function SetTags() {
  const customTags = tags();
  return {
    DefinitionRoot: {
      /** Overwrite the root `tags` with the custom list when present. */
      enter(root) {
        if (!customTags) {
          return;
        }
        root.tags = customTags;
      },
    },
  };
}

View File

@ -1,5 +1,5 @@
const path = require('path');
const { toJSON } = require('./helpers/content-helper');
const { toJSON } = require('./helpers/content-helper.cjs');
function getVersioned(filename) {
const apiDocsRoot=path.resolve(process.env.API_DOCS_ROOT_PATH || process.cwd());

View File

@ -1,14 +1,14 @@
const {info, servers, tagGroups} = require('./docs-content');
const ReportTags = require('./rules/report-tags');
const ValidateServersUrl = require('./rules/validate-servers-url');
const RemovePrivatePaths = require('./decorators/paths/remove-private-paths');
const ReplaceShortcodes = require('./decorators/replace-shortcodes');
const SetInfo = require('./decorators/set-info');
const DeleteServers = require('./decorators/servers/delete-servers');
const SetServers = require('./decorators/servers/set-servers');
const SetTagGroups = require('./decorators/tags/set-tag-groups');
const StripVersionPrefix = require('./decorators/paths/strip-version-prefix');
const StripTrailingSlash = require('./decorators/paths/strip-trailing-slash');
const {info, servers, tagGroups} = require('./docs-content.cjs');
const ReportTags = require('./rules/report-tags.cjs');
const ValidateServersUrl = require('./rules/validate-servers-url.cjs');
const RemovePrivatePaths = require('./decorators/paths/remove-private-paths.cjs');
const ReplaceShortcodes = require('./decorators/replace-shortcodes.cjs');
const SetInfo = require('./decorators/set-info.cjs');
const DeleteServers = require('./decorators/servers/delete-servers.cjs');
const SetServers = require('./decorators/servers/set-servers.cjs');
const SetTagGroups = require('./decorators/tags/set-tag-groups.cjs');
const StripVersionPrefix = require('./decorators/paths/strip-version-prefix.cjs');
const StripTrailingSlash = require('./decorators/paths/strip-trailing-slash.cjs');
const id = 'docs';

View File

@ -2,7 +2,7 @@
///////////////// Preferred Client Library programming language ///////////////
////////////////////////////////////////////////////////////////////////////////
import { activateTabs, updateBtnURLs } from './tabbed-content.js';
import { getPreference, setPreference } from './local-storage.js';
import { getPreference, setPreference } from './services/local-storage.js';
function getVisitedApiLib() {
const path = window.location.pathname.match(

View File

@ -8,29 +8,31 @@ function setUser(userid, email) {
window[NAMESPACE] = {
user: {
uniqueClientId: userid,
email: email,
}
}
email: email,
},
};
}
// Initialize the chat widget
function initializeChat({onChatLoad, chatAttributes}) {
/* See https://docs.kapa.ai/integrations/website-widget/configuration for
function initializeChat({ onChatLoad, chatAttributes }) {
/* See https://docs.kapa.ai/integrations/website-widget/configuration for
* available configuration options.
* All values are strings.
*/
// If you make changes to data attributes here, you also need to port the changes to the api-docs/template.hbs API reference template.
// If you make changes to data attributes here, you also need to
// port the changes to the api-docs/template.hbs API reference template.
const requiredAttributes = {
websiteId: 'a02bca75-1dd3-411e-95c0-79ee1139be4d',
projectName: 'InfluxDB',
projectColor: '#020a47',
projectLogo: '/img/influx-logo-cubo-white.png',
}
};
const optionalAttributes = {
modalDisclaimer: 'This AI can access [documentation for InfluxDB, clients, and related tools](https://docs.influxdata.com). Information you submit is used in accordance with our [Privacy Policy](https://www.influxdata.com/legal/privacy-policy/).',
modalExampleQuestions: 'Use Python to write data to InfluxDB 3,How do I query using SQL?,How do I use MQTT with Telegraf?',
modalDisclaimer:
'This AI can access [documentation for InfluxDB, clients, and related tools](https://docs.influxdata.com). Information you submit is used in accordance with our [Privacy Policy](https://www.influxdata.com/legal/privacy-policy/).',
modalExampleQuestions:
'Use Python to write data to InfluxDB 3,How do I query using SQL?,How do I use MQTT with Telegraf?',
buttonHide: 'true',
exampleQuestionButtonWidth: 'auto',
modalOpenOnCommandK: 'true',
@ -52,28 +54,32 @@ function initializeChat({onChatLoad, chatAttributes}) {
modalHeaderBorderBottom: 'none',
modalTitleColor: '#fff',
modalTitleFontSize: '1.25rem',
}
};
const scriptUrl = 'https://widget.kapa.ai/kapa-widget.bundle.js';
const script = document.createElement('script');
script.async = true;
script.src = scriptUrl;
script.onload = function() {
script.onload = function () {
onChatLoad();
window.influxdatadocs.AskAI = AskAI;
};
script.onerror = function() {
script.onerror = function () {
console.error('Error loading AI chat widget script');
};
const dataset = {...requiredAttributes, ...optionalAttributes, ...chatAttributes};
Object.keys(dataset).forEach(key => {
// Assign dataset attributes from the object
const dataset = {
...requiredAttributes,
...optionalAttributes,
...chatAttributes,
};
Object.keys(dataset).forEach((key) => {
// Assign dataset attributes from the object
script.dataset[key] = dataset[key];
});
// Check for an existing script element to remove
const oldScript= document.querySelector(`script[src="${scriptUrl}"]`);
const oldScript = document.querySelector(`script[src="${scriptUrl}"]`);
if (oldScript) {
oldScript.remove();
}
@ -82,22 +88,21 @@ function initializeChat({onChatLoad, chatAttributes}) {
function getProductExampleQuestions() {
const questions = productData?.product?.ai_sample_questions;
return questions?.join(',') || '';
return questions?.join(',') || '';
}
/**
/**
* chatParams: specify custom (for example, page-specific) attribute values for the chat, pass the dataset key-values (collected in ...chatParams). See https://docs.kapa.ai/integrations/website-widget/configuration for available configuration options.
* onChatLoad: function to call when the chat widget has loaded
* userid: optional, a unique user ID for the user (not currently used for public docs)
*/
*/
export default function AskAI({ userid, email, onChatLoad, ...chatParams }) {
const modalExampleQuestions = getProductExampleQuestions();
const chatAttributes = {
...(modalExampleQuestions && { modalExampleQuestions }),
...chatParams,
}
initializeChat({onChatLoad, chatAttributes});
};
initializeChat({ onChatLoad, chatAttributes });
if (userid) {
setUser(userid, email);

View File

@ -1,8 +1,9 @@
import $ from 'jquery';
import { context } from './page-context.js';
function initialize() {
var codeBlockSelector = '.article--content pre';
var codeBlocks = $(codeBlockSelector);
var $codeBlocks = $(codeBlockSelector);
var appendHTML = `
<div class="code-controls">
@ -15,7 +16,7 @@ function initialize() {
`;
// Wrap all codeblocks with a new 'codeblock' div
$(codeBlocks).each(function () {
$codeBlocks.each(function () {
$(this).wrap("<div class='codeblock'></div>");
});
@ -68,7 +69,94 @@ function initialize() {
// Trigger copy failure state lifecycle
$('.copy-code').click(function () {
let text = $(this).closest('.code-controls').prevAll('pre:has(code)')[0].innerText;
let codeElement = $(this)
.closest('.code-controls')
.prevAll('pre:has(code)')[0];
let text = codeElement.innerText;
// Extract additional code block information
const codeBlockInfo = extractCodeBlockInfo(codeElement);
// Add Google Analytics event tracking
const currentUrl = new URL(window.location.href);
// Determine which tracking parameter to add based on product context
switch (context) {
case 'cloud':
currentUrl.searchParams.set('dl', 'cloud');
break;
case 'core':
/** Track using the same value used by www.influxdata.com pages */
currentUrl.searchParams.set('dl', 'oss3');
break;
case 'enterprise':
/** Track using the same value used by www.influxdata.com pages */
currentUrl.searchParams.set('dl', 'enterprise');
break;
case 'serverless':
currentUrl.searchParams.set('dl', 'serverless');
break;
case 'dedicated':
currentUrl.searchParams.set('dl', 'dedicated');
break;
case 'clustered':
currentUrl.searchParams.set('dl', 'clustered');
break;
case 'oss/enterprise':
currentUrl.searchParams.set('dl', 'oss');
break;
case 'other':
default:
// No tracking parameter for other/unknown products
break;
}
// Add code block specific tracking parameters
if (codeBlockInfo.language) {
currentUrl.searchParams.set('code_lang', codeBlockInfo.language);
}
if (codeBlockInfo.lineCount) {
currentUrl.searchParams.set('code_lines', codeBlockInfo.lineCount);
}
if (codeBlockInfo.hasPlaceholders) {
currentUrl.searchParams.set('has_placeholders', 'true');
}
if (codeBlockInfo.blockType) {
currentUrl.searchParams.set('code_type', codeBlockInfo.blockType);
}
if (codeBlockInfo.sectionTitle) {
currentUrl.searchParams.set(
'section',
encodeURIComponent(codeBlockInfo.sectionTitle)
);
}
if (codeBlockInfo.firstLine) {
currentUrl.searchParams.set(
'first_line',
encodeURIComponent(codeBlockInfo.firstLine.substring(0, 100))
);
}
// Update browser history without triggering page reload
if (window.history && window.history.replaceState) {
window.history.replaceState(null, '', currentUrl.toString());
}
// Send custom Google Analytics event if gtag is available
if (typeof window.gtag !== 'undefined') {
window.gtag('event', 'code_copy', {
language: codeBlockInfo.language,
line_count: codeBlockInfo.lineCount,
has_placeholders: codeBlockInfo.hasPlaceholders,
dl: codeBlockInfo.dl || null,
section_title: codeBlockInfo.sectionTitle,
first_line: codeBlockInfo.firstLine
? codeBlockInfo.firstLine.substring(0, 100)
: null,
product: context,
});
}
const copyContent = async () => {
try {
@ -82,6 +170,71 @@ function initialize() {
copyContent();
});
/**
 * Extract contextual information about a code block
 * @param {HTMLElement} codeElement - The code block element
 * @returns {Object} Information about the code block
 */
function extractCodeBlockInfo(codeElement) {
  const result = {
    language: null,
    lineCount: 0,
    hasPlaceholders: false,
    blockType: 'code',
    dl: null, // Download script type
    sectionTitle: null,
    firstLine: null,
  };

  // Detect the language from the <code> element's class attribute
  const codeTag = codeElement.querySelector('code');
  if (codeTag && codeTag.className) {
    const classMatch = codeTag.className.match(
      /language-(\w+)|hljs-(\w+)|(\w+)/
    );
    if (classMatch) {
      result.language = classMatch[1] || classMatch[2] || classMatch[3];
    }
  }

  // Line statistics and first non-empty line
  const blockText = codeElement.innerText || '';
  const blockLines = blockText.split('\n');
  result.lineCount = blockLines.length;
  result.firstLine = blockLines.find((line) => line.trim() !== '') || null;

  // Placeholder detection: ALL_CAPS tokens, {{...}}, ${...}, or <...>
  result.hasPlaceholders =
    /\b[A-Z_]{2,}\b|\{\{[^}]+\}\}|\$\{[^}]+\}|<[^>]+>/.test(blockText);

  // Classify install/download scripts for analytics tracking
  if (blockText.includes('https://www.influxdata.com/d/install_influxdb3.sh')) {
    result.dl = blockText.includes('install_influxdb3.sh enterprise')
      ? 'enterprise'
      : 'oss3';
  } else if (blockText.includes('docker pull influxdb:3-enterprise')) {
    result.dl = 'enterprise';
  } else if (blockText.includes('docker pull influxdb:3-core')) {
    result.dl = 'oss3';
  }

  // Walk backwards / upwards through the DOM for the nearest heading
  let node = codeElement;
  while (node && node !== document.body) {
    node = node.previousElementSibling || node.parentElement;
    if (node && node.tagName && /^H[1-6]$/.test(node.tagName)) {
      result.sectionTitle = node.textContent.trim();
      break;
    }
  }

  return result;
}
/////////////////////////////// FULL WINDOW CODE ///////////////////////////////
/*
@ -90,7 +243,10 @@ Disable scrolling on the body.
Disable user selection on everything but the fullscreen codeblock.
*/
$('.fullscreen-toggle').click(function () {
var code = $(this).closest('.code-controls').prevAll('pre:has(code)').clone();
var code = $(this)
.closest('.code-controls')
.prevAll('pre:has(code)')
.clone();
$('#fullscreen-code-placeholder').replaceWith(code[0]);
$('body').css('overflow', 'hidden');

View File

@ -1,30 +1,52 @@
const placeholderWrapper = '.code-placeholder-wrapper';
import $ from 'jquery';
const placeholderElement = 'var.code-placeholder';
const editIcon = "<span class='code-placeholder-edit-icon cf-icon Pencil'></span>";
// When clicking a placeholder, append the edit input
function handleClick(element) {
$(element).on('click', function() {
function handleClick($element) {
const $placeholder = $($element).find(placeholderElement);
$placeholder.on('click', function() {
var placeholderData = $(this)[0].dataset;
var placeholderID = placeholderData.codeVar;
var placeholderID = placeholderData.codeVarEscaped;
var placeholderValue = placeholderData.codeVarValue;
var placeholderInputWrapper = $('<div class="code-input-wrapper"></div>');
var placeholderInput = `<input class="placeholder-edit" id="${placeholderID}" value="${placeholderValue}" spellcheck=false onblur="submitPlaceholder($(this))" oninput="updateInputWidth($(this))" onkeydown="closeOnEnter($(this)[0], event)"></input>`;
$(this).before(placeholderInputWrapper)
$(this).siblings('.code-input-wrapper').append(placeholderInput);
$(`input#${placeholderID}`).width(`${placeholderValue.length}ch`);
$(`input#${placeholderID}`).focus().select();
$(this).css('opacity', 0);
const placeholderInput = document.createElement('input');
placeholderInput.setAttribute('class', 'placeholder-edit');
placeholderInput.setAttribute('data-id', placeholderID);
placeholderInput.setAttribute('data-code-var-escaped', placeholderID);
placeholderInput.setAttribute('value', placeholderValue);
placeholderInput.setAttribute('spellcheck', 'false');
placeholderInput.addEventListener('blur', function() {
submitPlaceholder($(this));
}
);
placeholderInput.addEventListener('input', function() {
updateInputWidth($(this));
}
);
placeholderInput.addEventListener('keydown', function(event) {
closeOnEnter($(this)[0], event);
}
);
const placeholderInputWrapper = $('<div class="code-input-wrapper"></div>');
$placeholder.before(placeholderInputWrapper)
$placeholder.siblings('.code-input-wrapper').append(placeholderInput);
$(`input[data-code-var-escaped="${placeholderID}"]`).width(`${placeholderValue.length}ch`);
document.querySelector(`input[data-code-var-escaped="${placeholderID}"]`).focus();
document.querySelector(`input[data-code-var-escaped="${placeholderID}"]`).select();
$placeholder.css('opacity', 0);
});
}
function submitPlaceholder(placeholderInput) {
var placeholderID = placeholderInput.attr('id');
var placeholderID = placeholderInput.attr('data-code-var-escaped');
var placeholderValue = placeholderInput[0].value;
var placeholderInput = $(`input.placeholder-edit#${placeholderID}`);
placeholderInput = $(`input.placeholder-edit[data-id="${placeholderID}"]`);
$(`*[data-code-var='${placeholderID}']`).each(function() {
$(`*[data-code-var="${placeholderID}"]`).each(function() {
$(this).attr('data-code-var-value', placeholderValue);
$(this).html(placeholderValue + editIcon);
$(this).css('opacity', 1);
@ -44,13 +66,7 @@ function closeOnEnter(input, event) {
}
}
function CodePlaceholder({element}) {
handleClick(element);
}
$(function() {
const codePlaceholders = $(placeholderElement);
codePlaceholders.each(function() {
CodePlaceholder({element: this});
});
});
/**
 * Component entry point: wires the click-to-edit placeholder behavior
 * onto the given code block component.
 */
export default function CodePlaceholder({ component }) {
  handleClick($(component));
}

View File

@ -0,0 +1,78 @@
// Memoize the mermaid module import so the library is fetched at most
// once per page, no matter how many diagrams are rendered.
let mermaidPromise = null;

/**
 * Render a Mermaid diagram inside `component` and keep it in sync with
 * the site's light/dark theme.
 *
 * @param {{ component: HTMLElement }} props - `component` is the DOM node
 *   containing the Mermaid diagram source (passed to `mermaid.run`).
 * @returns {() => void} Cleanup function: disconnects the theme observer
 *   and removes this component's cached mermaid reference.
 */
export default function Diagram({ component }) {
  // Import mermaid.js module (memoized)
  if (!mermaidPromise) {
    mermaidPromise = import('mermaid');
  }

  mermaidPromise
    .then(({ default: mermaid }) => {
      // Configure mermaid with InfluxData theming.
      // The theme reflects the body class at render time; later theme
      // switches are handled by the MutationObserver below.
      mermaid.initialize({
        startOnLoad: false, // We'll manually call run()
        theme: document.body.classList.contains('dark-theme')
          ? 'dark'
          : 'default',
        themeVariables: {
          fontFamily: 'Proxima Nova',
          fontSize: '16px',
          lineColor: '#22ADF6',
          primaryColor: '#22ADF6',
          primaryTextColor: '#545454',
          secondaryColor: '#05CE78',
          tertiaryColor: '#f4f5f5',
        },
        securityLevel: 'loose', // Required for interactive diagrams
        logLevel: 'error',
      });

      // Process the specific diagram component
      try {
        mermaid.run({ nodes: [component] });
      } catch (error) {
        console.error('Mermaid diagram rendering error:', error);
      }

      // Store reference to mermaid for theme switching
      if (!window.mermaidInstances) {
        window.mermaidInstances = new Map();
      }
      window.mermaidInstances.set(component, mermaid);
    })
    .catch((error) => {
      console.error('Failed to load Mermaid library:', error);
    });

  // Listen for theme changes to refresh diagrams.
  // `window.isDarkTheme` is a page-global latch shared by all Diagram
  // instances; it is undefined until the first class change, so the
  // comparison below treats the first change as a theme switch.
  const observer = new MutationObserver((mutations) => {
    mutations.forEach((mutation) => {
      if (
        mutation.attributeName === 'class' &&
        document.body.classList.contains('dark-theme') !== window.isDarkTheme
      ) {
        window.isDarkTheme = document.body.classList.contains('dark-theme');

        // Reload this specific diagram with new theme
        if (window.mermaidInstances?.has(component)) {
          const mermaid = window.mermaidInstances.get(component);
          mermaid.initialize({
            theme: window.isDarkTheme ? 'dark' : 'default',
          });
          mermaid.run({ nodes: [component] });
        }
      }
    });
  });

  // Watch for theme changes on body element
  observer.observe(document.body, { attributes: true });

  // Return cleanup function to be called when component is destroyed
  return () => {
    observer.disconnect();
    if (window.mermaidInstances?.has(component)) {
      window.mermaidInstances.delete(component);
    }
  };
}

View File

@ -0,0 +1,180 @@
/**
* DocSearch component for InfluxData documentation
* Handles asynchronous loading and initialization of Algolia DocSearch
*/
const debug = false; // Set to true for debugging output
export default function DocSearch({ component }) {
// Store configuration from component data attributes
const config = {
apiKey: component.getAttribute('data-api-key'),
appId: component.getAttribute('data-app-id'),
indexName: component.getAttribute('data-index-name'),
inputSelector: component.getAttribute('data-input-selector'),
searchTag: component.getAttribute('data-search-tag'),
includeFlux: component.getAttribute('data-include-flux') === 'true',
includeResources:
component.getAttribute('data-include-resources') === 'true',
debug: component.getAttribute('data-debug') === 'true',
};
// Initialize global object to track DocSearch state
window.InfluxDocs = window.InfluxDocs || {};
window.InfluxDocs.search = {
initialized: false,
options: config,
};
// Load DocSearch asynchronously
// Inject the DocSearch v2 bundle asynchronously; initialization runs in
// the script's load handler.
function loadDocSearch() {
  if (debug) {
    console.log('Loading DocSearch script...');
  }
  const scriptEl = document.createElement('script');
  scriptEl.async = true;
  scriptEl.onload = initializeDocSearch;
  scriptEl.src =
    'https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.js';
  document.body.appendChild(scriptEl);
}
// Initialize DocSearch after script loads
/**
 * Initialize Algolia DocSearch after the external script has loaded.
 * Reads `config` (built from the component's data attributes) to wire the
 * search input to the Algolia index, and decorates each search hit with a
 * human-readable product/version label derived from the hit's URL path.
 */
function initializeDocSearch() {
  if (debug) {
    console.log('Initializing DocSearch...');
  }

  // Products whose docs keep multiple versions side by side; for these,
  // the raw version path segment is displayed as-is.
  const multiVersion = ['influxdb'];

  // Use object-based lookups instead of conditionals for version and product names
  // These can be replaced with data from productData in the future

  // Version display name mappings
  const versionDisplayNames = {
    cloud: 'Cloud (TSM)',
    core: 'Core',
    enterprise: 'Enterprise',
    'cloud-serverless': 'Cloud Serverless',
    'cloud-dedicated': 'Cloud Dedicated',
    clustered: 'Clustered',
    explorer: 'Explorer',
  };

  // Product display name mappings
  const productDisplayNames = {
    influxdb: 'InfluxDB',
    influxdb3: 'InfluxDB 3',
    explorer: 'InfluxDB 3 Explorer',
    enterprise_influxdb: 'InfluxDB Enterprise',
    flux: 'Flux',
    telegraf: 'Telegraf',
    chronograf: 'Chronograf',
    kapacitor: 'Kapacitor',
    platform: 'InfluxData Platform',
    resources: 'Additional Resources',
  };

  // Initialize DocSearch with configuration
  window.docsearch({
    apiKey: config.apiKey,
    appId: config.appId,
    indexName: config.indexName,
    inputSelector: config.inputSelector,
    debug: config.debug,
    transformData: function (hits) {
      // Map a version path segment to its display name. Multi-version
      // products fall back to the raw segment; everything else to ''.
      function fmtVersion(version, productKey) {
        if (version == null) {
          return '';
        } else if (versionDisplayNames[version]) {
          return versionDisplayNames[version];
        } else if (fmtVersion.multi.includes(productKey)) {
          return version;
        } else {
          return '';
        }
      }
      fmtVersion.multi = multiVersion;

      // Annotate each hit in place. Use forEach, not map: the previous
      // implementation used `hits.map` purely for side effects and
      // discarded the mapped array.
      hits.forEach((hit) => {
        const pathData = new URL(hit.url).pathname
          .split('/')
          .filter((n) => n);
        const product = productDisplayNames[pathData[0]] || pathData[0];
        const version = fmtVersion(pathData[1], pathData[0]);

        hit.product = product;
        hit.version = version;
        hit.hierarchy.lvl0 +=
          ` <span class=\"search-product-version\">${product} ${version}</span>`;
        hit._highlightResult.hierarchy.lvl0.value +=
          ` <span class=\"search-product-version\">${product} ${version}</span>`;
      });
      return hits;
    },
    algoliaOptions: {
      hitsPerPage: 10,
      facetFilters: buildFacetFilters(config),
    },
    autocompleteOptions: {
      templates: {
        header:
          '<div class="search-all-content"><a href="https:\/\/support.influxdata.com" target="_blank">Search all InfluxData content <span class="icon-arrow-up-right"></span></a>',
        empty:
          '<div class="search-no-results"><p>Not finding what you\'re looking for?</p> <a href="https:\/\/support.influxdata.com" target="_blank">Search all InfluxData content <span class="icon-arrow-up-right"></span></a></div>',
      },
    },
  });

  // Mark DocSearch as initialized
  window.InfluxDocs.search.initialized = true;

  // Dispatch event for other components to know DocSearch is ready
  window.dispatchEvent(new CustomEvent('docsearch-initialized'));
}
/**
 * Build the Algolia facet-filter list for the current page configuration.
 * - A nested array expresses AND-ed filter conditions.
 * - Filter expressions deliberately include a space after each colon
 *   (matches the original template structure).
 * @param {Object} config - Search configuration
 * @returns {Array} facetFilters value for algoliaOptions
 */
function buildFacetFilters(config) {
  // No page-specific tag: restrict results to the latest docs only.
  if (!config.searchTag) {
    return ['latest:true'];
  }
  const andConditions = [`searchTag: ${config.searchTag}`];
  if (config.includeFlux) {
    andConditions.push('flux:true');
  }
  andConditions.push(`resources: ${config.includeResources}`);
  // Nested array = AND of all conditions.
  return [andConditions];
}
// Load DocSearch when page is idle or after a slight delay
if ('requestIdleCallback' in window) {
  requestIdleCallback(loadDocSearch);
} else {
  // Fallback for browsers without requestIdleCallback support.
  setTimeout(loadDocSearch, 500);
}
// Return cleanup function
// NOTE(review): no listeners are registered directly here, so cleanup is
// currently a no-op aside from the debug log.
return function cleanup() {
  // Clean up any event listeners if needed
  if (debug) {
    console.log('DocSearch component cleanup');
  }
};
}

View File

@ -0,0 +1,6 @@
import SearchInteractions from '../utils/search-interactions.js';
/**
 * Sidebar search component: wires the sidebar's search input field
 * up to the shared search-interaction handlers.
 * @param {Object} options - Component options
 * @param {HTMLElement} options.component - Sidebar component root element
 */
export default function SidebarSearch({ component }) {
  const field = component.querySelector('.sidebar--search-field');
  SearchInteractions({ searchInput: field });
}

View File

@ -1,7 +1,7 @@
import $ from 'jquery';
import { Datepicker } from 'vanillajs-datepicker';
import { toggleModal } from './modals.js';
import * as localStorage from './local-storage.js';
import * as localStorage from './services/local-storage.js';
// Placeholder start date used in InfluxDB custom timestamps
const defaultStartDate = '2022-01-01';
@ -53,65 +53,65 @@ function timeToUnixSeconds(time) {
return unixSeconds;
}
// Default time values in getting started sample data
const defaultTimes = [
{
rfc3339: `${defaultStartDate}T08:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T08:00:00Z`),
}, // 1641024000
{
rfc3339: `${defaultStartDate}T09:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T09:00:00Z`),
}, // 1641027600
{
rfc3339: `${defaultStartDate}T10:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T10:00:00Z`),
}, // 1641031200
{
rfc3339: `${defaultStartDate}T11:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T11:00:00Z`),
}, // 1641034800
{
rfc3339: `${defaultStartDate}T12:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T12:00:00Z`),
}, // 1641038400
{
rfc3339: `${defaultStartDate}T13:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T13:00:00Z`),
}, // 1641042000
{
rfc3339: `${defaultStartDate}T14:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T14:00:00Z`),
}, // 1641045600
{
rfc3339: `${defaultStartDate}T15:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T15:00:00Z`),
}, // 1641049200
{
rfc3339: `${defaultStartDate}T16:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T16:00:00Z`),
}, // 1641052800
{
rfc3339: `${defaultStartDate}T17:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T17:00:00Z`),
}, // 1641056400
{
rfc3339: `${defaultStartDate}T18:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T18:00:00Z`),
}, // 1641060000
{
rfc3339: `${defaultStartDate}T19:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T19:00:00Z`),
}, // 1641063600
{
rfc3339: `${defaultStartDate}T20:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T20:00:00Z`),
}, // 1641067200
];
// Default time values in getting started sample data
const defaultTimes = [
{
rfc3339: `${defaultStartDate}T08:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T08:00:00Z`),
}, // 1641024000
{
rfc3339: `${defaultStartDate}T09:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T09:00:00Z`),
}, // 1641027600
{
rfc3339: `${defaultStartDate}T10:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T10:00:00Z`),
}, // 1641031200
{
rfc3339: `${defaultStartDate}T11:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T11:00:00Z`),
}, // 1641034800
{
rfc3339: `${defaultStartDate}T12:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T12:00:00Z`),
}, // 1641038400
{
rfc3339: `${defaultStartDate}T13:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T13:00:00Z`),
}, // 1641042000
{
rfc3339: `${defaultStartDate}T14:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T14:00:00Z`),
}, // 1641045600
{
rfc3339: `${defaultStartDate}T15:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T15:00:00Z`),
}, // 1641049200
{
rfc3339: `${defaultStartDate}T16:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T16:00:00Z`),
}, // 1641052800
{
rfc3339: `${defaultStartDate}T17:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T17:00:00Z`),
}, // 1641056400
{
rfc3339: `${defaultStartDate}T18:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T18:00:00Z`),
}, // 1641060000
{
rfc3339: `${defaultStartDate}T19:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T19:00:00Z`),
}, // 1641063600
{
rfc3339: `${defaultStartDate}T20:00:00Z`,
unix: timeToUnixSeconds(`${defaultStartDate}T20:00:00Z`),
}, // 1641067200
];
function updateTimestamps (newStartDate, seedTimes=defaultTimes) {
function updateTimestamps(newStartDate, seedTimes = defaultTimes) {
// Update the times array with replacement times
const times = seedTimes.map(x => {
const times = seedTimes.map((x) => {
var newStartTimestamp = x.rfc3339.replace(/^.*T/, newStartDate + 'T');
return {
@ -178,7 +178,7 @@ function updateTimestamps (newStartDate, seedTimes=defaultTimes) {
/////////////////////// MODAL INTERACTIONS / DATE PICKER ///////////////////////
function CustomTimeTrigger({component}) {
function CustomTimeTrigger({ component }) {
const $component = $(component);
$component
.find('a[data-action="open"]:first')
@ -212,7 +212,7 @@ function CustomTimeTrigger({component}) {
if (newDate != undefined) {
newDate = formatDate(newDate);
// Update the last updated timestamps with the new date
// and reassign the updated times.
updatedTimes = updateTimestamps(newDate, updatedTimes);

View File

@ -1,30 +1,54 @@
const monthNames = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"];
var date = new Date()
var currentTimestamp = date.toISOString().replace(/^(.*)(\.\d+)(Z)/, '$1$3') // 2023-01-01T12:34:56Z
var currentTime = date.toISOString().replace(/(^.*T)(.*)(Z)/, '$2') + '084216' // 12:34:56.000084216
import $ from 'jquery';
function currentDate(offset=0, trimTime=false) {
outputDate = new Date(date)
outputDate.setDate(outputDate.getDate() + offset)
var date = new Date();
var currentTimestamp = date.toISOString().replace(/^(.*)(\.\d+)(Z)/, '$1$3'); // 2023-01-01T12:34:56Z
// Microsecond offset appended to the current time string for formatting purposes
const MICROSECOND_OFFSET = '084216';
var currentTime =
date.toISOString().replace(/(^.*T)(.*)(Z)/, '$2') + MICROSECOND_OFFSET; // 12:34:56.000084216
function currentDate(offset = 0, trimTime = false) {
let outputDate = new Date(date);
outputDate.setDate(outputDate.getDate() + offset);
if (trimTime) {
return outputDate.toISOString().replace(/T.*$/, '') // 2023-01-01
return outputDate.toISOString().replace(/T.*$/, ''); // 2023-01-01
} else {
return outputDate.toISOString().replace(/T.*$/, 'T00:00:00Z') // 2023-01-01T00:00:00Z
return outputDate.toISOString().replace(/T.*$/, 'T00:00:00Z'); // 2023-01-01T00:00:00Z
}
}
function enterpriseEOLDate() {
var inTwoYears = date.setFullYear(date.getFullYear() + 2)
earliestEOL = new Date(inTwoYears)
return `${monthNames[earliestEOL.getMonth()]} ${earliestEOL.getDate()}, ${earliestEOL.getFullYear()}`
const monthNames = [
'January',
'February',
'March',
'April',
'May',
'June',
'July',
'August',
'September',
'October',
'November',
'December',
];
var inTwoYears = new Date(date);
inTwoYears.setFullYear(inTwoYears.getFullYear() + 2);
let earliestEOL = new Date(inTwoYears);
return `${monthNames[earliestEOL.getMonth()]} ${earliestEOL.getDate()}, ${earliestEOL.getFullYear()}`;
}
$('span.current-timestamp').text(currentTimestamp)
$('span.current-time').text(currentTime)
$('span.enterprise-eol-date').text(enterpriseEOLDate)
$('span.current-date').each(function() {
var dayOffset = parseInt($(this).attr("offset"))
var trimTime = $(this).attr("trim-time") === "true"
$(this).text(currentDate(dayOffset, trimTime))
})
function initialize() {
$('span.current-timestamp').text(currentTimestamp);
$('span.current-time').text(currentTime);
$('span.enterprise-eol-date').text(enterpriseEOLDate());
$('span.current-date').each(function () {
var dayOffset = parseInt($(this).attr('offset'));
var trimTime = $(this).attr('trim-time') === 'true';
$(this).text(currentDate(dayOffset, trimTime));
});
}
export { initialize };

View File

@ -2,37 +2,24 @@
This feature is designed to callout new features added to the documentation
CSS is required for the callout bubble to determine look and position, but the
element must have the `callout` class and a unique id.
Callouts are treated as notifications and use the notification cookie API in
assets/js/cookies.js.
Callouts are treated as notifications and use the LocalStorage notification API.
*/
import $ from 'jquery';
import * as LocalStorageAPI from './services/local-storage.js';
// Get notification ID
function getCalloutID (el) {
// Returns the element's `id` attribute, used as the callout's
// notification key.
function getCalloutID(el) {
  return $(el).attr('id');
}
// Hide a callout and update the cookie with the viewed callout
function hideCallout (calloutID) {
if (!window.LocalStorageAPI.notificationIsRead(calloutID)) {
window.LocalStorageAPI.setNotificationAsRead(calloutID, 'callout');
$(`#${calloutID}`).fadeOut(200);
// Show the url feature callouts on page load
// Reveals the callout only if it hasn't been marked as read via the
// local-storage notification API (see ./services/local-storage.js).
export default function FeatureCallout({ component }) {
  const calloutID = getCalloutID($(component));
  if (!LocalStorageAPI.notificationIsRead(calloutID, 'callout')) {
    // `start-position` presumably holds the pre-animation CSS state —
    // removing it lets the bubble settle into its final position.
    $(`#${calloutID}.feature-callout`)
      .fadeIn(300)
      .removeClass('start-position');
  }
}
// Show the url feature callouts on page load
$(document).ready(function () {
$('.feature-callout').each(function () {
const calloutID = getCalloutID($(this));
if (!window.LocalStorageAPI.notificationIsRead(calloutID, 'callout')) {
$(`#${calloutID}.feature-callout`)
.fadeIn(300)
.removeClass('start-position');
}
});
});
// Hide the InfluxDB URL selector callout
// $('button.url-trigger, #influxdb-url-selector .close').click(function () {
// hideCallout('influxdb-url-selector');
// });

View File

@ -1,49 +1,148 @@
var tablesElement = $("#flux-group-keys-demo #grouped-tables")
import $ from 'jquery';
// Sample data
let data = [
[
{ _time: "2021-01-01T00:00:00Z", _measurement: "example", loc: "rm1", sensorID: "A123", _field: "temp", _value: 110.3 },
{ _time: "2021-01-01T00:01:00Z", _measurement: "example", loc: "rm1", sensorID: "A123", _field: "temp", _value: 112.5 },
{ _time: "2021-01-01T00:02:00Z", _measurement: "example", loc: "rm1", sensorID: "A123", _field: "temp", _value: 111.9 }
{
_time: '2021-01-01T00:00:00Z',
_measurement: 'example',
loc: 'rm1',
sensorID: 'A123',
_field: 'temp',
_value: 110.3,
},
{
_time: '2021-01-01T00:01:00Z',
_measurement: 'example',
loc: 'rm1',
sensorID: 'A123',
_field: 'temp',
_value: 112.5,
},
{
_time: '2021-01-01T00:02:00Z',
_measurement: 'example',
loc: 'rm1',
sensorID: 'A123',
_field: 'temp',
_value: 111.9,
},
],
[
{ _time: "2021-01-01T00:00:00Z", _measurement: "example", loc: "rm1", sensorID: "A123", _field: "hum", _value: 73.4 },
{ _time: "2021-01-01T00:01:00Z", _measurement: "example", loc: "rm1", sensorID: "A123", _field: "hum", _value: 73.7 },
{ _time: "2021-01-01T00:02:00Z", _measurement: "example", loc: "rm1", sensorID: "A123", _field: "hum", _value: 75.1 }
{
_time: '2021-01-01T00:00:00Z',
_measurement: 'example',
loc: 'rm1',
sensorID: 'A123',
_field: 'hum',
_value: 73.4,
},
{
_time: '2021-01-01T00:01:00Z',
_measurement: 'example',
loc: 'rm1',
sensorID: 'A123',
_field: 'hum',
_value: 73.7,
},
{
_time: '2021-01-01T00:02:00Z',
_measurement: 'example',
loc: 'rm1',
sensorID: 'A123',
_field: 'hum',
_value: 75.1,
},
],
[
{ _time: "2021-01-01T00:00:00Z", _measurement: "example", loc: "rm2", sensorID: "B456", _field: "temp", _value: 108.2 },
{ _time: "2021-01-01T00:01:00Z", _measurement: "example", loc: "rm2", sensorID: "B456", _field: "temp", _value: 108.5 },
{ _time: "2021-01-01T00:02:00Z", _measurement: "example", loc: "rm2", sensorID: "B456", _field: "temp", _value: 109.6 }
{
_time: '2021-01-01T00:00:00Z',
_measurement: 'example',
loc: 'rm2',
sensorID: 'B456',
_field: 'temp',
_value: 108.2,
},
{
_time: '2021-01-01T00:01:00Z',
_measurement: 'example',
loc: 'rm2',
sensorID: 'B456',
_field: 'temp',
_value: 108.5,
},
{
_time: '2021-01-01T00:02:00Z',
_measurement: 'example',
loc: 'rm2',
sensorID: 'B456',
_field: 'temp',
_value: 109.6,
},
],
[
{ _time: "2021-01-01T00:00:00Z", _measurement: "example", loc: "rm2", sensorID: "B456", _field: "hum", _value: 71.8 },
{ _time: "2021-01-01T00:01:00Z", _measurement: "example", loc: "rm2", sensorID: "B456", _field: "hum", _value: 72.3 },
{ _time: "2021-01-01T00:02:00Z", _measurement: "example", loc: "rm2", sensorID: "B456", _field: "hum", _value: 72.1 }
]
]
{
_time: '2021-01-01T00:00:00Z',
_measurement: 'example',
loc: 'rm2',
sensorID: 'B456',
_field: 'hum',
_value: 71.8,
},
{
_time: '2021-01-01T00:01:00Z',
_measurement: 'example',
loc: 'rm2',
sensorID: 'B456',
_field: 'hum',
_value: 72.3,
},
{
_time: '2021-01-01T00:02:00Z',
_measurement: 'example',
loc: 'rm2',
sensorID: 'B456',
_field: 'hum',
_value: 72.1,
},
],
];
// Default group key
let groupKey = ["_measurement", "loc", "sensorID", "_field"]
let groupKey = ['_measurement', 'loc', 'sensorID', '_field'];
export default function FluxGroupKeysDemo({ component }) {
$('.column-list label').click(function () {
toggleCheckbox($(this));
groupKey = getChecked(component);
groupData();
buildGroupExample(component);
});
// Group and render tables on load
groupData();
}
// Build a table group (group key and table) using an array of objects
function buildTable(inputData) {
// Build the group key string
function wrapString(column, value) {
var stringColumns = ["_measurement", "loc", "sensorID", "_field"]
var stringColumns = ['_measurement', 'loc', 'sensorID', '_field'];
if (stringColumns.includes(column)) {
return '"' + value + '"'
return '"' + value + '"';
} else {
return value
return value;
}
}
var groupKeyString = "Group key instance = [" + (groupKey.map(column => column + ": " + wrapString(column, (inputData[0])[column])) ).join(", ") + "]";
var groupKeyLabel = document.createElement("p");
groupKeyLabel.className = "table-group-key"
groupKeyLabel.innerHTML = groupKeyString
var groupKeyString =
'Group key instance = [' +
groupKey
.map((column) => column + ': ' + wrapString(column, inputData[0][column]))
.join(', ') +
']';
var groupKeyLabel = document.createElement('p');
groupKeyLabel.className = 'table-group-key';
groupKeyLabel.innerHTML = groupKeyString;
// Extract column headers
var columns = [];
@ -54,56 +153,57 @@ function buildTable(inputData) {
}
}
}
// Create the table element
var table = document.createElement("table");
const table = document.createElement('table');
// Create the table header
for (let i = 0; i < columns.length; i++) {
var header = table.createTHead();
var th = document.createElement("th");
var th = document.createElement('th');
th.innerHTML = columns[i];
if (groupKey.includes(columns[i])) {
th.className = "grouped-by";
th.className = 'grouped-by';
}
header.appendChild(th);
}
// Add inputData to the HTML table
for (let i = 0; i < inputData.length; i++) {
tr = table.insertRow(-1);
let tr = table.insertRow(-1);
for (let j = 0; j < columns.length; j++) {
var td = tr.insertCell(-1);
td.innerHTML = inputData[i][columns[j]];
// Highlight the value if column is part of the group key
if (groupKey.includes(columns[j])) {
td.className = "grouped-by";
td.className = 'grouped-by';
}
}
}
// Create a table group with group key and table
var tableGroup = document.createElement("div");
tableGroup.innerHTML += groupKeyLabel.outerHTML + table.outerHTML
var tableGroup = document.createElement('div');
tableGroup.innerHTML += groupKeyLabel.outerHTML + table.outerHTML;
return tableGroup
return tableGroup;
}
// Clear and rebuild all HTML tables
function buildTables(data) {
existingTables = tablesElement[0]
let tablesElement = $('#flux-group-keys-demo #grouped-tables');
let existingTables = tablesElement[0];
while (existingTables.firstChild) {
existingTables.removeChild(existingTables.firstChild);
}
for (let i = 0; i < data.length; i++) {
var table = buildTable(data[i])
var table = buildTable(data[i]);
tablesElement.append(table);
}
}
// Group data based on the group key and output new tables
function groupData() {
let groupedData = data.flat()
let groupedData = data.flat();
function groupBy(array, f) {
var groups = {};
@ -114,20 +214,19 @@ function groupData() {
});
return Object.keys(groups).map(function (group) {
return groups[group];
})
});
}
groupedData = groupBy(groupedData, function (r) {
return groupKey.map(v => r[v]);
return groupKey.map((v) => r[v]);
});
buildTables(groupedData);
}
// Get selected column names
var checkboxes = $("input[type=checkbox]");
function getChecked() {
function getChecked(component) {
// Get selected column names
var checkboxes = $(component).find('input[type=checkbox]');
var checked = [];
for (var i = 0; i < checkboxes.length; i++) {
var checkbox = checkboxes[i];
@ -141,17 +240,12 @@ function toggleCheckbox(element) {
}
// Build example group function
function buildGroupExample() {
var columnCollection = getChecked().map(i => '<span class=\"s2\">"' + i + '"</span>').join(", ")
$("pre#group-by-example")[0].innerHTML = "data\n <span class='nx'>|></span> group(columns<span class='nx'>:</span> [" + columnCollection + "])";
function buildGroupExample(component) {
var columnCollection = getChecked(component)
.map((i) => '<span class=\"s2\">"' + i + '"</span>')
.join(', ');
$('pre#group-by-example')[0].innerHTML =
"data\n <span class='nx'>|></span> group(columns<span class='nx'>:</span> [" +
columnCollection +
'])';
}
$(".column-list label").click(function () {
toggleCheckbox($(this))
groupKey = getChecked();
groupData();
buildGroupExample();
});
// Group and render tables on load
groupData()

View File

@ -1,22 +0,0 @@
$('.exp-btn').click(function() {
var targetBtnElement = $(this).parent()
$('.exp-btn > p', targetBtnElement).fadeOut(100);
setTimeout(function() {
$('.exp-btn-links', targetBtnElement).fadeIn(200)
$('.exp-btn', targetBtnElement).addClass('open');
$('.close-btn', targetBtnElement).fadeIn(200);
}, 100);
})
$('.close-btn').click(function() {
var targetBtnElement = $(this).parent().parent()
$('.exp-btn-links', targetBtnElement).fadeOut(100)
$('.exp-btn', targetBtnElement).removeClass('open');
$(this).fadeOut(100);
setTimeout(function() {
$('p', targetBtnElement).fadeIn(100);
}, 100);
})
/////////////////////////////// EXPANDING BUTTONS //////////////////////////////

View File

@ -3,7 +3,6 @@
///////////////////////// INFLUXDB URL PREFERENCE /////////////////////////////
////////////////////////////////////////////////////////////////////////////////
*/
import * as pageParams from '@params';
import {
DEFAULT_STORAGE_URLS,
getPreference,
@ -12,15 +11,18 @@ import {
removeInfluxDBUrl,
getInfluxDBUrl,
getInfluxDBUrls,
} from './local-storage.js';
} from './services/local-storage.js';
import $ from 'jquery';
import { context as PRODUCT_CONTEXT, referrerHost } from './page-context.js';
import { influxdbUrls } from './services/influxdb-urls.js';
import { delay } from './helpers.js';
import { toggleModal } from './modals.js';
let CLOUD_URLS = [];
if (pageParams && pageParams.influxdb_urls) {
CLOUD_URLS = Object.values(pageParams.influxdb_urls.cloud.providers).flatMap((provider) => provider.regions?.map((region) => region.url));
if (influxdbUrls?.cloud) {
CLOUD_URLS = Object.values(influxdbUrls.cloud.providers).flatMap((provider) =>
provider.regions?.map((region) => region.url)
);
}
export { CLOUD_URLS };
@ -28,7 +30,7 @@ export function InfluxDBUrl() {
const UNIQUE_URL_PRODUCTS = ['dedicated', 'clustered'];
const IS_UNIQUE_URL_PRODUCT = UNIQUE_URL_PRODUCTS.includes(PRODUCT_CONTEXT);
// Add actual cloud URLs as needed
// Add actual cloud URLs as needed
const elementSelector = '.article--content pre:not(.preserve)';
///////////////////// Stored preference management ///////////////////////
@ -118,11 +120,12 @@ export function InfluxDBUrl() {
});
}
// Retrieve the currently selected URLs from the urls local storage object.
function getUrls() {
const { cloud, oss, core, enterprise, serverless, dedicated, clustered } = getInfluxDBUrls();
return { oss, cloud, core, enterprise, serverless, dedicated, clustered };
}
// Retrieve the currently selected URLs from the urls local storage object.
function getUrls() {
const { cloud, oss, core, enterprise, serverless, dedicated, clustered } =
getInfluxDBUrls();
return { oss, cloud, core, enterprise, serverless, dedicated, clustered };
}
// Retrieve the previously selected URLs from the from the urls local storage object.
// This is used to update URLs whenever you switch between browser tabs.
@ -289,15 +292,17 @@ export function InfluxDBUrl() {
}
// Append the URL selector button to each codeblock containing a placeholder URL
function appendUrlSelector(urls={
cloud: '',
oss: '',
core: '',
enterprise: '',
serverless: '',
dedicated: '',
clustered: '',
}) {
function appendUrlSelector(
urls = {
cloud: '',
oss: '',
core: '',
enterprise: '',
serverless: '',
dedicated: '',
clustered: '',
}
) {
const appendToUrls = Object.values(urls);
const getBtnText = (context) => {
@ -315,7 +320,7 @@ export function InfluxDBUrl() {
return contextText[context];
};
appendToUrls.forEach(function (url) {
appendToUrls.forEach(function (url) {
$(elementSelector).each(function () {
var code = $(this).html();
if (code.includes(url)) {
@ -330,20 +335,32 @@ export function InfluxDBUrl() {
});
}
////////////////////////////////////////////////////////////////////////////
////////////////// Initialize InfluxDB URL interactions ////////////////////
////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////
////////////////// Initialize InfluxDB URL interactions ////////////////////
////////////////////////////////////////////////////////////////////////////
// Add the preserve tag to code blocks that shouldn't be updated
addPreserve();
const { cloud, oss, core, enterprise, serverless, dedicated, clustered } = DEFAULT_STORAGE_URLS;
const { cloud, oss, core, enterprise, serverless, dedicated, clustered } =
DEFAULT_STORAGE_URLS;
// Append URL selector buttons to code blocks
appendUrlSelector({ cloud, oss, core, enterprise, serverless, dedicated, clustered });
appendUrlSelector({
cloud,
oss,
core,
enterprise,
serverless,
dedicated,
clustered,
});
// Update URLs on load
updateUrls({ cloud, oss, core, enterprise, serverless, dedicated, clustered }, getUrls());
updateUrls(
{ cloud, oss, core, enterprise, serverless, dedicated, clustered },
getUrls()
);
// Set active radio button on page load
setRadioButtons(getUrls());

View File

@ -1,41 +1,58 @@
// Dynamically update keybindings or hotkeys
function getPlatform() {
if (/Mac/.test(navigator.platform)) {
return "osx"
} else if (/Win/.test(navigator.platform)) {
return "win"
} else if (/Linux/.test(navigator.platform)) {
return "linux"
} else {
return "other"
}
import { getPlatform } from './utils/user-agent-platform.js';
import $ from 'jquery';
/**
 * Adds an OS-specific CSS class (e.g. "osx", "win", "linux") to the
 * component element so styling can target the detected platform.
 * @param {string} osClass - OS-specific class to add
 * @param {Object} options - Component options
 * @param {jQuery} options.$component - jQuery element reference
 * @returns {void}
 */
function addOSClass(osClass, { $component }) {
  $component.addClass(osClass);
}
const platform = getPlatform()
/**
* Updates keybinding display based on detected platform
* @param {Object} options - Component options
* @param {jQuery} options.$component - jQuery element reference
* @param {string} options.platform - Detected platform
*/
function updateKeyBindings({ $component, platform }) {
const osx = $component.data('osx');
const linux = $component.data('linux');
const win = $component.data('win');
function addOSClass(osClass) {
$('.keybinding').addClass(osClass)
}
let keybind;
function updateKeyBindings() {
$('.keybinding').each(function() {
var osx = $(this).data("osx")
var linux = $(this).data("linux")
var win = $(this).data("win")
if (platform === "other") {
if (win != linux) {
var keybind = '<code class="osx">' + osx + '</code> for macOS, <code>' + linux + '</code> for Linux, and <code>' + win + '</code> for Windows';
} else {
var keybind = '<code>' + linux + '</code> for Linux and Windows and <code class="osx">' + osx + '</code> for macOS';
}
if (platform === 'other') {
if (win !== linux) {
keybind =
`<code class="osx">${osx}</code> for macOS, ` +
`<code>${linux}</code> for Linux, ` +
`and <code>${win}</code> for Windows`;
} else {
var keybind = '<code>' + $(this).data(platform) + '</code>'
keybind =
`<code>${linux}</code> for Linux and Windows and ` +
`<code class="osx">${osx}</code> for macOS`;
}
} else {
keybind = `<code>${$component.data(platform)}</code>`;
}
$(this).html(keybind)
})
$component.html(keybind);
}
addOSClass(platform)
updateKeyBindings()
/**
 * Component entry point: render platform-specific keybindings.
 * Detects the user's OS, tags the element with an OS class, and swaps
 * in the keybinding markup that matches the detected platform.
 * @param {Object} options - Component options
 * @param {HTMLElement} options.component - DOM element
 * @returns {void}
 */
export default function KeyBinding({ component }) {
  const $component = $(component);
  const platform = getPlatform();
  addOSClass(platform, { $component });
  updateKeyBindings({ $component, platform });
}

Some files were not shown because too many files have changed in this diff Show More