diff --git a/.ci/link-checker/default.lycherc.toml b/.ci/link-checker/default.lycherc.toml new file mode 100644 index 000000000..f769afc36 --- /dev/null +++ b/.ci/link-checker/default.lycherc.toml @@ -0,0 +1,74 @@ +# Lychee link checker configuration +# Generated by link-checker +[lychee] +# Performance settings + +# Maximum number of retries for failed checks + +max_retries = 3 + +# Timeout for each link check (in seconds) +timeout = 30 + +# Maximum number of concurrent checks +max_concurrency = 128 + +skip_code_blocks = false + +# HTTP settings +# Identify the tool to external services +user_agent = "Mozilla/5.0 (compatible; link-checker)" + +# Accept these HTTP status codes as valid +accept = [200, 201, 202, 203, 204, 206, 301, 302, 303, 304, +307, 308] + +# Skip these URL schemes +scheme = ["file", "mailto", "tel"] + +# Exclude patterns (regex supported) +exclude = [ + # Localhost URLs + "^https?://localhost", + "^https?://127\\.0\\.0\\.1", + + # Common CI/CD environments + "^https?://.*\\.local", + + # Example domains used in documentation + "^https?://example\\.(com|org|net)", + + # Placeholder URLs from code block filtering + "https://example.com/REMOVED_FROM_CODE_BLOCK", + "example.com/INLINE_CODE_URL", + + # URLs that require authentication + "^https?://.*\\.slack\\.com", + "^https?://.*\\.atlassian\\.net", + + # GitHub URLs (often fail due to rate limiting and bot + # detection) + "^https?://github\\.com", + + # StackExchange network URLs (often block automated requests) + "^https?://.*\\.stackexchange\\.com", + "^https?://stackoverflow\\.com", + "^https?://.*\\.stackoverflow\\.com", + + # Docker Hub URLs (rate limiting and bot detection) + "^https?://hub\\.docker\\.com", + + # Common documentation placeholders + "YOUR_.*", + "REPLACE_.*", + "<.*>", +] + +# Request headers +[headers] +# Add custom headers here if needed +# "Authorization" = "Bearer $GITHUB_TOKEN" + +# Cache settings +cache = true +max_cache_age = "1d" \ No newline at end of file diff 
--git a/.ci/link-checker/production.lycherc.toml b/.ci/link-checker/production.lycherc.toml new file mode 100644 index 000000000..37f692e47 --- /dev/null +++ b/.ci/link-checker/production.lycherc.toml @@ -0,0 +1,116 @@ +# Production Link Checker Configuration for InfluxData docs-v2 +# Optimized for performance, reliability, and reduced false positives +[lychee] +# Performance settings + +# Maximum number of retries for failed checks + +max_retries = 3 + +# Timeout for each link check (in seconds) +timeout = 30 + +# Maximum number of concurrent checks +max_concurrency = 128 + +skip_code_blocks = false + +# HTTP settings +# Identify the tool to external services +"User-Agent" = "Mozilla/5.0 (compatible; influxdata-link-checker/1.0; +https://github.com/influxdata/docs-v2)" +accept = [200, 201, 202, 203, 204, 206, 301, 302, 303, 304, 307, 308] + +# Skip these URL schemes +scheme = ["mailto", "tel"] + +# Performance optimizations +cache = true +max_cache_age = "1h" + +# Retry configuration for reliability +include_verbatim = false + +# Exclusion patterns for docs-v2 (regex supported) +exclude = [ + # Localhost URLs + "^https?://localhost", + "^https?://127\\.0\\.0\\.1", + + # Common CI/CD environments + "^https?://.*\\.local", + + # Example domains used in documentation + "^https?://example\\.(com|org|net)", + + # Placeholder URLs from code block filtering + "https://example.com/REMOVED_FROM_CODE_BLOCK", + "example.com/INLINE_CODE_URL", + + # URLs that require authentication + "^https?://.*\\.slack\\.com", + "^https?://.*\\.atlassian\\.net", + + # GitHub URLs (often fail due to rate limiting and bot + # detection) + "^https?://github\\.com", + + # Social media URLs (often block bots) + "^https?://reddit\\.com", + "^https?://.*\\.reddit\\.com", + + # StackExchange network URLs (often block automated requests) + "^https?://.*\\.stackexchange\\.com", + "^https?://stackoverflow\\.com", + "^https?://.*\\.stackoverflow\\.com", + + # Docker Hub URLs (rate limiting and bot 
detection) + "^https?://hub\\.docker\\.com", + + # InfluxData support URLs (certificate/SSL issues in CI) + "^https?://support\\.influxdata\\.com", + + # Common documentation placeholders + "YOUR_.*", + "REPLACE_.*", + "<.*>", +] + +# Request headers +[headers] +# Add custom headers here if needed +# "Authorization" = "Bearer $GITHUB_TOKEN" +"Accept" = "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8" +"Accept-Language" = "en-US,en;q=0.5" +"Accept-Encoding" = "gzip, deflate" +"DNT" = "1" +"Connection" = "keep-alive" +"Upgrade-Insecure-Requests" = "1" + +[ci] +# CI-specific settings + +[ci.github_actions] +output_format = "json" +create_annotations = true +fail_fast = false +max_annotations = 50 # Limit to avoid overwhelming PR comments + +[ci.performance] +# Performance tuning for CI environment +parallel_requests = 32 +connection_timeout = 10 +read_timeout = 30 + +# Resource limits +max_memory_mb = 512 +max_execution_time_minutes = 10 + +[reporting] +# Report configuration +include_fragments = false +verbose = false +no_progress = true # Disable progress bar in CI + +# Summary settings +show_success_count = true +show_skipped_count = true \ No newline at end of file diff --git a/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt b/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt index 1ebaf7d46..a56077021 100644 --- a/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt +++ b/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt @@ -31,7 +31,7 @@ LogicalPlan [Mm]onitor MBs? PBs? 
-Parquet +Parquet|\b\w*-*parquet-\w*\b|\b--\w*parquet\w*\b|`[^`]*parquet[^`]*` Redoc SQLAlchemy SQLAlchemy diff --git a/.circleci/config.yml b/.circleci/config.yml index e0ee7ca74..b90ba6693 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,4 +1,4 @@ -version: 2 +version: 2.1 jobs: build: docker: @@ -31,17 +31,17 @@ jobs: command: cd api-docs && bash generate-api-docs.sh - run: name: Inject Flux stdlib frontmatter - command: node ./flux-build-scripts/inject-flux-stdlib-frontmatter.js + command: node ./flux-build-scripts/inject-flux-stdlib-frontmatter.cjs - run: name: Update Flux/InfluxDB versions - command: node ./flux-build-scripts/update-flux-versions.js + command: node ./flux-build-scripts/update-flux-versions.cjs - save_cache: key: install-{{ .Environment.CACHE_VERSION }}-{{ checksum ".circleci/config.yml" }} paths: - /home/circleci/bin - run: name: Hugo Build - command: npx hugo --logLevel info --minify --destination workspace/public + command: yarn hugo --environment production --logLevel info --gc --destination workspace/public - persist_to_workspace: root: workspace paths: @@ -68,7 +68,6 @@ jobs: when: on_success workflows: - version: 2 build: jobs: - build diff --git a/.claude/commands/analyze-api-source.md b/.claude/commands/analyze-api-source.md new file mode 100644 index 000000000..32b651001 --- /dev/null +++ b/.claude/commands/analyze-api-source.md @@ -0,0 +1,25 @@ +analyze-api-source: + + + Analyze source code in the specified repo to determine: + 1. HTTP method and endpoint path + 2. Parameters for the given endpoint + 3. Whether the specified parameter is supported for the given API endpoint + 4. Parameter format, valid values, and default behavior + 5. 
Any limitations or quirks of the parameter + +For product InfluxDB 3 Core and Enterprise, + Search repo influxdata/influxdb + Search through: + - HTTP endpoint handlers in + influxdb3_server/src/http/ + - Parameter structs and deserialization + - Request routing and processing logic + - Type definitions in influxdb3_types/src/ + + In the output, provide: + - Comparison across v1, v2, and v3 API compatibility + +In the output, provide: + Concrete examples of endpoint and parameter usage + Cite specific source code locations. \ No newline at end of file diff --git a/.claude/commands/enhance-release-notes.md b/.claude/commands/enhance-release-notes.md new file mode 100644 index 000000000..2297e4519 --- /dev/null +++ b/.claude/commands/enhance-release-notes.md @@ -0,0 +1,192 @@ +# enhance-release-notes + +Analyze GitHub PRs referenced in release notes and enhance descriptions following Google Developer Documentation style. + +## Overview + +This command improves release note descriptions by: +1. Fetching PR data from GitHub API +2. Analyzing code changes and PR content +3. Generating clear, action-oriented descriptions +4. Following Google Developer Documentation principles +5. Creating a descriptive commit message + +## Usage + +``` +enhance-release-notes [--dry-run] +``` + +## Process + +### 1. Extract PR References + +- Scan the release notes file for GitHub PR links +- Extract PR numbers and repository information +- Example pattern: `([#26574](https://github.com/influxdata/influxdb/pull/26574))` + +### 2. Fetch PR Data + +For each PR, collect: +- PR title and description +- Files modified (to determine component scope) +- Labels and metadata +- Code change statistics + +### 3. 
Analyze and Categorize + +**Component Detection** (based on file paths): +- `src/database/`, `catalog/`, `schema/` → Database operations +- `cmd/`, `cli/` → CLI commands +- `api/`, `http/` → HTTP API +- `src/query/`, `sql/` → Query engine +- `src/auth/`, `token/` → Authentication +- `storage/`, `parquet/`, `wal/` → Storage engine +- `license/` → License management + +**Change Type Detection**: +- `feat:` or "add", "new" → Feature +- `fix:` or "resolve", "correct" → Bug fix +- `perf:` or "optim", "faster" → Performance improvement + +### 4. Generate Google Developer Documentation Style Descriptions + +**Principles**: +- Clear, concise, action-oriented language +- Focus on what developers can do +- Avoid marketing speak ("enhanced", "improved", "better") +- Use specific, concrete benefits +- Start with action verbs when possible + +**Templates**: + +**Database Operations**: +- `hard.*delet.*date` → "Set custom hard deletion dates for deleted databases and tables" +- `retention.*period` → "Configure automatic data expiration for databases" +- `schema.*updat` → "Modify database schema after creation" + +**CLI Commands**: +- `help.*text` → "Access help documentation for commands" +- `show.*license` → "View license details including expiration and limits" +- `object.*store.*required` → "Specify object store configuration when starting the server" + +**HTTP API**: +- `v1.*query.*endpoint.*ns` → "Use nanosecond precision by default in V1 API CSV responses" +- `trigger.*request_path` → "Configure processing engine triggers with request paths" + +**Query Engine**: +- `csv.*precision` → "Get consistent timestamp precision in CSV output" +- `query.*performance` → "Execute queries without performance degradation" + +**Authentication**: +- `token.*creation` → "Generate tokens with additional configuration options" +- `admin.*token.*expiration` → "Set expiration dates for admin tokens" + +**Storage Engine**: +- `aws.*credential.*reload` → "Automatically refresh AWS credentials 
from files" +- `wal.*replay.*concurrency` → "Control memory usage during database startup" +- `corrupt.*wal.*recovery` → "Recover from corrupted write-ahead log files" + +**Fallback Patterns**: +- Features: "Use [functionality] to [specific action]" +- Bug fixes: "Avoid [specific problem] when [specific action]" +- Performance: "Execute [operation] without [specific issue]" + +### 5. Enhancement Format + +Transform: +```markdown +- **Database management**: Allow hard_deleted date of deleted schema to be updated ([#26574](https://github.com/influxdata/influxdb/pull/26574)) +``` + +Into: +```markdown +- **Database operations**: Set custom hard deletion dates for deleted databases and tables ([#26574](https://github.com/influxdata/influxdb/pull/26574)) +``` + +### 6. Output Processing + +**Dry Run Mode**: +- Show before/after comparison +- List all proposed changes +- Don't modify the file + +**Apply Mode**: +- Replace descriptions in the original file +- Preserve all formatting and PR links +- Log successful enhancements + +### 7. 
Create Descriptive Commit Message + +After enhancing the release notes, generate a commit message: + +**Format**: +``` +docs: enhance release notes with specific user benefits + +- Transform generic descriptions into action-oriented language +- Add specific benefits following Google Developer Documentation style +- Focus on what developers can do with each change +- Enhanced [X] descriptions across [Y] components + +Enhanced components: [list of components modified] +``` + +**Example**: +``` +docs: enhance v3.2.1 release notes with specific user benefits + +- Transform generic descriptions into action-oriented language +- Add specific benefits following Google Developer Documentation style +- Focus on what developers can do with each change +- Enhanced 8 descriptions across database, CLI, and API components + +Enhanced components: Database operations, CLI commands, HTTP API +``` + +## Error Handling + +- **Missing GitHub token**: Warn about rate limits, continue with public API +- **Private repos**: Skip PRs that can't be accessed +- **Invalid PR URLs**: Log error and skip enhancement +- **API rate limits**: Implement exponential backoff +- **Network issues**: Retry with fallback to original description + +## Configuration + +**Environment Variables**: +- `GITHUB_TOKEN`: Personal access token for GitHub API access + +**GitHub Enterprise Support**: +- Detect GitHub Enterprise URLs in PR links +- Use appropriate API base URL + +## Implementation Notes + +1. **Rate Limiting**: Respect GitHub API rate limits (5000/hour authenticated, 60/hour unauthenticated) +2. **Caching**: Consider caching PR data to avoid repeated API calls during development +3. **Validation**: Verify PR URLs match expected format before API calls +4. **Preservation**: Maintain all existing formatting, spacing, and non-PR content +5. 
**Atomic Updates**: Only modify the file if all enhancements succeed (or provide partial success options) + +## Example Usage + +```bash +# Dry run to see proposed changes +enhance-release-notes release-notes-v3.2.1.md --dry-run + +# Apply enhancements +enhance-release-notes release-notes-v3.2.1.md + +# With verbose output +enhance-release-notes release-notes-v3.2.1.md --verbose +``` + +## Success Criteria + +1. All PR descriptions follow Google Developer Documentation style +2. Descriptions focus on specific developer actions and benefits +3. No marketing language or vague improvements +4. Component categories are accurate based on code changes +5. Original formatting and PR links are preserved +6. Commit message clearly describes the enhancement approach \ No newline at end of file diff --git a/.claude/commands/fix-github-issue.md b/.claude/commands/fix-github-issue.md new file mode 100644 index 000000000..3b817998c --- /dev/null +++ b/.claude/commands/fix-github-issue.md @@ -0,0 +1,16 @@ +Please analyze and fix the GitHub issue: $ARGUMENTS. + +Follow these steps: + +1. Use `gh issue view` to get the issue details +2. Understand the problem described in the issue +3. Search the codebase for relevant files, using your knowledge of the project structure and the issue description +4. Implement the necessary changes to fix the issue +5. Write and run tests (store in `tests/` directory) to verify the fix +6. Create a descriptive commit message +7. Ensure code passes linting and type checking +8. Push +9. Ensure code passes pre-push tests +10. Create a PR + +Remember to use the GitHub CLI (`gh`) for all GitHub-related tasks. 
\ No newline at end of file diff --git a/.context/README.md b/.context/README.md new file mode 100644 index 000000000..79b6a4459 --- /dev/null +++ b/.context/README.md @@ -0,0 +1,44 @@ +# Context Files for LLMs and AI Tools + +This directory contains plans, reports, and other context files that are: +- Used to provide context to LLMs during development +- Not committed to the repository +- May be transient or belong in other repositories + +## Directory Structure + +- `plans/` - Documentation plans and roadmaps +- `reports/` - Generated reports and analyses +- `research/` - Research notes and findings +- `templates/` - Reusable templates for Claude interactions + +## Usage + +Place files here that you want to reference--for example, using @ mentions in Claude--such as: +- Documentation planning documents +- API migration guides +- Performance reports +- Architecture decisions + +## Example Structure + +``` +.context/ +├── plans/ +│ ├── v3.2-release-plan.md +│ └── api-migration-guide.md +├── reports/ +│ ├── weekly-progress-2025-07.md +│ └── pr-summary-2025-06.md +├── research/ +│ └── competitor-analysis.md +└── templates/ + └── release-notes-template.md +``` + +## Best Practices + +1. Use descriptive filenames that indicate the content and date +2. Keep files organized in appropriate subdirectories +3. Consider using date prefixes for time-sensitive content (e.g., `2025-07-01-meeting-notes.md`) +4. 
Remove outdated files periodically to keep the context relevant \ No newline at end of file diff --git a/.github/actions/setup-docs-env/action.yml b/.github/actions/setup-docs-env/action.yml new file mode 100644 index 000000000..44d285f81 --- /dev/null +++ b/.github/actions/setup-docs-env/action.yml @@ -0,0 +1,29 @@ +name: 'Setup Documentation Environment' +description: 'Sets up Node.js environment and installs dependencies for documentation workflows' + +runs: + using: 'composite' + steps: + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'yarn' + + - name: Install dependencies + run: yarn install + shell: bash + + - name: Verify Hugo installation + run: | + echo "Checking Hugo availability..." + if command -v hugo &> /dev/null; then + echo "✅ Hugo found on PATH: $(which hugo)" + hugo version + else + echo "⚠️ Hugo not found on PATH, will use project-local Hugo via yarn" + fi + + echo "Checking yarn hugo command..." + yarn hugo version || echo "⚠️ Project Hugo not available via yarn" + shell: bash \ No newline at end of file diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index da39eaf1a..4a541203f 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -1,98 +1,282 @@ -# GitHub Copilot Instructions for InfluxData Documentation +# InfluxData Documentation Repository (docs-v2) -## Purpose and Scope +Always follow these instructions first and fallback to additional search and context gathering only when the information provided here is incomplete or found to be in error. -GitHub Copilot should help document InfluxData products by creating clear, accurate technical content with proper code examples, frontmatter, and formatting. +## Working Effectively -## Documentation Structure +### Collaboration approach + +Be a critical thinking partner, provide honest feedback, and identify potential issues. 
+ +### Bootstrap, Build, and Test the Repository + +Execute these commands in order to set up a complete working environment: + +1. **Install Node.js dependencies** (takes ~4 seconds): + + ```bash + # Skip Cypress binary download due to network restrictions in CI environments + CYPRESS_INSTALL_BINARY=0 yarn install + ``` + +2. **Build the static site** (takes ~75 seconds, NEVER CANCEL - set timeout to 180+ seconds): + + ```bash + npx hugo --quiet + ``` + +3. **Start the development server** (builds in ~92 seconds, NEVER CANCEL - set timeout to 150+ seconds): + + ```bash + npx hugo server --bind 0.0.0.0 --port 1313 + ``` + + - Access at: http://localhost:1313/ + - Serves 5,359+ pages and 441 static files + - Auto-rebuilds on file changes + +4. **Alternative Docker development setup** (use if local Hugo fails): + ```bash + docker compose up local-dev + ``` + **Note**: May fail in restricted network environments due to Alpine package manager issues. + +### Testing (CRITICAL: NEVER CANCEL long-running tests) + +#### Code Block Testing (takes 5-15 minutes per product, NEVER CANCEL - set timeout to 30+ minutes): + +```bash +# Build test environment first (takes ~30 seconds, may fail due to network restrictions) +docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest . + +# Test all products (takes 15-45 minutes total) +yarn test:codeblocks:all + +# Test specific products +yarn test:codeblocks:cloud +yarn test:codeblocks:v2 +yarn test:codeblocks:telegraf +``` + +#### Link Validation (takes 1-5 minutes): + +Runs automatically on pull requests. +Requires the **link-checker** binary from the repo release artifacts. 
+ +```bash +# Test specific files/products (faster) +# JSON format is required for accurate reporting +link-checker map content/influxdb3/core/**/*.md \ +| link-checker check \ + --config .ci/link-checker/production.lycherc.toml \ + --format json +``` + +#### Style Linting (takes 30-60 seconds): + +```bash +# Basic Vale linting +docker compose run -T vale content/**/*.md + +# Product-specific linting with custom configurations +docker compose run -T vale --config=content/influxdb3/cloud-dedicated/.vale.ini --minAlertLevel=error content/influxdb3/cloud-dedicated/**/*.md +``` + +#### JavaScript and CSS Linting (takes 5-10 seconds): + +```bash +yarn eslint assets/js/**/*.js +yarn prettier --check "**/*.{css,js,ts,jsx,tsx}" +``` + +### Pre-commit Hooks (automatically run, can be skipped if needed): + +```bash +# Run all pre-commit checks manually +yarn lint + +# Skip pre-commit hooks if necessary (not recommended) +git commit -m "message" --no-verify +``` + +## Validation Scenarios + +Always test these scenarios after making changes to ensure full functionality: + +### 1. Documentation Rendering Test + +```bash +# Start Hugo server +npx hugo server --bind 0.0.0.0 --port 1313 + +# Verify key pages load correctly (200 status) +curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/influxdb3/core/ +curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/influxdb/v2/ +curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/telegraf/v1/ + +# Verify content contains expected elements +curl -s http://localhost:1313/influxdb3/core/ | grep -i "influxdb" +``` + +### 2. Build Output Validation + +```bash +# Verify build completes successfully +npx hugo --quiet + +# Check build output exists and has reasonable size (~529MB) +ls -la public/ +du -sh public/ + +# Verify key files exist +file public/index.html +file public/influxdb3/core/index.html +``` + +### 3. 
Shortcode and Formatting Test + +```bash +# Test shortcode examples page +yarn test:links content/example.md +``` + +## Repository Structure and Key Locations + +### Content Organization + +- **InfluxDB 3**: `/content/influxdb3/` (core, enterprise, cloud-dedicated, cloud-serverless, clustered, explorer) +- **InfluxDB v2**: `/content/influxdb/` (v2, cloud, enterprise_influxdb, v1) +- **Telegraf**: `/content/telegraf/v1/` +- **Other tools**: `/content/kapacitor/`, `/content/chronograf/`, `/content/flux/` +- **Shared content**: `/content/shared/` +- **Examples**: `/content/example.md` (comprehensive shortcode reference) + +### Configuration Files + +- **Hugo config**: `/config/_default/` +- **Package management**: `package.json`, `yarn.lock` +- **Docker**: `compose.yaml`, `Dockerfile.pytest` +- **Git hooks**: `lefthook.yml` +- **Testing**: `cypress.config.js`, `pytest.ini` (in test directories) +- **Linting**: `.vale.ini`, `.prettierrc.yaml`, `eslint.config.js` + +### Build and Development + +- **Hugo binary**: Available via `npx hugo` (version 0.148.2+) +- **Static assets**: `/assets/` (JavaScript, CSS, images) +- **Build output**: `/public/` (generated, ~529MB) +- **Layouts**: `/layouts/` (Hugo templates) +- **Data files**: `/data/` (YAML/JSON data for templates) + +## Technology Stack + +- **Static Site Generator**: Hugo (0.148.2+ extended) +- **Package Manager**: Yarn (1.22.22+) with Node.js (20.19.4+) +- **Testing Framework**: + - Pytest with pytest-codeblocks (for code examples) + - Cypress (for E2E tests) + - influxdata/docs-link-checker (for link validation) + - Vale (for style and writing guidelines) +- **Containerization**: Docker with Docker Compose +- **Linting**: ESLint, Prettier, Vale +- **Git Hooks**: Lefthook + +## Common Tasks and Build Times + +### Network Connectivity Issues + +In restricted environments, these commands may fail due to external dependency downloads: + +- `docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest .` 
(InfluxData repositories, HashiCorp repos) +- `docker compose up local-dev` (Alpine package manager) +- Cypress binary installation (use `CYPRESS_INSTALL_BINARY=0`) + +Document these limitations but proceed with available functionality. + +### Validation Commands for CI + +Always run these before committing changes: + +```bash +# Format and lint code +yarn prettier --write "**/*.{css,js,ts,jsx,tsx}" +yarn eslint assets/js/**/*.js + +# Test Hugo build +npx hugo --quiet + +# Test development server startup +timeout 150 npx hugo server --bind 0.0.0.0 --port 1313 & +sleep 120 +curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/ +pkill hugo +``` + +## Key Projects in This Codebase + +1. **InfluxDB 3 Documentation** (Core, Enterprise, Clustered, Cloud Dedicated, Cloud Serverless, and InfluxDB 3 plugins for Core and Enterprise) +2. **InfluxDB 3 Explorer** (UI) +3. **InfluxDB v2 Documentation** (OSS and Cloud) +4. **InfluxDB v1 Documentation** (OSS and Enterprise) +5. **Telegraf Documentation** (agent and plugins) +6. **Supporting Tools Documentation** (Kapacitor, Chronograf, Flux) +7. **API Reference Documentation** (`/api-docs/`) +8. 
**Shared Documentation Components** (`/content/shared/`) + +## Important Locations for Frequent Tasks + +- **Shortcode reference**: `/content/example.md` +- **Contributing guide**: `CONTRIBUTING.md` +- **Testing guide**: `TESTING.md` +- **Product configurations**: `/data/products.yml` +- **Vale style rules**: `/.ci/vale/styles/` +- **GitHub workflows**: `/.github/workflows/` +- **Test scripts**: `/test/scripts/` +- **Hugo layouts and shortcodes**: `/layouts/` +- **CSS/JS assets**: `/assets/` + +## Content Guidelines and Style + +### Documentation Structure - **Product version data**: `/data/products.yml` -- **Products**: - - InfluxDB OSS 1.x - - Documentation source path: `/content/influxdb/v1` - - Code repository: https://github.com/influxdata/influxdb - - InfluxDB OSS 2.x - - Documentation source path: `/content/influxdb/v2` - - Code repository: https://github.com/influxdata/influxdb - - InfluxDB 3 Core - - Documentation source path: `/content/influxdb3/core` - - Code repositories: https://github.com/influxdata/influxdb, https://github.com/influxdata/influxdb3_core - - InfluxDB Enterprise v1 (1.x) - - Documentation source path: `/content/influxdb/enterprise_influxdb` - - Code repository: https://github.com/influxdata/influxdb - - InfluxDB Cloud v2 (TSM) - - Documentation source path: `/content/influxdb/cloud` - - Code repository: https://github.com/influxdata/idpe - - InfluxDB 3 Cloud Dedicated - - Documentation source path: `/content/influxdb3/cloud-dedicated` - - Code repository: https://github.com/influxdata/influxdb - - InfluxDB 3 Cloud Serverless - - Documentation source path: `/content/influxdb3/cloud-serverless` - - Code repository: https://github.com/influxdata/idpe - - InfluxDB 3 Clustered - - Documentation source path: `/content/influxdb3/clustered` - - Code repository: https://github.com/influxdata/influxdb - - Telegraf - - Documentation source path: `/content/telegraf/v1` - - Code repository: https://github.com/influxdata/telegraf - - Kapacitor - - 
Documentation source path: `/content/kapacitor/v1` - - Code repository: https://github.com/influxdata/kapacitor - - Chronograf - - Documentation source path: `/content/chronograf/v1` - - Code repository: https://github.com/influxdata/chronograf - - Flux - - Documentation source path: `/content/flux/v0` - - Code repository: https://github.com/influxdata/flux -- **InfluxData-supported tools**: - - InfluxDB API client libraries - - Code repositories: https://github.com/InfluxCommunity - - InfluxDB 3 processing engine plugins - - Code repository: https://github.com/influxdata/influxdb3_plugins - **Query Languages**: SQL, InfluxQL, Flux (use appropriate language per product version) - **Documentation Site**: https://docs.influxdata.com -- **Repository**: https://github.com/influxdata/docs-v2 - **Framework**: Hugo static site generator -## Style Guidelines +### Style Guidelines - Follow Google Developer Documentation style guidelines -- For API references, follow YouTube Data API style - Use semantic line feeds (one sentence per line) -- Use only h2-h6 headings in content (h1 comes from frontmatter title properties) -- Use sentence case for headings -- Use GitHub callout syntax +- Format code examples to fit within 80 characters +- Use long options in command line examples (`--option` instead of `-o`) +- Use GitHub callout syntax for notes and warnings - Image naming: `project/version-context-description.png` -- Use appropriate product names and versions consistently -- Follow InfluxData vocabulary guidelines -## Markdown and Shortcodes +### Markdown and Shortcodes -- Include proper frontmatter for each page: +Include proper frontmatter for all content pages: - ```yaml - title: # Page title (h1) - seotitle: # SEO title - list_title: # Title for article lists - description: # SEO description - menu: - product_version: - weight: # Page order (1-99, 101-199, etc.) 
- ``` +```yaml +title: # Page title (h1) +seotitle: # SEO title +description: # SEO description +menu: + product_version: +weight: # Page order (1-99, 101-199, etc.) +``` -- Use provided shortcodes correctly: - - Notes/warnings: `{{% note %}}`, `{{% warn %}}` - - Product-specific: `{{% enterprise %}}`, `{{% cloud %}}` - - Tabbed content: `{{< tabs-wrapper >}}`, `{{% tabs %}}`, `{{% tab-content %}}` - - Version links: `{{< latest >}}`, `{{< latest-patch >}}` - - API endpoints: `{{< api-endpoint >}}` - - Required elements: `{{< req >}}` - - Navigation: `{{< page-nav >}}` - - Diagrams: `{{< diagram >}}`, `{{< filesystem-diagram >}}` +Key shortcodes (see `/content/example.md` for full reference): -## Code Examples and Testing +- Notes/warnings (GitHub syntax): `> [!Note]`, `> [!Warning]` +- Tabbed content: `{{< tabs-wrapper >}}`, `{{% tabs %}}`, `{{% tab-content %}}` +- Code examples: `{{< code-tabs-wrapper >}}`, `{{% code-tabs %}}`, `{{% code-tab-content %}}` +- Required elements: `{{< req >}}` +- API endpoints: `{{< api-endpoint >}}` -- Provide complete, working examples with proper testing annotations: +### Code Examples and Testing + +Provide complete, working examples with pytest annotations: ```python print("Hello, world!") @@ -104,44 +288,21 @@ print("Hello, world!") Hello, world! ``` -- CLI command example: +## Troubleshooting Common Issues -```sh -influx query 'from(bucket:"example") |> range(start:-1h)' -``` +1. **"Pytest collected 0 items"**: Use `python` (not `py`) for code block language identifiers +2. **Hugo build errors**: Check `/config/_default/` for configuration issues +3. **Docker build failures**: Expected in restricted networks - document and continue with local Hugo +4. **Cypress installation failures**: Use `CYPRESS_INSTALL_BINARY=0 yarn install` +5. **Link validation slow**: Use file-specific testing: `yarn test:links content/specific-file.md` +6. 
**Vale linting errors**: Check `.ci/vale/styles/config/vocabularies` for accepted/rejected terms - +## Additional Instruction Files -``` -Table: keys: [_start, _stop, _field, _measurement] - _start:time _stop:time _field:string _measurement:string _time:time _value:float ------------------------------- ------------------------------ ---------------------- ---------------------- ------------------------------ ---------------------------- -``` +For specific workflows and content types, also refer to: -- Include necessary environment variables -- Show proper credential handling for authenticated commands +- **InfluxDB 3 code placeholders**: `.github/instructions/influxdb3-code-placeholders.instructions.md` +- **Contributing guidelines**: `.github/instructions/contributing.instructions.md` +- **Content-specific instructions**: Check `.github/instructions/` directory -## API Documentation - -- Follow OpenAPI specification patterns -- Match REST API examples to current implementation -- Include complete request/response examples -- Document required headers and authentication - -## Versioning and Product Differentiation - -- Clearly distinguish between different InfluxDB versions (1.x, 2.x, 3.x) -- Use correct terminology for each product variant -- Apply appropriate UI descriptions and screenshots -- Reference appropriate query language per version - -## Development Tools - -- Vale.sh linter for style checking -- Docker for local development and testing -- pytest and pytest-codeblocks for validating code examples -- Pre-commit hooks for quality assurance - -## Related repositories - -- **Internal dcumentation assistance requests**: https://github.com/influxdata/DAR/issues +Remember: This is a large documentation site with complex build processes. Patience with build times is essential, and NEVER CANCEL long-running operations. 
diff --git a/.github/instructions/contributing.instructions.md b/.github/instructions/contributing.instructions.md new file mode 100644 index 000000000..f80ec35d0 --- /dev/null +++ b/.github/instructions/contributing.instructions.md @@ -0,0 +1,287 @@ +--- +applyTo: "content/**/*.md, layouts/**/*.html" +--- + +# Contributing instructions for InfluxData Documentation + +## Purpose and scope + +Help document InfluxData products +by creating clear, accurate technical content with proper +code examples, frontmatter, shortcodes, and formatting. + +## Quick Start + +Ready to contribute? Here's the essential workflow: + +1. [Sign the InfluxData CLA](#sign-the-influxdata-cla) (for substantial changes) +2. [Fork and clone](#fork-and-clone-influxdata-documentation-repository) this repository +3. [Install dependencies](#development-environment-setup) (Node.js, Yarn, Docker) +4. Make your changes following [style guidelines](#making-changes) +5. [Test your changes](TESTING.md) (pre-commit and pre-push hooks run automatically) +6. [Submit a pull request](#submission-process) + +For detailed setup and reference information, see the sections below. + +--- + +### Sign the InfluxData CLA + +The InfluxData Contributor License Agreement (CLA) is part of the legal framework +for the open source ecosystem that protects both you and InfluxData. +To make substantial contributions to InfluxData documentation, first sign the InfluxData CLA. +What constitutes a "substantial" change is at the discretion of InfluxData documentation maintainers. 
+ +[Sign the InfluxData CLA](https://www.influxdata.com/legal/cla/) + +_**Note:** Typo and broken link fixes are greatly appreciated and do not require signing the CLA._ + +_If you're new to contributing or you're looking for an easy update, see [`docs-v2` good-first-issues](https://github.com/influxdata/docs-v2/issues?q=is%3Aissue+is%3Aopen+label%3Agood-first-issue)._ + +### Fork and clone InfluxData Documentation Repository + +[Fork this repository](https://help.github.com/articles/fork-a-repo/) and +[clone it](https://help.github.com/articles/cloning-a-repository/) to your local machine. + +--- + +### Prerequisites + +docs-v2 automatically runs format (Markdown, JS, and CSS) linting and code block tests for staged files that you try to commit. + +For the linting and tests to run, you need to install: + +- **Node.js and Yarn**: For managing dependencies and running build scripts +- **Docker**: For running Vale linter and code block tests +- **VS Code extensions** (optional): For enhanced editing experience + + +```sh +git commit -m "" --no-verify +``` +# ... (see full CONTRIBUTING.md for complete example) +```bash +docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest . +``` + +### Install Visual Studio Code extensions + + +- Comment Anchors: recognizes tags (for example, `//SOURCE`) and makes links and filepaths clickable in comments. +- Vale: shows linter errors and suggestions in the editor. +- YAML Schemas: validates frontmatter attributes. + + +_See full CONTRIBUTING.md for complete details._ + +#### Markdown + +Most docs-v2 documentation content uses [Markdown](https://en.wikipedia.org/wiki/Markdown). + +_Some parts of the documentation, such as `./api-docs`, contain Markdown within YAML and rely on additional tooling._ + +#### Semantic line feeds + + +```diff +-Data is taking off. This data is time series. You need a database that specializes in time series. You should check out InfluxDB. ++Data is taking off. This data is time series. 
You need a database that specializes in time series. You need InfluxDB. +# ... (see full CONTRIBUTING.md for complete example) +``` + +### Essential Frontmatter Reference + + +```yaml +title: # Title of the page used in the page's h1 +description: # Page description displayed in search engine results +# ... (see full CONTRIBUTING.md for complete example) +``` + + +_See full CONTRIBUTING.md for complete details._ + +#### Notes and warnings + +```md +> [!Note] +> Insert note markdown content here. + +> [!Warning] +> Insert warning markdown content here. + +> [!Caution] +> Insert caution markdown content here. + +> [!Important] +> Insert important markdown content here. + +> [!Tip] +> Insert tip markdown content here. +``` + +#### Tabbed content + +```md +{{< tabs-wrapper >}} + +{{% tabs %}} +[Button text for tab 1](#) +[Button text for tab 2](#) +{{% /tabs %}} + +{{% tab-content %}} +Markdown content for tab 1. +{{% /tab-content %}} + +{{% tab-content %}} +Markdown content for tab 2. +{{% /tab-content %}} + +{{< /tabs-wrapper >}} +``` + +#### Required elements + +```md +{{< req >}} +{{< req type="key" >}} + +- {{< req "\*" >}} **This element is required** +- {{< req "\*" >}} **This element is also required** +- **This element is NOT required** +``` + +For the complete shortcodes reference with all available shortcodes, see [Complete Shortcodes Reference](#complete-shortcodes-reference). + +--- + +### InfluxDB API documentation + +docs-v2 includes the InfluxDB API reference documentation in the `/api-docs` directory. +To edit the API documentation, edit the YAML files in `/api-docs`. + +InfluxData uses [Redoc](https://github.com/Redocly/redoc) to generate the full +InfluxDB API documentation when documentation is deployed. +Redoc generates HTML documentation using the InfluxDB `swagger.yml`. +For more information about generating InfluxDB API documentation, see the +[API Documentation README](https://github.com/influxdata/docs-v2/tree/master/api-docs#readme). 
+ +--- + +## Testing & Quality Assurance + +For comprehensive testing information, including code block testing, link validation, style linting, and advanced testing procedures, see **[TESTING.md](TESTING.md)**. + +### Quick Testing Reference + +```bash +# Test code blocks +yarn test:codeblocks:all + +# Test links +yarn test:links content/influxdb3/core/**/*.md + +# Run style linting +docker compose run -T vale content/**/*.md +``` + +Pre-commit hooks run automatically when you commit changes, testing your staged files with Vale, Prettier, Cypress, and Pytest. To skip hooks if needed: + +```sh +git commit -m "" --no-verify +``` + +--- + +### Commit Guidelines + +When creating commits, follow these guidelines: + +- Use a clear, descriptive commit message that explains the change +- Start with a type prefix: `fix()`, `feat()`, `style()`, `refactor()`, `test()`, `chore()` +- For product-specific changes, include the product in parentheses: `fix(enterprise)`, `fix(influxdb3)`, `fix(core)` +- Keep the first line under 72 characters +- Reference issues with "closes" or "fixes": `closes #123` or `closes influxdata/DAR#123` +- For multiple issues, use comma separation: `closes influxdata/DAR#517, closes influxdata/DAR#518` + +**Examples:** +``` +fix(enterprise): correct Docker environment variable name for license email +fix(influxdb3): correct Docker environment variable and compose examples for monolith +feat(telegraf): add new plugin documentation +chore(ci): update Vale configuration +``` + +## Reference Sections + + +_See full CONTRIBUTING.md for complete details._ + +### Complete Frontmatter Reference + +_For the complete frontmatter reference, see frontmatter-reference.instructions.md._ + +### Complete Shortcodes Reference + +_For the complete shortcodes reference, see shortcodes-reference.instructions.md._ + +#### Vale style linting configuration + +docs-v2 includes Vale writing style linter configurations to enforce 
documentation writing style rules, guidelines, branding, and vocabulary terms. + +**Advanced Vale usage:** + +```sh +docker compose run -T vale --config=content/influxdb/cloud-dedicated/.vale.ini --minAlertLevel=error content/influxdb/cloud-dedicated/write-data/**/*.md +``` + + +- **Error**: +- **Warning**: General style guide rules and best practices +- **Suggestion**: Style preferences that may require refactoring or updates to an exceptions list + +#### Configure style rules + + +_See full CONTRIBUTING.md for complete details._ + +#### JavaScript in the documentation UI + +The InfluxData documentation UI uses JavaScript with ES6+ syntax and +`assets/js/main.js` as the entry point to import modules from + + +1. In your HTML file, add a `data-component` attribute to the element that + +# ... (see full CONTRIBUTING.md for complete example) +```js + import { debugLog, debugBreak, debugInspect } from './utils/debug-helpers.js'; + + const data = debugInspect(someData, 'Data'); + debugLog('Processing data', 'myFunction'); + + function processData() { + // Add a breakpoint that works with DevTools + debugBreak(); + + // Your existing code... + } + ``` + +3. Start Hugo in development mode--for example: + + ```bash + yarn hugo server + ``` + +4. In VS Code, go to Run > Start Debugging, and select the "Debug JS (debug-helpers)" configuration. + +Your system uses the configuration in `launch.json` to launch the site in Chrome +and attach the debugger to the Developer Tools console. + +Make sure to remove the debug statements before merging your changes. +The debug helpers are designed to be used in development and should not be used in production. 
+ +_See full CONTRIBUTING.md for complete details._ + diff --git a/.github/instructions/frontmatter-reference.instructions.md b/.github/instructions/frontmatter-reference.instructions.md new file mode 100644 index 000000000..8ab4df618 --- /dev/null +++ b/.github/instructions/frontmatter-reference.instructions.md @@ -0,0 +1,198 @@ +--- +applyTo: "content/**/*.md, layouts/**/*.html" +--- + +### Complete Frontmatter Reference + +Every documentation page includes frontmatter which specifies information about the page. +Frontmatter populates variables in page templates and the site's navigation menu. + +```yaml +title: # Title of the page used in the page's h1 +seotitle: # Page title used in the html title and used in search engine results +list_title: # Title used in article lists generated using the {{< children >}} shortcode +description: # Page description displayed in search engine results +menu: + influxdb_2_0: + name: # Article name that only appears in the left nav + parent: # Specifies a parent group and nests navigation items +weight: # Determines sort order in both the nav tree and in article lists +draft: # If true, will not render page on build +product/v2.x/tags: # Tags specific to each version (replace product and .x" with the appropriate product and minor version ) +related: # Creates links to specific internal and external content at the bottom of the page + - /path/to/related/article + - https://external-link.com, This is an external link +external_url: # Used in children shortcode type="list" for page links that are external +list_image: # Image included with article descriptions in children type="articles" shortcode +list_note: # Used in children shortcode type="list" to add a small note next to listed links +list_code_example: # Code example included with article descriptions in children type="articles" shortcode +list_query_example:# Code examples included with article descriptions in children type="articles" shortcode, + # References to examples 
in data/query_examples +canonical: # Path to canonical page, overrides auto-gen'd canonical URL +v2: # Path to v2 equivalent page +alt_links: # Alternate pages in other products/versions for cross-product navigation + cloud-dedicated: /influxdb3/cloud-dedicated/path/to/page/ + core: /influxdb3/core/path/to/page/ +prepend: # Prepend markdown content to an article (especially powerful with cascade) + block: # (Optional) Wrap content in a block style (note, warn, cloud) + content: # Content to prepend to article +append: # Append markdown content to an article (especially powerful with cascade) + block: # (Optional) Wrap content in a block style (note, warn, cloud) + content: # Content to append to article +metadata: [] # List of metadata messages to include under the page h1 +updated_in: # Product and version the referenced feature was updated in (displayed as a unique metadata) +source: # Specify a file to pull page content from (typically in /content/shared/) +``` + +#### Title usage + +##### `title` + +The `title` frontmatter populates each page's HTML `h1` heading tag. +It shouldn't be overly long, but should set the context for users coming from outside sources. + +##### `seotitle` + +The `seotitle` frontmatter populates each page's HTML `title` attribute. +Search engines use this in search results (not the page's h1) and therefore it should be keyword optimized. + +##### `list_title` + +The `list_title` frontmatter determines an article title when in a list generated +by the [`{{< children >}}` shortcode](#generate-a-list-of-children-articles). + +##### `menu > name` + +The `name` attribute under the `menu` frontmatter determines the text used in each page's link in the site navigation. +It should be short and assume the context of its parent if it has one. + +#### Page Weights + +To ensure pages are sorted both by weight and their depth in the directory +structure, pages should be weighted in "levels." +All top level pages are weighted 1-99. 
+The next level is 101-199. +Then 201-299 and so on. + +_**Note:** `_index.md` files should be weighted one level up from the other `.md` files in the same directory._ + +#### Related content + +Use the `related` frontmatter to include links to specific articles at the bottom of an article. + +- If the page exists inside of this documentation, just include the path to the page. + It will automatically detect the title of the page. +- If the page exists inside of this documentation, but you want to customize the link text, + include the path to the page followed by a comma, and then the custom link text. + The path and custom text must be in that order and separated by a comma and a space. +- If the page exists outside of this documentation, include the full URL and a title for the link. + The link and title must be in that order and separated by a comma and a space. + +```yaml +related: + - /v2.0/write-data/quick-start + - /v2.0/write-data/quick-start, This is custom text for an internal link + - https://influxdata.com, This is an external link +``` + +#### Canonical URLs + +Search engines use canonical URLs to accurately rank pages with similar or identical content. +The `canonical` HTML meta tag identifies which page should be used as the source of truth. + +By default, canonical URLs are automatically generated for each page in the InfluxData +documentation using the latest version of the current product and the current path. + +Use the `canonical` frontmatter to override the auto-generated canonical URL. 
+ +_**Note:** The `canonical` frontmatter supports the [`{{< latest >}}` shortcode](#latest-links)._ + +```yaml +canonical: /path/to/canonical/doc/ + +# OR + +canonical: /{{< latest "influxdb" "v2" >}}/path/to/canonical/doc/ +``` + +#### v2 equivalent documentation + +To display a notice on a 1.x page that links to an equivalent 2.0 page, +add the following frontmatter to the 1.x page: + +```yaml +v2: /influxdb/v2.0/get-started/ +``` + +#### Alternative links for cross-product navigation + +Use the `alt_links` frontmatter to specify equivalent pages in other InfluxDB products, +for example, when a page exists at a different path in a different version or if +the feature doesn't exist in that product. +This enables the product switcher to navigate users to the corresponding page when they +switch between products. If a page doesn't exist in another product (for example, an +Enterprise-only feature), point to the nearest parent page if relevant. + +```yaml +alt_links: + cloud-dedicated: /influxdb3/cloud-dedicated/admin/tokens/create-token/ + cloud-serverless: /influxdb3/cloud-serverless/admin/tokens/create-token/ + core: /influxdb3/core/reference/cli/influxdb3/update/ # Points to parent if exact page doesn't exist +``` + +Supported product keys for InfluxDB 3: +- `core` +- `enterprise` +- `cloud-serverless` +- `cloud-dedicated` +- `clustered` + +#### Prepend and append content to a page + +Use the `prepend` and `append` frontmatter to add content to the top or bottom of a page. +Each has the following fields: + +```yaml +append: | + > [!Note] + > #### This is example markdown content + > This is just an example note block that gets appended to the article. +``` + +Use this frontmatter with [cascade](#cascade) to add the same content to +all children pages as well. + +```yaml +cascade: + append: | + > [!Note] + > #### This is example markdown content + > This is just an example note block that gets appended to the article. 
+``` + +#### Cascade + +To automatically apply frontmatter to a page and all of its children, use the +[`cascade` frontmatter](https://gohugo.io/content-management/front-matter/#front-matter-cascade) +built into Hugo. + +```yaml +title: Example page +description: Example description +cascade: + layout: custom-layout +``` + +`cascade` applies the frontmatter to all children unless the child already includes +those frontmatter keys. Frontmatter defined on the page overrides frontmatter +"cascaded" from a parent. + +#### Use shared content in a page + +Use the `source` frontmatter to specify a shared file to use to populate the +page content. Shared files are typically stored in the `/content/shared` directory. + +When building shared content, use the `show-in` and `hide-in` shortcodes to show +or hide blocks of content based on the current InfluxDB product/version. +For more information, see [show-in](#show-in) and [hide-in](#hide-in). + diff --git a/.github/instructions/influxdb3-code-placeholders.instructions.md b/.github/instructions/influxdb3-code-placeholders.instructions.md new file mode 100644 index 000000000..583ef705a --- /dev/null +++ b/.github/instructions/influxdb3-code-placeholders.instructions.md @@ -0,0 +1,89 @@ +--- +mode: 'edit' +applyTo: "content/{influxdb3/core,influxdb3/enterprise,shared/influxdb3*}/**" +--- +## Best Practices + +- Use UPPERCASE for placeholders to make them easily identifiable +- Don't use pronouns in placeholders (e.g., "your", "this") +- List placeholders in the same order they appear in the code +- Provide clear descriptions including: +  - Expected data type or format +  - Purpose of the value +  - Any constraints or requirements +- Mark optional placeholders as "Optional:" in their descriptions +- Placeholder key descriptions should fit the context of the code snippet +- Include examples for complex formats + +## Writing Placeholder Descriptions + +Descriptions should follow consistent patterns: + +1. 
**Admin Authentication tokens**: + - Recommended: "a {{% token-link "admin" %}} for your {{< product-name >}} instance" + - Avoid: "your token", "the token", "an authorization token" +2. **Database resource tokens**: + - Recommended: "your {{% token-link "database" %}}"{{% show-in "enterprise" %}} with permissions on the specified database{{% /show-in %}} + - Avoid: "your token", "the token", "an authorization token" +3. **Database names**: + - Recommended: "the name of the database to [action]" + - Avoid: "your database", "the database name" +4. **Conditional content**: + - Use `{{% show-in "enterprise" %}}` for content specific to enterprise versions + - Example: "your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}}" + +## Common placeholders for InfluxDB 3 + +- `AUTH_TOKEN`: your {{% token-link %}} +- `DATABASE_NAME`: the database to use +- `TABLE_NAME`: Name of the table/measurement to query or write to +- `NODE_ID`: Node ID for a specific node in a cluster +- `CLUSTER_ID`: Cluster ID for a specific cluster +- `HOST`: InfluxDB server hostname or URL +- `PORT`: InfluxDB server port (typically 8181) +- `QUERY`: SQL or InfluxQL query string +- `LINE_PROTOCOL`: Line protocol data for writes +- `PLUGIN_FILENAME`: Name of plugin file to use +- `CACHE_NAME`: Name for a new or existing cache + +## Hugo shortcodes in Markdown + +- `{{% code-placeholders "PLACEHOLDER1|PLACEHOLDER2" %}}`: Use this shortcode to define placeholders in code snippets. +- `{{% /code-placeholders %}}`: End the shortcode. +- `{{% code-placeholder-key %}}`: Use this shortcode to define a specific placeholder key. +- `{{% /code-placeholder-key %}}`: End the specific placeholder key shortcode. 
+ +## Language-Specific Placeholder Formatting + +- **Bash/Shell**: Use uppercase variables with no quotes or prefix + ```bash + --database DATABASE_NAME + ``` +- Python: Use string literals with quotes + ```python + database_name='DATABASE_NAME' + ``` +- JSON: Use key-value pairs with quotes + ```json + { + "database": "DATABASE_NAME" + } + ``` + +## Real-World Examples from Documentation + +### InfluxDB CLI Commands +This pattern appears frequently in CLI documentation: + +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} +```bash +influxdb3 write \ + --database DATABASE_NAME \ + --token AUTH_TOKEN \ + --precision ns +{{% /code-placeholders %}} + +Replace the following placeholders with your values: + +{{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to write to +{{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with write permissions on the specified database{{% /show-in %}} \ No newline at end of file diff --git a/.github/instructions/shortcodes-reference.instructions.md b/.github/instructions/shortcodes-reference.instructions.md new file mode 100644 index 000000000..098ae1e07 --- /dev/null +++ b/.github/instructions/shortcodes-reference.instructions.md @@ -0,0 +1,1189 @@ +--- +applyTo: "content/**/*.md, layouts/**/*.html" +--- + +### Complete Shortcodes Reference + +#### Notes and warnings + +Shortcodes are available for formatting notes and warnings in each article: + +```md +{{% note %}} +Insert note markdown content here. +{{% /note %}} + +{{% warn %}} +Insert warning markdown content here. 
+{{% /warn %}} +``` + +#### Product data + +Display the full product name and version name for the current page--for example: + +- InfluxDB 3 Core +- InfluxDB 3 Cloud Dedicated + +```md +{{% product-name %}} +``` + +Display the short version name (part of the key used in `products.yml`) from the current page URL--for example: + +- `/influxdb3/core` returns `core` + +```md +{{% product-key %}} +``` + +##### Enterprise name + +The name used to refer to InfluxData's enterprise offering is subject to change. +To facilitate easy updates in the future, use the `enterprise-name` shortcode +when referencing the enterprise product. +This shortcode accepts a `"short"` parameter which uses the "short-name". + +``` +This is content that references {{< enterprise-name >}}. +This is content that references {{< enterprise-name "short" >}}. +``` + +Product names are stored in `data/products.yml`. + +##### Enterprise link + +References to InfluxDB Enterprise are often accompanied with a link to a page where +visitors can get more information about the Enterprise offering. +This link is subject to change. +Use the `enterprise-link` shortcode when including links to more information about +InfluxDB Enterprise. + +``` +Find more info [here][{{< enterprise-link >}}] +``` + +#### Latest patch version + +Use the `{{< latest-patch >}}` shortcode to add the latest patch version of a product. +By default, this shortcode parses the product and minor version from the URL. +To specify a specific product and minor version, use the `product` and `version` arguments. +This is easier to maintain because you update the version number in the `data/products.yml` file instead of updating individual links and code examples. 
+ +```md +{{< latest-patch >}} + +{{< latest-patch product="telegraf" >}} + +{{< latest-patch product="chronograf" version="1.7" >}} +``` + +#### Latest influx CLI version + +Use the `{{< latest-patch cli=true >}}` shortcode to add the latest version of the `influx` +CLI supported by the minor version of InfluxDB. +By default, this shortcode parses the minor version from the URL. +To specify a specific minor version, use the `version` argument. +Maintain CLI version numbers in the `data/products.yml` file instead of updating individual links and code examples. + +```md +{{< latest-patch cli=true >}} + +{{< latest-cli version="2.1" >}} +``` + +#### API endpoint + +Use the `{{< api-endpoint >}}` shortcode to generate a code block that contains +a colored request method, a specified API endpoint, and an optional link to +the API reference documentation. +Provide the following arguments: + +- **method**: HTTP request method (get, post, patch, put, or delete) +- **endpoint**: API endpoint +- **api-ref**: Link the endpoint to a specific place in the API documentation +- **influxdb_host**: Specify which InfluxDB product host to use + _if the `endpoint` contains the `influxdb/host` shortcode_. + Uses the current InfluxDB product as default. + Supports the following product values: + + - oss + - cloud + - serverless + - dedicated + - clustered + +```md +{{< api-endpoint method="get" endpoint="/api/v2/tasks" api-ref="/influxdb/cloud/api/#operation/GetTasks">}} +``` + +```md +{{< api-endpoint method="get" endpoint="{{< influxdb/host >}}/api/v2/tasks" influxdb_host="cloud">}} +``` + +#### Tabbed Content + +To create "tabbed" content (content that is changed by a users' selection), use the following three shortcodes in combination: + +`{{< tabs-wrapper >}}` +This shortcode creates a wrapper or container for the tabbed content. +All UI interactions are limited to the scope of each container. 
+If you have more than one "group" of tabbed content in a page, each needs its own `tabs-wrapper`. +This shortcode must be closed with `{{< /tabs-wrapper >}}`. + +**Note**: The `<` and `>` characters used in this shortcode indicate that the contents should be processed as HTML. + +`{{% tabs %}}` +This shortcode creates a container for buttons that control the display of tabbed content. +It should contain simple markdown links with anonymous anchors (`#`). +The link text is used as the button text. +This shortcode must be closed with `{{% /tabs %}}`. + +**Note**: The `%` characters used in this shortcode indicate that the contents should be processed as Markdown. + +The `{{% tabs %}}` shortcode has an optional `style` argument that lets you +assign CSS classes to the tags HTML container. The following classes are available: + +- **small**: Tab buttons are smaller and don't scale to fit the width. +- **even-wrap**: Prevents uneven tab widths when tabs are forced to wrap. + +`{{% tab-content %}}` +This shortcode creates a container for a content block. +Each content block in the tab group needs to be wrapped in this shortcode. +**The number of `tab-content` blocks must match the number of links provided in the `tabs` shortcode** +This shortcode must be closed with `{{% /tab-content %}}`. + +**Note**: The `%` characters used in this shortcode indicate that the contents should be processed as Markdown. + +##### Example tabbed content group + +```md +{{< tabs-wrapper >}} + +{{% tabs %}} +[Button text for tab 1](#) +[Button text for tab 2](#) +{{% /tabs %}} + +{{% tab-content %}} +Markdown content for tab 1. +{{% /tab-content %}} + +{{% tab-content %}} +Markdown content for tab 2. +{{% /tab-content %}} + +{{< /tabs-wrapper >}} +``` + +##### Tabbed code blocks + +Shortcodes are also available for tabbed code blocks primarily used to give users +the option to choose between different languages and syntax. 
+The shortcode structure is the same as above, but the shortcode names are different: + +`{{< code-tabs-wrapper >}}` +`{{% code-tabs %}}` +`{{% code-tab-content %}}` + +````md +{{< code-tabs-wrapper >}} + +{{% code-tabs %}} +[Flux](#) +[InfluxQL](#) +{{% /code-tabs %}} + +{{% code-tab-content %}} + +```js +data = from(bucket: "example-bucket") + |> range(start: -15m) + |> filter(fn: (r) => + r._measurement == "mem" and + r._field == "used_percent" + ) +``` + +{{% /code-tab-content %}} + +{{% code-tab-content %}} + +```sql +SELECT "used_percent" +FROM "telegraf"."autogen"."mem" +WHERE time > now() - 15m +``` + +{{% /code-tab-content %}} + +{{< /code-tabs-wrapper >}} +```` + +##### Link to tabbed content + +To link to tabbed content, click on the tab and use the URL parameter shown. +It will have the form `?t=`, plus a string. +For example: + +``` +[Windows installation](/influxdb/v2.0/install/?t=Windows) +``` + +#### Required elements + +Use the `{{< req >}}` shortcode to identify required elements in documentation with +orange text and/or asterisks. By default, the shortcode outputs the text, "Required," but +you can customize the text by passing a string argument with the shortcode. + +```md +{{< req >}} +``` + +**Output:** Required + +```md +{{< req "This is Required" >}} +``` + +**Output:** This is required + +If using other named arguments like `key` or `color`, use the `text` argument to +customize the text of the required message. + +```md +{{< req text="Required if ..." color="blue" type="key" >}} +``` + +##### Required elements in a list + +When identifying required elements in a list, use `{{< req type="key" >}}` to generate +a "\* Required" key before the list. For required elements in the list, include +{{< req "\*" >}} before the text of the list item. 
For example: + +```md +{{< req type="key" >}} + +- {{< req "\*" >}} **This element is required** +- {{< req "\*" >}} **This element is also required** +- **This element is NOT required** +``` + +##### Change color of required text + +Use the `color` argument to change the color of required text. +The following colors are available: + +- blue +- green +- magenta + +```md +{{< req color="magenta" text="This is required" >}} +``` + +#### Page navigation buttons + +Use the `{{< page-nav >}}` shortcode to add page navigation buttons to a page. +These are useful for guiding users through a set of docs that should be read in sequential order. +The shortcode has the following parameters: + +- **prev:** path of the previous document _(optional)_ +- **next:** path of the next document _(optional)_ +- **prevText:** override the button text linking to the previous document _(optional)_ +- **nextText:** override the button text linking to the next document _(optional)_ +- **keepTab:** include the currently selected tab in the button link _(optional)_ + +The shortcode generates buttons that link to both the previous and next documents. +By default, the shortcode uses either the `list_title` or the `title` of the linked +document, but you can use `prevText` and `nextText` to override button text. + +```md + + +{{ page-nav prev="/path/to/prev/" next="/path/to/next" >}} + + + +{{ page-nav prev="/path/to/prev/" prevText="Previous" next="/path/to/next" nextText="Next" >}} + + + +{{ page-nav prev="/path/to/prev/" next="/path/to/next" keepTab=true>}} +``` + +#### Keybinds + +Use the `{{< keybind >}}` shortcode to include OS-specific keybindings/hotkeys. 
+The following parameters are available: + +- mac +- linux +- win +- all +- other + +```md + + +{{< keybind mac="⇧⌘P" other="Ctrl+Shift+P" >}} + + + +{{< keybind all="Ctrl+Shift+P" >}} + + + +{{< keybind mac="⇧⌘P" linux="Ctrl+Shift+P" win="Ctrl+Shift+Alt+P" >}} +``` + +#### Diagrams + +Use the `{{< diagram >}}` shortcode to dynamically build diagrams. +The shortcode uses [mermaid.js](https://github.com/mermaid-js/mermaid) to convert +simple text into SVG diagrams. +For information about the syntax, see the [mermaid.js documentation](https://mermaid-js.github.io/mermaid/#/). + +```md +{{< diagram >}} +flowchart TB +This --> That +That --> There +{{< /diagram >}} +``` + +#### File system diagrams + +Use the `{{< filesystem-diagram >}}` shortcode to create a styled file system +diagram using a Markdown unordered list. + +##### Example filesystem diagram shortcode + +```md +{{< filesystem-diagram >}} + +- Dir1/ +- Dir2/ + - ChildDir/ + - Child + - Child +- Dir3/ + {{< /filesystem-diagram >}} +``` + +#### High-resolution images + +In many cases, screenshots included in the docs are taken from high-resolution (retina) screens. +Because of this, the actual pixel dimension is 2x larger than it needs to be and is rendered 2x bigger than it should be. +The following shortcode automatically sets a fixed width on the image using half of its actual pixel dimension. +This preserves the detail of the image and renders it at a size where there should be little to no "blur" +caused by browser image resizing. + +```html +{{< img-hd src="/path/to/image" alt="Alternate title" />}} +``` + +###### Notes + +- This should only be used on screenshots taken from high-resolution screens. +- The `src` should be relative to the `static` directory. +- Image widths are limited to the width of the article content container and will scale accordingly, + even with the `width` explicitly set. 
+ +#### Truncated content blocks + +In some cases, it may be appropriate to shorten or truncate blocks of content. +Use cases include long examples of output data or tall images. +The following shortcode truncates blocks of content and allows users to opt into +to seeing the full content block. + +```md +{{% truncate %}} +Truncated markdown content here. +{{% /truncate %}} +``` + +#### Expandable accordion content blocks + +Use the `{{% expand "Item label" %}}` shortcode to create expandable, accordion-style content blocks. +Each expandable block needs a label that users can click to expand or collapse the content block. +Pass the label as a string to the shortcode. + +```md +{{% expand "Label 1" %}} +Markdown content associated with label 1. +{{% /expand %}} + +{{% expand "Label 2" %}} +Markdown content associated with label 2. +{{% /expand %}} + +{{% expand "Label 3" %}} +Markdown content associated with label 3. +{{% /expand %}} +``` + +Use the optional `{{< expand-wrapper >}}` shortcode around a group of `{{% expand %}}` +shortcodes to ensure proper spacing around the expandable elements: + +```md +{{< expand-wrapper >}} +{{% expand "Label 1" %}} +Markdown content associated with label 1. +{{% /expand %}} + +{{% expand "Label 2" %}} +Markdown content associated with label 2. +{{% /expand %}} +{{< /expand-wrapper >}} +``` + +#### Captions + +Use the `{{% caption %}}` shortcode to add captions to images and code blocks. +Captions are styled with a smaller font size, italic text, slight transparency, +and appear directly under the previous image or code block. + +```md +{{% caption %}} +Markdown content for the caption. +{{% /caption %}} +``` + +#### Generate a list of children articles + +Section landing pages often contain just a list of articles with links and descriptions for each. +This can be cumbersome to maintain as content is added. +To automate the listing of articles in a section, use the `{{< children >}}` shortcode. 
+ +```md +{{< children >}} +``` + +The children shortcode can also be used to list only "section" articles (those with their own children), +or only "page" articles (those with no children) using the `show` argument: + +```md +{{< children show="sections" >}} + + + +{{< children show="pages" >}} +``` + +_By default, it displays both sections and pages._ + +Use the `type` argument to specify the format of the children list. + +```md +{{< children type="functions" >}} +``` + +The following list types are available: + +- **articles:** lists article titles as headers with the description or summary + of the article as a paragraph. Article headers link to the articles. +- **list:** lists children article links in an unordered list. +- **anchored-list:** lists anchored children article links in an unordered list + meant to act as a page navigation and link to children header. +- **functions:** a special use-case designed for listing Flux functions. + +##### Include a "Read more" link + +To include a "Read more" link with each child summary, set `readmore=true`. +_Only the `articles` list type supports "Read more" links._ + +```md +{{< children readmore=true >}} +``` + +##### Include a horizontal rule + +To include a horizontal rule after each child summary, set `hr=true`. +_Only the `articles` list type supports horizontal rules._ + +```md +{{< children hr=true >}} +``` + +##### Include a code example with a child summary + +Use the `list_code_example` frontmatter to provide a code example with an article +in an articles list. + +````yaml +list_code_example: | + ```sh + This is a code example + ``` +```` + +##### Organize and include native code examples + +To include text from a file in `/shared/text/`, use the +`{{< get-shared-text >}}` shortcode and provide the relative path and filename. + +This is useful for maintaining and referencing sample code variants in their +native file formats. + +1. Store code examples in their native formats at `/shared/text/`. 
+ +```md +/shared/text/example1/example.js +/shared/text/example1/example.py +``` + +2. Include the files--for example, in code tabs: + + ````md + {{% code-tabs-wrapper %}} + {{% code-tabs %}} + [Javascript](#js) + [Python](#py) + {{% /code-tabs %}} + {{% code-tab-content %}} + + ```js + {{< get-shared-text "example1/example.js" >}} + ``` + + {{% /code-tab-content %}} + {{% code-tab-content %}} + + ```py + {{< get-shared-text "example1/example.py" >}} + ``` + + {{% /code-tab-content %}} + {{% /code-tabs-wrapper %}} + ```` + +##### Include specific files from the same directory +> [!Caution] +> #### Don't use for code examples +> Using this and `get-shared-text` shortcodes to include code examples prevents the code from being tested. + +To include the text from one file in another file in the same +directory, use the `{{< get-leaf-text >}}` shortcode. +The directory that contains both files must be a +Hugo [_Leaf Bundle_](https://gohugo.io/content-management/page-bundles/#leaf-bundles), +a directory that doesn't have any child directories. + +In the following example, `api` is a leaf bundle. `content` isn't. + +```md +content +| +|--- api +| query.pdmc +| query.sh +| \_index.md +``` + +###### query.pdmc + +```md +# Query examples +``` + +###### query.sh + +```md +curl https://localhost:8086/query +``` + +To include `query.sh` and `query.pdmc` in `api/_index.md`, use the following code: + +````md +{{< get-leaf-text "query.pdmc" >}} + +# Curl example + +```sh +{{< get-leaf-text "query.sh" >}} +``` +```` + +Avoid using the following file extensions when naming included text files since Hugo interprets these as markup languages: +`.ad`, `.adoc`, `.asciidoc`, `.htm`, `.html`, `.markdown`, `.md`, `.mdown`, `.mmark`, `.pandoc`, `.pdc`, `.org`, or `.rst`. 
+ +##### Reference a query example in children + +To include a query example with the children in your list, update `data/query_examples.yml` +with the example code, input, and output, and use the `list_query_example` +frontmatter to reference the corresponding example. + +```yaml +list_query_example: cumulative_sum +``` + +##### Children frontmatter + +Each children list `type` uses [frontmatter properties](#page-frontmatter) when generating the list of articles. +The following table shows which children types use which frontmatter properties: + +| Frontmatter | articles | list | functions | +| :------------------- | :------: | :--: | :-------: | +| `list_title` | ✓ | ✓ | ✓ | +| `description` | ✓ | | | +| `external_url` | ✓ | ✓ | | +| `list_image` | ✓ | | | +| `list_note` | | ✓ | | +| `list_code_example` | ✓ | | | +| `list_query_example` | ✓ | | | + +#### Authentication token link + +Use the `{{% token-link "" "%}}` shortcode to +automatically generate links to token management documentation. The shortcode +accepts two _optional_ arguments: + +- **descriptor**: An optional token descriptor +- **link_append**: An optional path to append to the token management link path, + `///admin/tokens/`. + +```md +{{% token-link "database" "resource/" %}} + + +[database token](/influxdb3/enterprise/admin/tokens/resource/) +``` + +InfluxDB 3 Enterprise and InfluxDB 3 Core support different kinds of tokens. +The shortcode has a blacklist of token descriptors for each that will prevent +unsupported descriptors from appearing in the rendered output based on the +current product. + +#### Inline icons + +The `icon` shortcode allows you to inject icons in paragraph text. +It's meant to clarify references to specific elements in the InfluxDB user interface. +This shortcode supports Clockface (the UI) v2 and v3. +Specify the version to use as the second argument. The default version is `v3`. 
+ +``` +{{< icon "icon-name" "v2" >}} +``` + +Below is a list of available icons (some are aliases): + +- add-cell +- add-label +- alert +- calendar +- chat +- checkmark +- clone +- cloud +- cog +- config +- copy +- dashboard +- dashboards +- data-explorer +- delete +- download +- duplicate +- edit +- expand +- export +- eye +- eye-closed +- eye-open +- feedback +- fullscreen +- gear +- graph +- hide +- influx +- influx-icon +- nav-admin +- nav-config +- nav-configuration +- nav-dashboards +- nav-data-explorer +- nav-organizations +- nav-orgs +- nav-tasks +- note +- notebook +- notebooks +- org +- orgs +- pause +- pencil +- play +- plus +- refresh +- remove +- replay +- save-as +- search +- settings +- tasks +- toggle +- trash +- trashcan +- triangle +- view +- wrench +- x + +#### InfluxDB UI left navigation icons + +In many cases, documentation references an item in the left nav of the InfluxDB UI. +Provide a visual example of the navigation item using the `nav-icon` shortcode. +This shortcode supports Clockface (the UI) v2 and v3. +Specify the version to use as the second argument. The default version is `v3`. + +``` +{{< nav-icon "tasks" "v2" >}} +``` + +The following case insensitive values are supported: + +- admin, influx +- data-explorer, data explorer +- notebooks, books +- dashboards +- tasks +- monitor, alerts, bell +- cloud, usage +- data, load data, load-data +- settings +- feedback + +#### Flexbox-formatted content blocks + +CSS Flexbox formatting lets you create columns in article content that adjust and +flow based on the viewable width. +In article content, this helps if you have narrow tables that could be displayed +side-by-side, rather than stacked vertically. +Use the `{{< flex >}}` shortcode to create the Flexbox wrapper. +Use the `{{% flex-content %}}` shortcode to identify each column content block. 
+ +```md +{{< flex >}} +{{% flex-content %}} +Column 1 +{{% /flex-content %}} +{{% flex-content %}} +Column 2 +{{% /flex-content %}} +{{< /flex >}} +``` + +`{{% flex-content %}}` has an optional width argument that determines the maximum +width of the column. + +```md +{{% flex-content "half" %}} +``` + +The following options are available: + +- half _(Default)_ +- third +- quarter + +#### Tooltips + +Use the `{{< tooltip >}}` shortcode to add tooltips to text. +The **first** argument is the text shown in the tooltip. +The **second** argument is the highlighted text that triggers the tooltip. + +```md +I like {{< tooltip "Butterflies are awesome!" "butterflies" >}}. +``` + +The rendered output is "I like butterflies" with "butterflies" highlighted. +When you hover over "butterflies," a tooltip appears with the text: "Butterflies are awesome!" + +#### Flux sample data tables + +The Flux `sample` package provides basic sample datasets that can be used to +illustrate how Flux functions work. To quickly display one of the raw sample +datasets, use the `{{% flux/sample %}}` shortcode. + +The `flux/sample` shortcode has the following arguments that can be specified +by name or positionally. + +##### set + +Sample dataset to output. Use either `set` argument name or provide the set +as the first argument. The following sets are available: + +- float +- int +- uint +- string +- bool +- numericBool + +##### includeNull + +Specify whether or not to include _null_ values in the dataset. +Use either `includeNull` argument name or provide the boolean value as the second argument. + +##### includeRange + +Specify whether or not to include time range columns (`_start` and `_stop`) in the dataset. +This is only recommended when showing how functions that require a time range +(such as `window()`) operate on input data. +Use either `includeRange` argument name or provide the boolean value as the third argument. 
+ +###### Example Flux sample data shortcodes + +```md + + +{{% flux/sample %}} + + + +{{% flux/sample set="string" includeNull=false %}} + + + +{{% flux/sample "int" true %}} + + + + +{{% flux/sample set="int" includeNull=true includeRange=true %}} +{{% flux/sample "int" true true %}} +``` + +#### Duplicate OSS content in Cloud + +Docs for InfluxDB OSS and InfluxDB Cloud share a majority of content. +To prevent duplication of content between versions, use the following shortcodes: + +- `{{< duplicate-oss >}}` +- `{{% oss-only %}}` +- `{{% cloud-only %}}` + +##### duplicate-oss + +The `{{< duplicate-oss >}}` shortcode copies the page content of the file located +at the identical file path in the most recent InfluxDB OSS version. +The Cloud version of this markdown file should contain the frontmatter required +for all pages, but the body content should just be the `{{< duplicate-oss >}}` shortcode. + +##### oss-only + +Wrap content that should only appear in the OSS version of the doc with the `{{% oss-only %}}` shortcode. +Use the shortcode on both inline and content blocks: + +```md +{{% oss-only %}}This is inline content that only renders in the InfluxDB OSS docs{{% /oss-only %}} + +{{% oss-only %}} + +This is a multi-paragraph content block that spans multiple paragraphs and will +only render in the InfluxDB OSS documentation. + +**Note:** Notice the blank newline after the opening short-code tag. +This is necessary to get the first sentence/paragraph to render correctly. + +{{% /oss-only %}} + +- {{% oss-only %}}This is a list item that will only render in InfluxDB OSS docs.{{% /oss-only %}} +- {{% oss-only %}} + + This is a multi-paragraph list item that will only render in the InfluxDB OSS docs. + + **Note:** Notice shortcode is _inside_ of the line item. + There also must be blank newline after the opening short-code tag. + This is necessary to get the first sentence/paragraph to render correctly. + + {{% /oss-only %}} + +1. Step 1 +2. 
{{% oss-only %}}This is a list item that will only render in InfluxDB OSS docs.{{% /oss-only %}} +3. {{% oss-only %}} + + This is a list item that contains multiple paragraphs or nested list items and will only render in the InfluxDB OSS docs. + + **Note:** Notice shortcode is _inside_ of the line item. + There also must be blank newline after the opening short-code tag. + This is necessary to get the first sentence/paragraph to render correctly. + + {{% /oss-only %}} +``` + +##### cloud-only + +Wrap content that should only appear in the Cloud version of the doc with the `{{% cloud-only %}}` shortcode. +Use the shortcode on both inline and content blocks: + +```md +{{% cloud-only %}}This is inline content that only renders in the InfluxDB Cloud docs{{% /cloud-only %}} + +{{% cloud-only %}} + +This is a multi-paragraph content block that spans multiple paragraphs and will +only render in the InfluxDB Cloud documentation. + +**Note:** Notice the blank newline after the opening short-code tag. +This is necessary to get the first sentence/paragraph to render correctly. + +{{% /cloud-only %}} + +- {{% cloud-only %}}This is a list item that will only render in InfluxDB Cloud docs.{{% /cloud-only %}} +- {{% cloud-only %}} + + This is a list item that contains multiple paragraphs or nested list items and will only render in the InfluxDB Cloud docs. + + **Note:** Notice shortcode is _inside_ of the line item. + There also must be blank newline after the opening short-code tag. + This is necessary to get the first sentence/paragraph to render correctly. + + {{% /cloud-only %}} + +1. Step 1 +2. {{% cloud-only %}}This is a list item that will only render in InfluxDB Cloud docs.{{% /cloud-only %}} +3. {{% cloud-only %}} + + This is a multi-paragraph list item that will only render in the InfluxDB Cloud docs. + + **Note:** Notice shortcode is _inside_ of the line item. + There also must be blank newline after the opening short-code tag. 
+ This is necessary to get the first sentence/paragraph to render correctly. + + {{% /cloud-only %}} +``` + +#### Show or hide content blocks in shared content + +The `source` frontmatter lets you source page content from another file and is +used to share content across InfluxDB products. Within the shared content, you +can use the `show-in` and `hide-in` shortcodes to conditionally show or hide +content blocks based on the InfluxDB "version." Valid "versions" include: + +- v2 +- cloud +- cloud-serverless +- cloud-dedicated +- clustered +- core +- enterprise + +##### show-in + +The `show-in` shortcode accepts a comma-delimited string of InfluxDB "versions" +to show the content block in. The version is the second level of the page +path--for example: `/influxdb//...`. + +```md +{{% show-in "core,enterprise" %}} + +This content will appear in pages in the InfluxDB 3 Core and InfluxDB 3 Enterprise +documentation, but not any other InfluxDB documentation this content is shared in. + +{{% /show-in %}} +``` + +##### hide-in + +The `hide-in` shortcode accepts a comma-delimited string of InfluxDB "versions" +to hide the content block in. The version is the second level of the page +path--for example: `/influxdb//...`. + +```md +{{% hide-in "core,enterprise" %}} + +This content will not appear in pages in the InfluxDB 3 Core and InfluxDB 3 +Enterprise documentation, but will in all other InfluxDB documentation this +content is shared in. + +{{% /hide-in %}} +``` + +#### All-Caps + +Clockface v3 introduces many buttons with text formatted as all-caps. +Use the `{{< caps >}}` shortcode to format text to match those buttons. + +```md +Click {{< caps >}}Add Data{{< /caps >}} +``` + +#### Code callouts + +Use the `{{< code-callout >}}` shortcode to highlight and emphasize a specific +piece of code (for example, a variable, placeholder, or value) in a code block. +Provide the string to highlight in the code block. 
Specify a syntax for the code block to properly style the called-out code.
+InfluxDB URLs are configured in `/data/influxdb_urls.yml`. + +By default, the InfluxDB URL replaced inside of code blocks is `http://localhost:8086`. +Use this URL in all code examples that should be updated with a selected provider and region. + +For example: + +```` +```sh +# This URL will get updated +http://localhost:8086 + +# This URL will NOT get updated +http://example.com +``` +```` + +If the user selects the **US West (Oregon)** region, all occurrences of `http://localhost:8086` +in code blocks will get updated to `https://us-west-2-1.aws.cloud2.influxdata.com`. + +##### Exempt URLs from getting updated + +To exempt a code block from being updated, include the `{{< keep-url >}}` shortcode +just before the code block. + +```` +{{< keep-url >}} +``` +// This URL won't get updated +http://localhost:8086 +``` +```` + +##### Code examples only supported in InfluxDB Cloud + +Some functionality is only supported in InfluxDB Cloud and code examples should +only use InfluxDB Cloud URLs. In these cases, use `https://cloud2.influxdata.com` +as the placeholder in the code block. It will get updated on page load and when +users select a Cloud region in the URL select modal. + +```` +```sh +# This URL will get updated +https://cloud2.influxdata.com +``` +```` + +##### Automatically populate InfluxDB host placeholder + +The InfluxDB host placeholder that gets replaced by custom domains differs +between each InfluxDB product/version. +Use the `influxdb/host` shortcode to automatically render the correct +host placeholder value for the current product. You can also pass a single +argument to specify a specific InfluxDB product to use. 
+Supported argument values: + +- oss +- cloud +- cloud-serverless +- cloud-dedicated +- clustered +- core +- enterprise + +``` +{{< influxdb/host >}} + +{{< influxdb/host "serverless" >}} +``` + +##### User-populated placeholders + +Use the `code-placeholders` shortcode to format placeholders +as text fields that users can populate with their own values. +The shortcode takes a regular expression for matching placeholder names. +Use the `code-placeholder-key` shortcode to format the placeholder names in +text that describes the placeholder--for example: + +```markdown +{{% code-placeholders "DATABASE_NAME|USERNAME|PASSWORD_OR_TOKEN|API_TOKEN|exampleuser@influxdata.com" %}} +```sh +curl --request POST http://localhost:8086/write?db=DATABASE_NAME \ + --header "Authorization: Token API_TOKEN" \ + --data-binary @path/to/line-protocol.txt +``` +{{% /code-placeholders %}} + +Replace the following: + +- {{% code-placeholder-key %}}`DATABASE_NAME` and `RETENTION_POLICY`{{% /code-placeholder-key %}}: the [database and retention policy mapping (DBRP)](/influxdb/v2/reference/api/influxdb-1x/dbrp/) for the InfluxDB v2 bucket that you want to write to +- {{% code-placeholder-key %}}`USERNAME`{{% /code-placeholder-key %}}: your [InfluxDB 1.x username](/influxdb/v2/reference/api/influxdb-1x/#manage-credentials) +- {{% code-placeholder-key %}}`PASSWORD_OR_TOKEN`{{% /code-placeholder-key %}}: your [InfluxDB 1.x password or InfluxDB API token](/influxdb/v2/reference/api/influxdb-1x/#manage-credentials) +- {{% code-placeholder-key %}}`API_TOKEN`{{% /code-placeholder-key %}}: your [InfluxDB API token](/influxdb/v2/admin/tokens/) +``` + + diff --git a/.github/instructions/testing-setup.instructions.md b/.github/instructions/testing-setup.instructions.md new file mode 100644 index 000000000..7c0b9509b --- /dev/null +++ b/.github/instructions/testing-setup.instructions.md @@ -0,0 +1,15 @@ +--- +applyTo: "content/**/*.md, layouts/**/*.html" +--- + +### Detailed Testing Setup + +For 
comprehensive testing information, including: +- Code block testing setup and configuration +- Link validation testing procedures +- Style linting with Vale +- Pre-commit hooks and GitHub Actions integration +- Advanced testing procedures and troubleshooting + +Please refer to the main **[TESTING.md](../../TESTING.md)** file. + diff --git a/.github/scripts/cache-manager.cjs b/.github/scripts/cache-manager.cjs new file mode 100644 index 000000000..3e0aae23f --- /dev/null +++ b/.github/scripts/cache-manager.cjs @@ -0,0 +1,177 @@ +#!/usr/bin/env node + +/** + * Simple Cache Manager for Link Validation Results + * Uses GitHub Actions cache API or local file storage + */ + +const fs = require('fs'); +const path = require('path'); +const crypto = require('crypto'); +const process = require('process'); + +const CACHE_VERSION = 'v1'; +const CACHE_KEY_PREFIX = 'link-validation'; +const LOCAL_CACHE_DIR = path.join(process.cwd(), '.cache', 'link-validation'); + +/** + * Simple cache interface + */ +class CacheManager { + constructor(options = {}) { + this.useGitHubCache = + options.useGitHubCache !== false && process.env.GITHUB_ACTIONS; + this.localCacheDir = options.localCacheDir || LOCAL_CACHE_DIR; + + // Configurable cache TTL - default 30 days, support environment variable + this.cacheTTLDays = + options.cacheTTLDays || parseInt(process.env.LINK_CACHE_TTL_DAYS) || 30; + this.maxAge = this.cacheTTLDays * 24 * 60 * 60 * 1000; + + if (!this.useGitHubCache) { + this.ensureLocalCacheDir(); + } + } + + ensureLocalCacheDir() { + if (!fs.existsSync(this.localCacheDir)) { + fs.mkdirSync(this.localCacheDir, { recursive: true }); + } + } + + generateCacheKey(filePath, fileHash) { + const pathHash = crypto + .createHash('sha256') + .update(filePath) + .digest('hex') + .substring(0, 8); + return `${CACHE_KEY_PREFIX}-${CACHE_VERSION}-${pathHash}-${fileHash}`; + } + + async get(filePath, fileHash) { + if (this.useGitHubCache) { + return await this.getFromGitHubCache(filePath, fileHash); 
+ } else { + return await this.getFromLocalCache(filePath, fileHash); + } + } + + async set(filePath, fileHash, results) { + if (this.useGitHubCache) { + return await this.setToGitHubCache(filePath, fileHash, results); + } else { + return await this.setToLocalCache(filePath, fileHash, results); + } + } + + async getFromGitHubCache(filePath, fileHash) { + // TODO: This method is a placeholder for GitHub Actions cache integration + // GitHub Actions cache is handled directly in the workflow via actions/cache + // This method should either be implemented or removed in future versions + console.warn( + '[PLACEHOLDER] getFromGitHubCache: Using placeholder implementation - always returns null' + ); + return null; + } + + async setToGitHubCache(filePath, fileHash, results) { + // TODO: This method is a placeholder for GitHub Actions cache integration + // GitHub Actions cache is handled directly in the workflow via actions/cache + // This method should either be implemented or removed in future versions + console.warn( + '[PLACEHOLDER] setToGitHubCache: Using placeholder implementation - always returns true' + ); + return true; + } + + async getFromLocalCache(filePath, fileHash) { + const cacheKey = this.generateCacheKey(filePath, fileHash); + const cacheFile = path.join(this.localCacheDir, `${cacheKey}.json`); + + if (!fs.existsSync(cacheFile)) { + return null; + } + + try { + const content = fs.readFileSync(cacheFile, 'utf8'); + const cached = JSON.parse(content); + + // TTL check using configured cache duration + const age = Date.now() - new Date(cached.cachedAt).getTime(); + + if (age > this.maxAge) { + fs.unlinkSync(cacheFile); + return null; + } + + return cached.results; + } catch (error) { + // Clean up corrupted cache + try { + fs.unlinkSync(cacheFile); + } catch { + // Ignore cleanup errors + } + return null; + } + } + + async setToLocalCache(filePath, fileHash, results) { + const cacheKey = this.generateCacheKey(filePath, fileHash); + const cacheFile = 
path.join(this.localCacheDir, `${cacheKey}.json`); + + const cacheData = { + filePath, + fileHash, + results, + cachedAt: new Date().toISOString(), + }; + + try { + fs.writeFileSync(cacheFile, JSON.stringify(cacheData, null, 2)); + return true; + } catch (error) { + console.warn(`Cache save failed: ${error.message}`); + return false; + } + } + + async cleanup() { + if (this.useGitHubCache) { + return { removed: 0, note: 'GitHub Actions cache auto-managed' }; + } + + let removed = 0; + if (!fs.existsSync(this.localCacheDir)) { + return { removed }; + } + + const files = fs.readdirSync(this.localCacheDir); + + for (const file of files) { + if (!file.endsWith('.json')) continue; + + const filePath = path.join(this.localCacheDir, file); + try { + const stat = fs.statSync(filePath); + if (Date.now() - stat.mtime.getTime() > this.maxAge) { + fs.unlinkSync(filePath); + removed++; + } + } catch { + // Remove corrupted files + try { + fs.unlinkSync(filePath); + removed++; + } catch { + // Ignore errors + } + } + } + + return { removed }; + } +} + +module.exports = CacheManager; +module.exports.CacheManager = CacheManager; \ No newline at end of file diff --git a/.github/scripts/cache-manager.js b/.github/scripts/cache-manager.js new file mode 100644 index 000000000..e3e6dcceb --- /dev/null +++ b/.github/scripts/cache-manager.js @@ -0,0 +1,177 @@ +#!/usr/bin/env node + +/** + * Simple Cache Manager for Link Validation Results + * Uses GitHub Actions cache API or local file storage + */ + +import fs from 'fs'; +import path from 'path'; +import crypto from 'crypto'; +import process from 'process'; + +const CACHE_VERSION = 'v1'; +const CACHE_KEY_PREFIX = 'link-validation'; +const LOCAL_CACHE_DIR = path.join(process.cwd(), '.cache', 'link-validation'); + +/** + * Simple cache interface + */ +class CacheManager { + constructor(options = {}) { + this.useGitHubCache = + options.useGitHubCache !== false && process.env.GITHUB_ACTIONS; + this.localCacheDir = options.localCacheDir || 
LOCAL_CACHE_DIR; + + // Configurable cache TTL - default 30 days, support environment variable + this.cacheTTLDays = + options.cacheTTLDays || parseInt(process.env.LINK_CACHE_TTL_DAYS) || 30; + this.maxAge = this.cacheTTLDays * 24 * 60 * 60 * 1000; + + if (!this.useGitHubCache) { + this.ensureLocalCacheDir(); + } + } + + ensureLocalCacheDir() { + if (!fs.existsSync(this.localCacheDir)) { + fs.mkdirSync(this.localCacheDir, { recursive: true }); + } + } + + generateCacheKey(filePath, fileHash) { + const pathHash = crypto + .createHash('sha256') + .update(filePath) + .digest('hex') + .substring(0, 8); + return `${CACHE_KEY_PREFIX}-${CACHE_VERSION}-${pathHash}-${fileHash}`; + } + + async get(filePath, fileHash) { + if (this.useGitHubCache) { + return await this.getFromGitHubCache(filePath, fileHash); + } else { + return await this.getFromLocalCache(filePath, fileHash); + } + } + + async set(filePath, fileHash, results) { + if (this.useGitHubCache) { + return await this.setToGitHubCache(filePath, fileHash, results); + } else { + return await this.setToLocalCache(filePath, fileHash, results); + } + } + + async getFromGitHubCache(filePath, fileHash) { + // TODO: This method is a placeholder for GitHub Actions cache integration + // GitHub Actions cache is handled directly in the workflow via actions/cache + // This method should either be implemented or removed in future versions + console.warn( + '[PLACEHOLDER] getFromGitHubCache: Using placeholder implementation - always returns null' + ); + return null; + } + + async setToGitHubCache(filePath, fileHash, results) { + // TODO: This method is a placeholder for GitHub Actions cache integration + // GitHub Actions cache is handled directly in the workflow via actions/cache + // This method should either be implemented or removed in future versions + console.warn( + '[PLACEHOLDER] setToGitHubCache: Using placeholder implementation - always returns true' + ); + return true; + } + + async getFromLocalCache(filePath, fileHash) 
{ + const cacheKey = this.generateCacheKey(filePath, fileHash); + const cacheFile = path.join(this.localCacheDir, `${cacheKey}.json`); + + if (!fs.existsSync(cacheFile)) { + return null; + } + + try { + const content = fs.readFileSync(cacheFile, 'utf8'); + const cached = JSON.parse(content); + + // TTL check using configured cache duration + const age = Date.now() - new Date(cached.cachedAt).getTime(); + + if (age > this.maxAge) { + fs.unlinkSync(cacheFile); + return null; + } + + return cached.results; + } catch (error) { + // Clean up corrupted cache + try { + fs.unlinkSync(cacheFile); + } catch { + // Ignore cleanup errors + } + return null; + } + } + + async setToLocalCache(filePath, fileHash, results) { + const cacheKey = this.generateCacheKey(filePath, fileHash); + const cacheFile = path.join(this.localCacheDir, `${cacheKey}.json`); + + const cacheData = { + filePath, + fileHash, + results, + cachedAt: new Date().toISOString(), + }; + + try { + fs.writeFileSync(cacheFile, JSON.stringify(cacheData, null, 2)); + return true; + } catch (error) { + console.warn(`Cache save failed: ${error.message}`); + return false; + } + } + + async cleanup() { + if (this.useGitHubCache) { + return { removed: 0, note: 'GitHub Actions cache auto-managed' }; + } + + let removed = 0; + if (!fs.existsSync(this.localCacheDir)) { + return { removed }; + } + + const files = fs.readdirSync(this.localCacheDir); + + for (const file of files) { + if (!file.endsWith('.json')) continue; + + const filePath = path.join(this.localCacheDir, file); + try { + const stat = fs.statSync(filePath); + if (Date.now() - stat.mtime.getTime() > this.maxAge) { + fs.unlinkSync(filePath); + removed++; + } + } catch { + // Remove corrupted files + try { + fs.unlinkSync(filePath); + removed++; + } catch { + // Ignore errors + } + } + } + + return { removed }; + } +} + +export default CacheManager; +export { CacheManager }; diff --git a/.github/scripts/comment-generator.js b/.github/scripts/comment-generator.js 
new file mode 100644 index 000000000..82db92d70 --- /dev/null +++ b/.github/scripts/comment-generator.js @@ -0,0 +1,329 @@ +/** + * Comment Generator for Link Validation Results + * Standardizes PR comment generation across workflows + * Includes cache performance metrics and optimization info + */ + +import fs from 'fs'; +import path from 'path'; +import process from 'process'; +import { fileURLToPath } from 'url'; + +/** + * Normalize broken link data from different report formats + * @param {Object|Array} reportData - Raw report data + * @returns {Array} - Normalized array of broken links + */ +function normalizeBrokenLinks(reportData) { + if (!reportData) return []; + + let links = []; + + if (Array.isArray(reportData)) { + reportData.forEach((item) => { + if (item.links && Array.isArray(item.links)) { + // Format: { sourceFile: "file.md", links: [...] } + item.links.forEach((link) => { + links.push({ + sourceFile: item.sourceFile || item.page || 'Unknown', + url: link.url || link.href, + linkText: link.linkText || link.url || link.href, + status: link.status, + error: link.error, + type: link.type, + }); + }); + } else { + // Format: direct link object + links.push({ + sourceFile: item.sourceFile || item.page || 'Unknown', + url: item.url || item.href, + linkText: item.linkText || item.url || item.href, + status: item.status, + error: item.error, + type: item.type, + }); + } + }); + } + + return links; +} + +/** + * Group broken links by source file + * @param {Array} brokenLinks - Array of normalized broken links + * @returns {Object} - Object with source files as keys + */ +function groupLinksBySource(brokenLinks) { + const bySource = {}; + + brokenLinks.forEach((link) => { + const source = link.sourceFile || 'Unknown'; + if (!bySource[source]) { + bySource[source] = []; + } + bySource[source].push(link); + }); + + return bySource; +} + +/** + * Generate markdown comment for PR + * @param {Array} allBrokenLinks - Array of all broken links + * @param {Object} 
options - Generation options + * @returns {string} - Markdown comment content + */ +/** + * Load cache statistics from reports directory + * @param {string} reportsDir - Directory containing reports + * @returns {Object|null} Cache statistics or null if not found + */ +function loadCacheStats(reportsDir) { + try { + const cacheStatsFile = path.join(reportsDir, 'cache_statistics.json'); + if (fs.existsSync(cacheStatsFile)) { + const content = fs.readFileSync(cacheStatsFile, 'utf8'); + return JSON.parse(content); + } + } catch (error) { + console.warn(`Warning: Could not load cache stats: ${error.message}`); + } + return null; +} + +function generateComment(allBrokenLinks, options = {}) { + const { + includeSuccessMessage = true, + includeStats = true, + includeActionRequired = true, + maxLinksPerFile = 20, + cacheStats = null, + reportsDir = null, + } = options; + + // Load cache stats if reports directory is provided + const actualCacheStats = + cacheStats || (reportsDir ? loadCacheStats(reportsDir) : null); + + let comment = ''; + + // Add cache performance metrics at the top + if (actualCacheStats) { + comment += '## 📊 Link Validation Performance\n\n'; + comment += `- **Cache Hit Rate:** ${actualCacheStats.hitRate}%\n`; + comment += `- **Files Cached:** ${actualCacheStats.cacheHits} (skipped validation)\n`; + comment += `- **Files Validated:** ${actualCacheStats.cacheMisses}\n`; + + if (actualCacheStats.hitRate >= 50) { + comment += + '- **Performance:** 🚀 Cache optimization saved significant validation time!\n'; + } else if (actualCacheStats.hitRate > 0) { + comment += + '- **Performance:** ⚡ Some files were cached, improving validation speed\n'; + } + comment += '\n'; + } + + if (!allBrokenLinks || allBrokenLinks.length === 0) { + comment += '## ✅ Link Validation Passed\n\n'; + comment += 'All links in the changed files are valid!'; + + if (actualCacheStats && actualCacheStats.hitRate === 100) { + comment += '\n\n✨ **All files were cached** - no validation was 
needed!'; + } + + return includeSuccessMessage ? comment : ''; + } + + comment += '## 🔗 Broken Links Found\n\n'; + + if (includeStats) { + comment += `Found ${allBrokenLinks.length} broken link(s) in the changed files:\n\n`; + } + + // Group by source file + const bySource = groupLinksBySource(allBrokenLinks); + + // Generate sections for each source file + for (const [source, links] of Object.entries(bySource)) { + comment += `### ${source}\n\n`; + + const displayLinks = links.slice(0, maxLinksPerFile); + const hiddenCount = links.length - displayLinks.length; + + displayLinks.forEach((link) => { + const url = link.url || 'Unknown URL'; + const linkText = link.linkText || url; + const status = link.status || 'Unknown'; + + comment += `- [ ] **${linkText}** → \`${url}\`\n`; + comment += ` - Status: ${status}\n`; + + if (link.type) { + comment += ` - Type: ${link.type}\n`; + } + + if (link.error) { + comment += ` - Error: ${link.error}\n`; + } + + comment += '\n'; + }); + + if (hiddenCount > 0) { + comment += `
<details>\n<summary>... and ${hiddenCount} more broken link(s)</summary>\n\n`; + + links.slice(maxLinksPerFile).forEach((link) => { + const url = link.url || 'Unknown URL'; + const linkText = link.linkText || url; + const status = link.status || 'Unknown'; + + comment += `- [ ] **${linkText}** → \`${url}\` (Status: ${status})\n`; + }); + + comment += '\n</details>
\n\n'; + } + } + + if (includeActionRequired) { + comment += '\n---\n'; + comment += + '**Action Required:** Please fix the broken links before merging this PR.'; + } + + return comment; +} + +/** + * Load and merge broken link reports from artifacts + * @param {string} reportsDir - Directory containing report artifacts + * @returns {Array} - Array of all broken links + */ +function loadBrokenLinkReports(reportsDir) { + const allBrokenLinks = []; + + if (!fs.existsSync(reportsDir)) { + return allBrokenLinks; + } + + try { + const reportDirs = fs.readdirSync(reportsDir); + + for (const dir of reportDirs) { + if (dir.startsWith('broken-links-')) { + const reportPath = path.join( + reportsDir, + dir, + 'broken_links_report.json' + ); + + if (fs.existsSync(reportPath)) { + try { + const reportContent = fs.readFileSync(reportPath, 'utf8'); + const reportData = JSON.parse(reportContent); + const normalizedLinks = normalizeBrokenLinks(reportData); + allBrokenLinks.push(...normalizedLinks); + } catch (e) { + console.error(`Error reading ${reportPath}: ${e.message}`); + } + } + } + } + } catch (e) { + console.error( + `Error reading reports directory ${reportsDir}: ${e.message}` + ); + } + + return allBrokenLinks; +} + +/** + * CLI interface for the comment generator + */ +function main() { + const args = process.argv.slice(2); + + if (args.includes('--help') || args.includes('-h')) { + console.log(` +Usage: node comment-generator.js [options] + +Options: + --no-success Don't include success message when no broken links + --no-stats Don't include broken link statistics + --no-action-required Don't include action required message + --max-links Maximum links to show per file (default: 20) + --output-file Write comment to file instead of stdout + --help, -h Show this help message + +Examples: + node comment-generator.js reports/ + node comment-generator.js --max-links 10 --output-file comment.md reports/ +`); + process.exit(0); + } + + // Parse arguments + let reportsDir = ''; 
+ const options = { + includeSuccessMessage: true, + includeStats: true, + includeActionRequired: true, + maxLinksPerFile: 20, + }; + let outputFile = null; + + for (let i = 0; i < args.length; i++) { + const arg = args[i]; + + if (arg === '--no-success') { + options.includeSuccessMessage = false; + } else if (arg === '--no-stats') { + options.includeStats = false; + } else if (arg === '--no-action-required') { + options.includeActionRequired = false; + } else if (arg === '--max-links' && i + 1 < args.length) { + options.maxLinksPerFile = parseInt(args[++i]); + } else if (arg === '--output-file' && i + 1 < args.length) { + outputFile = args[++i]; + } else if (!arg.startsWith('--')) { + reportsDir = arg; + } + } + + if (!reportsDir) { + console.error('Error: reports directory is required'); + process.exit(1); + } + + // Load reports and generate comment with cache stats + const brokenLinks = loadBrokenLinkReports(reportsDir); + options.reportsDir = reportsDir; + const comment = generateComment(brokenLinks, options); + + if (outputFile) { + fs.writeFileSync(outputFile, comment); + console.log(`Comment written to ${outputFile}`); + } else { + console.log(comment); + } + + // Exit with error code if there are broken links + if (brokenLinks.length > 0) { + process.exit(1); + } +} + +// Run CLI if this file is executed directly +if (fileURLToPath(import.meta.url) === process.argv[1]) { + main(); +} + +export { + generateComment, + loadBrokenLinkReports, + normalizeBrokenLinks, + groupLinksBySource, +}; diff --git a/.github/scripts/incremental-validator.cjs b/.github/scripts/incremental-validator.cjs new file mode 100644 index 000000000..ab58447c0 --- /dev/null +++ b/.github/scripts/incremental-validator.cjs @@ -0,0 +1,230 @@ +#!/usr/bin/env node + +/** + * Incremental Link Validator + * Combines link extraction and caching to validate only changed links + */ + +const { extractLinksFromFile } = require('./link-extractor.cjs'); +const CacheManager = 
require('./cache-manager.cjs'); +const process = require('process'); + +/** + * Incremental validator that only validates changed content + */ +class IncrementalValidator { + constructor(options = {}) { + this.cacheManager = new CacheManager(options); + this.validateExternal = options.validateExternal !== false; + this.validateInternal = options.validateInternal !== false; + } + + /** + * Get validation strategy for a list of files + * @param {Array} filePaths - Array of file paths + * @returns {Object} Validation strategy with files categorized + */ + async getValidationStrategy(filePaths) { + const strategy = { + unchanged: [], // Files that haven't changed (skip validation) + changed: [], // Files that changed (need full validation) + newLinks: [], // New links across all files (need validation) + total: filePaths.length, + }; + + const allNewLinks = new Set(); + + for (const filePath of filePaths) { + try { + const extractionResult = extractLinksFromFile(filePath); + if (!extractionResult) { + console.warn(`Could not extract links from ${filePath}`); + continue; + } + + const { fileHash, links } = extractionResult; + + // Check if we have cached results for this file version + const cachedResults = await this.cacheManager.get(filePath, fileHash); + + if (cachedResults) { + // File unchanged, skip validation + strategy.unchanged.push({ + filePath, + fileHash, + linkCount: links.length, + cachedResults, + }); + } else { + // File changed or new, needs validation + strategy.changed.push({ + filePath, + fileHash, + links: links.filter((link) => link.needsValidation), + extractionResult, + }); + + // Collect all new links for batch validation + links + .filter((link) => link.needsValidation) + .forEach((link) => allNewLinks.add(link.url)); + } + } catch (error) { + console.error(`Error processing ${filePath}: ${error.message}`); + // Treat as changed file to ensure validation + strategy.changed.push({ + filePath, + error: error.message, + }); + } + } + + 
strategy.newLinks = Array.from(allNewLinks); + + return strategy; + } + + /** + * Validate files using incremental strategy + * @param {Array} filePaths - Files to validate + * @returns {Object} Validation results + */ + async validateFiles(filePaths) { + console.log( + `📊 Analyzing ${filePaths.length} files for incremental validation...` + ); + + const strategy = await this.getValidationStrategy(filePaths); + + console.log(`✅ ${strategy.unchanged.length} files unchanged (cached)`); + console.log(`🔄 ${strategy.changed.length} files need validation`); + console.log(`🔗 ${strategy.newLinks.length} unique links to validate`); + + const results = { + validationStrategy: strategy, + filesToValidate: strategy.changed.map((item) => ({ + filePath: item.filePath, + linkCount: item.links ? item.links.length : 0, + fileHash: item.fileHash || 'unknown', + })), + cacheStats: { + totalFiles: strategy.total, + cacheHits: strategy.unchanged.length, + cacheMisses: strategy.changed.length, + hitRate: + strategy.total > 0 + ? Math.round((strategy.unchanged.length / strategy.total) * 100) + : 0, + }, + }; + + return results; + } + + /** + * Store validation results in cache + * @param {string} filePath - File path + * @param {string} fileHash - File hash + * @param {Object} validationResults - Results to cache + * @returns {Promise} Success status + */ + async cacheResults(filePath, fileHash, validationResults) { + return await this.cacheManager.set(filePath, fileHash, validationResults); + } + + /** + * Clean up expired cache entries + * @returns {Promise} Cleanup statistics + */ + async cleanupCache() { + return await this.cacheManager.cleanup(); + } +} + +/** + * CLI usage + */ +async function main() { + const args = process.argv.slice(2); + + if (args.length === 0 || args[0] === '--help') { + console.log(` +Incremental Link Validator + +Usage: + node incremental-validator.cjs [files...] 
Analyze files for validation + node incremental-validator.cjs --cleanup Clean up expired cache + node incremental-validator.cjs --help Show this help + +Options: + --no-external Don't validate external links + --no-internal Don't validate internal links + --local Use local cache instead of GitHub Actions cache + --cache-ttl=DAYS Set cache TTL in days (default: 30) + +Examples: + node incremental-validator.cjs content/**/*.md + node incremental-validator.cjs --cache-ttl=7 content/**/*.md + node incremental-validator.cjs --cleanup +`); + process.exit(0); + } + + if (args[0] === '--cleanup') { + const validator = new IncrementalValidator(); + const stats = await validator.cleanupCache(); + console.log(`🧹 Cleaned up ${stats.removed} expired cache entries`); + if (stats.note) console.log(`ℹ️ ${stats.note}`); + return; + } + + const options = { + validateExternal: !args.includes('--no-external'), + validateInternal: !args.includes('--no-internal'), + useGitHubCache: !args.includes('--local'), + }; + + // Extract cache TTL option if provided + const cacheTTLArg = args.find((arg) => arg.startsWith('--cache-ttl=')); + if (cacheTTLArg) { + options.cacheTTLDays = parseInt(cacheTTLArg.split('=')[1]); + } + + const filePaths = args.filter((arg) => !arg.startsWith('--')); + + if (filePaths.length === 0) { + console.error('No files specified for validation'); + process.exit(1); + } + + const validator = new IncrementalValidator(options); + const results = await validator.validateFiles(filePaths); + + console.log('\n📈 Validation Analysis Results:'); + console.log('================================'); + console.log(`📊 Cache hit rate: ${results.cacheStats.hitRate}%`); + console.log(`📋 Files to validate: ${results.filesToValidate.length}`); + + if (results.filesToValidate.length > 0) { + console.log('\n📝 Files needing validation:'); + results.filesToValidate.forEach((file) => { + console.log(` ${file.filePath} (${file.linkCount} links)`); + }); + + // Output files for Cypress to 
process + console.log('\n🎯 Files for Cypress validation (one per line):'); + results.filesToValidate.forEach((file) => { + console.log(file.filePath); + }); + } else { + console.log('\n✨ All files are cached - no validation needed!'); + } +} + +module.exports = IncrementalValidator; +module.exports.IncrementalValidator = IncrementalValidator; + +// Run CLI if called directly +if (require.main === module) { + main().catch(console.error); +} \ No newline at end of file diff --git a/.github/scripts/incremental-validator.js b/.github/scripts/incremental-validator.js new file mode 100644 index 000000000..8f7caa0a6 --- /dev/null +++ b/.github/scripts/incremental-validator.js @@ -0,0 +1,229 @@ +#!/usr/bin/env node + +/** + * Incremental Link Validator + * Combines link extraction and caching to validate only changed links + */ + +import { extractLinksFromFile } from './link-extractor.js'; +import { CacheManager } from './cache-manager.js'; +import process from 'process'; +import { fileURLToPath } from 'url'; + +/** + * Incremental validator that only validates changed content + */ +class IncrementalValidator { + constructor(options = {}) { + this.cacheManager = new CacheManager(options); + this.validateExternal = options.validateExternal !== false; + this.validateInternal = options.validateInternal !== false; + } + + /** + * Get validation strategy for a list of files + * @param {Array} filePaths - Array of file paths + * @returns {Object} Validation strategy with files categorized + */ + async getValidationStrategy(filePaths) { + const strategy = { + unchanged: [], // Files that haven't changed (skip validation) + changed: [], // Files that changed (need full validation) + newLinks: [], // New links across all files (need validation) + total: filePaths.length, + }; + + const allNewLinks = new Set(); + + for (const filePath of filePaths) { + try { + const extractionResult = extractLinksFromFile(filePath); + if (!extractionResult) { + console.warn(`Could not extract links 
from ${filePath}`); + continue; + } + + const { fileHash, links } = extractionResult; + + // Check if we have cached results for this file version + const cachedResults = await this.cacheManager.get(filePath, fileHash); + + if (cachedResults) { + // File unchanged, skip validation + strategy.unchanged.push({ + filePath, + fileHash, + linkCount: links.length, + cachedResults, + }); + } else { + // File changed or new, needs validation + strategy.changed.push({ + filePath, + fileHash, + links: links.filter((link) => link.needsValidation), + extractionResult, + }); + + // Collect all new links for batch validation + links + .filter((link) => link.needsValidation) + .forEach((link) => allNewLinks.add(link.url)); + } + } catch (error) { + console.error(`Error processing ${filePath}: ${error.message}`); + // Treat as changed file to ensure validation + strategy.changed.push({ + filePath, + error: error.message, + }); + } + } + + strategy.newLinks = Array.from(allNewLinks); + + return strategy; + } + + /** + * Validate files using incremental strategy + * @param {Array} filePaths - Files to validate + * @returns {Object} Validation results + */ + async validateFiles(filePaths) { + console.log( + `📊 Analyzing ${filePaths.length} files for incremental validation...` + ); + + const strategy = await this.getValidationStrategy(filePaths); + + console.log(`✅ ${strategy.unchanged.length} files unchanged (cached)`); + console.log(`🔄 ${strategy.changed.length} files need validation`); + console.log(`🔗 ${strategy.newLinks.length} unique links to validate`); + + const results = { + validationStrategy: strategy, + filesToValidate: strategy.changed.map((item) => ({ + filePath: item.filePath, + linkCount: item.links ? item.links.length : 0, + })), + cacheStats: { + cacheHits: strategy.unchanged.length, + cacheMisses: strategy.changed.length, + hitRate: + strategy.total > 0 + ? 
Math.round((strategy.unchanged.length / strategy.total) * 100) + : 0, + }, + }; + + return results; + } + + /** + * Store validation results in cache + * @param {string} filePath - File path + * @param {string} fileHash - File hash + * @param {Object} validationResults - Results to cache + * @returns {Promise} Success status + */ + async cacheResults(filePath, fileHash, validationResults) { + return await this.cacheManager.set(filePath, fileHash, validationResults); + } + + /** + * Clean up expired cache entries + * @returns {Promise} Cleanup statistics + */ + async cleanupCache() { + return await this.cacheManager.cleanup(); + } +} + +/** + * CLI usage + */ +async function main() { + const args = process.argv.slice(2); + + if (args.length === 0 || args[0] === '--help') { + console.log(` +Incremental Link Validator + +Usage: + node incremental-validator.js [files...] Analyze files for validation + node incremental-validator.js --cleanup Clean up expired cache + node incremental-validator.js --help Show this help + +Options: + --no-external Don't validate external links + --no-internal Don't validate internal links + --local Use local cache instead of GitHub Actions cache + --cache-ttl=DAYS Set cache TTL in days (default: 30) + +Examples: + node incremental-validator.js content/**/*.md + node incremental-validator.js --cache-ttl=7 content/**/*.md + node incremental-validator.js --cleanup +`); + process.exit(0); + } + + if (args[0] === '--cleanup') { + const validator = new IncrementalValidator(); + const stats = await validator.cleanupCache(); + console.log(`🧹 Cleaned up ${stats.removed} expired cache entries`); + if (stats.note) console.log(`ℹ️ ${stats.note}`); + return; + } + + const options = { + validateExternal: !args.includes('--no-external'), + validateInternal: !args.includes('--no-internal'), + useGitHubCache: !args.includes('--local'), + }; + + // Extract cache TTL option if provided + const cacheTTLArg = args.find((arg) => arg.startsWith('--cache-ttl=')); 
+ if (cacheTTLArg) { + options.cacheTTLDays = parseInt(cacheTTLArg.split('=')[1]); + } + + const filePaths = args.filter((arg) => !arg.startsWith('--')); + + if (filePaths.length === 0) { + console.error('No files specified for validation'); + process.exit(1); + } + + const validator = new IncrementalValidator(options); + const results = await validator.validateFiles(filePaths); + + console.log('\n📈 Validation Analysis Results:'); + console.log('================================'); + console.log(`📊 Cache hit rate: ${results.cacheStats.hitRate}%`); + console.log(`📋 Files to validate: ${results.filesToValidate.length}`); + + if (results.filesToValidate.length > 0) { + console.log('\n📝 Files needing validation:'); + results.filesToValidate.forEach((file) => { + console.log(` ${file.filePath} (${file.linkCount} links)`); + }); + + // Output files for Cypress to process + console.log('\n🎯 Files for Cypress validation (one per line):'); + results.filesToValidate.forEach((file) => { + console.log(file.filePath); + }); + } else { + console.log('\n✨ All files are cached - no validation needed!'); + } +} + +export default IncrementalValidator; +export { IncrementalValidator }; + +// Run CLI if called directly +if (fileURLToPath(import.meta.url) === process.argv[1]) { + main().catch(console.error); +} diff --git a/.github/scripts/link-extractor.cjs b/.github/scripts/link-extractor.cjs new file mode 100644 index 000000000..772d725fe --- /dev/null +++ b/.github/scripts/link-extractor.cjs @@ -0,0 +1,477 @@ +#!/usr/bin/env node + +/** + * Link Extractor for Documentation Files + * Extracts all links from markdown and HTML files with metadata for caching and incremental validation + */ + +const fs = require('fs'); +const crypto = require('crypto'); +const matter = require('gray-matter'); +const path = require('path'); +const process = require('process'); + +/** + * Extract links from markdown content + * @param {string} content - File content + * @param {string} filePath - Path to 
the file + * @returns {Array} Array of link objects with metadata + */ +function extractMarkdownLinks(content, filePath) { + const links = []; + const lines = content.split('\n'); + + // Track reference-style link definitions + const referenceLinks = new Map(); + + // First pass: collect reference definitions + content.replace(/^\s*\[([^\]]+)\]:\s*(.+)$/gm, (match, ref, url) => { + referenceLinks.set(ref.toLowerCase(), url.trim()); + return match; + }); + + // Process each line for links + lines.forEach((line, lineIndex) => { + const lineNumber = lineIndex + 1; + + // Standard markdown links + let match; + const standardLinkRegex = /\[([^\]]*)\]\(([^)]+)\)/g; + while ((match = standardLinkRegex.exec(line)) !== null) { + const linkText = match[1]; + const url = match[2]; + const columnStart = match.index; + + links.push({ + url: url.trim(), + text: linkText, + type: 'markdown', + line: lineNumber, + column: columnStart, + context: line.trim(), + hash: generateLinkHash(url.trim(), filePath, lineNumber), + }); + } + + // Reference-style links + const refLinkRegex = /\[([^\]]*)\]\[([^\]]*)\]/g; + while ((match = refLinkRegex.exec(line)) !== null) { + const linkText = match[1]; + const refKey = (match[2] || linkText).toLowerCase(); + const url = referenceLinks.get(refKey); + + if (url) { + const columnStart = match.index; + links.push({ + url: url, + text: linkText, + type: 'markdown-reference', + line: lineNumber, + column: columnStart, + context: line.trim(), + reference: refKey, + hash: generateLinkHash(url, filePath, lineNumber), + }); + } + } + + // Autolinks + const autolinkRegex = /<(https?:\/\/[^>]+)>/g; + while ((match = autolinkRegex.exec(line)) !== null) { + const url = match[1]; + const columnStart = match.index; + + links.push({ + url: url, + text: url, + type: 'autolink', + line: lineNumber, + column: columnStart, + context: line.trim(), + hash: generateLinkHash(url, filePath, lineNumber), + }); + } + + // Bare URLs (basic detection, avoid false positives) 
+ // Regex to match bare URLs in text + // - (?:^|[\s\n]): Match the start of the line or any whitespace character + // - (https?:\/\/): Match the protocol (http or https) followed by :// + // - [^\s\)\]\}]+: Match the rest of the URL, stopping at spaces or closing characters like ), ], or } + const bareUrlRegex = /(?:^|[\s\n])(?<url>https?:\/\/[^\s\)\]\}]+)/g; + while ((match = bareUrlRegex.exec(line)) !== null) { + const url = match.groups.url; + const columnStart = match.index + match[0].length - url.length; + + // Skip if this URL is already captured in a proper markdown link + const alreadyCaptured = links.some( + (link) => + link.line === lineNumber && + Math.abs(link.column - columnStart) < 10 && + link.url === url + ); + + if (!alreadyCaptured) { + links.push({ + url: url, + text: url, + type: 'bare-url', + line: lineNumber, + column: columnStart, + context: line.trim(), + hash: generateLinkHash(url, filePath, lineNumber), + }); + } + } + }); + + return links; +} + +/** + * Extract links from HTML content + * @param {string} content - File content + * @param {string} filePath - Path to the file + * @returns {Array} Array of link objects with metadata + */ +function extractHtmlLinks(content, filePath) { + const links = []; + const lines = content.split('\n'); + + lines.forEach((line, lineIndex) => { + const lineNumber = lineIndex + 1; + let match; + + const htmlLinkRegex = /<a[^>]*href\s*=\s*["']([^"']+)["'][^>]*>/gi; + while ((match = htmlLinkRegex.exec(line)) !== null) { + const url = match[1]; + const columnStart = match.index; + + // Extract link text if possible + const fullMatch = match[0]; + const textMatch = fullMatch.match(/>([^<]*) ({ + ...link, + ...categorizeLinkType(link.url), + filePath, + })); + + // Calculate statistics + const stats = { + totalLinks: enhancedLinks.length, + externalLinks: enhancedLinks.filter((l) => l.category === 'external') + .length, + internalLinks: enhancedLinks.filter((l) => + l.category.startsWith('internal') + ).length, + 
fragmentLinks: enhancedLinks.filter((l) => l.category === 'fragment') + .length, + linksNeedingValidation: enhancedLinks.filter((l) => l.needsValidation) + .length, + }; + + return { + filePath, + fileHash, + extension, + frontmatter, + links: enhancedLinks, + stats, + extractedAt: new Date().toISOString(), + }; + } catch (error) { + console.error(`Error extracting links from ${filePath}: ${error.message}`); + return null; + } +} + +/** + * Main function for CLI usage + */ +function main() { + const args = process.argv.slice(2); + + if (args.length === 0) { + console.error('Usage: node link-extractor.cjs [file2] [...]'); + console.error(' node link-extractor.cjs --help'); + process.exit(1); + } + + if (args[0] === '--help') { + console.log(` +Link Extractor for Documentation Files + +Usage: + node link-extractor.cjs [file2] [...] Extract links from files + node link-extractor.cjs --help Show this help + +Options: + --json Output results as JSON + --stats-only Show only statistics + --filter TYPE Filter links by category (external, internal-absolute, internal-relative, fragment) + +Examples: + node link-extractor.cjs content/influxdb3/core/install.md + node link-extractor.cjs --json content/**/*.md + node link-extractor.cjs --stats-only --filter external content/influxdb3/**/*.md +`); + process.exit(0); + } + + const jsonOutput = args.includes('--json'); + const statsOnly = args.includes('--stats-only'); + const filterType = args.includes('--filter') + ? 
args[args.indexOf('--filter') + 1] + : null; + + const files = args.filter( + (arg) => !arg.startsWith('--') && arg !== filterType + ); + const results = []; + + for (const filePath of files) { + const result = extractLinksFromFile(filePath); + if (result) { + // Apply filter if specified + if (filterType) { + result.links = result.links.filter( + (link) => link.category === filterType + ); + // Recalculate stats after filtering + result.stats = { + totalLinks: result.links.length, + externalLinks: result.links.filter((l) => l.category === 'external') + .length, + internalLinks: result.links.filter((l) => + l.category.startsWith('internal') + ).length, + fragmentLinks: result.links.filter((l) => l.category === 'fragment') + .length, + linksNeedingValidation: result.links.filter((l) => l.needsValidation) + .length, + }; + } + + results.push(result); + } + } + + if (jsonOutput) { + console.log(JSON.stringify(results, null, 2)); + } else if (statsOnly) { + console.log('\nLink Extraction Statistics:'); + console.log('=========================='); + + let totalFiles = 0; + let totalLinks = 0; + let totalExternal = 0; + let totalInternal = 0; + let totalFragment = 0; + let totalNeedingValidation = 0; + + results.forEach((result) => { + totalFiles++; + totalLinks += result.stats.totalLinks; + totalExternal += result.stats.externalLinks; + totalInternal += result.stats.internalLinks; + totalFragment += result.stats.fragmentLinks; + totalNeedingValidation += result.stats.linksNeedingValidation; + + console.log( + `${result.filePath}: ${result.stats.totalLinks} links (${result.stats.linksNeedingValidation} need validation)` + ); + }); + + console.log('\nSummary:'); + console.log(` Total files: ${totalFiles}`); + console.log(` Total links: ${totalLinks}`); + console.log(` External links: ${totalExternal}`); + console.log(` Internal links: ${totalInternal}`); + console.log(` Fragment links: ${totalFragment}`); + console.log(` Links needing validation: 
${totalNeedingValidation}`); + } else { + results.forEach((result) => { + console.log(`\nFile: ${result.filePath}`); + console.log(`Hash: ${result.fileHash}`); + console.log(`Links found: ${result.stats.totalLinks}`); + console.log( + `Links needing validation: ${result.stats.linksNeedingValidation}` + ); + + if (result.links.length > 0) { + console.log('\nLinks:'); + result.links.forEach((link, index) => { + console.log(` ${index + 1}. [${link.category}] ${link.url}`); + console.log(` Line ${link.line}, Column ${link.column}`); + console.log(` Text: "${link.text}"`); + console.log(` Hash: ${link.hash}`); + if (link.reference) { + console.log(` Reference: ${link.reference}`); + } + console.log(''); + }); + } + }); + } +} + +// Export functions for use as a module +module.exports = { + extractLinksFromFile, + extractMarkdownLinks, + extractHtmlLinks, + generateFileHash, + generateLinkHash, + categorizeLinkType, +}; + +// Run main function if called directly +if (require.main === module) { + main(); +} \ No newline at end of file diff --git a/.github/scripts/link-extractor.js b/.github/scripts/link-extractor.js new file mode 100644 index 000000000..616f7527d --- /dev/null +++ b/.github/scripts/link-extractor.js @@ -0,0 +1,478 @@ +#!/usr/bin/env node + +/** + * Link Extractor for Documentation Files + * Extracts all links from markdown and HTML files with metadata for caching and incremental validation + */ + +import fs from 'fs'; +import crypto from 'crypto'; +import matter from 'gray-matter'; +import path from 'path'; +import process from 'process'; +import { fileURLToPath } from 'url'; + +/** + * Extract links from markdown content + * @param {string} content - File content + * @param {string} filePath - Path to the file + * @returns {Array} Array of link objects with metadata + */ +function extractMarkdownLinks(content, filePath) { + const links = []; + const lines = content.split('\n'); + + // Track reference-style link definitions + const referenceLinks = new 
Map(); + + // First pass: collect reference definitions + content.replace(/^\s*\[([^\]]+)\]:\s*(.+)$/gm, (match, ref, url) => { + referenceLinks.set(ref.toLowerCase(), url.trim()); + return match; + }); + + // Process each line for links + lines.forEach((line, lineIndex) => { + const lineNumber = lineIndex + 1; + + // Standard markdown links + let match; + const standardLinkRegex = /\[([^\]]*)\]\(([^)]+)\)/g; + while ((match = standardLinkRegex.exec(line)) !== null) { + const linkText = match[1]; + const url = match[2]; + const columnStart = match.index; + + links.push({ + url: url.trim(), + text: linkText, + type: 'markdown', + line: lineNumber, + column: columnStart, + context: line.trim(), + hash: generateLinkHash(url.trim(), filePath, lineNumber), + }); + } + + // Reference-style links + const refLinkRegex = /\[([^\]]*)\]\[([^\]]*)\]/g; + while ((match = refLinkRegex.exec(line)) !== null) { + const linkText = match[1]; + const refKey = (match[2] || linkText).toLowerCase(); + const url = referenceLinks.get(refKey); + + if (url) { + const columnStart = match.index; + links.push({ + url: url, + text: linkText, + type: 'markdown-reference', + line: lineNumber, + column: columnStart, + context: line.trim(), + reference: refKey, + hash: generateLinkHash(url, filePath, lineNumber), + }); + } + } + + // Autolinks + const autolinkRegex = /<(https?:\/\/[^>]+)>/g; + while ((match = autolinkRegex.exec(line)) !== null) { + const url = match[1]; + const columnStart = match.index; + + links.push({ + url: url, + text: url, + type: 'autolink', + line: lineNumber, + column: columnStart, + context: line.trim(), + hash: generateLinkHash(url, filePath, lineNumber), + }); + } + + // Bare URLs (basic detection, avoid false positives) + // Regex to match bare URLs in text + // - (?:^|[\s\n]): Match the start of the line or any whitespace character + // - (https?:\/\/): Match the protocol (http or https) followed by :// + // - [^\s\)\]\}]+: Match the rest of the URL, stopping at spaces 
or closing characters like ), ], or } + const bareUrlRegex = /(?^|[\s\n])(?https?:\/\/[^\s\)\]\}]+)/g; + while ((match = bareUrlRegex.exec(line)) !== null) { + const url = match.groups.url; + const columnStart = match.index + match[0].length - url.length; + + // Skip if this URL is already captured in a proper markdown link + const alreadyCaptured = links.some( + (link) => + link.line === lineNumber && + Math.abs(link.column - columnStart) < 10 && + link.url === url + ); + + if (!alreadyCaptured) { + links.push({ + url: url, + text: url, + type: 'bare-url', + line: lineNumber, + column: columnStart, + context: line.trim(), + hash: generateLinkHash(url, filePath, lineNumber), + }); + } + } + }); + + return links; +} + +/** + * Extract links from HTML content + * @param {string} content - File content + * @param {string} filePath - Path to the file + * @returns {Array} Array of link objects with metadata + */ +function extractHtmlLinks(content, filePath) { + const links = []; + const lines = content.split('\n'); + + lines.forEach((line, lineIndex) => { + const lineNumber = lineIndex + 1; + let match; + + const htmlLinkRegex = /]*href\s*=\s*["']([^"']+)["'][^>]*>/gi; + while ((match = htmlLinkRegex.exec(line)) !== null) { + const url = match[1]; + const columnStart = match.index; + + // Extract link text if possible + const fullMatch = match[0]; + const textMatch = fullMatch.match(/>([^<]*) ({ + ...link, + ...categorizeLinkType(link.url), + filePath, + })); + + // Calculate statistics + const stats = { + totalLinks: enhancedLinks.length, + externalLinks: enhancedLinks.filter((l) => l.category === 'external') + .length, + internalLinks: enhancedLinks.filter((l) => + l.category.startsWith('internal') + ).length, + fragmentLinks: enhancedLinks.filter((l) => l.category === 'fragment') + .length, + linksNeedingValidation: enhancedLinks.filter((l) => l.needsValidation) + .length, + }; + + return { + filePath, + fileHash, + extension, + frontmatter, + links: enhancedLinks, + 
stats, + extractedAt: new Date().toISOString(), + }; + } catch (error) { + console.error(`Error extracting links from ${filePath}: ${error.message}`); + return null; + } +} + +/** + * Main function for CLI usage + */ +function main() { + const args = process.argv.slice(2); + + if (args.length === 0) { + console.error('Usage: node link-extractor.js [file2] [...]'); + console.error(' node link-extractor.js --help'); + process.exit(1); + } + + if (args[0] === '--help') { + console.log(` +Link Extractor for Documentation Files + +Usage: + node link-extractor.js [file2] [...] Extract links from files + node link-extractor.js --help Show this help + +Options: + --json Output results as JSON + --stats-only Show only statistics + --filter TYPE Filter links by category (external, internal-absolute, internal-relative, fragment) + +Examples: + node link-extractor.js content/influxdb3/core/install.md + node link-extractor.js --json content/**/*.md + node link-extractor.js --stats-only --filter external content/influxdb3/**/*.md +`); + process.exit(0); + } + + const jsonOutput = args.includes('--json'); + const statsOnly = args.includes('--stats-only'); + const filterType = args.includes('--filter') + ? 
args[args.indexOf('--filter') + 1] + : null; + + const files = args.filter( + (arg) => !arg.startsWith('--') && arg !== filterType + ); + const results = []; + + for (const filePath of files) { + const result = extractLinksFromFile(filePath); + if (result) { + // Apply filter if specified + if (filterType) { + result.links = result.links.filter( + (link) => link.category === filterType + ); + // Recalculate stats after filtering + result.stats = { + totalLinks: result.links.length, + externalLinks: result.links.filter((l) => l.category === 'external') + .length, + internalLinks: result.links.filter((l) => + l.category.startsWith('internal') + ).length, + fragmentLinks: result.links.filter((l) => l.category === 'fragment') + .length, + linksNeedingValidation: result.links.filter((l) => l.needsValidation) + .length, + }; + } + + results.push(result); + } + } + + if (jsonOutput) { + console.log(JSON.stringify(results, null, 2)); + } else if (statsOnly) { + console.log('\nLink Extraction Statistics:'); + console.log('=========================='); + + let totalFiles = 0; + let totalLinks = 0; + let totalExternal = 0; + let totalInternal = 0; + let totalFragment = 0; + let totalNeedingValidation = 0; + + results.forEach((result) => { + totalFiles++; + totalLinks += result.stats.totalLinks; + totalExternal += result.stats.externalLinks; + totalInternal += result.stats.internalLinks; + totalFragment += result.stats.fragmentLinks; + totalNeedingValidation += result.stats.linksNeedingValidation; + + console.log( + `${result.filePath}: ${result.stats.totalLinks} links (${result.stats.linksNeedingValidation} need validation)` + ); + }); + + console.log('\nSummary:'); + console.log(` Total files: ${totalFiles}`); + console.log(` Total links: ${totalLinks}`); + console.log(` External links: ${totalExternal}`); + console.log(` Internal links: ${totalInternal}`); + console.log(` Fragment links: ${totalFragment}`); + console.log(` Links needing validation: 
${totalNeedingValidation}`); + } else { + results.forEach((result) => { + console.log(`\nFile: ${result.filePath}`); + console.log(`Hash: ${result.fileHash}`); + console.log(`Links found: ${result.stats.totalLinks}`); + console.log( + `Links needing validation: ${result.stats.linksNeedingValidation}` + ); + + if (result.links.length > 0) { + console.log('\nLinks:'); + result.links.forEach((link, index) => { + console.log(` ${index + 1}. [${link.category}] ${link.url}`); + console.log(` Line ${link.line}, Column ${link.column}`); + console.log(` Text: "${link.text}"`); + console.log(` Hash: ${link.hash}`); + if (link.reference) { + console.log(` Reference: ${link.reference}`); + } + console.log(''); + }); + } + }); + } +} + +// Export functions for use as a module +export { + extractLinksFromFile, + extractMarkdownLinks, + extractHtmlLinks, + generateFileHash, + generateLinkHash, + categorizeLinkType, +}; + +// Run main function if called directly +if (fileURLToPath(import.meta.url) === process.argv[1]) { + main(); +} diff --git a/.github/scripts/matrix-generator.js b/.github/scripts/matrix-generator.js new file mode 100644 index 000000000..a5e8bba4a --- /dev/null +++ b/.github/scripts/matrix-generator.js @@ -0,0 +1,385 @@ +/** + * Matrix Generator for Link Validation Workflows + * Replaces complex bash scripting with maintainable JavaScript + * Includes cache-aware optimization to skip validation of unchanged files + */ + +import { spawn } from 'child_process'; +import process from 'process'; +import { fileURLToPath } from 'url'; // Used for main execution check at bottom of file + +// Product configuration mapping file paths to products +const PRODUCT_MAPPING = { + 'content/influxdb3/core': { + key: 'influxdb3-core', + name: 'InfluxDB 3 Core', + }, + 'content/influxdb3/enterprise': { + key: 'influxdb3-enterprise', + name: 'InfluxDB 3 Enterprise', + }, + 'content/influxdb3/cloud-dedicated': { + key: 'influxdb3-cloud-dedicated', + name: 'InfluxDB 3 Cloud Dedicated', 
+ }, + 'content/influxdb3/cloud-serverless': { + key: 'influxdb3-cloud-serverless', + name: 'InfluxDB 3 Cloud Serverless', + }, + 'content/influxdb3/clustered': { + key: 'influxdb3-clustered', + name: 'InfluxDB 3 Clustered', + }, + 'content/influxdb3/explorer': { + key: 'influxdb3-explorer', + name: 'InfluxDB 3 Explorer', + }, + 'content/influxdb/v2': { + key: 'influxdb-v2', + name: 'InfluxDB v2', + }, + 'content/influxdb/cloud': { + key: 'influxdb-cloud', + name: 'InfluxDB Cloud', + }, + 'content/influxdb/v1': { + key: 'influxdb-v1', + name: 'InfluxDB v1', + }, + 'content/influxdb/enterprise_influxdb': { + key: 'influxdb-enterprise-v1', + name: 'InfluxDB Enterprise v1', + }, + 'content/telegraf': { + key: 'telegraf', + name: 'Telegraf', + }, + 'content/kapacitor': { + key: 'kapacitor', + name: 'Kapacitor', + }, + 'content/chronograf': { + key: 'chronograf', + name: 'Chronograf', + }, + 'content/flux': { + key: 'flux', + name: 'Flux', + }, + 'content/shared': { + key: 'shared', + name: 'Shared Content', + }, + 'api-docs': { + key: 'api-docs', + name: 'API Documentation', + }, +}; + +/** + * Group files by product based on their path + * @param {string[]} files - Array of file paths + * @returns {Object} - Object with product keys and arrays of files + */ +function groupFilesByProduct(files) { + const productFiles = {}; + + // Initialize all products + Object.values(PRODUCT_MAPPING).forEach((product) => { + productFiles[product.key] = []; + }); + + files.forEach((file) => { + let matched = false; + + // Check each product mapping + for (const [pathPrefix, product] of Object.entries(PRODUCT_MAPPING)) { + if (file.startsWith(pathPrefix + '/')) { + productFiles[product.key].push(file); + matched = true; + break; + } + } + + // Handle edge case for api-docs (no trailing slash) + if (!matched && file.startsWith('api-docs/')) { + productFiles['api-docs'].push(file); + } + }); + + return productFiles; +} + +/** + * Run incremental validation analysis + * @param {string[]} 
files - Array of file paths to analyze + * @returns {Promise} - Incremental validation results + */ +async function runIncrementalAnalysis(files) { + return new Promise((resolve) => { + const child = spawn( + 'node', + ['.github/scripts/incremental-validator.cjs', ...files], + { + stdio: ['pipe', 'pipe', 'pipe'], + env: process.env, + } + ); + + let stdout = ''; + let stderr = ''; + + child.stdout.on('data', (data) => { + stdout += data.toString(); + }); + + child.stderr.on('data', (data) => { + stderr += data.toString(); + }); + + child.on('close', (code) => { + if (code === 0) { + try { + // Parse the JSON output from the validation script + const lines = stdout.trim().split('\n'); + const jsonLine = lines.find((line) => line.startsWith('{')); + + if (jsonLine) { + const results = JSON.parse(jsonLine); + resolve(results); + } else { + resolve({ filesToValidate: files.map((f) => ({ filePath: f })) }); + } + } catch (error) { + console.warn( + `Warning: Could not parse incremental validation results: ${error.message}` + ); + resolve({ filesToValidate: files.map((f) => ({ filePath: f })) }); + } + } else { + console.warn( + `Incremental validation failed with code ${code}: ${stderr}` + ); + resolve({ filesToValidate: files.map((f) => ({ filePath: f })) }); + } + }); + + child.on('error', (error) => { + console.warn(`Incremental validation error: ${error.message}`); + resolve({ filesToValidate: files.map((f) => ({ filePath: f })) }); + }); + }); +} + +/** + * Generate matrix configuration for GitHub Actions with cache awareness + * @param {string[]} changedFiles - Array of changed file paths + * @param {Object} options - Configuration options + * @returns {Promise} - Matrix configuration object + */ +async function generateMatrix(changedFiles, options = {}) { + const { + maxConcurrentJobs = 5, + forceSequential = false, + minFilesForParallel = 10, + useCache = true, + } = options; + + if (!changedFiles || changedFiles.length === 0) { + return { + strategy: 'none', + 
hasChanges: false, + matrix: { include: [] }, + cacheStats: { hitRate: 100, cacheHits: 0, cacheMisses: 0 }, + }; + } + + let filesToValidate = changedFiles; + let cacheStats = { + hitRate: 0, + cacheHits: 0, + cacheMisses: changedFiles.length, + }; + + // Run incremental analysis if cache is enabled + if (useCache) { + try { + console.log( + `🔍 Running cache analysis for ${changedFiles.length} files...` + ); + const analysisResults = await runIncrementalAnalysis(changedFiles); + + if (analysisResults.filesToValidate) { + filesToValidate = analysisResults.filesToValidate.map( + (f) => f.filePath + ); + cacheStats = analysisResults.cacheStats || cacheStats; + + console.log( + `📊 Cache analysis complete: ${cacheStats.hitRate}% hit rate` + ); + console.log( + `✅ ${cacheStats.cacheHits} files cached, ${cacheStats.cacheMisses} need validation` + ); + } + } catch (error) { + console.warn( + `Cache analysis failed: ${error.message}, proceeding without cache optimization` + ); + } + } + + // If no files need validation after cache analysis + if (filesToValidate.length === 0) { + return { + strategy: 'cache-hit', + hasChanges: false, + matrix: { include: [] }, + cacheStats, + message: '✨ All files are cached - no validation needed!', + }; + } + + const productFiles = groupFilesByProduct(filesToValidate); + const productsWithFiles = Object.entries(productFiles).filter( + ([key, files]) => files.length > 0 + ); + + // Determine strategy based on file count and configuration + const totalFiles = filesToValidate.length; + const shouldUseParallel = + !forceSequential && + totalFiles >= minFilesForParallel && + productsWithFiles.length > 1; + + if (shouldUseParallel) { + // Parallel strategy: create matrix with products + const matrixIncludes = productsWithFiles.map(([productKey, files]) => { + const product = Object.values(PRODUCT_MAPPING).find( + (p) => p.key === productKey + ); + return { + product: productKey, + name: product?.name || productKey, + files: files.join(' '), + 
cacheEnabled: useCache, + }; + }); + + return { + strategy: 'parallel', + hasChanges: true, + matrix: { include: matrixIncludes.slice(0, maxConcurrentJobs) }, + cacheStats, + originalFileCount: changedFiles.length, + validationFileCount: filesToValidate.length, + }; + } else { + // Sequential strategy: single job with all files + return { + strategy: 'sequential', + hasChanges: true, + matrix: { + include: [ + { + product: 'all', + name: 'All Files', + files: filesToValidate.join(' '), + cacheEnabled: useCache, + }, + ], + }, + cacheStats, + originalFileCount: changedFiles.length, + validationFileCount: filesToValidate.length, + }; + } +} + +/** + * CLI interface for the matrix generator + */ +async function main() { + const args = process.argv.slice(2); + + if (args.includes('--help') || args.includes('-h')) { + console.log(` +Usage: node matrix-generator.js [options] ... + +Options: + --max-concurrent Maximum concurrent jobs (default: 5) + --force-sequential Force sequential execution + --min-files-parallel Minimum files needed for parallel (default: 10) + --output-format Output format: json, github (default: github) + --no-cache Disable cache-aware optimization + --help, -h Show this help message + +Examples: + node matrix-generator.js content/influxdb3/core/file1.md content/influxdb/v2/file2.md + node matrix-generator.js --force-sequential content/shared/file.md + node matrix-generator.js --no-cache --output-format json *.md +`); + process.exit(0); + } + + // Parse options + const options = {}; + const files = []; + + for (let i = 0; i < args.length; i++) { + const arg = args[i]; + + if (arg === '--max-concurrent' && i + 1 < args.length) { + options.maxConcurrentJobs = parseInt(args[++i]); + } else if (arg === '--force-sequential') { + options.forceSequential = true; + } else if (arg === '--min-files-parallel' && i + 1 < args.length) { + options.minFilesForParallel = parseInt(args[++i]); + } else if (arg === '--output-format' && i + 1 < args.length) { + 
options.outputFormat = args[++i]; + } else if (arg === '--no-cache') { + options.useCache = false; + } else if (!arg.startsWith('--')) { + files.push(arg); + } + } + + try { + const result = await generateMatrix(files, options); + + if (options.outputFormat === 'json') { + console.log(JSON.stringify(result, null, 2)); + } else { + // GitHub Actions format + console.log(`strategy=${result.strategy}`); + console.log(`has-changes=${result.hasChanges}`); + console.log(`matrix=${JSON.stringify(result.matrix)}`); + + // Add cache statistics + if (result.cacheStats) { + console.log(`cache-hit-rate=${result.cacheStats.hitRate}`); + console.log(`cache-hits=${result.cacheStats.cacheHits}`); + console.log(`cache-misses=${result.cacheStats.cacheMisses}`); + } + + if (result.originalFileCount !== undefined) { + console.log(`original-file-count=${result.originalFileCount}`); + console.log(`validation-file-count=${result.validationFileCount}`); + } + + if (result.message) { + console.log(`message=${result.message}`); + } + } + } catch (error) { + console.error(`Error generating matrix: ${error.message}`); + process.exit(1); + } +} + +// Run CLI if this file is executed directly +if (fileURLToPath(import.meta.url) === process.argv[1]) { + main().catch(console.error); +} + +export { generateMatrix, groupFilesByProduct, PRODUCT_MAPPING }; diff --git a/.github/scripts/utils/url-transformer.js b/.github/scripts/utils/url-transformer.js new file mode 100644 index 000000000..9253eb852 --- /dev/null +++ b/.github/scripts/utils/url-transformer.js @@ -0,0 +1,24 @@ +/** + * URL Transformation Utilities + * Shared logic for converting file paths to URL paths + * Used across documentation testing and build tools + */ + +/** + * Convert a content file path to its corresponding URL path + * @param {string} filePath - File path starting with 'content/' + * @returns {string} - URL path (starts with '/') + */ +function filePathToUrl(filePath) { + // Map to URL + let url = 
filePath.replace(/^content/, ''); + url = url.replace(/\/_index\.(html|md)$/, '/'); + url = url.replace(/\.md$/, '/'); + url = url.replace(/\.html$/, '/'); + if (!url.startsWith('/')) { + url = '/' + url; + } + return url; +} + +export { filePathToUrl }; diff --git a/.github/workflows/audit-documentation.yml b/.github/workflows/audit-documentation.yml new file mode 100644 index 000000000..742f31c65 --- /dev/null +++ b/.github/workflows/audit-documentation.yml @@ -0,0 +1,429 @@ +name: Audit Documentation + +on: + workflow_dispatch: + inputs: + version: + description: 'Version to audit (must exist in git tags, e.g., v3.0.0 or "local" for dev containers)' + required: false + default: 'local' + create_issue: + description: 'Create GitHub issue with audit results' + required: false + type: boolean + default: false + +jobs: + cli-3-core: + name: Audit InfluxDB 3 Core CLI + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: '18' + cache: 'yarn' + + - name: Install dependencies + run: yarn install --frozen-lockfile + + - name: Set up Docker + if: github.event.inputs.version == 'local' || github.event_name == 'schedule' + run: | + docker compose up -d influxdb3-core + sleep 10 # Wait for containers to be ready + + - name: Run Core CLI audit + run: | + VERSION="${{ github.event.inputs.version || 'local' }}" + node ./helper-scripts/influxdb3-monolith/audit-cli-documentation.js core $VERSION + + - name: Upload audit reports + uses: actions/upload-artifact@v4 + with: + name: cli-audit-3-core-${{ github.event.inputs.version || 'local' }} + path: helper-scripts/output/cli-audit/ + retention-days: 30 + + cli-3-enterprise: + name: Audit InfluxDB 3 Enterprise CLI + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: '18' + cache: 'yarn' + + - name: Install dependencies + run: yarn install 
--frozen-lockfile + + - name: Set up Docker + if: github.event.inputs.version == 'local' || github.event_name == 'schedule' + run: | + docker compose up -d influxdb3-enterprise + sleep 10 # Wait for containers to be ready + + - name: Run Enterprise CLI audit + run: | + VERSION="${{ github.event.inputs.version || 'local' }}" + node ./helper-scripts/influxdb3-monolith/audit-cli-documentation.js enterprise $VERSION + + - name: Upload audit reports + uses: actions/upload-artifact@v4 + with: + name: cli-audit-3-enterprise-${{ github.event.inputs.version || 'local' }} + path: helper-scripts/output/cli-audit/ + retention-days: 30 + + cli-3-influxctl: + name: Audit InfluxDB 3 influxctl CLI + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: '18' + cache: 'yarn' + + - name: Install dependencies + run: yarn install --frozen-lockfile + + - name: Run influxctl CLI audit + run: | + VERSION="${{ github.event.inputs.version || 'local' }}" + echo "influxctl CLI audit not yet implemented" + # TODO: Implement influxctl CLI audit + # node ./helper-scripts/influxdb3-distributed/audit-influxctl-cli.js $VERSION + + # Create placeholder report + mkdir -p helper-scripts/output/cli-audit + cat > helper-scripts/output/cli-audit/influxctl-audit-$VERSION.md << 'EOF' + # influxctl CLI Audit Report + + **CLI:** influxctl + **Version:** $VERSION + **Date:** $(date) + **Status:** Placeholder - audit not yet implemented + + ## TODO + - Implement influxctl CLI help extraction + - Compare against clustered and cloud-dedicated documentation + - Generate patches for missing documentation + EOF + + - name: Upload audit reports + uses: actions/upload-artifact@v4 + with: + name: cli-audit-3-influxctl-${{ github.event.inputs.version || 'local' }} + path: helper-scripts/output/cli-audit/ + retention-days: 30 + + api-3-core: + name: Audit InfluxDB 3 Core API + runs-on: ubuntu-latest + + steps: + - uses: 
actions/checkout@v4 + + - name: Run Core API audit + run: | + VERSION="${{ github.event.inputs.version || 'local' }}" + echo "Core API audit not yet implemented" + # TODO: Implement Core API audit + # node ./helper-scripts/influxdb3-monolith/audit-api-documentation.js core $VERSION + + # Create placeholder report + mkdir -p helper-scripts/output/api-audit + cat > helper-scripts/output/api-audit/core-api-audit-$VERSION.md << 'EOF' + # InfluxDB 3 Core API Audit Report + + **API:** InfluxDB 3 Core + **Version:** $VERSION + **Date:** $(date) + **Status:** Placeholder - audit not yet implemented + + ## TODO + - Implement API endpoint discovery + - Compare against OpenAPI specs + - Validate documentation examples + EOF + + - name: Upload audit reports + uses: actions/upload-artifact@v4 + with: + name: api-audit-3-core-${{ github.event.inputs.version || 'local' }} + path: helper-scripts/output/api-audit/ + retention-days: 30 + + api-3-enterprise: + name: Audit InfluxDB 3 Enterprise API + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Run Enterprise API audit + run: | + VERSION="${{ github.event.inputs.version || 'local' }}" + echo "Enterprise API audit not yet implemented" + # TODO: Implement Enterprise API audit + # node ./helper-scripts/influxdb3-monolith/audit-api-documentation.js enterprise $VERSION + + # Create placeholder report + mkdir -p helper-scripts/output/api-audit + cat > helper-scripts/output/api-audit/enterprise-api-audit-$VERSION.md << 'EOF' + # InfluxDB 3 Enterprise API Audit Report + + **API:** InfluxDB 3 Enterprise + **Version:** $VERSION + **Date:** $(date) + **Status:** Placeholder - audit not yet implemented + + ## TODO + - Implement API endpoint discovery + - Compare against OpenAPI specs + - Validate documentation examples + - Check enterprise-specific endpoints + EOF + + - name: Upload audit reports + uses: actions/upload-artifact@v4 + with: + name: api-audit-3-enterprise-${{ github.event.inputs.version || 'local' }} + 
path: helper-scripts/output/api-audit/ + retention-days: 30 + + api-3-cloud-dedicated: + name: Audit InfluxDB 3 Cloud Dedicated API + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Run Cloud Dedicated API audit + run: | + VERSION="${{ github.event.inputs.version || 'local' }}" + echo "Cloud Dedicated API audit not yet implemented" + # TODO: Implement Cloud Dedicated API audit + # node ./helper-scripts/influxdb3-distributed/audit-api-documentation.js cloud-dedicated $VERSION + + # Create placeholder report + mkdir -p helper-scripts/output/api-audit + cat > helper-scripts/output/api-audit/cloud-dedicated-api-audit-$VERSION.md << 'EOF' + # InfluxDB 3 Cloud Dedicated API Audit Report + + **API:** InfluxDB 3 Cloud Dedicated + **Version:** $VERSION + **Date:** $(date) + **Status:** Placeholder - audit not yet implemented + + ## TODO + - Implement management API audit + - Implement data API audit + - Compare against OpenAPI specs + - Validate documentation examples + EOF + + - name: Upload audit reports + uses: actions/upload-artifact@v4 + with: + name: api-audit-3-cloud-dedicated-${{ github.event.inputs.version || 'local' }} + path: helper-scripts/output/api-audit/ + retention-days: 30 + + api-3-clustered: + name: Audit InfluxDB 3 Clustered API + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Run Clustered API audit + run: | + VERSION="${{ github.event.inputs.version || 'local' }}" + echo "Clustered API audit not yet implemented" + # TODO: Implement Clustered API audit + # node ./helper-scripts/influxdb3-distributed/audit-api-documentation.js clustered $VERSION + + # Create placeholder report + mkdir -p helper-scripts/output/api-audit + cat > helper-scripts/output/api-audit/clustered-api-audit-$VERSION.md << 'EOF' + # InfluxDB 3 Clustered API Audit Report + + **API:** InfluxDB 3 Clustered + **Version:** $VERSION + **Date:** $(date) + **Status:** Placeholder - audit not yet implemented + + ## TODO + - Implement 
management API audit + - Implement data API audit + - Compare against OpenAPI specs + - Validate documentation examples + EOF + + - name: Upload audit reports + uses: actions/upload-artifact@v4 + with: + name: api-audit-3-clustered-${{ github.event.inputs.version || 'local' }} + path: helper-scripts/output/api-audit/ + retention-days: 30 + + api-3-cloud-serverless: + name: Audit InfluxDB 3 Cloud Serverless API + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Run Cloud Serverless API audit + run: | + VERSION="${{ github.event.inputs.version || 'local' }}" + echo "Cloud Serverless API audit not yet implemented" + # TODO: Implement Cloud Serverless API audit + # node ./helper-scripts/influxdb3-distributed/audit-api-documentation.js cloud-serverless $VERSION + + # Create placeholder report + mkdir -p helper-scripts/output/api-audit + cat > helper-scripts/output/api-audit/cloud-serverless-api-audit-$VERSION.md << 'EOF' + # InfluxDB 3 Cloud Serverless API Audit Report + + **API:** InfluxDB 3 Cloud Serverless + **Version:** $VERSION + **Date:** $(date) + **Status:** Placeholder - audit not yet implemented + + ## TODO + - Implement management API audit + - Implement data API audit + - Compare against OpenAPI specs + - Validate documentation examples + EOF + + - name: Upload audit reports + uses: actions/upload-artifact@v4 + with: + name: api-audit-3-cloud-serverless-${{ github.event.inputs.version || 'local' }} + path: helper-scripts/output/api-audit/ + retention-days: 30 + + create-audit-issues: + name: Create Issues from Audit Results + runs-on: ubuntu-latest + needs: [ + cli-3-core, + cli-3-enterprise, + cli-3-influxctl, + api-3-core, + api-3-enterprise, + api-3-cloud-dedicated, + api-3-clustered, + api-3-cloud-serverless + ] + if: always() && (github.event_name == 'schedule' || github.event.inputs.create_issue == 'true') + + steps: + - uses: actions/checkout@v4 + + - name: Download all audit reports + uses: actions/download-artifact@v4 + 
with: + path: audit-reports/ + + - name: Create issues from audit results + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const path = require('path'); + + // Find all audit report directories + const reportDirs = fs.readdirSync('audit-reports'); + + for (const reportDir of reportDirs) { + const reportPath = path.join('audit-reports', reportDir); + const files = fs.readdirSync(reportPath); + + for (const file of files) { + if (file.endsWith('.md')) { + const content = fs.readFileSync(path.join(reportPath, file), 'utf8'); + + // Only create issues if there are actual problems (not placeholders) + const hasIssues = content.includes('⚠️ Missing from docs') || + content.includes('ℹ️ Documented but not in CLI') || + content.includes('API endpoint mismatch'); + + if (hasIssues) { + const auditType = reportDir.replace(/-(local|\d+\.\d+\.\d+)$/, ''); + + await github.rest.issues.create({ + owner: context.repo.owner, + repo: context.repo.repo, + title: `Documentation Audit Issues - ${auditType}`, + body: `## Audit Results\n\n${content}`, + labels: ['documentation', 'audit', auditType.includes('cli') ? 
'cli-audit' : 'api-audit'] + }); + + console.log(`Created issue for ${auditType}`); + } + } + } + } + + audit-summary: + name: Generate Summary Report + runs-on: ubuntu-latest + needs: [ + cli-3-core, + cli-3-enterprise, + cli-3-influxctl, + api-3-core, + api-3-enterprise, + api-3-cloud-dedicated, + api-3-clustered, + api-3-cloud-serverless + ] + if: always() + + steps: + - uses: actions/checkout@v4 + + - name: Download all artifacts + uses: actions/download-artifact@v4 + with: + path: audit-artifacts/ + + - name: Generate summary + run: | + echo "# Documentation Audit Summary" > summary.md + echo "Date: $(date)" >> summary.md + echo "Version: ${{ github.event.inputs.version || 'local' }}" >> summary.md + echo "" >> summary.md + + # Add results from each audit type + for dir in audit-artifacts/*/; do + if [ -d "$dir" ]; then + echo "## $(basename "$dir")" >> summary.md + if [ -f "$dir"/*.md ]; then + cat "$dir"/*.md >> summary.md + fi + echo "" >> summary.md + fi + done + + - name: Upload summary + uses: actions/upload-artifact@v4 + with: + name: audit-summary + path: summary.md + retention-days: 30 \ No newline at end of file diff --git a/.github/workflows/influxdb3-release.yml b/.github/workflows/influxdb3-release.yml new file mode 100644 index 000000000..643cdd1ae --- /dev/null +++ b/.github/workflows/influxdb3-release.yml @@ -0,0 +1,503 @@ +name: InfluxDB 3 Release Documentation + +on: + workflow_dispatch: + inputs: + product: + description: 'Product being released' + required: true + type: choice + options: + - core + - enterprise + - clustered + - cloud-dedicated + - cloud-serverless + version: + description: 'Release tag name (must exist in git tags, e.g., v3.0.0 or "local" for dev)' + required: true + type: string + previous_version: + description: 'Previous release tag name (must exist in git tags, e.g., v2.9.0)' + required: true + type: string + dry_run: + description: 'Dry run (do not create PRs or issues)' + required: false + type: boolean + default: 
true + +jobs: + generate-release-notes-core-enterprise: + name: Generate Release Notes (Core/Enterprise) + runs-on: ubuntu-latest + if: contains(fromJSON('["core", "enterprise"]'), github.event.inputs.product) + outputs: + generated: ${{ steps.generate.outputs.generated }} + + steps: + - uses: actions/checkout@v4 + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: '18' + cache: 'yarn' + + - name: Install dependencies + run: yarn install --frozen-lockfile + + - name: Generate release notes + id: generate + run: | + echo "Generating Core/Enterprise release notes for ${{ github.event.inputs.product }} v${{ github.event.inputs.version }}" + + # Create output directory + mkdir -p helper-scripts/output/release-notes + + # Note: This generates placeholder release notes since the actual repositories + # (influxdb and influxdb_pro) are not available in the GitHub Actions environment. + # To generate actual release notes, the script would need to be run locally with: + # node ./helper-scripts/common/generate-release-notes.js \ + # --config ./helper-scripts/common/config/influxdb3-core-enterprise.json \ + # ${{ github.event.inputs.previous_version }} \ + # ${{ github.event.inputs.version }} + + # Create structured placeholder that matches the expected format + cat > helper-scripts/output/release-notes/release-notes-${{ github.event.inputs.product }}-${{ github.event.inputs.version }}.md << EOF + > [!Note] + > #### InfluxDB 3 Core and Enterprise relationship + > + > InfluxDB 3 Enterprise is a superset of InfluxDB 3 Core. + > All updates to Core are automatically included in Enterprise. + > The Enterprise sections below only list updates exclusive to Enterprise. 
+ + ## ${{ github.event.inputs.version }} {date="$(date +'%Y-%m-%d')"} + + ### Core + + #### Features + + - TODO: Add Core features for ${{ github.event.inputs.product }} ${{ github.event.inputs.version }} + + #### Bug Fixes + + - TODO: Add Core bug fixes for ${{ github.event.inputs.product }} ${{ github.event.inputs.version }} + + ### Enterprise + + All Core updates are included in Enterprise. Additional Enterprise-specific features and fixes: + + #### Features + + - TODO: Add Enterprise-specific features for ${{ github.event.inputs.version }} + + #### Bug Fixes + + - TODO: Add Enterprise-specific bug fixes for ${{ github.event.inputs.version }} + EOF + + echo "generated=true" >> $GITHUB_OUTPUT + + - name: Upload release notes + uses: actions/upload-artifact@v4 + with: + name: release-notes-${{ github.event.inputs.product }}-${{ github.event.inputs.version }} + path: helper-scripts/output/release-notes/ + retention-days: 30 + + # generate-release-notes-distributed: + # name: Generate Release Notes (Distributed) + # runs-on: ubuntu-latest + # if: contains(fromJSON('["clustered", "cloud-dedicated", "cloud-serverless"]'), github.event.inputs.product) + # outputs: + # generated: ${{ steps.generate.outputs.generated }} + + # steps: + # - uses: actions/checkout@v4 + + # - name: Set up Node.js + # uses: actions/setup-node@v4 + # with: + # node-version: '18' + # cache: 'yarn' + + # - name: Install dependencies + # run: yarn install --frozen-lockfile + + # - name: Generate release notes + # id: generate + # run: | + # echo "Generating distributed product release notes for ${{ github.event.inputs.product }} v${{ github.event.inputs.version }}" + + # # Create output directory + # mkdir -p helper-scripts/output/release-notes + + # # Note: This generates placeholder release notes since the actual repositories + # # for distributed products are not available in the GitHub Actions environment. 
+ # # To generate actual release notes, the script would need to be run locally with: + # # node ./helper-scripts/common/generate-release-notes.js \ + # # --config ./helper-scripts/common/config/influxdb3-clustered.json \ + # # ${{ github.event.inputs.previous_version }} \ + # # ${{ github.event.inputs.version }} + + # # Create structured placeholder for distributed products + # cat > helper-scripts/output/release-notes/release-notes-${{ github.event.inputs.product }}-${{ github.event.inputs.version }}.md << EOF + # ## ${{ github.event.inputs.version }} {date="$(date +'%Y-%m-%d')"} + + # ### Features + + # - TODO: Add features for ${{ github.event.inputs.product }} ${{ github.event.inputs.version }} + + # ### Bug Fixes + + # - TODO: Add bug fixes for ${{ github.event.inputs.product }} ${{ github.event.inputs.version }} + + # ### Performance Improvements + + # - TODO: Add performance improvements for ${{ github.event.inputs.product }} ${{ github.event.inputs.version }} + # EOF + + # echo "generated=true" >> $GITHUB_OUTPUT + + # - name: Upload release notes + # uses: actions/upload-artifact@v4 + # with: + # name: release-notes-${{ github.event.inputs.product }}-${{ github.event.inputs.version }} + # path: helper-scripts/output/release-notes/ + # retention-days: 30 + + audit-cli-documentation: + name: Audit CLI Documentation + needs: generate-release-notes-core-enterprise + runs-on: ubuntu-latest + if: needs.generate-release-notes-core-enterprise.outputs.generated == 'true' && contains(fromJSON('["core", "enterprise"]'), github.event.inputs.product) + + steps: + - uses: actions/checkout@v4 + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: '18' + cache: 'yarn' + + - name: Install dependencies + run: yarn install --frozen-lockfile + + - name: Pull Docker images for version + run: | + VERSION="${{ github.event.inputs.version }}" + PRODUCT="${{ github.event.inputs.product }}" + + if [ "$PRODUCT" == "both" ]; then + docker pull 
influxdb:${VERSION}-core || true + docker pull influxdb:${VERSION}-enterprise || true + else + docker pull influxdb:${VERSION}-${PRODUCT} || true + fi + + - name: Run CLI audit + run: | + PRODUCT="${{ github.event.inputs.product }}" + VERSION="${{ github.event.inputs.version }}" + + node ./helper-scripts/influxdb3-monolith/audit-cli-documentation.js $PRODUCT $VERSION + + - name: Upload CLI audit reports + uses: actions/upload-artifact@v4 + with: + name: cli-audit-release-${{ github.event.inputs.product }}-${{ github.event.inputs.version }} + path: helper-scripts/output/cli-audit/ + retention-days: 90 + + # audit-distributed-documentation: + # name: Audit Distributed Products Documentation + # needs: generate-release-notes-distributed + # runs-on: ubuntu-latest + # if: needs.generate-release-notes-distributed.outputs.generated == 'true' && contains(fromJSON('["clustered", "cloud-dedicated", "cloud-serverless"]'), github.event.inputs.product) + + # steps: + # - uses: actions/checkout@v4 + + # - name: Set up Node.js + # uses: actions/setup-node@v4 + # with: + # node-version: '18' + # cache: 'yarn' + + # - name: Install dependencies + # run: yarn install --frozen-lockfile + + # - name: Run distributed products audit + # run: | + # PRODUCT="${{ github.event.inputs.product }}" + # VERSION="${{ github.event.inputs.version }}" + + # echo "Auditing distributed product: $PRODUCT v$VERSION" + # # TODO: Implement distributed products audit for release + # # This would audit API docs, deployment guides, configuration references + # # node ./helper-scripts/influxdb3-distributed/audit-documentation.js $PRODUCT $VERSION + + # # For now, create placeholder report + # mkdir -p helper-scripts/output/distributed-audit + # cat > helper-scripts/output/distributed-audit/release-audit-$PRODUCT-$VERSION.md << 'EOF' + # # Release Audit Report - Distributed Products + + # **Product:** $PRODUCT + # **Version:** $VERSION + # **Date:** $(date) + # **Status:** Placeholder - audit not yet 
implemented + + # ## Areas to Audit + # - API documentation completeness + # - Deployment guide accuracy + # - Configuration reference updates + # - Integration guide updates + # - Version-specific feature documentation + + # ## TODO + # - Implement API documentation audit + # - Implement deployment guide audit + # - Implement configuration reference audit + # - Implement integration guide audit + # EOF + + # - name: Upload distributed audit reports + # uses: actions/upload-artifact@v4 + # with: + # name: distributed-audit-release-${{ github.event.inputs.product }}-${{ github.event.inputs.version }} + # path: helper-scripts/output/distributed-audit/ + # retention-days: 90 + + create-documentation-pr: + name: Create Documentation PR + needs: [generate-release-notes-core-enterprise, audit-cli-documentation] + runs-on: ubuntu-latest + if: github.event.inputs.dry_run != 'true' && always() && (needs.generate-release-notes-core-enterprise.result == 'success') + + steps: + - uses: actions/checkout@v4 + + - name: Download artifacts + uses: actions/download-artifact@v4 + with: + path: artifacts/ + + - name: Create release branch + run: | + BRANCH="release-docs-${{ github.event.inputs.product }}-${{ github.event.inputs.version }}" + git checkout -b $BRANCH + echo "BRANCH=$BRANCH" >> $GITHUB_ENV + + - name: Copy release notes to docs + run: | + # Download the generated release notes artifact + PRODUCT="${{ github.event.inputs.product }}" + VERSION="${{ github.event.inputs.version }}" + + # Determine the target documentation file based on product + case "$PRODUCT" in + "core"|"enterprise") + TARGET_FILE="content/shared/v3-core-enterprise-release-notes/_index.md" + SOURCE_FILE="artifacts/release-notes-${PRODUCT}-${VERSION}/release-notes-${PRODUCT}-${VERSION}.md" + ;; + "clustered") + TARGET_FILE="content/influxdb3/clustered/reference/release-notes/_index.md" + SOURCE_FILE="artifacts/release-notes-${PRODUCT}-${VERSION}/release-notes-${PRODUCT}-${VERSION}.md" + ;; + 
"cloud-dedicated") + TARGET_FILE="content/influxdb3/cloud-dedicated/reference/release-notes/_index.md" + SOURCE_FILE="artifacts/release-notes-${PRODUCT}-${VERSION}/release-notes-${PRODUCT}-${VERSION}.md" + ;; + "cloud-serverless") + TARGET_FILE="content/influxdb3/cloud-serverless/reference/release-notes/_index.md" + SOURCE_FILE="artifacts/release-notes-${PRODUCT}-${VERSION}/release-notes-${PRODUCT}-${VERSION}.md" + ;; + *) + echo "Unknown product: $PRODUCT" + exit 1 + ;; + esac + + # Check if source file exists + if [ -f "$SOURCE_FILE" ]; then + echo "Copying release notes from $SOURCE_FILE to $TARGET_FILE" + + # For Core/Enterprise, prepend to existing file (new releases go at the top) + if [ "$PRODUCT" = "core" ] || [ "$PRODUCT" = "enterprise" ]; then + # Create temporary file with new content + existing content + cp "$SOURCE_FILE" temp_release_notes.md + echo "" >> temp_release_notes.md + cat "$TARGET_FILE" >> temp_release_notes.md + mv temp_release_notes.md "$TARGET_FILE" + else + # For other products, replace the file + cp "$SOURCE_FILE" "$TARGET_FILE" + fi + + echo "Release notes successfully copied to documentation" + else + echo "Warning: Release notes file not found at $SOURCE_FILE" + echo "Available files in artifacts:" + find artifacts/ -type f -name "*.md" || echo "No markdown files found" + fi + + - name: Create Pull Request + uses: peter-evans/create-pull-request@v5 + with: + token: ${{ secrets.GITHUB_TOKEN }} + branch: ${{ env.BRANCH }} + title: "docs: Release documentation for ${{ github.event.inputs.product }} v${{ github.event.inputs.version }}" + body: | + ## Release Documentation Update + + This PR contains documentation updates for **${{ github.event.inputs.product }} v${{ github.event.inputs.version }}** + + ### Included Updates: + - [ ] Release notes + - [ ] Version updates + - [ ] CLI documentation audit results + + ### Artifacts: + - [Release Notes](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}) + - [CLI 
Audit Report](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}) + + ### Manual Review Needed: + Please review the CLI audit report for any missing or outdated documentation that needs to be updated. + + --- + *This PR was automatically generated by the release workflow.* + labels: | + documentation + release + ${{ github.event.inputs.product }} + draft: true + + create-audit-issue: + name: Create Audit Issue + needs: [audit-cli-documentation] + runs-on: ubuntu-latest + if: github.event.inputs.dry_run != 'true' && always() && (needs.audit-cli-documentation.result == 'success') + + steps: + - uses: actions/checkout@v4 + + - name: Download audit reports + uses: actions/download-artifact@v4 + with: + path: audit-reports/ + + - name: Create issue from audit + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const path = require('path'); + const product = '${{ github.event.inputs.product }}'; + const version = '${{ github.event.inputs.version }}'; + + let auditReports = []; + let hasIssues = false; + + // Check for CLI audit report + const cliAuditPath = `audit-reports/cli-audit-release-${product}-${version}`; + if (fs.existsSync(cliAuditPath)) { + const files = fs.readdirSync(cliAuditPath); + const cliAuditFile = files.find(f => f.includes('documentation-audit')); + if (cliAuditFile) { + const report = fs.readFileSync(path.join(cliAuditPath, cliAuditFile), 'utf8'); + const hasMissingOptions = report.includes('⚠️ Missing from docs'); + const hasExtraOptions = report.includes('ℹ️ Documented but not in CLI'); + if (hasMissingOptions || hasExtraOptions) { + auditReports.push({ + type: 'CLI', + content: report + }); + hasIssues = true; + } + } + } + + // Check for distributed audit report + const distributedAuditPath = `audit-reports/distributed-audit-release-${product}-${version}`; + if (fs.existsSync(distributedAuditPath)) { + const files = fs.readdirSync(distributedAuditPath); + const distributedAuditFile = 
files.find(f => f.includes('release-audit')); + if (distributedAuditFile) { + const report = fs.readFileSync(path.join(distributedAuditPath, distributedAuditFile), 'utf8'); + // For now, always include distributed audit reports since they're placeholders + auditReports.push({ + type: 'Distributed Products', + content: report + }); + hasIssues = true; + } + } + + if (hasIssues && auditReports.length > 0) { + // Create comprehensive issue + const issueBody = [ + '## Release Documentation Audit Results', + '', + `The following documentation issues were found during the release of **${product} v${version}**:`, + '', + ...auditReports.map(report => [ + `### ${report.type} Audit`, + '', + report.content, + '' + ]).flat(), + '### Action Items:', + '- [ ] Review and update documentation for missing or outdated content', + '- [ ] Verify all examples work with the new version', + '- [ ] Update any version-specific content', + '- [ ] Remove documentation for deprecated features', + '', + '---', + '*This issue was automatically generated during the release process.*' + ].join('\n'); + + await github.rest.issues.create({ + owner: context.repo.owner, + repo: context.repo.repo, + title: `Documentation Updates Needed - ${product} v${version}`, + body: issueBody, + labels: ['documentation', 'release', product, 'audit'] + }); + + console.log('Created issue for documentation updates'); + } else { + console.log('No documentation issues found - skipping issue creation'); + } + + influxdb3-monolith-release-summary: + name: Release Summary + needs: [generate-release-notes-core-enterprise, audit-cli-documentation, create-documentation-pr, create-audit-issue] + runs-on: ubuntu-latest + if: always() + + steps: + - name: Generate summary + run: | + echo "# Release Documentation Summary" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "## Release Information" >> $GITHUB_STEP_SUMMARY + echo "- **Product**: ${{ github.event.inputs.product }}" >> $GITHUB_STEP_SUMMARY + echo "- 
**Version**: ${{ github.event.inputs.version }}" >> $GITHUB_STEP_SUMMARY + echo "- **Previous Version**: ${{ github.event.inputs.previous_version }}" >> $GITHUB_STEP_SUMMARY + echo "- **Dry Run**: ${{ github.event.inputs.dry_run }}" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + + echo "## Workflow Results" >> $GITHUB_STEP_SUMMARY + echo "| Step | Status |" >> $GITHUB_STEP_SUMMARY + echo "|------|--------|" >> $GITHUB_STEP_SUMMARY + echo "| Generate Release Notes (Core/Enterprise) | ${{ needs.generate-release-notes-core-enterprise.result }} |" >> $GITHUB_STEP_SUMMARY + echo "| CLI Documentation Audit | ${{ needs.audit-cli-documentation.result }} |" >> $GITHUB_STEP_SUMMARY + echo "| Create Documentation PR | ${{ needs.create-documentation-pr.result }} |" >> $GITHUB_STEP_SUMMARY + echo "| Create Audit Issue | ${{ needs.create-audit-issue.result }} |" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + + if [ "${{ github.event.inputs.dry_run }}" == "true" ]; then + echo "**Note**: This was a dry run. No PRs or issues were created." >> $GITHUB_STEP_SUMMARY + fi \ No newline at end of file diff --git a/.github/workflows/pr-link-check.yml b/.github/workflows/pr-link-check.yml new file mode 100644 index 000000000..5f5dacca8 --- /dev/null +++ b/.github/workflows/pr-link-check.yml @@ -0,0 +1,241 @@ +name: Link Check PR Changes + +on: + pull_request: + paths: + - 'content/**/*.md' + - 'data/**/*.yml' + - 'layouts/**/*.html' + types: [opened, synchronize, reopened] + +jobs: + link-check: + name: Check links in affected files + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Detect content changes + id: detect + run: | + echo "🔍 Detecting changes between ${{ github.base_ref }} and ${{ github.sha }}" + + # For PRs, use the GitHub Files API to get changed files + if [[ "${{ github.event_name }}" == "pull_request" ]]; then + echo "Using GitHub API to detect PR changes..." 
+ curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.number }}/files" \ + | jq -r '.[].filename' > all_changed_files.txt + else + echo "Using git diff to detect changes..." + git diff --name-only ${{ github.event.before }}..${{ github.sha }} > all_changed_files.txt + fi + + # Filter for content markdown files + CHANGED_FILES=$(grep '^content/.*\.md$' all_changed_files.txt || true) + + echo "📁 All changed files:" + cat all_changed_files.txt + echo "" + echo "📝 Content markdown files:" + echo "$CHANGED_FILES" + + if [[ -n "$CHANGED_FILES" ]]; then + echo "✅ Found $(echo "$CHANGED_FILES" | wc -l) changed content file(s)" + echo "has-changes=true" >> $GITHUB_OUTPUT + echo "changed-content<> $GITHUB_OUTPUT + echo "$CHANGED_FILES" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + + # Check if any shared content files were modified + SHARED_CHANGES=$(echo "$CHANGED_FILES" | grep '^content/shared/' || true) + if [[ -n "$SHARED_CHANGES" ]]; then + echo "has-shared-content=true" >> $GITHUB_OUTPUT + echo "🔄 Detected shared content changes: $SHARED_CHANGES" + else + echo "has-shared-content=false" >> $GITHUB_OUTPUT + fi + else + echo "❌ No content changes detected" + echo "has-changes=false" >> $GITHUB_OUTPUT + echo "has-shared-content=false" >> $GITHUB_OUTPUT + fi + + - name: Skip if no content changes + if: steps.detect.outputs.has-changes == 'false' + run: | + echo "No content changes detected in this PR - skipping link check" + echo "✅ **No content changes detected** - link check skipped" >> $GITHUB_STEP_SUMMARY + + - name: Setup Node.js + if: steps.detect.outputs.has-changes == 'true' + uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'yarn' + + - name: Install dependencies + if: steps.detect.outputs.has-changes == 'true' + run: yarn install --frozen-lockfile + + - name: Build Hugo site + if: steps.detect.outputs.has-changes == 'true' + run: npx hugo --minify + + 
- name: Download link-checker binary + if: steps.detect.outputs.has-changes == 'true' + run: | + echo "Downloading link-checker binary from docs-v2 releases..." + + # Download from docs-v2's own releases (always accessible) + curl -L -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ + -o link-checker-info.json \ + "https://api.github.com/repos/influxdata/docs-v2/releases/tags/link-checker-v1.2.2" + + # Extract download URL for linux binary + DOWNLOAD_URL=$(jq -r '.assets[] | select(.name | test("link-checker.*linux")) | .url' link-checker-info.json) + + if [[ "$DOWNLOAD_URL" == "null" || -z "$DOWNLOAD_URL" ]]; then + echo "❌ No linux binary found in release" + echo "Available assets:" + jq -r '.assets[].name' link-checker-info.json + exit 1 + fi + + echo "📥 Downloading: $DOWNLOAD_URL" + curl -L -H "Accept: application/octet-stream" \ + -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ + -o link-checker "$DOWNLOAD_URL" + + chmod +x link-checker + ./link-checker --version + + - name: Verify link checker config exists + if: steps.detect.outputs.has-changes == 'true' + run: | + if [[ ! -f .ci/link-checker/production.lycherc.toml ]]; then + echo "❌ Configuration file .ci/link-checker/production.lycherc.toml not found" + echo "Please copy production.lycherc.toml from docs-tooling/link-checker/" + exit 1 + fi + echo "✅ Using configuration: .ci/link-checker/production.lycherc.toml" + + - name: Map changed content to public files + if: steps.detect.outputs.has-changes == 'true' + id: mapping + run: | + echo "Mapping changed content files to public HTML files..." 
+ + # Create temporary file with changed content files + echo "${{ steps.detect.outputs.changed-content }}" > changed-files.txt + + # Map content files to public files + PUBLIC_FILES=$(cat changed-files.txt | xargs -r ./link-checker map --existing-only) + + if [[ -n "$PUBLIC_FILES" ]]; then + echo "Found affected public files:" + echo "$PUBLIC_FILES" + echo "public-files<> $GITHUB_OUTPUT + echo "$PUBLIC_FILES" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + + # Count files for summary + FILE_COUNT=$(echo "$PUBLIC_FILES" | wc -l) + echo "file-count=$FILE_COUNT" >> $GITHUB_OUTPUT + else + echo "No public files found to check" + echo "public-files=" >> $GITHUB_OUTPUT + echo "file-count=0" >> $GITHUB_OUTPUT + fi + + - name: Run link checker + if: steps.detect.outputs.has-changes == 'true' && steps.mapping.outputs.public-files != '' + id: link-check + run: | + echo "Checking links in ${{ steps.mapping.outputs.file-count }} affected files..." + + # Create temporary file with public files list + echo "${{ steps.mapping.outputs.public-files }}" > public-files.txt + + # Run link checker with detailed JSON output + set +e # Don't fail immediately on error + + cat public-files.txt | xargs -r ./link-checker check \ + --config .ci/link-checker/production.lycherc.toml \ + --format json \ + --output link-check-results.json + + EXIT_CODE=$? 
+ + if [[ -f link-check-results.json ]]; then + # Parse results + BROKEN_COUNT=$(jq -r '.summary.broken_count // 0' link-check-results.json) + TOTAL_COUNT=$(jq -r '.summary.total_checked // 0' link-check-results.json) + SUCCESS_RATE=$(jq -r '.summary.success_rate // 0' link-check-results.json) + + echo "broken-count=$BROKEN_COUNT" >> $GITHUB_OUTPUT + echo "total-count=$TOTAL_COUNT" >> $GITHUB_OUTPUT + echo "success-rate=$SUCCESS_RATE" >> $GITHUB_OUTPUT + + if [[ $BROKEN_COUNT -gt 0 ]]; then + echo "❌ Found $BROKEN_COUNT broken links out of $TOTAL_COUNT total links" + echo "check-result=failed" >> $GITHUB_OUTPUT + else + echo "✅ All $TOTAL_COUNT links are valid" + echo "check-result=passed" >> $GITHUB_OUTPUT + fi + else + echo "❌ Link check failed to generate results" + echo "check-result=error" >> $GITHUB_OUTPUT + fi + + exit $EXIT_CODE + + - name: Process and report results + if: always() && steps.detect.outputs.has-changes == 'true' && steps.mapping.outputs.public-files != '' + run: | + if [[ -f link-check-results.json ]]; then + # Create detailed error annotations for broken links + if [[ "${{ steps.link-check.outputs.check-result }}" == "failed" ]]; then + echo "Creating error annotations for broken links..." + + jq -r '.broken_links[]? 
| + "::error file=\(.file // "unknown"),line=\(.line // 1)::Broken link: \(.url) - \(.error // "Unknown error")"' \ + link-check-results.json || true + fi + + # Generate summary comment + cat >> $GITHUB_STEP_SUMMARY << 'EOF' + ## Link Check Results + + **Files Checked:** ${{ steps.mapping.outputs.file-count }} + **Total Links:** ${{ steps.link-check.outputs.total-count }} + **Broken Links:** ${{ steps.link-check.outputs.broken-count }} + **Success Rate:** ${{ steps.link-check.outputs.success-rate }}% + + EOF + + if [[ "${{ steps.link-check.outputs.check-result }}" == "failed" ]]; then + echo "❌ **Link check failed** - see annotations above for details" >> $GITHUB_STEP_SUMMARY + else + echo "✅ **All links are valid**" >> $GITHUB_STEP_SUMMARY + fi + else + echo "⚠️ **Link check could not complete** - no results file generated" >> $GITHUB_STEP_SUMMARY + fi + + - name: Upload detailed results + if: always() && steps.detect.outputs.has-changes == 'true' && steps.mapping.outputs.public-files != '' + uses: actions/upload-artifact@v4 + with: + name: link-check-results + path: | + link-check-results.json + changed-files.txt + public-files.txt + retention-days: 30 \ No newline at end of file diff --git a/.github/workflows/prepare-release.yml b/.github/workflows/prepare-release.yml new file mode 100644 index 000000000..edacd683b --- /dev/null +++ b/.github/workflows/prepare-release.yml @@ -0,0 +1,107 @@ +name: Prepare Documentation Release + +on: + workflow_dispatch: + inputs: + product: + description: 'Product to release' + required: true + type: choice + options: + - core + - enterprise + - cloud-serverless + - cloud-dedicated + version: + description: 'Version number (e.g., 3.2.1)' + required: true + release_type: + description: 'Release type' + required: true + type: choice + options: + - major + - minor + - patch + - hotfix + +jobs: + prepare-release: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Create release branch + run: | + git checkout 
-b docs-release-v${{ inputs.version }} + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: '18' + cache: 'yarn' + + - name: Install dependencies + run: yarn install --frozen-lockfile + + - name: Generate release notes + run: | + # Note: This workflow assumes release notes are generated manually or from tagged releases + # For Core/Enterprise products, the script needs repository access which would require + # checking out the influxdb and influxdb_pro repositories first + + echo "Warning: Release notes generation requires access to InfluxDB source repositories" + echo "For now, creating a placeholder file that should be replaced with actual release notes" + + # Create output directory + mkdir -p helper-scripts/output/release-notes + + # Create placeholder release notes file + cat > helper-scripts/output/release-notes/release-notes-v${{ inputs.version }}.md << EOF + ## v${{ inputs.version }} {date="$(date +'%Y-%m-%d')"} + + ### Features + + - TODO: Add features for ${{ inputs.product }} v${{ inputs.version }} + + ### Bug Fixes + + - TODO: Add bug fixes for ${{ inputs.product }} v${{ inputs.version }} + + + EOF + + - name: Update product versions + run: | + # Script to update data/products.yml + ./helper-scripts/common/update-product-version.sh \ + --product ${{ inputs.product }} \ + --version ${{ inputs.version }} + + - name: Create release checklist issue + uses: actions/github-script@v7 + with: + script: | + const checklist = require('./.github/scripts/release-checklist.js'); + await checklist.createIssue({ + github, + context, + product: '${{ inputs.product }}', + version: '${{ inputs.version }}', + releaseType: '${{ inputs.release_type }}' + }) diff --git a/.github/workflows/sync-link-checker-binary.yml b/.github/workflows/sync-link-checker-binary.yml new file mode 100644 index 000000000..b0ac46c68 --- /dev/null +++ b/.github/workflows/sync-link-checker-binary.yml @@ -0,0 +1,68 @@ +name: Sync Link Checker Binary from docs-tooling + 
+on: + workflow_dispatch: + inputs: + version: + description: 'Link checker version to sync (e.g., v1.2.2)' + required: true + type: string + +jobs: + sync-binary: + name: Sync link-checker binary from docs-tooling + runs-on: ubuntu-latest + + steps: + - name: Download binary from docs-tooling release + run: | + echo "Downloading link-checker ${{ inputs.version }} from docs-tooling..." + + # Download binary from docs-tooling release + curl -L -H "Accept: application/octet-stream" \ + -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ + -o link-checker-linux-x86_64 \ + "https://github.com/influxdata/docs-tooling/releases/download/link-checker-${{ inputs.version }}/link-checker-linux-x86_64" + + # Download checksums + curl -L -H "Accept: application/octet-stream" \ + -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ + -o checksums.txt \ + "https://github.com/influxdata/docs-tooling/releases/download/link-checker-${{ inputs.version }}/checksums.txt" + + # Verify downloads + ls -la link-checker-linux-x86_64 checksums.txt + + - name: Create docs-v2 release + run: | + echo "Creating link-checker-${{ inputs.version }} release in docs-v2..." + + gh release create \ + --title "Link Checker Binary ${{ inputs.version }}" \ + --notes "Link validation tooling binary for docs-v2 GitHub Actions workflows. + + This binary is distributed from the docs-tooling repository release link-checker-${{ inputs.version }}. + + ### Usage in GitHub Actions + + The binary is automatically downloaded by docs-v2 workflows for link validation. 
+ + ### Manual Usage + + \`\`\`bash + # Download and make executable + curl -L -o link-checker https://github.com/influxdata/docs-v2/releases/download/link-checker-${{ inputs.version }}/link-checker-linux-x86_64 + chmod +x link-checker + + # Verify installation + ./link-checker --version + \`\`\` + + ### Changes in ${{ inputs.version }} + + See the [docs-tooling release](https://github.com/influxdata/docs-tooling/releases/tag/link-checker-${{ inputs.version }}) for detailed changelog." \ + link-checker-${{ inputs.version }} \ + link-checker-linux-x86_64 \ + checksums.txt + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/trigger-on-release.yml b/.github/workflows/trigger-on-release.yml new file mode 100644 index 000000000..cbf4419c2 --- /dev/null +++ b/.github/workflows/trigger-on-release.yml @@ -0,0 +1,61 @@ +name: Trigger Documentation Update on Release + +on: + # Can be triggered by external workflows using repository_dispatch + repository_dispatch: + types: [influxdb3-release] + + # Can also be triggered via GitHub API + # Example: + # curl -X POST \ + # -H "Authorization: token $GITHUB_TOKEN" \ + # -H "Accept: application/vnd.github.v3+json" \ + # https://api.github.com/repos/influxdata/docs-v2/dispatches \ + # -d '{"event_type":"influxdb3-release","client_payload":{"product":"core","version":"3.0.0","previous_version":"2.9.0"}}' + +jobs: + trigger-release-workflow: + name: Trigger Release Documentation + runs-on: ubuntu-latest + + steps: + - name: Validate payload + run: | + if [ -z "${{ github.event.client_payload.product }}" ]; then + echo "Error: product is required in client_payload" + exit 1 + fi + + if [ -z "${{ github.event.client_payload.version }}" ]; then + echo "Error: version is required in client_payload" + exit 1 + fi + + if [ -z "${{ github.event.client_payload.previous_version }}" ]; then + echo "Error: previous_version is required in client_payload" + exit 1 + fi + + echo "Received 
release notification:" + echo "Product: ${{ github.event.client_payload.product }}" + echo "Version: ${{ github.event.client_payload.version }}" + echo "Previous Version: ${{ github.event.client_payload.previous_version }}" + + - name: Trigger release documentation workflow + uses: actions/github-script@v7 + with: + script: | + await github.rest.actions.createWorkflowDispatch({ + owner: context.repo.owner, + repo: context.repo.repo, + workflow_id: 'influxdb3-release.yml', + ref: 'master', + inputs: { + product: '${{ github.event.client_payload.product }}', + version: '${{ github.event.client_payload.version }}', + previous_version: '${{ github.event.client_payload.previous_version }}', + dry_run: '${{ github.event.client_payload.dry_run || 'false' }}' + } + }); + + console.log('Successfully triggered release documentation workflow'); \ No newline at end of file diff --git a/.gitignore b/.gitignore index 7faf70c94..32765da72 100644 --- a/.gitignore +++ b/.gitignore @@ -3,16 +3,38 @@ public .*.swp node_modules +package-lock.json .config* **/.env* *.log /resources .hugo_build.lock + +# Content generation /content/influxdb*/**/api/**/*.html !api-docs/**/.config.yml /api-docs/redoc-static.html* +/helper-scripts/output/* +/telegraf-build +!telegraf-build/templates +!telegraf-build/scripts +!telegraf-build/README.md + +# CI/CD tool files +/cypress/downloads/* +/cypress/screenshots/* +/cypress/videos/* +.lycheecache +test-results.xml +/influxdb3cli-build-scripts/content +tmp + +# IDE files .vscode/* +!.vscode/launch.json .idea **/config.toml -package-lock.json -tmp \ No newline at end of file + +# User context files for AI assistant tools +.context/* +!.context/README.md diff --git a/.husky/_/pre-push.old b/.husky/_/pre-push.old new file mode 100755 index 000000000..a0d96ef93 --- /dev/null +++ b/.husky/_/pre-push.old @@ -0,0 +1,57 @@ +#!/bin/sh + +if [ "$LEFTHOOK_VERBOSE" = "1" -o "$LEFTHOOK_VERBOSE" = "true" ]; then + set -x +fi + +if [ "$LEFTHOOK" = "0" ]; then + exit 0 
+fi + +call_lefthook() +{ + if test -n "$LEFTHOOK_BIN" + then + "$LEFTHOOK_BIN" "$@" + elif lefthook -h >/dev/null 2>&1 + then + lefthook "$@" + else + dir="$(git rev-parse --show-toplevel)" + osArch=$(uname | tr '[:upper:]' '[:lower:]') + cpuArch=$(uname -m | sed 's/aarch64/arm64/;s/x86_64/x64/') + if test -f "$dir/node_modules/lefthook-${osArch}-${cpuArch}/bin/lefthook" + then + "$dir/node_modules/lefthook-${osArch}-${cpuArch}/bin/lefthook" "$@" + elif test -f "$dir/node_modules/@evilmartians/lefthook/bin/lefthook-${osArch}-${cpuArch}/lefthook" + then + "$dir/node_modules/@evilmartians/lefthook/bin/lefthook-${osArch}-${cpuArch}/lefthook" "$@" + elif test -f "$dir/node_modules/@evilmartians/lefthook-installer/bin/lefthook" + then + "$dir/node_modules/@evilmartians/lefthook-installer/bin/lefthook" "$@" + elif test -f "$dir/node_modules/lefthook/bin/index.js" + then + "$dir/node_modules/lefthook/bin/index.js" "$@" + + elif bundle exec lefthook -h >/dev/null 2>&1 + then + bundle exec lefthook "$@" + elif yarn lefthook -h >/dev/null 2>&1 + then + yarn lefthook "$@" + elif pnpm lefthook -h >/dev/null 2>&1 + then + pnpm lefthook "$@" + elif swift package plugin lefthook >/dev/null 2>&1 + then + swift package --disable-sandbox plugin lefthook "$@" + elif command -v mint >/dev/null 2>&1 + then + mint run csjones/lefthook-plugin "$@" + else + echo "Can't find lefthook in PATH" + fi + fi +} + +call_lefthook run "pre-push" "$@" diff --git a/.husky/_/serve b/.husky/_/serve new file mode 100755 index 000000000..df25a7d09 --- /dev/null +++ b/.husky/_/serve @@ -0,0 +1,57 @@ +#!/bin/sh + +if [ "$LEFTHOOK_VERBOSE" = "1" -o "$LEFTHOOK_VERBOSE" = "true" ]; then + set -x +fi + +if [ "$LEFTHOOK" = "0" ]; then + exit 0 +fi + +call_lefthook() +{ + if test -n "$LEFTHOOK_BIN" + then + "$LEFTHOOK_BIN" "$@" + elif lefthook -h >/dev/null 2>&1 + then + lefthook "$@" + else + dir="$(git rev-parse --show-toplevel)" + osArch=$(uname | tr '[:upper:]' '[:lower:]') + cpuArch=$(uname -m | sed 
's/aarch64/arm64/;s/x86_64/x64/') + if test -f "$dir/node_modules/lefthook-${osArch}-${cpuArch}/bin/lefthook" + then + "$dir/node_modules/lefthook-${osArch}-${cpuArch}/bin/lefthook" "$@" + elif test -f "$dir/node_modules/@evilmartians/lefthook/bin/lefthook-${osArch}-${cpuArch}/lefthook" + then + "$dir/node_modules/@evilmartians/lefthook/bin/lefthook-${osArch}-${cpuArch}/lefthook" "$@" + elif test -f "$dir/node_modules/@evilmartians/lefthook-installer/bin/lefthook" + then + "$dir/node_modules/@evilmartians/lefthook-installer/bin/lefthook" "$@" + elif test -f "$dir/node_modules/lefthook/bin/index.js" + then + "$dir/node_modules/lefthook/bin/index.js" "$@" + + elif bundle exec lefthook -h >/dev/null 2>&1 + then + bundle exec lefthook "$@" + elif yarn lefthook -h >/dev/null 2>&1 + then + yarn lefthook "$@" + elif pnpm lefthook -h >/dev/null 2>&1 + then + pnpm lefthook "$@" + elif swift package plugin lefthook >/dev/null 2>&1 + then + swift package --disable-sandbox plugin lefthook "$@" + elif command -v mint >/dev/null 2>&1 + then + mint run csjones/lefthook-plugin "$@" + else + echo "Can't find lefthook in PATH" + fi + fi +} + +call_lefthook run "serve" "$@" diff --git a/.nvmrc b/.nvmrc new file mode 100644 index 000000000..945c17819 --- /dev/null +++ b/.nvmrc @@ -0,0 +1 @@ +v23.10.0 diff --git a/.prettierignore b/.prettierignore index 004c23fb4..b7974b235 100644 --- a/.prettierignore +++ b/.prettierignore @@ -3,3 +3,4 @@ **/.svn **/.hg **/node_modules +assets/jsconfig.json \ No newline at end of file diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 000000000..9aacd8f89 --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,47 @@ +{ + "version": "0.2.0", + "configurations": [ + { + "name": "Debug JS (debug-helpers)", + "type": "chrome", + "request": "launch", + "url": "http://localhost:1313", + "webRoot": "${workspaceFolder}", + "skipFiles": [ + "/**" + ], + "sourceMaps": false, + "trace": true, + "smartStep": false + }, + { + "name": 
"Debug JS (source maps)", + "type": "chrome", + "request": "launch", + "url": "http://localhost:1313", + "webRoot": "${workspaceFolder}", + "sourceMaps": true, + "sourceMapPathOverrides": { + "*": "${webRoot}/assets/js/*", + "main.js": "${webRoot}/assets/js/main.js", + "page-context.js": "${webRoot}/assets/js/page-context.js", + "ask-ai-trigger.js": "${webRoot}/assets/js/ask-ai-trigger.js", + "ask-ai.js": "${webRoot}/assets/js/ask-ai.js", + "utils/*": "${webRoot}/assets/js/utils/*", + "services/*": "${webRoot}/assets/js/services/*" + }, + "skipFiles": [ + "/**", + "node_modules/**", + "chrome-extension://**" + ], + "trace": true, + "smartStep": true, + "disableNetworkCache": true, + "userDataDir": "${workspaceFolder}/.vscode/chrome-user-data", + "runtimeArgs": [ + "--disable-features=VizDisplayCompositor" + ] + }, + ] +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index 002907174..c827452b9 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,12 +1,12 @@ { - "commentAnchors.tags.anchors": - { "SOURCE": { - "scope": "file", - "behavior": "link", - "iconColor": "#FF0000", - "highlightColor": "#FF0000", - "style": "bold" - }}, + "commentAnchors.tags.anchors": + { "SOURCE": { + "scope": "file", + "behavior": "link", + "iconColor": "#FF0000", + "highlightColor": "#FF0000", + "style": "bold" + }}, "commentAnchors.workspace.matchFiles": "**/*.{md,ini,json,yaml,yml}", "commentAnchors.workspace.enabled": true, "yaml.schemas": { @@ -14,11 +14,7 @@ }, "vale.valeCLI.config": "${workspaceFolder}/.vale.ini", "vale.valeCLI.minAlertLevel": "warning", - "github.copilot.chat.codeGeneration.useInstructionFiles": true, - "github.copilot.chat.codeGeneration.instructionFiles": [ - { - "path": "${workspaceFolder}/.github/copilot-instructions.md", - "enabled": true - } - ], + "cSpell.words": [ + "influxctl" + ] } \ No newline at end of file diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 000000000..d606868cc --- 
/dev/null +++ b/CLAUDE.md @@ -0,0 +1,38 @@ +# Instructions for InfluxData Documentation + +## Purpose and scope + +Claude should help document InfluxData products by creating clear, accurate technical content with proper code examples, frontmatter, and formatting. + +## Project overview + +See @README.md + +## Available NPM commands + +@package.json + +## Instructions for contributing + +See @.github/copilot-instructions.md for style guidelines and +product-specific documentation paths and URLs managed in this project. + +See @.github/instructions/contributing.instructions.md for essential InfluxData +documentation contributing guidelines, such as style and +formatting, and commonly used shortcodes. + +See @TESTING.md for comprehensive testing information, including code block +testing, link validation, style linting, and advanced testing procedures. + +See @.github/instructions/shortcodes-reference.instructions.md for detailed +information about shortcodes used in this project. + +See @.github/instructions/frontmatter-reference.instructions.md for detailed +information about frontmatter used in this project. + +See @.github/instructions/influxdb3-code-placeholders.instructions.md for using +placeholders in code samples and CLI commands. + +See @api-docs/README.md for information about the API reference documentation, how to +generate it, and how to contribute to it. + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ece943143..096ded745 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,24 @@ # Contributing to InfluxData Documentation -## Sign the InfluxData CLA + +## Quick Start + +Ready to contribute? Here's the essential workflow: + +1. [Sign the InfluxData CLA](#sign-the-influxdata-cla) (for substantial changes) +2. [Fork and clone](#fork-and-clone-influxdata-documentation-repository) this repository +3. [Install dependencies](#development-environment-setup) (Node.js, Yarn, Docker) +4. 
Make your changes following [style guidelines](#making-changes) +5. [Test your changes](TESTING.md) (pre-commit and pre-push hooks run automatically) +6. [Submit a pull request](#submission-process) + +For detailed setup and reference information, see the sections below. + +--- + +## Legal & Getting Started + +### Sign the InfluxData CLA The InfluxData Contributor License Agreement (CLA) is part of the legal framework for the open source ecosystem that protects both you and InfluxData. @@ -13,24 +31,32 @@ _**Note:** Typo and broken link fixes are greatly appreciated and do not require _If you're new to contributing or you're looking for an easy update, see [`docs-v2` good-first-issues](https://github.com/influxdata/docs-v2/issues?q=is%3Aissue+is%3Aopen+label%3Agood-first-issue)._ -## Make suggested updates - ### Fork and clone InfluxData Documentation Repository [Fork this repository](https://help.github.com/articles/fork-a-repo/) and [clone it](https://help.github.com/articles/cloning-a-repository/) to your local machine. -## Install project dependencies +--- + + +## Development Environment Setup + +### Prerequisites docs-v2 automatically runs format (Markdown, JS, and CSS) linting and code block tests for staged files that you try to commit. -For the linting and tests to run, you need to install Docker and Node.js -dependencies. +For the linting and tests to run, you need to install: + +- **Node.js and Yarn**: For managing dependencies and running build scripts +- **Docker**: For running Vale linter and code block tests +- **VS Code extensions** (optional): For enhanced editing experience \_**Note:** -We strongly recommend running linting and tests, but you can skip them -(and avoid installing dependencies) -by including the `--no-verify` flag with your commit--for example, enter the following command in your terminal: +The git pre-commit and pre-push hooks are configured to run linting and tests automatically +when you commit or push changes. 
+We strongly recommend letting them run, but you can skip them +(and avoid installing related dependencies) +by including the `--no-verify` flag with your commit--for example: ```sh git commit -m "" --no-verify @@ -43,7 +69,6 @@ To install dependencies listed in package.json: 1. Install [Node.js](https://nodejs.org/en) for your system. 2. Install [Yarn](https://yarnpkg.com/getting-started/install) for your system. 3. Run `yarn` to install dependencies (including Hugo). -4. Install the Yarn package manager and run `yarn` to install project dependencies. `package.json` contains dependencies used in `/assets/js` JavaScript code and dev dependencies used in pre-commit hooks for linting, syntax-checking, and testing. @@ -51,7 +76,7 @@ dev dependencies used in pre-commit hooks for linting, syntax-checking, and test Dev dependencies include: - [Lefthook](https://github.com/evilmartians/lefthook): configures and -manages pre-commit hooks for linting and testing Markdown content. +manages git pre-commit and pre-push hooks for linting and testing Markdown content. - [prettier](https://prettier.io/docs/en/): formats code, including Markdown, according to style rules for consistency - [Cypress]: e2e testing for UI elements and URLs in content @@ -87,217 +112,21 @@ docs-v2 contains a `./.vscode/settings.json` that configures the following exten - Vale: shows linter errors and suggestions in the editor. - YAML Schemas: validates frontmatter attributes. -### Make your changes +--- -Make your suggested changes being sure to follow the [style and formatting guidelines](#style--formatting) outline below. + +## Making Changes -## Lint and test your changes -### Automatic pre-commit checks +### Style Guidelines -docs-v2 uses Lefthook to manage Git hooks, such as pre-commit hooks that lint Markdown and test code blocks. 
-When you try to commit changes (`git commit`), Git runs -the commands configured in `lefthook.yml` which pass your **staged** files to Vale, -Prettier, Cypress (for UI tests and link-checking), and Pytest (for testing Python and shell code in code blocks). - -### Skip pre-commit hooks - -**We strongly recommend running linting and tests**, but you can skip them -(and avoid installing dependencies) -by including the `LEFTHOOK=0` environment variable or the `--no-verify` flag with -your commit--for example: - -```sh -git commit -m "" --no-verify -``` - -```sh -LEFTHOOK=0 git commit -``` - -### Set up test scripts and credentials - -Tests for code blocks require your InfluxDB credentials and other typical -InfluxDB configuration. - -To set up your docs-v2 instance to run tests locally, do the following: - -1. **Set executable permissions on test scripts** in `./test/src`: - - ```sh - chmod +x ./test/src/*.sh - ``` - -2. **Create credentials for tests**: - - - Create databases, buckets, and tokens for the product(s) you're testing. - - If you don't have access to a Clustered instance, you can use your -Cloud Dedicated instance for testing in most cases. To avoid conflicts when - running tests, create separate Cloud Dedicated and Clustered databases. - -1. **Create .env.test**: Copy the `./test/env.test.example` file into each - product directory to test and rename the file as `.env.test`--for example: - - ```sh - ./content/influxdb/cloud-dedicated/.env.test - ``` - -2. Inside each product's `.env.test` file, assign your InfluxDB credentials to - environment variables: - - - Include the usual `INFLUX_` environment variables - - In - `cloud-dedicated/.env.test` and `clustered/.env.test` files, also define the - following variables: - - - `ACCOUNT_ID`, `CLUSTER_ID`: You can find these values in your `influxctl` - `config.toml` configuration file. 
- - `MANAGEMENT_TOKEN`: Use the `influxctl management create` command to generate - a long-lived management token to authenticate Management API requests - - See the substitution - patterns in `./test/src/prepare-content.sh` for the full list of variables you may need to define in your `.env.test` files. - -3. For influxctl commands to run in tests, move or copy your `config.toml` file - to the `./test` directory. - -> [!Warning] -> -> - The database you configure in `.env.test` and any written data may -be deleted during test runs. -> - Don't add your `.env.test` files to Git. To prevent accidentally adding credentials to the docs-v2 repo, -Git is configured to ignore `.env*` files. Consider backing them up on your local machine in case of accidental deletion. - -#### Test shell and python code blocks - -[pytest-codeblocks](https://github.com/nschloe/pytest-codeblocks/tree/main) extracts code from python and shell Markdown code blocks and executes assertions for the code. -If you don't assert a value (using a Python `assert` statement), `--codeblocks` considers a non-zero exit code to be a failure. - -**Note**: `pytest --codeblocks` uses Python's `subprocess.run()` to execute shell code. - -You can use this to test CLI and interpreter commands, regardless of programming -language, as long as they return standard exit codes. - -To make the documented output of a code block testable, precede it with the -`` tag and **omit the code block language -descriptor**--for example, in your Markdown file: - -##### Example markdown - -```python -print("Hello, world!") -``` - - - -The next code block is treated as an assertion. -If successful, the output is the following: - -``` -Hello, world! -``` - -For commands, such as `influxctl` CLI commands, that require launching an -OAuth URL in a browser, wrap the command in a subshell and redirect the output -to `/shared/urls.txt` in the container--for example: - -```sh -# Test the preceding command outside of the code block. 
-# influxctl authentication requires TTY interaction-- -# output the auth URL to a file that the host can open. -script -c "influxctl user list " \ - /dev/null > /shared/urls.txt -``` - -You probably don't want to display this syntax in the docs, which unfortunately -means you'd need to include the test block separately from the displayed code -block. -To hide it from users, wrap the code block inside an HTML comment. -pytest-codeblocks will still collect and run the code block. - -##### Mark tests to skip - -pytest-codeblocks has features for skipping tests and marking blocks as failed. -To learn more, see the pytest-codeblocks README and tests. - -#### Troubleshoot tests - -### Pytest collected 0 items - -Potential reasons: - -- See the test discovery options in `pytest.ini`. -- For Python code blocks, use the following delimiter: - - ```python - # Codeblocks runs this block. - ``` - - `pytest --codeblocks` ignores code blocks that use the following: - - ```py - # Codeblocks ignores this block. - ``` - -### Vale style linting - -docs-v2 includes Vale writing style linter configurations to enforce documentation writing style rules, guidelines, branding, and vocabulary terms. - -To run Vale, use the Vale extension for your editor or the included Docker configuration. -For example, the following command runs Vale in a container and lints `*.md` (Markdown) files in the path `./content/influxdb/cloud-dedicated/write-data/` using the specified configuration for `cloud-dedicated`: - -```sh -docker compose run -T vale --config=content/influxdb/cloud-dedicated/.vale.ini --minAlertLevel=error content/influxdb/cloud-dedicated/write-data/**/*.md -``` - -The output contains error-level style alerts for the Markdown content. - -**Note**: We strongly recommend running Vale, but it's not included in the -docs-v2 pre-commit hooks](#automatic-pre-commit-checks) for now. -You can include it in your own Git hooks. 
- -If a file contains style, spelling, or punctuation problems, -the Vale linter can raise one of the following alert levels: - -- **Error**: - - Problems that can cause content to render incorrectly - - Violations of branding guidelines or trademark guidelines - - Rejected vocabulary terms -- **Warning**: General style guide rules and best practices -- **Suggestion**: Style preferences that may require refactoring or updates to an exceptions list - -### Integrate Vale with your editor - -To integrate Vale with VSCode: - -1. Install the [Vale VSCode](https://marketplace.visualstudio.com/items?itemName=ChrisChinchilla.vale-vscode) extension. -2. In the extension settings, set the `Vale:Vale CLI:Path` value to the path of your Vale binary (`${workspaceFolder}/node_modules/.bin/vale` for Yarn-installed Vale). - -To use with an editor other than VSCode, see the [Vale integration guide](https://vale.sh/docs/integrations/guide/). - -### Configure style rules - -`/.ci/vale/styles/` contains configuration files for the custom `InfluxDataDocs` style. - -The easiest way to add accepted or rejected spellings is to enter your terms (or regular expression patterns) into the Vocabulary files at `.ci/vale/styles/config/vocabularies`. - -To add accepted/rejected terms for specific products, configure a style for the product and include a `Branding.yml` configuration. As an example, see `content/influxdb/cloud-dedicated/.vale.ini` and `.ci/vale/styles/Cloud-Dedicated/Branding.yml`. - -To learn more about configuration and rules, see [Vale configuration](https://vale.sh/docs/topics/config). - -### Submit a pull request - -Push your changes up to your forked repository, then [create a new pull request](https://help.github.com/articles/creating-a-pull-request/). - -## Style & Formatting - -### Markdown +#### Markdown Most docs-v2 documentation content uses [Markdown](https://en.wikipedia.org/wiki/Markdown). 
_Some parts of the documentation, such as `./api-docs`, contain Markdown within YAML and rely on additional tooling._ -### Semantic line feeds +#### Semantic line feeds Use [semantic line feeds](http://rhodesmill.org/brandon/2012/one-sentence-per-line/). Separating each sentence with a new line makes it easy to parse diffs with the human eye. @@ -319,19 +148,165 @@ You need a database that specializes in time series. +You need InfluxDB. ``` -### Article headings +#### Article headings Use only h2-h6 headings in markdown content. h1 headings act as the page title and are populated automatically from the `title` frontmatter. h2-h6 headings act as section headings. -### Image naming conventions +#### Image naming conventions Save images using the following naming format: `project/version-context-description.png`. For example, `influxdb/2-0-visualizations-line-graph.png` or `influxdb/2-0-tasks-add-new.png`. Specify a version other than 2.0 only if the image is specific to that version. -## Page frontmatter +### Essential Frontmatter Reference + +Every documentation page includes frontmatter which specifies information about the page. +Frontmatter populates variables in page templates and the site's navigation menu. + +**Essential fields:** + +```yaml +title: # Title of the page used in the page's h1 +description: # Page description displayed in search engine results +menu: + influxdb_2_0: + name: # Article name that only appears in the left nav + parent: # Specifies a parent group and nests navigation items +weight: # Determines sort order in both the nav tree and in article lists +``` + +For the complete frontmatter reference with all available fields, see [Complete Frontmatter Reference](#complete-frontmatter-reference). + + +### Common Shortcodes Reference + +#### Notes and warnings + +```md +> [!Note] +> Insert note markdown content here. + +> [!Warning] +> Insert warning markdown content here. + +> [!Caution] +> Insert caution markdown content here. 
+ +> [!Important] +> Insert important markdown content here. + +> [!Tip] +> Insert tip markdown content here. +``` + +#### Tabbed content + +```md +{{< tabs-wrapper >}} + +{{% tabs %}} +[Button text for tab 1](#) +[Button text for tab 2](#) +{{% /tabs %}} + +{{% tab-content %}} +Markdown content for tab 1. +{{% /tab-content %}} + +{{% tab-content %}} +Markdown content for tab 2. +{{% /tab-content %}} + +{{< /tabs-wrapper >}} +``` + +#### Required elements + +```md +{{< req >}} +{{< req type="key" >}} + +- {{< req "\*" >}} **This element is required** +- {{< req "\*" >}} **This element is also required** +- **This element is NOT required** +``` + +For the complete shortcodes reference with all available shortcodes, see [Complete Shortcodes Reference](#complete-shortcodes-reference). + +--- + +### InfluxDB API documentation + +docs-v2 includes the InfluxDB API reference documentation in the `/api-docs` directory. +To edit the API documentation, edit the YAML files in `/api-docs`. + +InfluxData uses [Redoc](https://github.com/Redocly/redoc) to generate the full +InfluxDB API documentation when documentation is deployed. +Redoc generates HTML documentation using the InfluxDB `swagger.yml`. +For more information about generating InfluxDB API documentation, see the +[API Documentation README](https://github.com/influxdata/docs-v2/tree/master/api-docs#readme). + +--- + +## Testing & Quality Assurance + +For comprehensive testing information, including code block testing, link validation, style linting, and advanced testing procedures, see **[TESTING.md](TESTING.md)**. + +### Quick Testing Reference + +```bash +# Test code blocks +yarn test:codeblocks:all + +# Test links +yarn test:links content/influxdb3/core/**/*.md + +# Run style linting +docker compose run -T vale content/**/*.md +``` + +Pre-commit hooks run automatically when you commit changes, testing your staged files with Vale, Prettier, Cypress, and Pytest. 
To skip hooks if needed: + +```sh +git commit -m "<commit message>" --no-verify +``` + +--- + + +## Submission Process + +### Commit Guidelines + +When creating commits, follow these guidelines: + +- Use a clear, descriptive commit message that explains the change +- Start with a type prefix: `fix()`, `feat()`, `style()`, `refactor()`, `test()`, `chore()` +- For product-specific changes, include the product in parentheses: `fix(enterprise)`, `fix(influxdb3)`, `fix(core)` +- Keep the first line under 72 characters +- Reference issues with "closes" or "fixes": `closes #123` or `closes influxdata/DAR#123` +- For multiple issues, use comma separation: `closes influxdata/DAR#517, closes influxdata/DAR#518` + +**Examples:** +``` +fix(enterprise): correct Docker environment variable name for license email +fix(influxdb3): correct Docker environment variable and compose examples for monolith +feat(telegraf): add new plugin documentation +chore(ci): update Vale configuration +``` + +### Submit a pull request + +Push your changes up to your forked repository, then [create a new pull request](https://help.github.com/articles/creating-a-pull-request/). + +--- + +## Reference Sections + + +### Complete Frontmatter Reference + +Every documentation page includes frontmatter which specifies information about the page. +Frontmatter populates variables in page templates and the site's navigation menu.
@@ -359,6 +334,9 @@ list_query_example:# Code examples included with article descriptions in childre # References to examples in data/query_examples canonical: # Path to canonical page, overrides auto-gen'd canonical URL v2: # Path to v2 equivalent page +alt_links: # Alternate pages in other products/versions for cross-product navigation + cloud-dedicated: /influxdb3/cloud-dedicated/path/to/page/ + core: /influxdb3/core/path/to/page/ prepend: # Prepend markdown content to an article (especially powerful with cascade) block: # (Optional) Wrap content in a block style (note, warn, cloud) content: # Content to prepend to article @@ -370,7 +348,7 @@ updated_in: # Product and version the referenced feature was updated in (display source: # Specify a file to pull page content from (typically in /content/shared/) ``` -### Title usage +#### Title usage ##### `title` @@ -402,7 +380,7 @@ Then 201-299 and so on. _**Note:** `_index.md` files should be weighted one level up from the other `.md` files in the same directory._ -### Related content +#### Related content Use the `related` frontmatter to include links to specific articles at the bottom of an article. @@ -421,7 +399,7 @@ related: - https://influxdata.com, This is an external link ``` -### Canonical URLs +#### Canonical URLs Search engines use canonical URLs to accurately rank pages with similar or identical content. The `canonical` HTML meta tag identifies which page should be used as the source of truth. 
@@ -441,7 +419,7 @@ canonical: /path/to/canonical/doc/ canonical: /{{< latest "influxdb" "v2" >}}/path/to/canonical/doc/ ``` -### v2 equivalent documentation +#### v2 equivalent documentation To display a notice on a 1.x page that links to an equivalent 2.0 page, add the following frontmatter to the 1.x page: @@ -450,7 +428,30 @@ add the following frontmatter to the 1.x page: v2: /influxdb/v2.0/get-started/ ``` -### Prepend and append content to a page +#### Alternative links for cross-product navigation + +Use the `alt_links` frontmatter to specify equivalent pages in other InfluxDB products, +for example, when a page exists at a different path in a different version or if +the feature doesn't exist in that product. +This enables the product switcher to navigate users to the corresponding page when they +switch between products. If a page doesn't exist in another product (for example, an +Enterprise-only feature), point to the nearest parent page if relevant. + +```yaml +alt_links: + cloud-dedicated: /influxdb3/cloud-dedicated/admin/tokens/create-token/ + cloud-serverless: /influxdb3/cloud-serverless/admin/tokens/create-token/ + core: /influxdb3/core/reference/cli/influxdb3/update/ # Points to parent if exact page doesn't exist +``` + +Supported product keys for InfluxDB 3: +- `core` +- `enterprise` +- `cloud-serverless` +- `cloud-dedicated` +- `clustered` + +#### Prepend and append content to a page Use the `prepend` and `append` frontmatter to add content to the top or bottom of a page. Each has the following fields: @@ -473,7 +474,7 @@ cascade: > This is just an example note block that gets appended to the article. ``` -### Cascade +#### Cascade To automatically apply frontmatter to a page and all of its children, use the [`cascade` frontmatter](https://gohugo.io/content-management/front-matter/#front-matter-cascade) @@ -490,7 +491,7 @@ cascade: those frontmatter keys. Frontmatter defined on the page overrides frontmatter "cascaded" from a parent. 
-## Use shared content in a page +#### Use shared content in a page Use the `source` frontmatter to specify a shared file to use to populate the page content. Shared files are typically stored in the `/content/shared` directory. @@ -499,9 +500,10 @@ When building shared content, use the `show-in` and `hide-in` shortcodes to show or hide blocks of content based on the current InfluxDB product/version. For more information, see [show-in](#show-in) and [hide-in](#hide-in). -## Shortcodes + +### Complete Shortcodes Reference -### Notes and warnings +#### Notes and warnings Shortcodes are available for formatting notes and warnings in each article: @@ -515,7 +517,7 @@ Insert warning markdown content here. {{% /warn %}} ``` -### Product data +#### Product data Display the full product name and version name for the current page--for example: @@ -534,7 +536,7 @@ Display the short version name (part of the key used in `products.yml`) from the {{% product-key %}} ``` -#### Enterprise name +##### Enterprise name The name used to refer to InfluxData's enterprise offering is subject to change. To facilitate easy updates in the future, use the `enterprise-name` shortcode @@ -548,7 +550,7 @@ This is content that references {{< enterprise-name "short" >}}. Product names are stored in `data/products.yml`. -#### Enterprise link +##### Enterprise link References to InfluxDB Enterprise are often accompanied with a link to a page where visitors can get more information about the Enterprise offering. @@ -560,7 +562,7 @@ InfluxDB Enterprise. Find more info [here][{{< enterprise-link >}}] ``` -### Latest patch version +#### Latest patch version Use the `{{< latest-patch >}}` shortcode to add the latest patch version of a product. By default, this shortcode parses the product and minor version from the URL. 
@@ -575,21 +577,21 @@ Easier to maintain being you update the version number in the `data/products.yml {{< latest-patch product="chronograf" version="1.7" >}} ``` -### Latest influx CLI version +#### Latest influx CLI version -Use the `{{< latest-cli >}}` shortcode to add the latest version of the `influx` +Use the `{{< latest-patch cli=true >}}` shortcode to add the latest version of the `influx` CLI supported by the minor version of InfluxDB. By default, this shortcode parses the minor version from the URL. To specify a specific minor version, use the `version` argument. Maintain CLI version numbers in the `data/products.yml` file instead of updating individual links and code examples. ```md -{{< latest-cli >}} +{{< latest-patch cli=true >}} {{< latest-cli version="2.1" >}} ``` -### API endpoint +#### API endpoint Use the `{{< api-endpoint >}}` shortcode to generate a code block that contains a colored request method, a specified API endpoint, and an optional link to @@ -618,7 +620,7 @@ Provide the following arguments: {{< api-endpoint method="get" endpoint="{{< influxdb/host >}}/api/v2/tasks" influxdb_host="cloud">}} ``` -### Tabbed Content +#### Tabbed Content To create "tabbed" content (content that is changed by a users' selection), use the following three shortcodes in combination: @@ -652,7 +654,7 @@ This shortcode must be closed with `{{% /tab-content %}}`. **Note**: The `%` characters used in this shortcode indicate that the contents should be processed as Markdown. -#### Example tabbed content group +##### Example tabbed content group ```md {{< tabs-wrapper >}} @@ -673,7 +675,7 @@ Markdown content for tab 2. {{< /tabs-wrapper >}} ``` -#### Tabbed code blocks +##### Tabbed code blocks Shortcodes are also available for tabbed code blocks primarily used to give users the option to choose between different languages and syntax. 
@@ -717,7 +719,7 @@ WHERE time > now() - 15m {{< /code-tabs-wrapper >}} ```` -#### Link to tabbed content +##### Link to tabbed content To link to tabbed content, click on the tab and use the URL parameter shown. It will have the form `?t=`, plus a string. @@ -727,7 +729,7 @@ For example: [Windows installation](/influxdb/v2.0/install/?t=Windows) ``` -### Required elements +#### Required elements Use the `{{< req >}}` shortcode to identify required elements in documentation with orange text and/or asterisks. By default, the shortcode outputs the text, "Required," but @@ -752,7 +754,7 @@ customize the text of the required message. {{< req text="Required if ..." color="blue" type="key" >}} ``` -#### Required elements in a list +##### Required elements in a list When identifying required elements in a list, use `{{< req type="key" >}}` to generate a "\* Required" key before the list. For required elements in the list, include @@ -766,7 +768,7 @@ a "\* Required" key before the list. For required elements in the list, include - **This element is NOT required** ``` -#### Change color of required text +##### Change color of required text Use the `color` argument to change the color of required text. The following colors are available: @@ -779,7 +781,7 @@ The following colors are available: {{< req color="magenta" text="This is required" >}} ``` -### Page navigation buttons +#### Page navigation buttons Use the `{{< page-nav >}}` shortcode to add page navigation buttons to a page. These are useful for guiding users through a set of docs that should be read in sequential order. @@ -809,7 +811,7 @@ document, but you can use `prevText` and `nextText` to override button text. {{ page-nav prev="/path/to/prev/" next="/path/to/next" keepTab=true>}} ``` -### Keybinds +#### Keybinds Use the `{{< keybind >}}` shortcode to include OS-specific keybindings/hotkeys. 
The following parameters are available: @@ -834,7 +836,7 @@ The following parameters are available: {{< keybind mac="⇧⌘P" linux="Ctrl+Shift+P" win="Ctrl+Shift+Alt+P" >}} ``` -### Diagrams +#### Diagrams Use the `{{< diagram >}}` shortcode to dynamically build diagrams. The shortcode uses [mermaid.js](https://github.com/mermaid-js/mermaid) to convert @@ -849,7 +851,7 @@ That --> There {{< /diagram >}} ``` -### File system diagrams +#### File system diagrams Use the `{{< filesystem-diagram >}}` shortcode to create a styled file system diagram using a Markdown unordered list. @@ -868,7 +870,7 @@ diagram using a Markdown unordered list. {{< /filesystem-diagram >}} ``` -### High-resolution images +#### High-resolution images In many cases, screenshots included in the docs are taken from high-resolution (retina) screens. Because of this, the actual pixel dimension is 2x larger than it needs to be and is rendered 2x bigger than it should be. @@ -887,7 +889,7 @@ cause by browser image resizing. - Image widths are limited to the width of the article content container and will scale accordingly, even with the `width` explicitly set. -### Truncated content blocks +#### Truncated content blocks In some cases, it may be appropriate to shorten or truncate blocks of content. Use cases include long examples of output data or tall images. @@ -900,7 +902,7 @@ Truncated markdown content here. {{% /truncate %}} ``` -### Expandable accordion content blocks +#### Expandable accordion content blocks Use the `{{% expand "Item label" %}}` shortcode to create expandable, accordion-style content blocks. Each expandable block needs a label that users can click to expand or collapse the content block. @@ -935,7 +937,7 @@ Markdown content associated with label 2. {{< /expand-wrapper >}} ``` -### Captions +#### Captions Use the `{{% caption %}}` shortcode to add captions to images and code blocks. 
Captions are styled with a smaller font size, italic text, slight transparency, @@ -947,7 +949,7 @@ Markdown content for the caption. {{% /caption %}} ``` -### Generate a list of children articles +#### Generate a list of children articles Section landing pages often contain just a list of articles with links and descriptions for each. This can be cumbersome to maintain as content is added. @@ -985,7 +987,7 @@ The following list types are available: meant to act as a page navigation and link to children header. - **functions:** a special use-case designed for listing Flux functions. -#### Include a "Read more" link +##### Include a "Read more" link To include a "Read more" link with each child summary, set `readmore=true`. _Only the `articles` list type supports "Read more" links._ @@ -994,7 +996,7 @@ _Only the `articles` list type supports "Read more" links._ {{< children readmore=true >}} ``` -#### Include a horizontal rule +##### Include a horizontal rule To include a horizontal rule after each child summary, set `hr=true`. _Only the `articles` list type supports horizontal rules._ @@ -1003,7 +1005,7 @@ _Only the `articles` list type supports horizontal rules._ {{< children hr=true >}} ``` -#### Include a code example with a child summary +##### Include a code example with a child summary Use the `list_code_example` frontmatter to provide a code example with an article in an articles list. @@ -1015,7 +1017,7 @@ list_code_example: | ``` ```` -#### Organize and include native code examples +##### Organize and include native code examples To include text from a file in `/shared/text/`, use the `{{< get-shared-text >}}` shortcode and provide the relative path and filename. @@ -1055,7 +1057,10 @@ native file formats. 
{{% /code-tabs-wrapper %}} ```` -#### Include specific files from the same directory +##### Include specific files from the same directory +> [!Caution] +> #### Don't use for code examples +> Using this and `get-shared-text` shortcodes to include code examples prevents the code from being tested. To include the text from one file in another file in the same directory, use the `{{< get-leaf-text >}}` shortcode. @@ -1074,13 +1079,13 @@ content | \_index.md ``` -##### query.pdmc +###### query.pdmc ```md # Query examples ``` -##### query.sh +###### query.sh ```md curl https://localhost:8086/query @@ -1101,7 +1106,7 @@ To include `query.sh` and `query.pdmc` in `api/_index.md`, use the following cod Avoid using the following file extensions when naming included text files since Hugo interprets these as markup languages: `.ad`, `.adoc`, `.asciidoc`, `.htm`, `.html`, `.markdown`, `.md`, `.mdown`, `.mmark`, `.pandoc`, `.pdc`, `.org`, or `.rst`. -#### Reference a query example in children +##### Reference a query example in children To include a query example with the children in your list, update `data/query_examples.yml` with the example code, input, and output, and use the `list_query_example` @@ -1111,7 +1116,7 @@ frontmatter to reference the corresponding example. list_query_example: cumulative_sum ``` -#### Children frontmatter +##### Children frontmatter Each children list `type` uses [frontmatter properties](#page-frontmatter) when generating the list of articles. The following table shows which children types use which frontmatter properties: @@ -1126,7 +1131,29 @@ The following table shows which children types use which frontmatter properties: | `list_code_example` | ✓ | | | | `list_query_example` | ✓ | | | -### Inline icons +#### Authentication token link + +Use the `{{% token-link "<descriptor>" "<link_append>" %}}` shortcode to +automatically generate links to token management documentation.
The shortcode +accepts two _optional_ arguments: + +- **descriptor**: An optional token descriptor +- **link_append**: An optional path to append to the token management link path, + `///admin/tokens/`. + +```md +{{% token-link "database" "resource/" %}} + + +[database token](/influxdb3/enterprise/admin/tokens/resource/) +``` + +InfluxDB 3 Enterprise and InfluxDB 3 Core support different kinds of tokens. +The shortcode has a blacklist of token descriptors for each that will prevent +unsupported descriptors from appearing in the rendered output based on the +current product. + +#### Inline icons The `icon` shortcode allows you to inject icons in paragraph text. It's meant to clarify references to specific elements in the InfluxDB user interface. @@ -1201,7 +1228,7 @@ Below is a list of available icons (some are aliases): - wrench - x -### InfluxDB UI left navigation icons +#### InfluxDB UI left navigation icons In many cases, documentation references an item in the left nav of the InfluxDB UI. Provide a visual example of the navigation item using the `nav-icon` shortcode. @@ -1225,7 +1252,7 @@ The following case insensitive values are supported: - settings - feedback -### Flexbox-formatted content blocks +#### Flexbox-formatted content blocks CSS Flexbox formatting lets you create columns in article content that adjust and flow based on the viewable width. @@ -1258,7 +1285,7 @@ The following options are available: - third - quarter -### Tooltips +#### Tooltips Use the `{{< tooltip >}}` shortcode to add tooltips to text. The **first** argument is the text shown in the tooltip. @@ -1271,7 +1298,7 @@ I like {{< tooltip "Butterflies are awesome!" "butterflies" >}}. The rendered output is "I like butterflies" with "butterflies" highlighted. When you hover over "butterflies," a tooltip appears with the text: "Butterflies are awesome!" 
-### Flux sample data tables +#### Flux sample data tables The Flux `sample` package provides basic sample datasets that can be used to illustrate how Flux functions work. To quickly display one of the raw sample @@ -1280,7 +1307,7 @@ datasets, use the `{{% flux/sample %}}` shortcode. The `flux/sample` shortcode has the following arguments that can be specified by name or positionally. -#### set +##### set Sample dataset to output. Use either `set` argument name or provide the set as the first argument. The following sets are available: @@ -1292,19 +1319,19 @@ as the first argument. The following sets are available: - bool - numericBool -#### includeNull +##### includeNull Specify whether or not to include _null_ values in the dataset. Use either `includeNull` argument name or provide the boolean value as the second argument. -#### includeRange +##### includeRange Specify whether or not to include time range columns (`_start` and `_stop`) in the dataset. This is only recommended when showing how functions that require a time range (such as `window()`) operate on input data. Use either `includeRange` argument name or provide the boolean value as the third argument. -##### Example Flux sample data shortcodes +###### Example Flux sample data shortcodes ```md @@ -1326,7 +1353,7 @@ Use either `includeRange` argument name or provide the boolean value as the thir {{% flux/sample "int" true true %}} ``` -### Duplicate OSS content in Cloud +#### Duplicate OSS content in Cloud Docs for InfluxDB OSS and InfluxDB Cloud share a majority of content. To prevent duplication of content between versions, use the following shortcodes: @@ -1335,14 +1362,14 @@ To prevent duplication of content between versions, use the following shortcodes - `{{% oss-only %}}` - `{{% cloud-only %}}` -#### duplicate-oss +##### duplicate-oss The `{{< duplicate-oss >}}` shortcode copies the page content of the file located at the identical file path in the most recent InfluxDB OSS version. 
The Cloud version of this markdown file should contain the frontmatter required for all pages, but the body content should just be the `{{< duplicate-oss >}}` shortcode. -#### oss-only +##### oss-only Wrap content that should only appear in the OSS version of the doc with the `{{% oss-only %}}` shortcode. Use the shortcode on both inline and content blocks: @@ -1384,7 +1411,7 @@ This is necessary to get the first sentence/paragraph to render correctly. {{% /oss-only %}} ``` -#### cloud-only +##### cloud-only Wrap content that should only appear in the Cloud version of the doc with the `{{% cloud-only %}}` shortcode. Use the shortcode on both inline and content blocks: @@ -1426,7 +1453,7 @@ This is necessary to get the first sentence/paragraph to render correctly. {{% /cloud-only %}} ``` -### Show or hide content blocks in shared content +#### Show or hide content blocks in shared content The `source` frontmatter lets you source page content from another file and is used to share content across InfluxDB products. Within the shared content, you @@ -1441,7 +1468,7 @@ content blocks based on the InfluxDB "version." Valid "versions" include: - core - enterprise -#### show-in +##### show-in The `show-in` shortcode accepts a comma-delimited string of InfluxDB "versions" to show the content block in. The version is the second level of the page @@ -1456,7 +1483,7 @@ documentation, but not any other InfluxDB documentation this content is shared i {{% /show-in %}} ``` -#### hide-in +##### hide-in The `hide-in` shortcode accepts a comma-delimited string of InfluxDB "versions" to hide the content block in. The version is the second level of the page @@ -1472,7 +1499,7 @@ content is shared in. {{% /hide-in %}} ``` -### All-Caps +#### All-Caps Clockface v3 introduces many buttons with text formatted as all-caps. Use the `{{< caps >}}` shortcode to format text to match those buttons. @@ -1481,7 +1508,7 @@ Use the `{{< caps >}}` shortcode to format text to match those buttons. 
Click {{< caps >}}Add Data{{< /caps >}} ``` -### Code callouts +#### Code callouts Use the `{{< code-callout >}}` shortcode to highlight and emphasize a specific piece of code (for example, a variable, placeholder, or value) in a code block. @@ -1498,7 +1525,7 @@ http://localhost:8086/orgs/03a2bbf46249a000/... {{< /code-callout >}} ```` -### InfluxDB University banners +#### InfluxDB University banners Use the `{{< influxdbu >}}` shortcode to add an InfluxDB University banner that points to the InfluxDB University site or a specific course. @@ -1517,7 +1544,7 @@ the content of the banner. the course" link="https://university.influxdata.com/" >}} ``` -#### Course templates +##### Course templates Use one of the following course templates: @@ -1525,7 +1552,7 @@ Use one of the following course templates: - telegraf-102 - flux-103 -#### Custom banner content +##### Custom banner content Use the following shortcode parameters to customize the content of the InfluxDB University banner: @@ -1535,7 +1562,7 @@ University banner: - **action**: Text of the button - **link**: URL the button links to -### Reference content +#### Reference content The InfluxDB documentation is "task-based," meaning content primarily focuses on what a user is **doing**, not what they are **using**. @@ -1559,7 +1586,7 @@ menu: # ... ``` -## InfluxDB URLs +#### InfluxDB URLs When a user selects an InfluxDB product and region, example URLs in code blocks throughout the documentation are updated to match their product and region. @@ -1583,7 +1610,7 @@ http://example.com If the user selects the **US West (Oregon)** region, all occurrences of `http://localhost:8086` in code blocks will get updated to `https://us-west-2-1.aws.cloud2.influxdata.com`. -### Exempt URLs from getting updated +##### Exempt URLs from getting updated To exempt a code block from being updated, include the `{{< keep-url >}}` shortcode just before the code block. 
@@ -1596,7 +1623,7 @@ http://localhost:8086 ``` ```` -### Code examples only supported in InfluxDB Cloud +##### Code examples only supported in InfluxDB Cloud Some functionality is only supported in InfluxDB Cloud and code examples should only use InfluxDB Cloud URLs. In these cases, use `https://cloud2.influxdata.com` @@ -1610,7 +1637,7 @@ https://cloud2.influxdata.com ``` ```` -### Automatically populate InfluxDB host placeholder +##### Automatically populate InfluxDB host placeholder The InfluxDB host placeholder that gets replaced by custom domains differs between each InfluxDB product/version. @@ -1633,10 +1660,148 @@ Supported argument values: {{< influxdb/host "serverless" >}} ``` -## InfluxDB API documentation +##### User-populated placeholders -InfluxData uses [Redoc](https://github.com/Redocly/redoc) to generate the full -InfluxDB API documentation when documentation is deployed. -Redoc generates HTML documentation using the InfluxDB `swagger.yml`. -For more information about generating InfluxDB API documentation, see the -[API Documentation README](https://github.com/influxdata/docs-v2/tree/master/api-docs#readme). +Use the `code-placeholders` shortcode to format placeholders +as text fields that users can populate with their own values. +The shortcode takes a regular expression for matching placeholder names. 
+Use the `code-placeholder-key` shortcode to format the placeholder names in +text that describes the placeholder--for example: + +```markdown +{{% code-placeholders "DATABASE_NAME|USERNAME|PASSWORD_OR_TOKEN|API_TOKEN|exampleuser@influxdata.com" %}} +```sh +curl --request POST http://localhost:8086/write?db=DATABASE_NAME \ + --header "Authorization: Token API_TOKEN" \ + --data-binary @path/to/line-protocol.txt +``` +{{% /code-placeholders %}} + +Replace the following: + +- {{% code-placeholder-key %}}`DATABASE_NAME` and `RETENTION_POLICY`{{% /code-placeholder-key %}}: the [database and retention policy mapping (DBRP)](/influxdb/v2/reference/api/influxdb-1x/dbrp/) for the InfluxDB v2 bucket that you want to write to +- {{% code-placeholder-key %}}`USERNAME`{{% /code-placeholder-key %}}: your [InfluxDB 1.x username](/influxdb/v2/reference/api/influxdb-1x/#manage-credentials) +- {{% code-placeholder-key %}}`PASSWORD_OR_TOKEN`{{% /code-placeholder-key %}}: your [InfluxDB 1.x password or InfluxDB API token](/influxdb/v2/reference/api/influxdb-1x/#manage-credentials) +- {{% code-placeholder-key %}}`API_TOKEN`{{% /code-placeholder-key %}}: your [InfluxDB API token](/influxdb/v2/admin/tokens/) +``` + + + +### Advanced Configuration + +#### Vale style linting configuration + +docs-v2 includes Vale writing style linter configurations to enforce documentation writing style rules, guidelines, branding, and vocabulary terms. + +**Advanced Vale usage:** + +```sh +docker compose run -T vale --config=content/influxdb/cloud-dedicated/.vale.ini --minAlertLevel=error content/influxdb/cloud-dedicated/write-data/**/*.md +``` + +The output contains error-level style alerts for the Markdown content. + +**Note**: We strongly recommend running Vale, but it's not included in the +docs-v2 pre-commit hooks for now. +You can include it in your own Git hooks. 
+ +If a file contains style, spelling, or punctuation problems, +the Vale linter can raise one of the following alert levels: + +- **Error**: + - Problems that can cause content to render incorrectly + - Violations of branding guidelines or trademark guidelines + - Rejected vocabulary terms +- **Warning**: General style guide rules and best practices +- **Suggestion**: Style preferences that may require refactoring or updates to an exceptions list + +#### Configure style rules + +`/.ci/vale/styles/` contains configuration files for the custom `InfluxDataDocs` style. + +The easiest way to add accepted or rejected spellings is to enter your terms (or regular expression patterns) into the Vocabulary files at `.ci/vale/styles/config/vocabularies`. + +To add accepted/rejected terms for specific products, configure a style for the product and include a `Branding.yml` configuration. As an example, see `content/influxdb/cloud-dedicated/.vale.ini` and `.ci/vale/styles/Cloud-Dedicated/Branding.yml`. + +To learn more about configuration and rules, see [Vale configuration](https://vale.sh/docs/topics/config). + + +#### JavaScript in the documentation UI + +The InfluxData documentation UI uses JavaScript with ES6+ syntax and +`assets/js/main.js` as the entry point to import modules from +`assets/js`. +Only `assets/js/main.js` should be imported in HTML files. + +`assets/js/main.js` registers components and initializes them on page load. + +If you're adding UI functionality that requires JavaScript, follow these steps: + +1. In your HTML file, add a `data-component` attribute to the element that + should be initialized by your JavaScript code. For example: + + ```html +
+ ``` + +2. Following the component pattern, create a single-purpose JavaScript module + (`assets/js/components/my-component.js`) + that exports a single function that receives the component element and initializes it. +3. In `assets/js/main.js`, import the module and register the component to ensure + the component is initialized on page load. + +##### Debugging JavaScript + +To debug JavaScript code used in the InfluxData documentation UI, choose one of the following methods: + +- Use source maps and the Chrome DevTools debugger. +- Use debug helpers that provide breakpoints and console logging as a workaround or alternative for using source maps and the Chrome DevTools debugger. + +###### Using source maps and Chrome DevTools debugger + +1. In VS Code, select Run > Start Debugging. +2. Select the "Debug Docs (source maps)" configuration. +3. Click the play button to start the debugger. +4. Set breakpoints in the JavaScript source files--files in the + `assets/js/ns-hugo-imp:` namespace-- in the + VS Code editor or in the Chrome Developer Tools Sources panel: + + - In the VS Code Debugger panel > "Loaded Scripts" section, find the + `assets/js/ns-hugo-imp:` namespace. + - In the Chrome Developer Tools Sources panel, expand + `js/ns-hugo-imp://assets/js/`. + +###### Using debug helpers + +1. In your JavaScript module, import debug helpers from `assets/js/utils/debug-helpers.js`. + These helpers provide breakpoints and console logging as a workaround or alternative for + using source maps and the Chrome DevTools debugger. +2. Insert debug statements by calling the helper functions in your code--for example: + + ```js + import { debugLog, debugBreak, debugInspect } from './utils/debug-helpers.js'; + + const data = debugInspect(someData, 'Data'); + debugLog('Processing data', 'myFunction'); + + function processData() { + // Add a breakpoint that works with DevTools + debugBreak(); + + // Your existing code... + } + ``` + +3.
Start Hugo in development mode--for example: + + ```bash + yarn hugo server + ``` + +4. In VS Code, go to Run > Start Debugging, and select the "Debug JS (debug-helpers)" configuration. + +Your system uses the configuration in `launch.json` to launch the site in Chrome +and attach the debugger to the Developer Tools console. + +Make sure to remove the debug statements before merging your changes. +The debug helpers are designed to be used in development and should not be used in production. \ No newline at end of file diff --git a/DOC_GPT_PROFILE.md b/DOC_GPT_PROFILE.md deleted file mode 100644 index 88128535b..000000000 --- a/DOC_GPT_PROFILE.md +++ /dev/null @@ -1,60 +0,0 @@ -Doc is a public custom GPT for OpenAI ChatGPT used to help write and style InfluxData and InfluxDB documentation. - -## Introduction - -You write technical software documentation for InfluxData. The public web site is https://docs.influxdata.com and the source repository is https://github.com/influxdata/docs-v2. -Documentation provides step-by-step guides and reference documentation for InfluxDB and associated clients (CLIs, client libraries (SDKs), and Telegraf (https://docs.influxdata.com/telegraf/v1/)), and the legacy v1 components Kapacitor and Chronograf. 
- -## Instruction - -When a user asks a question and doesn't include a product from the list below, ask them which product in the list they are using, along with the version and query language: - -InfluxDB OSS 1.x (AKA "OSS v1") - - Documentation: https://docs.influxdata.com/influxdb/v1/ - - Query languages: v1.8+ supports InfluxQL and Flux - - Clients: Telegraf, influx CLI, v1 client libraries -InfluxDB Enterprise (AKA "v1 Enterprise") - - Documentation: https://docs.influxdata.com/enterprise_influxdb/v1/ - - Query languages: v1.8+ supports InfluxQL and Flux - - Clients: Telegraf, influx CLI, v1 client libraries -InfluxDB OSS 2.x (AKA "OSS v2", "OSS (TSM)") - - Documentation: https://docs.influxdata.com/influxdb/v2/ - - Query languages: InfluxQL and Flux - - Clients: Telegraf, influx CLI, v2 client libraries -InfluxDB Cloud (TSM) (AKA "Cloud 2") - - Documentation: https://docs.influxdata.com/influxdb/cloud/ - - Query languages: InfluxQL and Flux - - Clients: Telegraf, influx CLI, v2 client libraries -InfluxDB 3 Clustered (AKA "Clustered", "v3 Clustered") - - Documentation: https://docs.influxdata.com/influxdb3/clustered/ - - Query languages: SQL and InfluxQL - - Clients: Telegraf, influxctl CLI, `influxdb3-` (v3) client libraries -InfluxDB 3 Cloud Dedicated (AKA "Cloud Dedicated", "v3 Cloud Dedicated", "Dedicated", "CST (Cloud single-tenant)") - - Documentation: https://docs.influxdata.com/influxdb3/cloud-dedicated/ - - Query languages: SQL and InfluxQL - - Clients: Telegraf, influxctl CLI, `influxdb3-` (v3) client libraries -InfluxDB 3 Cloud Serverless (AKA "Cloud Serverless", "v3 Cloud", "Serverless", "Cloud multi-tenant") - - Documentation: https://docs.influxdata.com/influxdb3/cloud-serverless/ - - Query languages: SQL and InfluxQL - - Clients: Telegraf, influx CLI, `influxdb3-` (v3) client libraries -InfluxDB 3 Core (AKA "Core", "InfluxDB 3 OSS", "v3 Core", "v3 free") - - Documentation: https://docs.influxdata.com/influxdb3/core/ - - Query languages: SQL and 
InfluxQL - - Clients: Telegraf, influxdb3 CLI, `influxdb3-` (v3) client libraries -InfluxDB 3 Enterprise (AKA "Enterprise", "v3 Enterprise") - - Documentation: https://docs.influxdata.com/influxdb3/enterprise/ - - Query languages: SQL and InfluxQL - - Clients: Telegraf, influxdb3 CLI, `influxdb3-` (v3) client libraries - -If I ask about a REST API or SDK (client library) and don't specify a product, ask which product. -For API client libraries, refer to the documentation and to the source repositories in https://github.com/InfluxCommunity for the version-specific client library. - -When writing documentation, always use Google Developer Documentation style guidelines and Markdown format. -If writing REST API reference documentation follow YouTube Data API style and Google Developer Documentation style guidelines. - -The project uses the Hugo static site generator to build the documentation. -The site uses JavaScript and jQuery. -For information about linting, tests (using pytests for codeblocks), shortcode , refer to https://github.com/influxdata/docs-v2/blob/master/README.md and https://github.com/influxdata/docs-v2/blob/master/CONTRIBUTING.md. -If something in CONTRIBUTING.md needs clarification, then give me the suggested revision for CONTRIBUTING.md in Markdown. - -The community forum is https://community.influxdata.com/ and should not be used as a primary source of information, but might contain useful suggestions or solutions to specific problems from users. diff --git a/Dockerfile.pytest b/Dockerfile.pytest index 2199dcb15..89a24277c 100644 --- a/Dockerfile.pytest +++ b/Dockerfile.pytest @@ -3,12 +3,13 @@ FROM golang:latest ### Install InfluxDB clients for testing # Install InfluxDB keys to verify client installs. # Follow the install instructions (https://docs.influxdata.com/telegraf/v1/install/?t=curl), except for sudo (which isn't available in Docker). 
-# influxdata-archive_compat.key GPG fingerprint: -# 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E -ADD https://repos.influxdata.com/influxdata-archive_compat.key ./influxdata-archive_compat.key -RUN echo '393e8779c89ac8d958f81f942f9ad7fb82a25e133faddaf92e15b16e6ac9ce4c influxdata-archive_compat.key' | sha256sum -c && cat influxdata-archive_compat.key | gpg --dearmor | tee /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg > /dev/null +# influxdata-archive.key GPG fingerprint: +# Primary key fingerprint: 24C9 75CB A61A 024E E1B6 3178 7C3D 5715 9FC2 F927 +# Subkey fingerprint: 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E +ADD https://repos.influxdata.com/influxdata-archive.key ./influxdata-archive.key +RUN gpg --no-default-keyring --homedir $(mktemp -d) --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null -RUN echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' | tee /etc/apt/sources.list.d/influxdata.list +RUN echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | tee /etc/apt/sources.list.d/influxdata.list # Vault is used for testing InfluxDB 2.0 Secrets # Fetch vault package information from HashiCorp repository @@ -100,4 +101,4 @@ ENTRYPOINT [ "pytest" ] # Specify command arguments: # --env-file to pass environment variables to the test suite. # the test directory to run the test suite. -CMD [ "--codeblocks", "" ] \ No newline at end of file +CMD [ "--codeblocks", "" ] diff --git a/README.md b/README.md index d24564c76..ab99436fd 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,10 @@ This repository contains the InfluxDB 2.x documentation published at [docs.influ We welcome and encourage community contributions. 
For information about contributing to the InfluxData documentation, see [Contribution guidelines](CONTRIBUTING.md). +## Testing + +For information about testing the documentation, including code block testing, link validation, and style linting, see [Testing guide](TESTING.md). + ## Reporting a Vulnerability InfluxData takes security and our users' trust very seriously. diff --git a/TESTING.md b/TESTING.md new file mode 100644 index 000000000..233bb3a36 --- /dev/null +++ b/TESTING.md @@ -0,0 +1,519 @@ +# Testing Guide for InfluxData Documentation + +This guide covers all testing procedures for the InfluxData documentation, including code block testing, link validation, and style linting. + +## Quick Start + +1. **Prerequisites**: Install [Node.js](https://nodejs.org/en), [Yarn](https://yarnpkg.com/getting-started/install), and [Docker](https://docs.docker.com/get-docker/) +2. **Install dependencies**: Run `yarn` to install all dependencies +3. **Build test environment**: Run `docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest .` +4. **Run tests**: Use any of the test commands below + +## Test Types Overview + +| Test Type | Purpose | Command | +|-----------|---------|---------| +| **Code blocks** | Validate shell/Python code examples | `yarn test:codeblocks:all` | +| **Link validation** | Check internal/external links | `yarn test:links` | +| **Style linting** | Enforce writing standards | `docker compose run -T vale` | +| **E2E tests** | UI and functionality testing | `yarn test:e2e` | + +## Code Block Testing + +Code block testing validates that shell commands and Python scripts in documentation work correctly using [pytest-codeblocks](https://github.com/nschloe/pytest-codeblocks/tree/main). + +### Basic Usage + +```bash +# Test all code blocks +yarn test:codeblocks:all + +# Test specific products +yarn test:codeblocks:cloud +yarn test:codeblocks:v2 +yarn test:codeblocks:telegraf +``` + +### Setup and Configuration + +#### 1. 
Set executable permissions on test scripts + +```sh +chmod +x ./test/src/*.sh +``` + +#### 2. Create test credentials + +Create databases, buckets, and tokens for the product(s) you're testing. +If you don't have access to a Clustered instance, you can use your Cloud Dedicated instance for testing in most cases. + +#### 3. Configure environment variables + +Copy the `./test/env.test.example` file into each product directory and rename as `.env.test`: + +```sh +# Example locations +./content/influxdb/cloud-dedicated/.env.test +./content/influxdb3/clustered/.env.test +``` + +Inside each product's `.env.test` file, assign your InfluxDB credentials: + +- Include the usual `INFLUX_` environment variables +- For `cloud-dedicated/.env.test` and `clustered/.env.test`, also define: + - `ACCOUNT_ID`, `CLUSTER_ID`: Found in your `influxctl config.toml` + - `MANAGEMENT_TOKEN`: Generate with `influxctl management create` + +See `./test/src/prepare-content.sh` for the full list of variables you may need. + +#### 4. Configure influxctl commands + +For influxctl commands to run in tests, move or copy your `config.toml` file to the `./test` directory. + +> [!Warning] +> - The database you configure in `.env.test` and any written data may be deleted during test runs +> - Don't add your `.env.test` files to Git. Git is configured to ignore `.env*` files to prevent accidentally committing credentials + +### Writing Testable Code Blocks + +#### Basic Example + +```python +print("Hello, world!") +``` + + + +``` +Hello, world! +``` + +#### Interactive Commands + +For commands that require TTY interaction (like `influxctl` authentication), wrap the command in a subshell and redirect output: + +```sh +# Test the preceding command outside of the code block. +# influxctl authentication requires TTY interaction-- +# output the auth URL to a file that the host can open. 
+script -c "influxctl user list " \ + /dev/null > /shared/urls.txt +``` + +To hide test blocks from users, wrap them in HTML comments. pytest-codeblocks will still collect and run them. + +#### Skipping Tests + +pytest-codeblocks has features for skipping tests and marking blocks as failed. See the [pytest-codeblocks README](https://github.com/nschloe/pytest-codeblocks/tree/main) for details. + +### Troubleshooting + +#### "Pytest collected 0 items" + +Potential causes: +- Check test discovery options in `pytest.ini` +- Use `python` (not `py`) for Python code block language identifiers: + ```python + # This works + ``` + vs + ```py + # This is ignored + ``` + +## Link Validation with Link-Checker + +Link validation uses the `link-checker` tool to validate internal and external links in documentation files. + +### Basic Usage + +#### Installation + +**Option 1: Build from source (macOS/local development)** + +For local development on macOS, build the link-checker from source: + +```bash +# Clone and build link-checker +git clone https://github.com/influxdata/docs-tooling.git +cd docs-tooling/link-checker +cargo build --release + +# Copy binary to your PATH or use directly +cp target/release/link-checker /usr/local/bin/ +# OR use directly: ./target/release/link-checker +``` + +**Option 2: Download pre-built binary (GitHub Actions/Linux)** + +The link-checker binary is distributed via docs-v2 releases for reliable access from GitHub Actions workflows: + +```bash +# Download Linux binary from docs-v2 releases +curl -L -o link-checker \ + https://github.com/influxdata/docs-v2/releases/download/link-checker-v1.0.0/link-checker-linux-x86_64 +chmod +x link-checker + +# Verify installation +./link-checker --version +``` + +> [!Note] +> Pre-built binaries are currently Linux x86_64 only. For macOS development, use Option 1 to build from source. 
+ +#### Binary Release Process + +**For maintainers:** To create a new link-checker release in docs-v2: + +1. **Create release in docs-tooling** (builds and releases binary automatically): + ```bash + cd docs-tooling + git tag link-checker-v1.2.x + git push origin link-checker-v1.2.x + ``` + +2. **Manually distribute to docs-v2** (required due to private repository access): + ```bash + # Download binary from docs-tooling release + curl -L -H "Authorization: Bearer $(gh auth token)" \ + -o link-checker-linux-x86_64 \ + "https://github.com/influxdata/docs-tooling/releases/download/link-checker-v1.2.x/link-checker-linux-x86_64" + + curl -L -H "Authorization: Bearer $(gh auth token)" \ + -o checksums.txt \ + "https://github.com/influxdata/docs-tooling/releases/download/link-checker-v1.2.x/checksums.txt" + + # Create docs-v2 release + gh release create \ + --repo influxdata/docs-v2 \ + --title "Link Checker Binary v1.2.x" \ + --notes "Link validation tooling binary for docs-v2 GitHub Actions workflows." \ + link-checker-v1.2.x \ + link-checker-linux-x86_64 \ + checksums.txt + ``` + +3. **Update workflow reference** (if needed): + ```bash + # Update .github/workflows/pr-link-check.yml line 98 to use new version + sed -i 's/link-checker-v[0-9.]*/link-checker-v1.2.x/' .github/workflows/pr-link-check.yml + ``` + +> [!Note] +> The manual distribution is required because docs-tooling is a private repository and the default GitHub token doesn't have cross-repository access for private repos. 
+ +#### Core Commands + +```bash +# Map content files to public HTML files +link-checker map content/path/to/file.md + +# Check links in HTML files +link-checker check public/path/to/file.html + +# Generate configuration file +link-checker config +``` + +### Link Resolution Behavior + +The link-checker automatically handles relative link resolution based on the input type: + +**Local Files → Local Resolution** +```bash +# When checking local files, relative links resolve to the local filesystem +link-checker check public/influxdb3/core/admin/scale-cluster/index.html +# Relative link /influxdb3/clustered/tags/kubernetes/ becomes: +# → /path/to/public/influxdb3/clustered/tags/kubernetes/index.html +``` + +**URLs → Production Resolution** +```bash +# When checking URLs, relative links resolve to the production site +link-checker check https://docs.influxdata.com/influxdb3/core/admin/scale-cluster/ +# Relative link /influxdb3/clustered/tags/kubernetes/ becomes: +# → https://docs.influxdata.com/influxdb3/clustered/tags/kubernetes/ +``` + +**Why This Matters** +- **Testing new content**: Tag pages generated locally will be found when testing local files +- **Production validation**: Production URLs validate against the live site +- **No false positives**: New content won't appear broken when testing locally before deployment + +### Content Mapping Workflows + +#### Scenario 1: Map and check InfluxDB 3 Core content + +```bash +# Map Markdown files to HTML +link-checker map content/influxdb3/core/get-started/ + +# Check links in mapped HTML files +link-checker check public/influxdb3/core/get-started/ +``` + +#### Scenario 2: Map and check shared CLI content + +```bash +# Map shared content files +link-checker map content/shared/influxdb3-cli/ + +# Check the mapped output files +# (link-checker map outputs the HTML file paths) +link-checker map content/shared/influxdb3-cli/ | \ + xargs link-checker check +``` + +#### Scenario 3: Direct HTML checking + +```bash +# Check HTML 
files directly without mapping +link-checker check public/influxdb3/core/get-started/ +``` + +#### Combined workflow for changed files + +```bash +# Check only files changed in the last commit +git diff --name-only HEAD~1 HEAD | grep '\.md$' | \ + xargs link-checker map | \ + xargs link-checker check +``` + +### Configuration Options + +#### Local usage (default configuration) + +```bash +# Uses default settings or test.lycherc.toml if present +link-checker check public/influxdb3/core/get-started/ +``` + +#### Production usage (GitHub Actions) + +```bash +# Use production configuration with comprehensive exclusions +link-checker check \ + --config .ci/link-checker/production.lycherc.toml \ + public/influxdb3/core/get-started/ +``` + +### GitHub Actions Integration + +**Automated Integration (docs-v2)** + +The docs-v2 repository includes automated link checking for pull requests: + +- **Trigger**: Runs automatically on PRs that modify content files +- **Binary distribution**: Downloads latest pre-built binary from docs-v2 releases +- **Smart detection**: Only checks files affected by PR changes +- **Production config**: Uses optimized settings with exclusions for GitHub, social media, etc. +- **Results reporting**: Broken links reported as GitHub annotations with detailed summaries + +The workflow automatically: +1. Detects content changes in PRs using GitHub Files API +2. Downloads latest link-checker binary from docs-v2 releases +3. Builds Hugo site and maps changed content to public HTML files +4. Runs link checking with production configuration +5. 
Reports results with annotations and step summaries + +**Manual Integration (other repositories)** + +For other repositories, you can integrate link checking manually: + +```yaml +name: Link Check +on: + pull_request: + paths: + - 'content/**/*.md' + +jobs: + link-check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Download link-checker + run: | + # Binaries are distributed via docs-v2 releases (docs-tooling is private) + curl -L -o link-checker \ + https://github.com/influxdata/docs-v2/releases/download/link-checker-v1.0.0/link-checker-linux-x86_64 + chmod +x link-checker + + - name: Build Hugo site + run: | + npm install + npx hugo --minify + + - name: Check changed files + run: | + git diff --name-only origin/main HEAD | \ + grep '\.md$' | \ + xargs ./link-checker map | \ + xargs ./link-checker check \ + --config .ci/link-checker/production.lycherc.toml +``` + +## Style Linting (Vale) + +Style linting uses [Vale](https://vale.sh/) to enforce documentation writing standards, branding guidelines, and vocabulary consistency. + +### Basic Usage + +```bash +# Basic linting with Docker +docker compose run -T vale --config=content/influxdb/cloud-dedicated/.vale.ini --minAlertLevel=error content/influxdb/cloud-dedicated/write-data/**/*.md +``` + +### VS Code Integration + +1. Install the [Vale VSCode](https://marketplace.visualstudio.com/items?itemName=ChrisChinchilla.vale-vscode) extension +2. 
Set the `Vale:Vale CLI:Path` setting to `${workspaceFolder}/node_modules/.bin/vale` + +### Alert Levels + +Vale can raise different alert levels: + +- **Error**: Problems that can cause content to render incorrectly, violations of branding guidelines, rejected vocabulary terms +- **Warning**: General style guide rules and best practices +- **Suggestion**: Style preferences that may require refactoring or updates to an exceptions list + +### Configuration + +- **Styles**: `.ci/vale/styles/` contains configuration for the custom `InfluxDataDocs` style +- **Vocabulary**: Add accepted/rejected terms to `.ci/vale/styles/config/vocabularies` +- **Product-specific**: Configure per-product styles like `content/influxdb/cloud-dedicated/.vale.ini` + +For more configuration details, see [Vale configuration](https://vale.sh/docs/topics/config). + +## Pre-commit Hooks + +docs-v2 uses [Lefthook](https://github.com/evilmartians/lefthook) to manage Git hooks that run automatically during pre-commit and pre-push. + +### What Runs Automatically + +When you run `git commit`, Git runs: +- **Vale**: Style linting (if configured) +- **Prettier**: Code formatting +- **Cypress**: Link validation tests +- **Pytest**: Code block tests + +### Skipping Pre-commit Hooks + +We strongly recommend running linting and tests, but you can skip them: + +```sh +# Skip with --no-verify flag +git commit -m "" --no-verify + +# Skip with environment variable +LEFTHOOK=0 git commit +``` + +## Advanced Testing + +### E2E Testing + +```bash +# Run all E2E tests +yarn test:e2e + +# Run specific E2E specs +node cypress/support/run-e2e-specs.js --spec "cypress/e2e/content/article-links.cy.js" +``` + +### JavaScript Testing and Debugging + +For JavaScript code in the documentation UI (`assets/js`): + +#### Using Source Maps and Chrome DevTools + +1. In VS Code, select Run > Start Debugging +2. Select "Debug Docs (source maps)" configuration +3. 
Set breakpoints in the `assets/js/ns-hugo-imp:` namespace + +#### Using Debug Helpers + +1. Import debug helpers in your JavaScript module: + ```js + import { debugLog, debugBreak, debugInspect } from './utils/debug-helpers.js'; + ``` + +2. Insert debug statements: + ```js + const data = debugInspect(someData, 'Data'); + debugLog('Processing data', 'myFunction'); + debugBreak(); // Add breakpoint + ``` + +3. Start Hugo: `yarn hugo server` +4. In VS Code, select "Debug JS (debug-helpers)" configuration + +Remember to remove debug statements before committing. + +## Docker Compose Services + +Available test services: + +```bash +# All code block tests +docker compose --profile test up + +# Individual product tests +docker compose run --rm cloud-pytest +docker compose run --rm v2-pytest +docker compose run --rm telegraf-pytest + +# Stop monitoring services +yarn test:codeblocks:stop-monitors +``` + +## Testing Best Practices + +### Code Block Examples + +- Always test code examples before committing +- Use realistic data and examples that users would encounter +- Include proper error handling in examples +- Format code to fit within 80 characters +- Use long options in command-line examples (`--option` vs `-o`) + +### Link Validation + +- Test links regularly, especially after content restructuring +- Use appropriate cache TTL settings for your validation needs +- Monitor cache hit rates to optimize performance +- Clean up expired cache entries periodically + +### Style Guidelines + +- Run Vale regularly to catch style issues early +- Add accepted terms to vocabulary files rather than ignoring errors +- Configure product-specific styles for branding consistency +- Review suggestions periodically for content improvement opportunities + +## Related Files + +- **Configuration**: `pytest.ini`, `cypress.config.js`, `lefthook.yml` +- **Docker**: `compose.yaml`, `Dockerfile.pytest` +- **Scripts**: `.github/scripts/` directory +- **Test data**: `./test/` directory +- **Vale 
config**: `.ci/vale/styles/` + +## Getting Help + +- **GitHub Issues**: [docs-v2 issues](https://github.com/influxdata/docs-v2/issues) +- **Good first issues**: [good-first-issue label](https://github.com/influxdata/docs-v2/issues?q=is%3Aissue+is%3Aopen+label%3Agood-first-issue) +- **InfluxData CLA**: [Sign here](https://www.influxdata.com/legal/cla/) for substantial contributions \ No newline at end of file diff --git a/api-docs/.config.yml b/api-docs/.config.yml index e337b7689..f075dddec 100644 --- a/api-docs/.config.yml +++ b/api-docs/.config.yml @@ -1,5 +1,5 @@ plugins: - - './../openapi/plugins/docs-plugin.js' + - './../openapi/plugins/docs-plugin.cjs' extends: - recommended - docs/all diff --git a/api-docs/getswagger.sh b/api-docs/getswagger.sh index c3c9a36a1..1ff077a45 100755 --- a/api-docs/getswagger.sh +++ b/api-docs/getswagger.sh @@ -62,7 +62,7 @@ function showHelp { subcommand=$1 case "$subcommand" in - cloud-dedicated-v2|cloud-dedicated-management|cloud-serverless-v2|clustered-v2|cloud-v2|v2|v1-compat|core-v3|enterprise-v3|all) + cloud-dedicated-v2|cloud-dedicated-management|cloud-serverless-v2|clustered-management|clustered-v2|cloud-v2|v2|v1-compat|core-v3|enterprise-v3|all) product=$1 shift @@ -187,6 +187,22 @@ function updateCloudServerlessV2 { postProcess $outFile 'influxdb3/cloud-serverless/.config.yml' v2@2 } +function updateClusteredManagement { + outFile="influxdb3/clustered/management/openapi.yml" + if [[ -z "$baseUrl" ]]; + then + echo "Using existing $outFile" + else + # Clone influxdata/granite and fetch the latest openapi.yaml file. 
+ echo "Fetching the latest openapi.yaml file from influxdata/granite" + tmp_dir=$(mktemp -d) + git clone --depth 1 --branch main https://github.com/influxdata/granite.git "$tmp_dir" + cp "$tmp_dir/openapi.yaml" "$outFile" + rm -rf "$tmp_dir" + fi + postProcess $outFile 'influxdb3/clustered/.config.yml' management@0 +} + function updateClusteredV2 { outFile="influxdb3/clustered/v2/ref.yml" if [[ -z "$baseUrl" ]]; @@ -278,6 +294,9 @@ then elif [ "$product" = "cloud-serverless-v2" ]; then updateCloudServerlessV2 +elif [ "$product" = "clustered-management" ]; +then + updateClusteredManagement elif [ "$product" = "clustered-v2" ]; then updateClusteredV2 @@ -305,6 +324,6 @@ then updateOSSV2 updateV1Compat else - echo "Provide a product argument: cloud-v2, cloud-serverless-v2, cloud-dedicated-v2, cloud-dedicated-management, clustered-v2, core-v3, enterprise-v3, v2, v1-compat, or all." + echo "Provide a product argument: cloud-v2, cloud-serverless-v2, cloud-dedicated-v2, cloud-dedicated-management, clustered-management, clustered-v2, core-v3, enterprise-v3, v2, v1-compat, or all." showHelp fi diff --git a/api-docs/influxdb/cloud/.config.yml b/api-docs/influxdb/cloud/.config.yml index e2c6e000f..25a391d42 100644 --- a/api-docs/influxdb/cloud/.config.yml +++ b/api-docs/influxdb/cloud/.config.yml @@ -1,5 +1,5 @@ plugins: - - '../../openapi/plugins/docs-plugin.js' + - '../../openapi/plugins/docs-plugin.cjs' extends: - recommended - docs/all diff --git a/api-docs/influxdb/cloud/v2/ref.yml b/api-docs/influxdb/cloud/v2/ref.yml index 72bee2d65..365e6d7db 100644 --- a/api-docs/influxdb/cloud/v2/ref.yml +++ b/api-docs/influxdb/cloud/v2/ref.yml @@ -13731,7 +13731,7 @@ components: Default is [`RFC3339` date/time format](/influxdb/cloud/reference/glossary/#rfc3339-timestamp). To include nanoseconds in timestamps, use `RFC3339Nano`. 
- #### Example formatted date/time values + ### Example formatted date/time values | Format | Value | |:------------|:----------------------------| diff --git a/api-docs/influxdb/v2/.config.yml b/api-docs/influxdb/v2/.config.yml index d17efcdfc..c0b0b9205 100644 --- a/api-docs/influxdb/v2/.config.yml +++ b/api-docs/influxdb/v2/.config.yml @@ -1,5 +1,5 @@ plugins: - - '../../openapi/plugins/docs-plugin.js' + - '../../openapi/plugins/docs-plugin.cjs' extends: - recommended - docs/all @@ -10,7 +10,5 @@ apis: root: v2/ref.yml x-influxdata-docs-aliases: - /influxdb/v2/api/ - v1-compatibility@2: - root: v1-compatibility/swaggerV1Compat.yml - x-influxdata-docs-aliases: + - /influxdb/v2/api/v1-compatibility/ - /influxdb/v2/api/v1/ diff --git a/api-docs/influxdb/v2/v2/content/tag-groups.yml b/api-docs/influxdb/v2/v2/content/tag-groups.yml index 7fcd8cc8d..905c380ef 100644 --- a/api-docs/influxdb/v2/v2/content/tag-groups.yml +++ b/api-docs/influxdb/v2/v2/content/tag-groups.yml @@ -6,5 +6,6 @@ - Headers - Pagination - Response codes + - Compatibility endpoints - name: All endpoints tags: [] diff --git a/api-docs/influxdb/v2/v2/ref.yml b/api-docs/influxdb/v2/v2/ref.yml index c2f5e9308..778aceacc 100644 --- a/api-docs/influxdb/v2/v2/ref.yml +++ b/api-docs/influxdb/v2/v2/ref.yml @@ -58,6 +58,7 @@ tags: - [Manage API tokens](/influxdb/v2/security/tokens/) - [Assign a token to a specific user](/influxdb/v2/security/tokens/create-token/) name: Authorizations (API tokens) + - name: Authorizations (v1-compatible) - name: Backup - description: | Store your data in InfluxDB [buckets](/influxdb/v2/reference/glossary/#bucket). @@ -88,6 +89,15 @@ tags: | `orgID` | 16-byte string | The organization ID ([find your organization](/influxdb/v2/organizations/view-orgs/). | name: Common parameters x-traitTag: true + - name: Compatibility endpoints + description: | + InfluxDB v2 provides a v1-compatible API for backward compatibility with InfluxDB 1.x clients and integrations. 
+ + Use these endpoints with InfluxDB 1.x client libraries and third-party integrations such as Grafana, Telegraf, and other tools designed for InfluxDB 1.x. The compatibility layer maps InfluxDB 1.x concepts (databases, retention policies) to InfluxDB v2 resources (buckets, organizations) through database retention policy (DBRP) mappings. + + - [Write data (v1-compatible)](#tag/Write-data-(v1-compatible)) + - [Query data using InfluxQL (v1-compatible)](#tag/Query-data-(v1-compatible)) + - [Manage v1-compatible users and permissions](#tag/Authorizations-(v1-compatible)) - name: Config - name: Dashboards - name: Data I/O endpoints @@ -99,7 +109,7 @@ tags: databases and retention policies are mapped to buckets using the database and retention policy (DBRP) mapping service. The DBRP mapping service uses the database and retention policy - specified in 1.x compatibility API requests to route operations to a bucket. + specified in v1 compatibility API requests to route operations to a bucket. ### Related guides @@ -139,9 +149,6 @@ tags: x-traitTag: true - name: Health - name: Labels - - name: Legacy Authorizations - - name: Legacy Query - - name: Legacy Write - name: Metrics - name: NotificationEndpoints - name: NotificationRules @@ -194,6 +201,7 @@ tags: - description: | Retrieve data, analyze queries, and get query suggestions. name: Query + - name: Query data (v1-compatible) - description: | See the [**API Quick Start**](/influxdb/v2/api-guide/api_intro/) to get up and running authenticating with tokens, writing to buckets, and querying data. @@ -218,11 +226,11 @@ tags: |:-----------:|:------------------------ |:--------------------- | | `200` | Success | | | `204` | Success. No content | InfluxDB doesn't return data for the request. | - | `400` | Bad request | May indicate one of the following:
  • Line protocol is malformed. The response body contains the first malformed line in the data and indicates what was expected. For partial writes, the number of points written and the number of points rejected are also included. For more information, check the `rejected_points` measurement in your `_monitoring` bucket.
  • `Authorization` header is missing or malformed or the API token doesn't have permission for the operation.
| + | `400` | Bad request | May indicate one of the following:
  • the request body is malformed
  • `Authorization` header is missing or malformed
  • the API token doesn't have permission for the operation.
| | `401` | Unauthorized | May indicate one of the following:
  • `Authorization: Token` header is missing or malformed
  • API token value is missing from the header
  • API token doesn't have permission. For more information about token types and permissions, see [Manage API tokens](/influxdb/v2/security/tokens/)
| | `404` | Not found | Requested resource was not found. `message` in the response body provides details about the requested resource. | | `413` | Request entity too large | Request payload exceeds the size limit. | - | `422` | Unprocessable entity | Request data is invalid. `code` and `message` in the response body provide details about the problem. | + | `422` | Unprocessable entity | Request data is invalid. The request was well-formed, but couldn't complete due to semantic errors--for example, some or all points in a write request were rejected due to a schema or retention policy violation. The response body provides details about the problem. For more information about rejected points, see how to [Troubleshoot issues writing data](/influxdb/v2/write-data/troubleshoot/)| | `429` | Too many requests | API token is temporarily over the request quota. The `Retry-After` header describes when to try the request again. | | `500` | Internal server error | | | `503` | Service unavailable | Server is temporarily unavailable to process the request. The `Retry-After` header describes when to try the request again. | @@ -314,6 +322,7 @@ tags: - description: | Write time series data to [buckets](/influxdb/v2/reference/glossary/#bucket). name: Write + - name: Write data (v1-compatible) paths: /api/v2: get: @@ -12752,6 +12761,12 @@ paths: - Returns this error only if the [Go (golang) `ioutil.ReadAll()`](https://pkg.go.dev/io/ioutil#ReadAll) function raises an error. - Returns `Content-Type: application/json` for this error. + '422': + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + description: The request was well-formed, but some or all the points were rejected due to semantic errors--for example, schema conflicts or retention policy violations. Error message contains details for one or more rejected points. '429': description: | Too many requests. 
@@ -12863,7 +12878,7 @@ paths: description: Unexpected error summary: List all legacy authorizations tags: - - Legacy Authorizations + - Authorizations (v1-compatible) post: description: | Creates a legacy authorization and returns the legacy authorization. @@ -12926,7 +12941,7 @@ paths: description: Unexpected error summary: Create a legacy authorization tags: - - Legacy Authorizations + - Authorizations (v1-compatible) servers: - url: /private /legacy/authorizations/{authID}: @@ -12948,7 +12963,7 @@ paths: description: Unexpected error summary: Delete a legacy authorization tags: - - Legacy Authorizations + - Authorizations (v1-compatible) get: operationId: GetLegacyAuthorizationsID parameters: @@ -12971,7 +12986,7 @@ paths: description: Unexpected error summary: Retrieve a legacy authorization tags: - - Legacy Authorizations + - Authorizations (v1-compatible) patch: operationId: PatchLegacyAuthorizationsID parameters: @@ -13001,7 +13016,7 @@ paths: description: Unexpected error summary: Update a legacy authorization to be active or inactive tags: - - Legacy Authorizations + - Authorizations (v1-compatible) servers: - url: /private /legacy/authorizations/{authID}/password: @@ -13034,94 +13049,29 @@ paths: description: Unexpected error summary: Set a legacy authorization password tags: - - Legacy Authorizations + - Authorizations (v1-compatible) servers: - url: /private /query: get: - description: Queries InfluxDB using InfluxQL. + summary: Execute InfluxQL query (v1-compatible) + description: | + Executes an InfluxQL query to retrieve data from the specified database. + + This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. + Use query parameters to specify the database and the InfluxQL query. operationId: GetLegacyQuery parameters: - $ref: '#/components/parameters/TraceSpan' - - in: header - name: Accept - schema: - default: application/json - description: | - Media type that the client can understand. 
- - **Note**: With `application/csv`, query results include [**unix timestamps**](/influxdb/v2/reference/glossary/#unix-timestamp) instead of [RFC3339 timestamps](/influxdb/v2/reference/glossary/#rfc3339-timestamp). - enum: - - application/json - - application/csv - - text/csv - - application/x-msgpack - type: string - - description: The content encoding (usually a compression algorithm) that the client can understand. - in: header - name: Accept-Encoding - schema: - default: identity - description: The content coding. Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. - enum: - - gzip - - identity - type: string - - in: header - name: Content-Type - schema: - enum: - - application/json - type: string - - description: The InfluxDB 1.x username to authenticate the request. - in: query - name: u - schema: - type: string - - description: The InfluxDB 1.x password to authenticate the request. - in: query - name: p - schema: - type: string - - description: | - The database to query data from. - This is mapped to an InfluxDB [bucket](/influxdb/v2/reference/glossary/#bucket). - For more information, see [Database and retention policy mapping](/influxdb/v2/api/influxdb-1x/dbrp/). - in: query - name: db - required: true - schema: - type: string - - description: | - The retention policy to query data from. - This is mapped to an InfluxDB [bucket](/influxdb/v2/reference/glossary/#bucket). - For more information, see [Database and retention policy mapping](/influxdb/v2/api/influxdb-1x/dbrp/). - in: query - name: rp - schema: - type: string - - description: The InfluxQL query to execute. To execute multiple queries, delimit queries with a semicolon (`;`). - in: query - name: q - required: true - schema: - type: string - - description: | - A unix timestamp precision. 
- Formats timestamps as [unix (epoch) timestamps](/influxdb/v2/reference/glossary/#unix-timestamp) with the specified precision - instead of [RFC3339 timestamps](/influxdb/v2/reference/glossary/#rfc3339-timestamp) with nanosecond precision. - in: query - name: epoch - schema: - enum: - - ns - - u - - µ - - ms - - s - - m - - h - type: string + - $ref: '#/components/parameters/AuthV1Username' + - $ref: '#/components/parameters/AuthV1Password' + - $ref: '#/components/parameters/Accept' + - $ref: '#/components/parameters/AcceptEncoding' + - $ref: '#/components/parameters/Content-Type' + - $ref: '#/components/parameters/V1Database' + - $ref: '#/components/parameters/V1RetentionPolicy' + - $ref: '#/components/parameters/V1Epoch' + - $ref: '#/components/parameters/V1Query' responses: '200': content: @@ -13185,9 +13135,85 @@ paths: schema: $ref: '#/components/schemas/Error' description: Error processing query - summary: Query with the 1.x compatibility API tags: - - Legacy Query + - Query data (v1-compatible) + post: + operationId: PostQueryV1 + summary: Execute InfluxQL query (v1-compatible) + description: | + Executes an InfluxQL query to retrieve data from the specified database. + + This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. + Use query parameters to specify the database and the InfluxQL query. + tags: + - Query data (v1-compatible) + requestBody: + description: InfluxQL query to execute. 
+ content: + text/plain: + schema: + type: string + parameters: + - $ref: '#/components/parameters/TraceSpan' + - $ref: '#/components/parameters/AuthV1Username' + - $ref: '#/components/parameters/AuthV1Password' + - $ref: '#/components/parameters/Accept' + - $ref: '#/components/parameters/AcceptEncoding' + - $ref: '#/components/parameters/Content-Type' + - $ref: '#/components/parameters/V1Database' + - $ref: '#/components/parameters/V1RetentionPolicy' + - $ref: '#/components/parameters/V1Epoch' + responses: + '200': + description: Query results + headers: + Content-Encoding: + description: The Content-Encoding entity header is used to compress the media-type. When present, its value indicates which encodings were applied to the entity-body + schema: + type: string + description: Specifies that the response in the body is encoded with gzip or not encoded with identity. + default: identity + enum: + - gzip + - identity + Trace-Id: + description: The Trace-Id header reports the request's trace ID, if one was generated. + schema: + type: string + description: Specifies the request's trace ID. + content: + application/csv: + schema: + $ref: '#/components/schemas/InfluxqlCsvResponse' + application/json: + schema: + $ref: '#/components/schemas/InfluxqlJsonResponse' + text/csv: + schema: + $ref: '#/components/schemas/InfluxqlCsvResponse' + examples: + influxql-chunk_size_2: + value: | + {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:55Z",90,"1"],["2016-05-19T18:37:56Z",90,"1"]],"partial":true}],"partial":true}]} + {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:57Z",90,"1"],["2016-05-19T18:37:58Z",90,"1"]]}]}]} + application/x-msgpack: + schema: + type: string + format: binary + '429': + description: Token is temporarily over quota. The Retry-After header describes when to try the read again. 
+ headers: + Retry-After: + description: A non-negative decimal integer indicating the seconds to delay after the response is received. + schema: + type: integer + format: int32 + default: + description: Error processing query + content: + application/json: + schema: + $ref: '#/components/schemas/Error' /write: post: operationId: PostLegacyWrite @@ -13244,7 +13270,7 @@ paths: application/json: schema: $ref: '#/components/schemas/LineProtocolError' - description: Line protocol poorly formed and no points were written. Response can be used to determine the first malformed line in the body line-protocol. All data in body was rejected and not written. + description: Line protocol is poorly formed and no points were written. Response can be used to determine the first malformed line in the body line-protocol. '401': content: application/json: @@ -13256,13 +13282,19 @@ paths: application/json: schema: $ref: '#/components/schemas/Error' - description: No token was sent and they are required. + description: The request didn't provide an authorization token. '413': content: application/json: schema: $ref: '#/components/schemas/LineProtocolLengthError' description: Write has been rejected because the payload is too large. Error message returns max size supported. All data in body was rejected and not written. + '422': + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + description: The request was well-formed, but some points were rejected due to semantic errors--for example, schema conflicts or retention policy violations. Error message contains details for one or more rejected points. '429': description: Token is temporarily over quota. The Retry-After header describes when to try the write again. 
headers: @@ -13285,9 +13317,31 @@ paths: schema: $ref: '#/components/schemas/Error' description: Internal server error - summary: Write time series data into InfluxDB in a V1-compatible format + summary: Write data using a v1-compatible request + description: | + Writes data in [line protocol](/influxdb/v2/reference/syntax/line-protocol/) syntax to the specified bucket using a v1-compatible request. + + This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools. + + Use query parameters to specify options for writing data. + + #### InfluxDB Cloud + + - Validates and queues the request. + - Handles the write asynchronously - the write might not have completed yet. + - Returns a `Retry-After` header that describes when to try the write again. + + #### InfluxDB OSS v2 + + - Validates the request and handles the write synchronously. + - If all points were written successfully, responds with HTTP `2xx` status code + - If any points were rejected, responds with HTTP `4xx` status code and details about the problem. + + #### Related guides + + - [Write data with the InfluxDB API](/influxdb/v2/write-data/developer-tools/api) tags: - - Legacy Write + - Write data (v1-compatible) components: examples: AuthorizationPostRequest: @@ -13392,6 +13446,96 @@ components: required: false schema: type: string + Accept: + in: header + name: Accept + schema: + default: application/json + description: | + Media type that the client can understand. + + **Note**: With `application/csv`, query results include [**unix timestamps**](/influxdb/v2/reference/glossary/#unix-timestamp) instead of [RFC3339 timestamps](/influxdb/v2/reference/glossary/#rfc3339-timestamp). 
+ enum: + - application/json + - application/csv + - text/csv + - application/x-msgpack + type: string + AcceptEncoding: + description: The content encoding (usually a compression algorithm) that the client can understand. + in: header + name: Accept-Encoding + schema: + default: identity + description: The content coding. Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. + enum: + - gzip + - identity + type: string + Content-Type: + in: header + name: Content-Type + schema: + enum: + - application/json + type: string + AuthV1Username: + description: | + The InfluxDB 1.x username to authenticate the request. + If you provide an API token as the password, `u` is required, but can be any value. + in: query + name: u + schema: + type: string + AuthV1Password: + description: The InfluxDB 1.x password to authenticate the request. + in: query + name: p + schema: + type: string + V1Database: + description: | + The database to query data from. + This is mapped to an InfluxDB [bucket](/influxdb/v2/reference/glossary/#bucket). + For more information, see [Database and retention policy mapping](/influxdb/v2/api/influxdb-1x/dbrp/). + in: query + name: db + required: true + schema: + type: string + V1RetentionPolicy: + description: | + The retention policy to query data from. + This is mapped to an InfluxDB [bucket](/influxdb/v2/reference/glossary/#bucket). + For more information, see [Database and retention policy mapping](/influxdb/v2/api/influxdb-1x/dbrp/). + in: query + name: rp + schema: + type: string + V1Query: + description: The InfluxQL query to execute. To execute multiple queries, delimit queries with a semicolon (`;`). + in: query + name: q + required: true + schema: + type: string + V1Epoch: + description: | + A unix timestamp precision. 
+ Formats timestamps as [unix (epoch) timestamps](/influxdb/v2/reference/glossary/#unix-timestamp) with the specified precision + instead of [RFC3339 timestamps](/influxdb/v2/reference/glossary/#rfc3339-timestamp) with nanosecond precision. + in: query + name: epoch + schema: + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string responses: AuthorizationError: content: @@ -14713,7 +14857,7 @@ components: Default is [`RFC3339` date/time format](/influxdb/v2/reference/glossary/#rfc3339-timestamp). To include nanoseconds in timestamps, use `RFC3339Nano`. - #### Example formatted date/time values + ### Example formatted date/time values | Format | Value | |:------------|:----------------------------| @@ -20038,13 +20182,16 @@ x-tagGroups: - Headers - Pagination - Response codes + - Compatibility endpoints - name: All endpoints tags: - Authorizations (API tokens) + - Authorizations (v1-compatible) - Backup - Buckets - Cells - Checks + - Compatibility endpoints - Config - Dashboards - DBRPs @@ -20052,15 +20199,13 @@ x-tagGroups: - Delete - Health - Labels - - Legacy Authorizations - - Legacy Query - - Legacy Write - Metrics - NotificationEndpoints - NotificationRules - Organizations - Ping - Query + - Query data (v1-compatible) - Ready - RemoteConnections - Replications @@ -20082,3 +20227,4 @@ x-tagGroups: - Variables - Views - Write + - Write data (v1-compatible) diff --git a/api-docs/influxdb3/cloud-dedicated/.config.yml b/api-docs/influxdb3/cloud-dedicated/.config.yml index 9f8eebb6e..11808b821 100644 --- a/api-docs/influxdb3/cloud-dedicated/.config.yml +++ b/api-docs/influxdb3/cloud-dedicated/.config.yml @@ -1,5 +1,5 @@ plugins: - - '../../openapi/plugins/docs-plugin.js' + - '../../openapi/plugins/docs-plugin.cjs' extends: - recommended - docs/all diff --git a/api-docs/influxdb3/cloud-dedicated/management/content/tag-groups.yml b/api-docs/influxdb3/cloud-dedicated/management/content/tag-groups.yml index 775a53762..57e8c8484 100644 --- 
a/api-docs/influxdb3/cloud-dedicated/management/content/tag-groups.yml +++ b/api-docs/influxdb3/cloud-dedicated/management/content/tag-groups.yml @@ -1,6 +1,6 @@ - name: Using the Management API tags: - Authentication - - Examples + - Quickstart - name: All endpoints tags: [] diff --git a/api-docs/influxdb3/cloud-dedicated/management/openapi.yml b/api-docs/influxdb3/cloud-dedicated/management/openapi.yml index 74868b46b..a74165c29 100644 --- a/api-docs/influxdb3/cloud-dedicated/management/openapi.yml +++ b/api-docs/influxdb3/cloud-dedicated/management/openapi.yml @@ -7,10 +7,10 @@ info: This documentation is generated from the InfluxDB OpenAPI specification. + version: '' license: name: MIT url: https://opensource.org/licenses/MIT - version: '' contact: name: InfluxData url: https://www.influxdata.com @@ -31,7 +31,7 @@ tags: - name: Authentication x-traitTag: true description: | - The InfluxDB Management API endpoints require the following credentials: + With InfluxDB 3 Cloud Dedicated, the InfluxDB Management API endpoints require the following credentials: - `ACCOUNT_ID`: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the cluster belongs to. To view account ID and cluster ID, [list cluster details](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json). - `CLUSTER_ID`: The ID of the [cluster](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that you want to manage. To view account ID and cluster ID, [list cluster details](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json). @@ -40,11 +40,12 @@ tags: See how to [create a management token](/influxdb3/cloud-dedicated/admin/tokens/management/). By default, management tokens in InfluxDB 3 are short-lived tokens issued by an OAuth2 identity provider that grant a specific user administrative access to your InfluxDB cluster. 
However, for automation purposes, you can manually create management tokens that authenticate directly with your InfluxDB cluster and do not require human interaction with your identity provider. + - name: Database tokens description: Manage database read/write tokens for a cluster - name: Databases description: Manage databases for a cluster - - name: Example + - name: Quickstart x-traitTag: true description: | The following example script shows how to use `curl` to make database and token management requests: @@ -397,6 +398,26 @@ paths: post: operationId: CreateClusterDatabase summary: Create a database + description: | + Create a database for a cluster. + + The database name must be unique within the cluster. + + **Default maximum number of columns**: 250 + **Default maximum number of tables**: 500 + + The retention period is specified in nanoseconds. For example, to set a retention period of 1 hour, use `3600000000000`. + + InfluxDB Cloud Dedicated lets you define a [custom partitioning](/influxdb3/cloud-dedicated/admin/custom-partitions/) strategy for each database and table. + A _partition_ is a logical grouping of data stored in [Apache Parquet](https://parquet.apache.org/). + By default, data is partitioned by day, + but, depending on your schema and workload, customizing the partitioning + strategy can improve query performance. + + To use custom partitioning, you define a [partition template](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/). + If a table doesn't have a custom partition template, it inherits the database's template. + The partition template is set at the time of database creation and cannot be changed later. + For more information, see [Custom partitions](/influxdb3/cloud-dedicated/admin/custom-partitions/). 
tags: - Databases parameters: @@ -609,7 +630,7 @@ paths: maxTables: 300 maxColumnsPerTable: 150 retentionPeriod: 600000000000 - maxTablsOnly: + maxTablesOnly: summary: Update Max Tables Only value: maxTables: 300 @@ -660,7 +681,7 @@ paths: maxTables: 300 maxColumnsPerTable: 150 retentionPeriod: 600000000000 - maxTablsOnly: + maxTablesOnly: summary: Update Max Tables Only value: accountId: 11111111-1111-4111-8111-111111111111 @@ -779,6 +800,18 @@ paths: post: operationId: CreateClusterDatabaseTable summary: Create a database table + description: | + Create a table. The database must already exist. With InfluxDB Cloud Dedicated, tables and measurements are synonymous. + + Typically, tables are created automatically on write using the measurement name + specified in line protocol written to InfluxDB. + However, to apply a [custom partition template](/influxdb3/cloud-dedicated/admin/custom-partitions/) + to a table, you must manually [create the table with custom partitioning](/influxdb3/cloud-dedicated/admin/tables/#create-a-table-with-custom-partitioning) before you write any data to it. + + Partitioning defaults to `%Y-%m-%d` (daily). + When a partition template is applied to a database, it becomes the default template + for all tables in that database, but can be overridden when creating a + table. 
tags: - Tables parameters: @@ -942,6 +975,10 @@ paths: $ref: '#/components/schemas/DatabaseTokenPermissions' createdAt: $ref: '#/components/schemas/DatabaseTokenCreatedAt' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' required: - accountId - clusterId @@ -1045,6 +1082,8 @@ paths: $ref: '#/components/schemas/DatabaseTokenDescription' permissions: $ref: '#/components/schemas/DatabaseTokenPermissions' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' required: - description examples: @@ -1094,6 +1133,10 @@ paths: $ref: '#/components/schemas/DatabaseTokenCreatedAt' accessToken: $ref: '#/components/schemas/DatabaseTokenAccessToken' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' required: - accountId - clusterId @@ -1185,6 +1228,14 @@ paths: get: operationId: GetDatabaseToken summary: Get a database token + description: | + Retrieve metadata details for a [database token](/influxdb3/cloud-dedicated/admin/tokens/database/). + + #### Store secure tokens in a secret store + + We recommend storing database tokens in a **secure secret store**. + + Token strings are viewable _only_ on token creation and aren't stored by InfluxDB; you can't recover a lost token. tags: - Database tokens parameters: @@ -1229,6 +1280,10 @@ paths: $ref: '#/components/schemas/DatabaseTokenPermissions' createdAt: $ref: '#/components/schemas/DatabaseTokenCreatedAt' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' required: - accountId - clusterId @@ -1299,6 +1354,8 @@ paths: patch: operationId: UpdateDatabaseToken summary: Update a database token + description: | + Update the description and permissions of a [database token](/influxdb3/cloud-dedicated/admin/tokens/database/). 
tags: - Database tokens parameters: @@ -1317,7 +1374,6 @@ paths: - name: tokenId in: path description: The ID of the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) to update - required: true schema: $ref: '#/components/schemas/UuidV4' requestBody: @@ -1385,6 +1441,10 @@ paths: $ref: '#/components/schemas/DatabaseTokenPermissions' createdAt: $ref: '#/components/schemas/DatabaseTokenCreatedAt' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' required: - accountId - clusterId @@ -1625,9 +1685,9 @@ components: description: | A template for [partitioning](/influxdb3/cloud-dedicated/admin/custom-partitions/) a cluster database. - Each template part is evaluated in sequence, concatinating the final - partition key from the output of each part, delimited by the partition - key delimiter `|`. + Each partition template part is evaluated in sequence. + The outputs from each part are concatenated with the + `|` delimiter to form the final partition key. 
For example, using the partition template below: @@ -1834,6 +1894,18 @@ components: examples: - '2023-12-21T17:32:28.000Z' - '2024-03-02T04:20:19.000Z' + DatabaseTokenExpiresAt: + description: | + The date and time that the database token expires, if applicable + + Uses RFC3339 format + $ref: '#/components/schemas/DateTimeRfc3339' + DatabaseTokenRevokedAt: + description: | + The date and time that the database token was revoked, if applicable + + Uses RFC3339 format + $ref: '#/components/schemas/DateTimeRfc3339' DatabaseTokenAccessToken: description: | The access token that can be used to authenticate query and write requests to the cluster @@ -1944,7 +2016,7 @@ x-tagGroups: - name: Using the Management API tags: - Authentication - - Examples + - Quickstart - name: All endpoints tags: - Database tokens diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index 55f91d971..128021d19 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -66,7 +66,22 @@ paths: schema: type: string required: true - description: Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy. + description: | + The database to write to. + + **Database targeting:** In Cloud Dedicated, databases can be named using the `database_name/retention_policy_name` convention for InfluxQL compatibility. Cloud Dedicated does not use DBRP mappings. The db and rp parameters are used to construct the target database name following this naming convention. + + **Auto-creation behavior:** Cloud Dedicated requires databases to be created before writing data. The v1 `/write` API does not automatically create databases. If the specified + database does not exist, the write request will fail. 
+ + Authentication: Requires a valid API token with _write_ permissions for the target database. + + ### Related + + - [Write data to InfluxDB Cloud Dedicated](/influxdb3/cloud-dedicated/write-data/) + - [Manage databases in InfluxDB Cloud Dedicated](/influxdb3/cloud-dedicated/admin/databases/) + - [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention) + - [InfluxQL data retention policy mapping differences](/influxdb3/cloud-serverless/guides/prototype-evaluation/#influxql-data-retention-policy-mapping-differences) - in: query name: rp schema: @@ -137,6 +152,160 @@ paths: schema: $ref: '#/components/schemas/Error' /query: + get: + operationId: GetQueryV1 + tags: + - Query + summary: Query using the InfluxDB v1 HTTP API + parameters: + - $ref: '#/components/parameters/TraceSpan' + - $ref: '#/components/parameters/AuthUserV1' + - $ref: '#/components/parameters/AuthPassV1' + - in: header + name: Accept + schema: + type: string + description: Specifies how query results should be encoded in the response. **Note:** With `application/csv`, query results include epoch timestamps instead of RFC3339 timestamps. + default: application/json + enum: + - application/json + - application/csv + - text/csv + - application/x-msgpack + - in: header + name: Accept-Encoding + description: The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand. + schema: + type: string + description: Specifies that the query response in the body should be encoded with gzip or not encoded with identity. + default: identity + enum: + - gzip + - identity + - in: query + name: chunked + description: | + If true, the response is divided into chunks of size `chunk_size`. + schema: + type: boolean + default: false + - in: query + name: chunk_size + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. 
+ schema: + type: integer + default: 10000 + - in: query + name: db + schema: + type: string + required: true + description: The database to query from. + - in: query + name: pretty + description: | + If true, the JSON response is formatted in a human-readable format. + schema: + type: boolean + default: false + - in: query + name: q + description: Defines the InfluxQL query to run. + required: true + schema: + type: string + - in: query + name: rp + schema: + type: string + description: | + The retention policy name for InfluxQL compatibility + + Optional parameter that, when combined with the db parameter, forms the complete database name to query. In InfluxDB Cloud Dedicated, databases can be named using the + database_name/retention_policy_name convention for InfluxQL compatibility. + + When a request specifies both `db` and `rp`, Cloud Dedicated combines them as `db/rp` to target the database--for example: + + - If `db=mydb` and `rp=autogen`, the query targets the database named `mydb/autogen` + - If only `db=mydb` is provided (no `rp`), the query targets the database named `mydb` + + Unlike InfluxDB v1 and Cloud Serverless, Cloud Dedicated does not use DBRP mappings or separate retention policy objects. This parameter exists solely for v1 API + compatibility and database naming conventions. + + _Note: The retention policy name does not control data retention in Cloud Dedicated. Data retention is determined by the database's **retention period** setting._ + + ### Related + + - [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention) + - [InfluxQL data retention policy mapping differences](/influxdb3/cloud-serverless/guides/prototype-evaluation/#influxql-data-retention-policy-mapping-differences) + - name: epoch + description: | + Formats timestamps as unix (epoch) timestamps with the specified precision + instead of RFC3339 timestamps with nanosecond precision. 
+ in: query + schema: + type: string + enum: + - h + - m + - s + - ms + - u + - µ + - ns + responses: + '200': + description: Query results + headers: + Content-Encoding: + description: The Content-Encoding entity header is used to compress the media-type. When present, its value indicates which encodings were applied to the entity-body + schema: + type: string + description: Specifies that the response in the body is encoded with gzip or not encoded with identity. + default: identity + enum: + - gzip + - identity + Trace-Id: + description: The Trace-Id header reports the request's trace ID, if one was generated. + schema: + type: string + description: Specifies the request's trace ID. + content: + application/csv: + schema: + $ref: '#/components/schemas/InfluxQLCSVResponse' + text/csv: + schema: + $ref: '#/components/schemas/InfluxQLCSVResponse' + application/json: + schema: + $ref: '#/components/schemas/InfluxQLResponse' + examples: + influxql-chunk_size_2: + value: | + {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:55Z",90,"1"],["2016-05-19T18:37:56Z",90,"1"]],"partial":true}],"partial":true}]} + {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:57Z",90,"1"],["2016-05-19T18:37:58Z",90,"1"]]}]}]} + application/x-msgpack: + schema: + type: string + format: binary + '429': + description: Token is temporarily over quota. The Retry-After header describes when to try the read again. + headers: + Retry-After: + description: A non-negative decimal integer indicating the seconds to delay after the response is received. 
+ schema: + type: integer + format: int32 + default: + description: Error processing query + content: + application/json: + schema: + $ref: '#/components/schemas/Error' post: operationId: PostQueryV1 tags: @@ -148,6 +317,83 @@ paths: text/plain: schema: type: string + application/json: + schema: + type: object + properties: + db: + type: string + description: | + The database name for InfluxQL queries. + + Required parameter that specifies the database to query. + In InfluxDB Cloud Dedicated, this can be either: + - A simple database name (for example, `mydb`) + - The database portion of a `database_name/retention_policy_name` naming convention (used together with the `rp` parameter) + + When used alone, `db` specifies the complete database name to query. When used with the `rp` parameter, they combine to form the full database name as `db/rp`--for example, if `db=mydb` and `rp=autogen`, the query targets the database named `mydb/autogen`. + + Unlike InfluxDB Cloud Serverless, Cloud Dedicated does not use DBRP mappings. The database name directly corresponds to an existing database in your Cloud Dedicated cluster. + + Examples: + - `db=mydb` - queries the database named `mydb` + - `db=mydb` with `rp=autogen` - queries the database named `mydb/autogen` + + _Note: The specified database must exist in your Cloud Dedicated cluster. 
Queries will fail if the database does not exist._ + + ### Related + + - [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention) + - [Migrate data from InfluxDB 1.x to Cloud Dedicated](/influxdb3/cloud-dedicated/guides/migrate-data/migrate-1x-to-cloud-dedicated/) + - [InfluxQL data retention policy mapping differences between InfluxDB Cloud Dedicated and Cloud Serverless](/influxdb3/cloud-serverless/guides/prototype-evaluation/#influxql-data-retention-policy-mapping-differences) + rp: + description: | + The retention policy name for InfluxQL compatibility + + Optional parameter that, when combined with the db parameter, forms the complete database name to query. In InfluxDB Cloud Dedicated, databases can be named using the + database_name/retention_policy_name convention for InfluxQL compatibility. + + When a request specifies both `db` and `rp`, Cloud Dedicated combines them as `db/rp` to target the database--for example: + + - If `db=mydb` and `rp=autogen`, the query targets the database named `mydb/autogen` + - If only `db=mydb` is provided (no `rp`), the query targets the database named `mydb` + + Unlike InfluxDB v1 and Cloud Serverless, Cloud Dedicated does not use DBRP mappings or separate retention policy objects. This parameter exists solely for v1 API + compatibility and database naming conventions. + + _Note: The retention policy name does not control data retention in Cloud Dedicated. 
Data retention is determined by the database's **retention period** setting._ + + ### Related + + - [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention) + - [Migrate data from InfluxDB 1.x to Cloud Dedicated](/influxdb3/cloud-dedicated/guides/migrate-data/migrate-1x-to-cloud-dedicated/) + - [InfluxQL data retention policy mapping differences](/influxdb3/cloud-serverless/guides/prototype-evaluation/#influxql-data-retention-policy-mapping-differences) + type: string + q: + description: Defines the InfluxQL query to run. + type: string + chunked: + description: | + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: | + A unix timestamp precision. + type: string + enum: + - h + - m + - s + - ms + - u + - µ + - ns parameters: - $ref: '#/components/parameters/TraceSpan' - $ref: '#/components/parameters/AuthUserV1' @@ -184,7 +430,7 @@ paths: schema: type: string required: true - description: Bucket to query. + description: Database to query. 
- in: query name: rp schema: diff --git a/api-docs/influxdb3/cloud-dedicated/v2/ref.yml b/api-docs/influxdb3/cloud-dedicated/v2/ref.yml index c4b645464..b638df94f 100644 --- a/api-docs/influxdb3/cloud-dedicated/v2/ref.yml +++ b/api-docs/influxdb3/cloud-dedicated/v2/ref.yml @@ -63,12 +63,14 @@ tags: name: API compatibility x-traitTag: true - description: | - Use one of the following schemes to authenticate to the InfluxDB API: + Depending on your workflow, use one of the following schemes to authenticate to the InfluxDB 3 API: - - [Bearer authentication](#section/Authentication/BearerAuthentication) - - [Token authentication](#section/Authentication/TokenAuthentication) - - [Basic authentication](#section/Authentication/BasicAuthentication) - - [Querystring authentication](#section/Authentication/QuerystringAuthentication) + | Authentication scheme | Works with | + |:-------------------|:-----------| + | [Bearer authentication](#section/Authentication/BearerAuthentication) | All endpoints | + | [Token authentication](#section/Authentication/TokenAuthentication) | v1, v2 endpoints | + | [Basic authentication](#section/Authentication/BasicAuthentication) | v1 endpoints | + | [Querystring authentication](#section/Authentication/QuerystringAuthentication) | v1 endpoints | name: Authentication x-traitTag: true @@ -1097,7 +1099,7 @@ components: Default is [`RFC3339` date/time format](/influxdb3/cloud-dedicated/reference/glossary/#rfc3339-timestamp). To include nanoseconds in timestamps, use `RFC3339Nano`. - #### Example formatted date/time values + ### Example formatted date/time values | Format | Value | |:------------|:----------------------------| @@ -1978,61 +1980,45 @@ components: type: string securitySchemes: BasicAuthentication: + type: http + scheme: basic description: | - ### Basic authentication scheme + Use the `Authorization` header with the `Basic` scheme to authenticate v1 API requests. 
- Use the `Authorization` header with the `Basic` scheme to authenticate v1 API `/write` and `/query` requests. - When authenticating requests, InfluxDB 3 Cloud Dedicated checks that the `password` part of the decoded credential is an authorized [database token](/influxdb3/cloud-dedicated/admin/tokens/). - InfluxDB 3 Cloud Dedicated ignores the `username` part of the decoded credential. + Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints in InfluxDB 3. - ### Syntax - - ```http - Authorization: Basic - ``` - - Replace the following: - - - **`[USERNAME]`**: an optional string value (ignored by InfluxDB 3 Cloud Dedicated). - - **`DATABASE_TOKEN`**: a [database token](/influxdb3/cloud-dedicated/admin/tokens/). - - Encode the `[USERNAME]:DATABASE_TOKEN` credential using base64 encoding, and then append the encoded string to the `Authorization: Basic` header. + When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an authorized token + and ignores the `username` part of the decoded credential. 
### Example - The following example shows how to use cURL with the `Basic` authentication scheme and a [database token](/influxdb3/cloud-dedicated/admin/tokens/): - - ```sh - ####################################### - # Use Basic authentication with a database token - # to query the InfluxDB v1 HTTP API - ####################################### - # Use the --user option with `--user username:DATABASE_TOKEN` syntax - ####################################### - - curl --get "http://cluster-id.a.influxdb.io/query" \ + ```bash + curl "https://cluster-id.a.influxdb.io/write?db=DATABASE_NAME&precision=s" \ --user "":"DATABASE_TOKEN" \ - --data-urlencode "db=DATABASE_NAME" \ - --data-urlencode "q=SELECT * FROM MEASUREMENT" + --header "Content-type: text/plain; charset=utf-8" \ + --data-binary 'home,room=kitchen temp=72 1641024000' ``` Replace the following: - **`DATABASE_NAME`**: your InfluxDB 3 Cloud Dedicated database - - **`DATABASE_TOKEN`**: a [database token](/influxdb3/cloud-dedicated/admin/tokens/) with sufficient permissions to the database - scheme: basic - type: http + - **`DATABASE_TOKEN`**: a database token with sufficient permissions to the database + + #### Related guides + + - [Authenticate v1 API requests](/influxdb3/cloud-dedicated/guides/api-compatibility/v1/) + - [Manage tokens](/influxdb3/cloud-dedicated/admin/tokens/) QuerystringAuthentication: type: apiKey in: query name: u=&p= description: | - Use the Querystring authentication - scheme with InfluxDB 1.x API parameters to provide credentials through the query string. + Use InfluxDB 1.x API parameters to provide credentials through the query string for v1 API requests. - ### Query string authentication + Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints. - In the URL, pass the `p` query parameter to authenticate `/write` and `/query` requests. 
- When authenticating requests, InfluxDB 3 Cloud Dedicated checks that `p` (_password_) is an authorized database token and ignores the `u` (_username_) parameter. + When authenticating requests, InfluxDB 3 checks that the `p` (_password_) query parameter is an authorized token + and ignores the `u` (_username_) query parameter. ### Syntax @@ -2041,11 +2027,20 @@ components: https://cluster-id.a.influxdb.io/write/?[u=any]&p=DATABASE_TOKEN ``` - ### Example + ### Examples - The following example shows how to use cURL with query string authentication and a [database token](/influxdb3/cloud-dedicated/admin/tokens/). + ```bash + curl "https://cluster-id.a.influxdb.io/write?db=DATABASE_NAME&precision=s&p=DATABASE_TOKEN" \ + --header "Content-type: text/plain; charset=utf-8" \ + --data-binary 'home,room=kitchen temp=72 1641024000' + ``` - ```sh + Replace the following: + + - **`DATABASE_NAME`**: your InfluxDB 3 Cloud Dedicated database + - **`DATABASE_TOKEN`**: a database token with sufficient permissions to the database + + ```bash ####################################### # Use an InfluxDB 1.x compatible username and password # to query the InfluxDB v1 HTTP API @@ -2062,16 +2057,23 @@ components: Replace the following: - - **`DATABASE_NAME`**: your InfluxDB 3 Cloud Dedicated database - - **`DATABASE_TOKEN`**: a [database token](/influxdb3/cloud-dedicated/admin/tokens/) with sufficient permissions to the database + - **`DATABASE_NAME`**: the database to query + - **`DATABASE_TOKEN`**: a database token with sufficient permissions to the database + + #### Related guides + + - [Authenticate v1 API requests](/influxdb3/cloud-dedicated/guides/api-compatibility/v1/) + - [Manage tokens](/influxdb3/cloud-dedicated/admin/tokens/) BearerAuthentication: type: http scheme: bearer bearerFormat: JWT description: | - Use the OAuth Bearer authentication - scheme to authenticate to the InfluxDB API. 
+ Use the OAuth Bearer authentication + scheme to provide an authorization token to InfluxDB 3. + + Bearer authentication works with all endpoints. In your API requests, send an `Authorization` header. For the header value, provide the word `Bearer` followed by a space and a database token. @@ -2080,29 +2082,20 @@ components: ### Syntax ```http - Authorization: Bearer INFLUX_TOKEN + Authorization: Bearer DATABASE_TOKEN ``` ### Example - ```sh - ######################################################## - # Use the Bearer token authentication scheme with /api/v2/write - # to write data. - ######################################################## - - curl --request post "https://cluster-id.a.influxdb.io/api/v2/write?bucket=DATABASE_NAME&precision=s" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --data-binary 'home,room=kitchen temp=72 1463683075' + ```bash + curl https://cluster-id.a.influxdb.io/api/v3/query_influxql \ + --header "Authorization: Bearer DATABASE_TOKEN" ``` - - For examples and more information, see the following: - - [Authenticate API requests](/influxdb3/cloud-dedicated/primers/api/v2/#authenticate-api-requests) - - [Manage tokens](/influxdb3/cloud-dedicated/admin/tokens/) TokenAuthentication: description: | - Use the Token authentication - scheme to authenticate to the InfluxDB API. + Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB 3. + + The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB 3. In your API requests, send an `Authorization` header. For the header value, provide the word `Token` followed by a space and a database token. 
@@ -2111,7 +2104,7 @@ components: ### Syntax ```http - Authorization: Token INFLUX_API_TOKEN + Authorization: Token DATABASE_TOKEN ``` ### Example @@ -2129,7 +2122,6 @@ components: ### Related guides - - [Authenticate API requests](/influxdb3/cloud-dedicated/primers/api/v2/#authenticate-api-requests) - [Manage tokens](/influxdb3/cloud-dedicated/admin/tokens/) in: header name: Authorization diff --git a/api-docs/influxdb3/cloud-serverless/.config.yml b/api-docs/influxdb3/cloud-serverless/.config.yml index 684da1b8c..e845948d0 100644 --- a/api-docs/influxdb3/cloud-serverless/.config.yml +++ b/api-docs/influxdb3/cloud-serverless/.config.yml @@ -1,5 +1,5 @@ plugins: - - '../../openapi/plugins/docs-plugin.js' + - '../../openapi/plugins/docs-plugin.cjs' extends: - recommended - docs/all diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index 744692e6d..2ff111180 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -65,7 +65,7 @@ paths: schema: type: string required: true - description: Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy. + description: Database to write to. If none exists, InfluxDB creates a database with a default 3-day retention policy. - in: query name: rp schema: @@ -136,6 +136,188 @@ paths: schema: $ref: '#/components/schemas/Error' /query: + get: + operationId: GetQueryV1 + tags: + - Query + summary: Query using the InfluxDB v1 HTTP API + parameters: + - $ref: '#/components/parameters/TraceSpan' + - $ref: '#/components/parameters/AuthUserV1' + - $ref: '#/components/parameters/AuthPassV1' + - in: header + name: Accept + schema: + type: string + description: Specifies how query results should be encoded in the response. 
**Note:** With `application/csv`, query results include epoch timestamps instead of RFC3339 timestamps. + default: application/json + enum: + - application/json + - application/csv + - text/csv + - application/x-msgpack + - in: header + name: Accept-Encoding + description: The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand. + schema: + type: string + description: Specifies that the query response in the body should be encoded with gzip or not encoded with identity. + default: identity + enum: + - gzip + - identity + - in: query + name: chunked + description: | + If true, the response is divided into chunks of size `chunk_size`. + schema: + type: boolean + default: false + - in: query + name: chunk_size + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + schema: + type: integer + default: 10000 + - in: query + name: db + schema: + type: string + required: true + description: | + The database name for InfluxQL queries + + Required parameter that specifies the database to query via DBRP (Database Retention Policy) mapping. In Cloud Serverless, this parameter is used together with DBRP + mappings to identify which bucket to query. + + The `db` parameter (optionally combined with `rp`) must have an existing DBRP mapping that points to a bucket. Without a valid DBRP mapping, queries will fail with an + authorization error. 
+ + **DBRP mapping requirements:** + - A DBRP mapping must exist before querying + - Mappings can be created automatically when writing data with the v1 API (if your token has permissions) + - Mappings can be created manually using the InfluxDB CLI or API + + ### Examples + - `db=mydb` - uses the default DBRP mapping for `mydb` + - `db=mydb` with `rp=weekly` - uses the DBRP mapping for `mydb/weekly` + + _Note: Unlike the v1 `/write` endpoint which can auto-create buckets and mappings, the `/query` endpoint requires pre-existing DBRP mappings. The actual data is stored in and + queried from the bucket that the DBRP mapping points to._ + + ### Related + + - [Use the InfluxDB v1 query API and InfluxQL in Cloud Serverless](/influxdb3/cloud-serverless/query-data/execute-queries/v1-http/) + - [Map v1 databases and retention policies to buckets in Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v1/#map-v1-databases-and-retention-policies-to-buckets) + - [Migrate from InfluxDB 1.x to Cloud Serverless](/influxdb3/cloud-serverless/guides/migrate-data/migrate-1x-to-serverless/) + - in: query + name: pretty + description: | + If true, the JSON response is formatted in a human-readable format. + schema: + type: boolean + default: false + - in: query + name: q + description: Defines the InfluxQL query to run. + required: true + schema: + type: string + - in: query + name: rp + schema: + type: string + description: | + The retention policy name for InfluxQL queries + + Optional parameter that specifies the retention policy to use when querying data with InfluxQL. In Cloud Serverless, this parameter works with DBRP (Database Retention + Policy) mappings to identify the target bucket. + + When provided together with the `db` parameter, Cloud Serverless uses the DBRP mapping to determine which bucket to query. The combination of `db` and `rp` must have an + existing DBRP mapping that points to a bucket. 
If no `rp` is specified, Cloud Serverless uses the default retention policy mapping for the database. + + Requirements: A DBRP mapping must exist for the db/rp combination before you can query data. DBRP mappings can be created: + - Automatically when writing data with the v1 API (if your token has sufficient permissions) + - Manually using the InfluxDB CLI or API + + Example: If `db=mydb` and `rp=weekly`, the query uses the DBRP mapping for `mydb/weekly` to determine which bucket to query. + + _Note: The retention policy name is used only for DBRP mapping. Actual data retention is controlled by the target bucket's retention period setting, not by the retention + policy name._ + + ### Related + + - [Use the InfluxDB v1 query API and InfluxQL in Cloud Serverless](/influxdb3/cloud-serverless/query-data/execute-queries/v1-http/) + - [Map v1 databases and retention policies to buckets in Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v1/#map-v1-databases-and-retention-policies-to-buckets) + - [Migrate from InfluxDB 1.x to Cloud Serverless](/influxdb3/cloud-serverless/guides/migrate-data/migrate-1x-to-serverless/) + - name: epoch + description: | + Formats timestamps as unix (epoch) timestamps with the specified precision + instead of RFC3339 timestamps with nanosecond precision. + in: query + schema: + type: string + enum: + - h + - m + - s + - ms + - u + - µ + - ns + responses: + '200': + description: Query results + headers: + Content-Encoding: + description: The Content-Encoding entity header is used to compress the media-type. When present, its value indicates which encodings were applied to the entity-body + schema: + type: string + description: Specifies that the response in the body is encoded with gzip or not encoded with identity. + default: identity + enum: + - gzip + - identity + Trace-Id: + description: The Trace-Id header reports the request's trace ID, if one was generated. 
+ schema: + type: string + description: Specifies the request's trace ID. + content: + application/csv: + schema: + $ref: '#/components/schemas/InfluxQLCSVResponse' + text/csv: + schema: + $ref: '#/components/schemas/InfluxQLCSVResponse' + application/json: + schema: + $ref: '#/components/schemas/InfluxQLResponse' + examples: + influxql-chunk_size_2: + value: | + {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:55Z",90,"1"],["2016-05-19T18:37:56Z",90,"1"]],"partial":true}],"partial":true}]} + {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:57Z",90,"1"],["2016-05-19T18:37:58Z",90,"1"]]}]}]} + application/x-msgpack: + schema: + type: string + format: binary + '429': + description: Token is temporarily over quota. The Retry-After header describes when to try the read again. + headers: + Retry-After: + description: A non-negative decimal integer indicating the seconds to delay after the response is received. + schema: + type: integer + format: int32 + default: + description: Error processing query + content: + application/json: + schema: + $ref: '#/components/schemas/Error' post: operationId: PostQueryV1 tags: @@ -147,6 +329,87 @@ paths: text/plain: schema: type: string + application/json: + schema: + type: object + properties: + db: + type: string + description: | + The database name for InfluxQL queries + + Required parameter that specifies the database to query via DBRP (Database Retention Policy) mapping. In Cloud Serverless, this parameter is used together with DBRP + mappings to identify which bucket to query. + + The `db` parameter (optionally combined with `rp`) must have an existing DBRP mapping that points to a bucket. Without a valid DBRP mapping, queries will fail with an + authorization error. 
+ + **DBRP mapping requirements:** + - A DBRP mapping must exist before querying + - Mappings can be created automatically when writing data with the v1 API (if your token has permissions) + - Mappings can be created manually using the InfluxDB CLI or API + + ### Examples + - `db=mydb` - uses the default DBRP mapping for `mydb` + - `db=mydb` with `rp=weekly` - uses the DBRP mapping for `mydb/weekly` + + _Note: Unlike the v1 `/write` endpoint which can auto-create buckets and mappings, the `/query` endpoint requires pre-existing DBRP mappings. The actual data is stored in and + queried from the bucket that the DBRP mapping points to._ + + ### Related + + - [Execute InfluxQL queries using the v1 API](/influxdb3/cloud-serverless/query-data/execute-queries/influxql/api/v1-http/) + - [Map v1 databases and retention policies to buckets in Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v1/#map-v1-databases-and-retention-policies-to-buckets) + - [Manage DBRP mappings in Cloud Serverless](/influxdb3/cloud-serverless/admin/dbrp/) + rp: + description: | + The retention policy name for InfluxQL queries + + Optional parameter that specifies the retention policy to use when querying data with InfluxQL. In Cloud Serverless, this parameter works with DBRP (Database Retention + Policy) mappings to identify the target bucket. + + When provided together with the `db` parameter, Cloud Serverless uses the DBRP mapping to determine which bucket to query. The combination of `db` and `rp` must have an + existing DBRP mapping that points to a bucket. If no `rp` is specified, Cloud Serverless uses the default retention policy mapping for the database. + + Requirements: A DBRP mapping must exist for the db/rp combination before you can query data. 
DBRP mappings can be created: + - Automatically when writing data with the v1 API (if your token has sufficient permissions) + - Manually using the InfluxDB CLI or API + + Example: If `db=mydb` and `rp=weekly`, the query uses the DBRP mapping for `mydb/weekly` to determine which bucket to query. + + _Note: The retention policy name is used only for DBRP mapping. Actual data retention is controlled by the target bucket's retention period setting, not by the retention policy name._ + + ### Related + + - [Execute InfluxQL queries using the v1 API](/influxdb3/cloud-serverless/query-data/execute-queries/influxql/api/v1-http/) + - [Map v1 databases and retention policies to buckets in Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v1/#map-v1-databases-and-retention-policies-to-buckets) + - [Manage DBRP mappings in Cloud Serverless](/influxdb3/cloud-serverless/admin/dbrp/) + type: string + q: + description: Defines the InfluxQL query to run. + type: string + chunked: + description: | + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: | + A unix timestamp precision. + type: string + enum: + - h + - m + - s + - ms + - u + - µ + - ns parameters: - $ref: '#/components/parameters/TraceSpan' - $ref: '#/components/parameters/AuthUserV1' diff --git a/api-docs/influxdb3/cloud-serverless/v2/ref.yml b/api-docs/influxdb3/cloud-serverless/v2/ref.yml index 6b57c22c8..3b8ac502e 100644 --- a/api-docs/influxdb3/cloud-serverless/v2/ref.yml +++ b/api-docs/influxdb3/cloud-serverless/v2/ref.yml @@ -9414,7 +9414,7 @@ components: Default is [`RFC3339` date/time format](/influxdb3/cloud-serverless/reference/glossary/#rfc3339-timestamp). To include nanoseconds in timestamps, use `RFC3339Nano`. 
- #### Example formatted date/time values + ### Example formatted date/time values | Format | Value | |:------------|:----------------------------| diff --git a/api-docs/influxdb3/clustered/.config.yml b/api-docs/influxdb3/clustered/.config.yml index ca9356a9e..1715e1bf9 100644 --- a/api-docs/influxdb3/clustered/.config.yml +++ b/api-docs/influxdb3/clustered/.config.yml @@ -1,11 +1,13 @@ plugins: - - '../../openapi/plugins/docs-plugin.js' + - '../../openapi/plugins/docs-plugin.cjs' extends: - recommended - docs/all x-influxdata-product-name: InfluxDB 3 Clustered apis: + management@0: + root: management/openapi.yml v2@2: root: v2/ref.yml x-influxdata-docs-aliases: diff --git a/api-docs/influxdb3/clustered/management/content/info.yml b/api-docs/influxdb3/clustered/management/content/info.yml new file mode 100644 index 000000000..0d324fadb --- /dev/null +++ b/api-docs/influxdb3/clustered/management/content/info.yml @@ -0,0 +1,15 @@ +title: InfluxDB 3 Clustered Management API +x-influxdata-short-title: Management API +description: | + The Management API for InfluxDB 3 Clustered provides a programmatic interface for managing an InfluxDB 3 cluster. + The Management API lets you integrate functions such as creating and managing databases, permissions, and tokens into your workflow or application. + + This documentation is generated from the + InfluxDB 3 Management API OpenAPI specification. 
+license: + name: MIT + url: 'https://opensource.org/licenses/MIT' +contact: + name: InfluxData + url: https://www.influxdata.com + email: support@influxdata.com \ No newline at end of file diff --git a/api-docs/influxdb3/clustered/management/content/servers.yml b/api-docs/influxdb3/clustered/management/content/servers.yml new file mode 100644 index 000000000..edec580b8 --- /dev/null +++ b/api-docs/influxdb3/clustered/management/content/servers.yml @@ -0,0 +1,8 @@ +- url: 'https://{baseurl}/api/v0' + description: InfluxDB 3 Clustered Management API URL + variables: + baseurl: + enum: + - 'console.influxdata.com' + default: 'console.influxdata.com' + description: InfluxDB 3 Clustered Console URL diff --git a/api-docs/influxdb3/clustered/management/content/tag-groups.yml b/api-docs/influxdb3/clustered/management/content/tag-groups.yml new file mode 100644 index 000000000..57e8c8484 --- /dev/null +++ b/api-docs/influxdb3/clustered/management/content/tag-groups.yml @@ -0,0 +1,6 @@ +- name: Using the Management API + tags: + - Authentication + - Quickstart +- name: All endpoints + tags: [] diff --git a/api-docs/influxdb3/clustered/management/openapi.yml b/api-docs/influxdb3/clustered/management/openapi.yml new file mode 100644 index 000000000..410d10fc8 --- /dev/null +++ b/api-docs/influxdb3/clustered/management/openapi.yml @@ -0,0 +1,1730 @@ +openapi: 3.1.0 +info: + title: InfluxDB 3 Clustered Management API + description: | + The Management API for InfluxDB 3 Clustered provides a programmatic interface for managing an InfluxDB 3 cluster. + The Management API lets you integrate functions such as creating and managing databases, permissions, and tokens into your workflow or application. + + This documentation is generated from the + InfluxDB 3 Management API OpenAPI specification. 
+ version: '' + license: + name: MIT + url: https://opensource.org/licenses/MIT + contact: + name: InfluxData + url: https://www.influxdata.com + email: support@influxdata.com +servers: + - url: https://{baseurl}/api/v0 + description: InfluxDB 3 Clustered Management API URL + variables: + baseurl: + enum: + - console.influxdata.com + default: console.influxdata.com + description: InfluxDB 3 Clustered Console URL +security: + - bearerAuthManagementToken: [] + bearerAuthJwt: [] +tags: + - name: Authentication + x-traitTag: true + description: | + With InfluxDB 3 Clustered, InfluxDB Management API endpoints require the following credential: + + - `Authorization MANAGEMENT_TOKEN`: the `Authorization` HTTP header with a [management token](/influxdb3/clustered/admin/tokens/management/). + + See how to [create a management token](/influxdb3/clustered/admin/tokens/management/). + + By default, management tokens in InfluxDB 3 are short-lived tokens issued by an OAuth2 identity provider that grant a specific user administrative access to your InfluxDB cluster. However, for automation purposes, you can manually create management tokens that authenticate directly with your InfluxDB cluster and do not require human interaction with your identity provider. + - name: Database tokens + description: Manage database read/write tokens for a cluster + - name: Databases + description: Manage databases for a cluster + - name: Quickstart + x-traitTag: true + description: | + The following example script shows how to use `curl` to make database and token management requests: + + ```shell + #!/bin/bash + + # Usage: + # Note the leading space in the command below to keep secrets out of the shell history + # + # ``` + # MANAGEMENT_TOKEN= ./scripts/test_http_api_v0_endpoints.sh + # ``` + + # Env var validation + if [ -z "${MANAGEMENT_TOKEN}" ]; then + echo " + [Error]: ❌ + \$MANAGEMENT_TOKEN env var is required. 
+ " + exit 1 + fi + + HOST="https://cluster-host.com" + + # Database request functions + list_databases () { + local response=$( \ + curl \ + --location "$HOST/api/v0/databases" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + + create_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/databases" \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "name": "'$databaseName'", + "maxTables": 75, + "maxColumnsPerTable": 90, + "retentionPeriod": 600000000000, + "partitionTemplate": [ + { + "type": "tag", + "value": "abc" + }, + { + "type": "bucket", + "value": { + "tagName": "def", + "numberOfBuckets": 5 + } + } + ] + }' \ + ) + echo "$response" + } + + update_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/databases/$databaseName" \ + --request PATCH \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "maxTables": 150, + "maxColumnsPerTable": 180, + "retentionPeriod": 1200000000000 + }' \ + ) + echo "$response" + } + + delete_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/databases/$databaseName" \ + --request DELETE \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + + # Token request functions + list_tokens () { + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + + create_token () { + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens" \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + 
--header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "description": "my test token", + "permissions": [ + { + "action": "write", + "resource": "database_one" + }, + { + "action": "read", + "resource": "database_two" + } + ] + }' \ + ) + echo "$response" + } + + get_token () { + local token_id=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens/$tokenId" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + + update_token () { + local token_id=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens/$tokenId" \ + --request PATCH \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "description": "my updated test token", + "permissions": [ + { + "action": "database_one", + "resource": "read" + } + ] + }' \ + ) + echo "$response" + } + + delete_token () { + local token_id=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens/$tokenId" \ + --request DELETE \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + + + # Test database endpoints + databaseName="test_database_$RANDOM" + + printf "\n🏗️ Creating database... 🏗️\n\n" + response="$(create_database $databaseName)" + echo $response | jq + printf "\n🏗️ Creating database successful 🏗️\n\n" + + printf "\n⬆️ Updating database... ⬆️\n\n" + response="$(update_database $databaseName)" + echo $response | jq + printf "\n⬆️ Updating database successful ⬆️\n\n" + + printf "\n⬇️ Listing databases... ⬇️\n\n" + response="$(list_databases)" + echo $response | jq + printf "\n⬇️ Listing databases successful ⬇️\n\n" + + printf "\n🗑️ Deleting database... 
🗑️\n\n" + response="$(delete_database $databaseName)" + echo $response | jq + printf "\n🗑️ Deleting database successful 🗑️\n\n" + + + # Test token endpoints + printf "\n🏗️ Creating token... 🏗️\n\n" + response="$(create_token)" + echo $response | jq + tokenId=$(echo $response | jq '.id') + printf "\n🏗️ Creating token successful 🏗️\n\n" + + printf "\n⬇️ Getting token... ⬇️\n\n" + response="$(get_token $tokenId)" + echo $response | jq + printf "\n⬇️ Getting token successful ⬇️\n\n" + + printf "\n⬆️ Updating token... ⬆️\n\n" + response="$(update_token $tokenId)" + echo $response | jq + printf "\n⬆️ Updating token successful ⬆️\n\n" + + printf "\n📋 Listing tokens... 📋\n\n" + response="$(list_tokens)" + echo $response | jq + printf "\n📋 Listing tokens successful 📋\n\n" + + printf "\n🗑️ Deleting token... 🗑️\n\n" + response="$(delete_token $tokenId)" + echo $response | jq + printf "\n🗑️ Deleting token successful 🗑️\n\n" + ``` + - name: Tables + description: Manage tables in a database +paths: + /databases: + get: + operationId: GetClusterDatabases + summary: Get all databases for a cluster + responses: + '200': + description: The cluster databases were successfully retrieved + content: + application/json: + schema: + type: array + items: + type: object + properties: + name: + $ref: '#/components/schemas/ClusterDatabaseName' + maxTables: + $ref: '#/components/schemas/ClusterDatabaseMaxTables' + maxColumnsPerTable: + $ref: '#/components/schemas/ClusterDatabaseMaxColumnsPerTable' + retentionPeriod: + $ref: '#/components/schemas/ClusterDatabaseRetentionPeriod' + partitionTemplate: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplate' + required: + - name + - maxTables + - maxColumnsPerTable + - retentionPeriod + example: + - name: DatabaseOne + maxTables: 500 + maxColumnsPerTable: 200 + retentionPeriod: 0 + - name: DatabaseTwo + maxTables: 100 + maxColumnsPerTable: 50 + retentionPeriod: 300000000000 + partitionTemplate: + - type: time + value: '%Y' + - type: tag + 
value: bananas + - type: tag + value: plátanos + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: '' + lang: Shell + source: | + HOST="https://cluster-host.com" + + list_databases () { + local response=$( \ + curl \ + --location "$HOST/api/v0/databases" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + tags: + - Databases + post: + operationId: CreateClusterDatabase + summary: Create a database + tags: + - Databases + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + name: + $ref: '#/components/schemas/ClusterDatabaseName' + maxTables: + $ref: '#/components/schemas/ClusterDatabaseMaxTables' + maxColumnsPerTable: + $ref: '#/components/schemas/ClusterDatabaseMaxColumnsPerTable' + retentionPeriod: + $ref: '#/components/schemas/ClusterDatabaseRetentionPeriod' + partitionTemplate: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplate' + required: + - name + examples: + requiredFieldsOnly: + summary: Required Fields Only + value: + name: DatabaseOne + allFields: + summary: All Fields + value: + name: DatabaseTwo + maxTables: 100 + maxColumnsPerTable: 50 + retentionPeriod: 300000000000 + partitionTemplate: + - type: time + value: '%Y' + - type: tag + value: bananas + - type: tag + value: plátanos + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + responses: + '200': + description: The cluster database was successfully created + content: + application/json: + schema: + type: object + properties: + name: + $ref: '#/components/schemas/ClusterDatabaseName' + maxTables: + $ref: '#/components/schemas/ClusterDatabaseMaxTables' 
+ maxColumnsPerTable: + $ref: '#/components/schemas/ClusterDatabaseMaxColumnsPerTable' + retentionPeriod: + $ref: '#/components/schemas/ClusterDatabaseRetentionPeriod' + partitionTemplate: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplate' + required: + - name + - maxTables + - maxColumnsPerTable + - retentionPeriod + examples: + requiredFieldsOnly: + summary: Required Fields Only + value: + name: DatabaseOne + maxTables: 500 + maxColumnsPerTable: 200 + retentionPeriod: 0 + allFields: + summary: All Fields + value: + name: DatabaseTwo + maxTables: 100 + maxColumnsPerTable: 50 + retentionPeriod: 300000000000 + partitionTemplate: + - type: time + value: '%Y' + - type: tag + value: a + - type: tag + value: c + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '409': + $ref: '#/components/responses/Conflict' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + create_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/databases" \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "name": "'$databaseName'", + "maxTables": 75, + "maxColumnsPerTable": 90, + "retentionPeriod": 600000000000, + "partitionTemplate": [ + { + "type": "tag", + "value": "abc" + }, + { + "type": "bucket", + "value": { + "tagName": "def", + "numberOfBuckets": 5 + } + } + ] + }' \ + ) + echo "$response" + } + /databases/{databaseName}: + patch: + operationId: UpdateClusterDatabase + summary: Update a database + tags: + - Databases + parameters: + - name: databaseName + in: path + description: The name of the database 
to update + required: true + schema: + $ref: '#/components/schemas/ClusterDatabaseName' + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + maxTables: + $ref: '#/components/schemas/ClusterDatabaseMaxTables' + maxColumnsPerTable: + $ref: '#/components/schemas/ClusterDatabaseMaxColumnsPerTable' + retentionPeriod: + $ref: '#/components/schemas/ClusterDatabaseRetentionPeriod' + minProperties: 1 + examples: + allFields: + summary: Update All Fields + value: + maxTables: 300 + maxColumnsPerTable: 150 + retentionPeriod: 600000000000 + maxTablesOnly: + summary: Update Max Tables Only + value: + maxTables: 300 + maxColumnsPerTableOnly: + summary: Update Max Columns Per Table Only + value: + maxColumnsPerTable: 150 + retentionPeriodOnly: + summary: Update Retention Period Only + value: + retentionPeriod: 600000000000 + responses: + '200': + description: The cluster database was successfully updated. + content: + application/json: + schema: + type: object + properties: + name: + $ref: '#/components/schemas/ClusterDatabaseName' + maxTables: + $ref: '#/components/schemas/ClusterDatabaseMaxTables' + maxColumnsPerTable: + $ref: '#/components/schemas/ClusterDatabaseMaxColumnsPerTable' + retentionPeriod: + $ref: '#/components/schemas/ClusterDatabaseRetentionPeriod' + required: + - maxTables + - maxColumnsPerTable + - retentionPeriod + - name + examples: + allFields: + summary: Update All Fields + value: + name: DatabaseOne + maxTables: 300 + maxColumnsPerTable: 150 + retentionPeriod: 600000000000 + maxTablesOnly: + summary: Update Max Tables Only + value: + name: DatabaseOne + maxTables: 300 + maxColumnsPerTable: 200 + retentionPeriod: 0 + maxColumnsPerTableOnly: + summary: Update Max Columns Per Table Only + value: + name: DatabaseOne + maxTables: 500 + maxColumnsPerTable: 150 + retentionPeriod: 0 + retentionPeriodOnly: + summary: Update Retention Period Only + value: + name: DatabaseOne + maxTables: 500 + maxColumnsPerTable: 200 
+ retentionPeriod: 600000000000 + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + update_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/databases/$databaseName" \ + --request PATCH \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "maxTables": 150, + "maxColumnsPerTable": 180, + "retentionPeriod": 1200000000000 + }' \ + ) + echo "$response" + } + delete: + operationId: DeleteClusterDatabase + summary: Delete a database + tags: + - Databases + parameters: + - name: databaseName + in: path + description: The name of the database to delete + required: true + schema: + $ref: '#/components/schemas/ClusterDatabaseName' + responses: + '204': + description: The cluster database was successfully deleted + $ref: '#/components/responses/NoContent' + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + delete_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/databases/$databaseName" \ + --request DELETE \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + /databases/{databaseName}/tables: + post: + operationId: CreateClusterDatabaseTable + summary: Create a database table + tags: + - 
Tables + parameters: + - name: databaseName + in: path + description: The name of the database to create the database table for + required: true + schema: + $ref: '#/components/schemas/ClusterDatabaseName' + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + name: + $ref: '#/components/schemas/ClusterDatabaseTableName' + partitionTemplate: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplate' + required: + - name + examples: + requiredFieldsOnly: + summary: Required Fields Only + value: + name: TableOne + allFields: + summary: All Fields + value: + name: TableTwo + partitionTemplate: + - type: time + value: '%Y' + - type: tag + value: bananas + - type: tag + value: plátanos + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + responses: + '200': + description: The cluster database table was successfully created + content: + application/json: + schema: + type: object + properties: + databaseName: + description: The name of the database that the database table belongs to + $ref: '#/components/schemas/ClusterDatabaseName' + name: + description: The name of the database table + $ref: '#/components/schemas/ClusterDatabaseTableName' + partitionTemplate: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplate' + required: + - databaseName + - name + examples: + requiredFieldsOnly: + summary: Required Fields Only + value: + databaseName: DatabaseOne + name: TableOne + allFields: + summary: All Fields + value: + databaseName: DatabaseOne + name: TableTwo + partitionTemplate: + - type: time + value: '%Y' + - type: tag + value: a + - type: tag + value: c + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '409': + $ref: '#/components/responses/Conflict' + '500': + $ref: 
'#/components/responses/InternalServerError' + /tokens: + get: + operationId: GetDatabaseTokens + summary: Get all database tokens for a cluster + tags: + - Database tokens + responses: + '200': + description: The database tokens were successfully retrieved + content: + application/json: + schema: + type: array + items: + type: object + properties: + id: + description: The ID of the database token + $ref: '#/components/schemas/UuidV4' + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + createdAt: + $ref: '#/components/schemas/DatabaseTokenCreatedAt' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' + required: + - id + - description + - permissions + - createdAt + example: + - id: 55555555-5555-4555-8555-555555555555 + description: Limited Access Token + permissions: + - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + createdAt: '2023-12-21T17:32:28.000Z' + - id: 66666666-6666-4666-8666-666666666666 + description: Full Access Token + permissions: + - action: write + resource: '*' + createdAt: '2024-03-02T04:20:19.000Z' + - id: 77777777-7777-4777-8777-777777777777 + description: No Access Token + permissions: [] + createdAt: '2024-03-02T04:20:19.000Z' + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + list_tokens () { + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + post: + operationId: CreateDatabaseToken + 
summary: Create a database token + tags: + - Database tokens + description: | + Create a [database token](/influxdb3/clustered/admin/tokens/database/) for a cluster. + + The token returned on the `accessToken` property in the response can be used to authenticate query and write requests to the cluster. + + ### Notable behaviors + + - InfluxDB might take some time--from a few seconds to a few minutes--to activate and synchronize new tokens. If a new database token doesn't immediately work (you receive a `401 Unauthorized` error) for querying or writing, wait and then try your request again. + + - Token strings are viewable _only_ on token creation and aren't stored by InfluxDB; you can't recover a lost token. + + #### Store secure tokens in a secret store + + We recommend storing database tokens in a **secure secret store**. + For example, see how to [authenticate Telegraf using tokens in your OS secret store](https://github.com/influxdata/telegraf/tree/master/plugins/secretstores/os). + + If you lose a token, [delete the token from InfluxDB](/influxdb3/clustered/admin/tokens/database/delete/) and create a new one. 
+ requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + required: + - description + examples: + limitedAccessToken: + summary: Limited Access Token + value: + description: Limited Access Token + permissions: + - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + fullAccessToken: + summary: Full Access Token + value: + description: Full Access Token + permissions: + - action: write + resource: '*' + noAccessToken: + summary: No Access Token + value: + description: No Access Token + permissions: [] + responses: + '200': + description: The database token was successfully created + content: + application/json: + schema: + type: object + properties: + id: + description: The ID of the database token + $ref: '#/components/schemas/UuidV4' + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + createdAt: + $ref: '#/components/schemas/DatabaseTokenCreatedAt' + accessToken: + $ref: '#/components/schemas/DatabaseTokenAccessToken' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' + required: + - id + - description + - permissions + - createdAt + - accessToken + examples: + limitedAccessToken: + summary: Limited Access Token + value: + id: 55555555-5555-4555-8555-555555555555 + description: Limited Access Token + permissions: + - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + createdAt: '2023-12-21T17:32:28.000Z' + accessToken: apiv1_5555555555555555555555555555555555555555555555555555555555555555 + fullAccessToken: + summary: Full Access Token + value: + id: 
66666666-6666-4666-8666-666666666666 + description: Full Access Token + permissions: + - action: write + resource: '*' + createdAt: '2024-03-02T04:20:19.000Z' + accessToken: apiv1_6666666666666666666666666666666666666666666666666666666666666666 + noAccessToken: + summary: No Access Token + value: + id: 66666666-6666-4666-8666-666666666666 + description: No Access Token + permissions: [] + createdAt: '2024-03-02T04:20:19.000Z' + accessToken: apiv1_7777777777777777777777777777777777777777777777777777777777777777 + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '409': + $ref: '#/components/responses/Conflict' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + create_token () { + local description=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens" \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "description": "'$description'", + "permissions": [ + { + "action": "read", + "resource": "DatabaseOne" + }, + { + "action": "write", + "resource": "DatabaseTwo" + } + ] + }' \ + ) + echo "$response" + } + /tokens/{tokenId}: + get: + operationId: GetDatabaseToken + summary: Get a database token + tags: + - Database tokens + parameters: + - name: tokenId + in: path + description: The ID of the [database token](/influxdb3/clustered/admin/tokens/database/) to get + required: true + schema: + $ref: '#/components/schemas/UuidV4' + responses: + '200': + description: The database token was successfully retrieved. 
+ content: + application/json: + schema: + type: object + properties: + id: + description: The ID of the database token + $ref: '#/components/schemas/UuidV4' + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + createdAt: + $ref: '#/components/schemas/DatabaseTokenCreatedAt' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' + required: + - id + - description + - permissions + - createdAt + examples: + limitedAccessToken: + summary: Limited Access Token + value: + id: 55555555-5555-4555-8555-555555555555 + description: Limited Access Token + permissions: + - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + createdAt: '2023-12-21T17:32:28.000Z' + fullAccessToken: + summary: Full Access Token + value: + id: 66666666-6666-4666-8666-666666666666 + description: Full Access Token + permissions: + - action: write + resource: '*' + createdAt: '2024-03-02T04:20:19.000Z' + noAccessToken: + summary: No Access Token + value: + id: 77777777-7777-4777-8777-777777777777 + description: No Access Token + permissions: [] + createdAt: '2024-03-02T04:20:19.000Z' + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + get_token () { + local tokenId=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens/$tokenId" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + patch: + operationId: UpdateDatabaseToken + summary: Update a database token + tags: + - Database tokens + parameters: + - name: 
tokenId + in: path + description: The ID of the [database token](/influxdb3/clustered/admin/tokens/database/) to update + required: true + schema: + $ref: '#/components/schemas/UuidV4' + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + minProperties: 1 + examples: + allFields: + summary: Update All Fields + value: + description: Updated Limited Access Token + permissions: + - action: write + resource: DatabaseOne + - action: read + resource: DatabaseTwo + - action: write + resource: DatabaseThree + descriptionOnly: + summary: Update Description Only + value: + description: Updated Limited Access Token + permissionsOnly: + summary: Update Permissions Only + value: + permissions: + - action: write + resource: DatabaseOne + - action: read + resource: DatabaseTwo + - action: write + resource: DatabaseThree + removeAllPermissions: + summary: Remove All Permissions + value: + permissions: [] + responses: + '200': + description: The database token was successfully updated + content: + application/json: + schema: + type: object + properties: + id: + description: The ID of the database token + $ref: '#/components/schemas/UuidV4' + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + createdAt: + $ref: '#/components/schemas/DatabaseTokenCreatedAt' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' + required: + - id + - description + - permissions + - createdAt + examples: + allFields: + summary: Update All Fields + value: + id: 55555555-5555-4555-8555-555555555555 + description: Updated Limited Access Token + permissions: + - action: write + resource: DatabaseOne + - action: read + resource: DatabaseTwo + - 
action: write + resource: DatabaseThree + createdAt: '2023-12-21T17:32:28.000Z' + descriptionOnly: + summary: Update Description Only + value: + id: 55555555-5555-4555-8555-555555555555 + description: Updated Limited Access Token + permissions: + - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + createdAt: '2023-12-21T17:32:28.000Z' + permissionsOnly: + summary: Update Permissions Only + value: + id: 55555555-5555-4555-8555-555555555555 + description: Limited Access Token + permissions: + - action: write + resource: DatabaseOne + - action: read + resource: DatabaseTwo + - action: write + resource: DatabaseThree + createdAt: '2023-12-21T17:32:28.000Z' + removeAllPermissions: + summary: Remove All Permissions + value: + id: 55555555-5555-4555-8555-555555555555 + description: Limited Access Token + permissions: [] + createdAt: '2023-12-21T17:32:28.000Z' + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '409': + $ref: '#/components/responses/Conflict' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + update_token () { + local tokenId=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens/$tokenId" \ + --request PATCH \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "description": "Updated Limited Access Token", + "permissions": [ + { + "action": "write", + "resource": "DatabaseOne" + }, + { + "action": "read", + "resource": "DatabaseTwo" + }, + { + "action": "write", + "resource": "DatabaseThree" + } + ] + }' \ + ) + echo "$response" + } + delete: + operationId: DeleteDatabaseToken + summary: Delete a database token + tags: + - Database tokens + 
parameters: + - name: tokenId + in: path + description: The ID of the [database token](/influxdb3/clustered/admin/tokens/database/) to delete + required: true + schema: + $ref: '#/components/schemas/UuidV4' + responses: + '204': + description: The database token was successfully deleted + $ref: '#/components/responses/NoContent' + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + delete_token () { + local tokenId=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens/$tokenId" \ + --request DELETE \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } +components: + schemas: + Error: + type: object + properties: + code: + type: integer + message: + type: string + examples: + - code: 400 + message: bad request + - code: 401 + message: unauthorized + - code: 403 + message: forbidden + - code: 404 + message: not found + - code: 409 + message: conflict + - code: 500 + message: internal server error + required: + - code + - message + DateTimeRfc3339: + type: string + format: date-time + examples: + - '2023-12-21T17:32:28Z' + UuidV4: + type: string + format: uuid + examples: + - 11111111-1111-4111-8111-111111111111 + - 22222222-1111-4111-8111-111111111111 + ClusterDatabaseName: + description: The name of the cluster database + type: string + examples: + - DatabaseOne + - DatabaseTwo + maxLength: 64 + minLength: 1 + ClusterDatabaseRetentionPeriod: + description: | + The retention period of the [cluster database](/influxdb3/clustered/admin/databases/) in nanoseconds, if applicable + + If the retention period is not set or is set to 0, the database will have infinite retention + 
type: integer + format: int64 + default: 0 + examples: + - 300000000000 + - 600000000000 + minimum: 0 + ClusterDatabaseMaxTables: + description: The maximum number of tables for the cluster database + type: integer + format: int32 + default: 500 + examples: + - 100 + - 300 + minimum: 1 + ClusterDatabaseMaxColumnsPerTable: + description: The maximum number of columns per table for the cluster database + type: integer + format: int32 + default: 200 + examples: + - 50 + - 150 + minimum: 1 + ClusterDatabasePartitionTemplate: + description: | + A template for [partitioning](/influxdb3/clustered/admin/custom-partitions/) a cluster database. + + Each template part is evaluated in sequence, concatinating the final + partition key from the output of each part, delimited by the partition + key delimiter `|`. + + For example, using the partition template below: + + ```json + [ + { + "type": "time", + "value": "%Y" + }, + { + "type": "tag", + "value": "bananas" + }, + { + "type": "tag", + "value": "plátanos" + }, + { + "type": "bucket", + "value": { + "tagName": "c", + "numberOfBuckets": 10 + } + } + ] + ``` + + The following partition keys are derived: + + * `time=2023-01-01, a=bananas, b=plátanos, c=ananas` -> `2023|bananas|plátanos|5` + * `time=2023-01-01, b=plátanos` -> `2023|!|plátanos|!` + * `time=2023-01-01, another=cat, b=plátanos` -> `2023|!|plátanos|!` + * `time=2023-01-01` -> `2023|!|!|!` + * `time=2023-01-01, a=cat|dog, b=!, c=!` -> `2023|cat%7Cdog|%21|8` + * `time=2023-01-01, a=%50, c=%50` -> `2023|%2550|!|9` + * `time=2023-01-01, a=, c=` -> `2023|^|!|0` + * `time=2023-01-01, a=` -> `2023|#|!|!` + * `time=2023-01-01, c=` -> `2023|!|!|` + + When using the default [partitioning](/influxdb3/clustered/admin/custom-partitions/) template (YYYY-MM-DD) there is no + encoding necessary, as the derived partition key contains a single part, and + no reserved characters. 
[`TemplatePart::Bucket`] parts by definition will + always be within the part length limit and contain no restricted characters + so are also not percent-encoded and/or truncated. + type: array + items: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplatePart' + examples: + - - type: time + value: '%Y' + - type: tag + value: bananas + - type: tag + value: plátanos + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + maxItems: 8 + minItems: 1 + uniqueItems: true + ClusterDatabasePartitionTemplatePart: + description: A sub-part of a `PartitionTemplate` + anyOf: + - $ref: '#/components/schemas/ClusterDatabasePartitionTemplatePartTagValue' + - $ref: '#/components/schemas/ClusterDatabasePartitionTemplatePartTimeFormat' + - $ref: '#/components/schemas/ClusterDatabasePartitionTemplatePartBucket' + examples: + - type: time + value: '%Y' + - type: tag + value: bananas + - type: tag + value: plátanos + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + ClusterDatabasePartitionTemplatePartTagValue: + description: | + A tag value matcher that extracts a string value from the specified tag name + + If a row does not contain a value for the specified tag name, the NULL/missing partition key part `!` is rendered. + type: object + properties: + type: + type: string + enum: + - tag + value: + type: string + minLength: 1 + examples: + - type: tag + value: bananas + - type: tag + value: plátanos + ClusterDatabasePartitionTemplatePartTimeFormat: + description: A time format matcher that accepts a "strftime"-like format string and evaluates it against the "time" column + type: object + properties: + type: + type: string + enum: + - time + value: + type: string + minLength: 1 + examples: + - type: time + value: '%Y' + ClusterDatabasePartitionTemplatePartBucket: + description: | + A bucketing matcher that sorts data through a uniform hash function on the values of the given tag name. 
+ + If a row does not contain a value for the specified tag name, the NULL/missing partition key part `!` is rendered. + type: object + properties: + type: + type: string + enum: + - bucket + value: + type: object + properties: + tagName: + description: The name of the tag used to derive the bucket the data belongs in + type: string + minLength: 1 + numberOfBuckets: + description: The number of buckets tag values are distributed across + type: integer + format: int32 + maximum: 100000 + minimum: 1 + examples: + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + ClusterDatabaseTableName: + description: The name of the [cluster database](/influxdb3/clustered/admin/databases/) table + type: string + examples: + - TableOne + - TableTwo + minLength: 1 + DatabaseTokenDescription: + description: The description of the database token + type: string + examples: + - Limited Access Token + - Full Access Token + DatabaseTokenResourceAllDatabases: + description: A resource value for a [database token](/influxdb3/clustered/admin/tokens/database/) permission that refers to all databases + type: string + enum: + - '*' + DatabaseTokenPermissionAction: + description: The action the [database token](/influxdb3/clustered/admin/tokens/database/) permission allows + type: string + DatabaseTokenPermissionResource: + description: The resource the [database token](/influxdb3/clustered/admin/tokens/database/) permission applies to + anyOf: + - $ref: '#/components/schemas/ClusterDatabaseName' + - $ref: '#/components/schemas/DatabaseTokenResourceAllDatabases' + examples: + - DatabaseOne + - DatabaseTwo + - '*' + DatabaseTokenPermission: + description: The description of the database token + type: object + properties: + action: + $ref: '#/components/schemas/DatabaseTokenPermissionAction' + resource: + $ref: '#/components/schemas/DatabaseTokenPermissionResource' + examples: + - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + - action: write + resource: 
'*' + DatabaseTokenPermissions: + description: The list of permissions the [database token](/influxdb3/clustered/admin/tokens/database/) allows + type: array + items: + $ref: '#/components/schemas/DatabaseTokenPermission' + examples: + - - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + - - action: write + resource: '*' + DatabaseTokenCreatedAt: + description: | + The date and time that the [database token](/influxdb3/clustered/admin/tokens/database/) was created + + Uses RFC3339 format + $ref: '#/components/schemas/DateTimeRfc3339' + examples: + - '2023-12-21T17:32:28.000Z' + - '2024-03-02T04:20:19.000Z' + DatabaseTokenExpiresAt: + description: | + The date and time that the database token expires, if applicable + + Uses RFC3339 format + $ref: '#/components/schemas/DateTimeRfc3339' + DatabaseTokenRevokedAt: + description: | + The date and time that the database token was revoked, if applicable + + Uses RFC3339 format + $ref: '#/components/schemas/DateTimeRfc3339' + DatabaseTokenAccessToken: + description: | + The access token that can be used to authenticate query and write requests to the cluster + + The access token is never stored by InfluxDB and is only returned once when the token is created. If the access token is lost, a new token must be created. 
+ type: string + examples: + - apiv1_5555555555555555555555555555555555555555555555555555555555555555 + - apiv1_6666666666666666666666666666666666666666666666666666666666666666 + minLength: 64 + responses: + BadRequest: + description: Bad Request + content: + application/json: + schema: + properties: + code: + type: integer + enum: + - 400 + $ref: '#/components/schemas/Error' + example: + code: 400 + message: bad request + Unauthorized: + description: Unauthorized + content: + application/json: + schema: + properties: + code: + type: integer + enum: + - 401 + $ref: '#/components/schemas/Error' + example: + code: 401 + message: unauthorized + Forbidden: + description: Forbidden + content: + application/json: + schema: + properties: + code: + type: integer + enum: + - 403 + $ref: '#/components/schemas/Error' + example: + code: 403 + message: forbidden + NotFound: + description: Not Found + content: + application/json: + schema: + properties: + code: + type: integer + enum: + - 404 + $ref: '#/components/schemas/Error' + example: + code: 404 + message: not found + Conflict: + description: Conflict + content: + application/json: + schema: + properties: + code: + type: integer + enum: + - 409 + $ref: '#/components/schemas/Error' + example: + code: 409 + message: conflict + InternalServerError: + description: Internal Server Error + content: + application/json: + schema: + properties: + code: + type: integer + enum: + - 500 + $ref: '#/components/schemas/Error' + example: + code: 500 + message: internal server error + NoContent: + description: No Content + securitySchemes: + bearerAuthManagementToken: + type: http + scheme: bearer + bearerFormat: Management Token + bearerAuthJwt: + type: http + scheme: bearer + bearerFormat: JWT +x-tagGroups: + - name: Using the Management API + tags: + - Authentication + - Quickstart + - name: All endpoints + tags: + - Database tokens + - Databases + - Tables diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml 
b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index 36c3e08b0..7735c655d 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -65,7 +65,23 @@ paths: schema: type: string required: true - description: Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy. + description: | + The database to write to. + + **Database targeting:** In InfluxDB Clustered, databases can be named using the `database_name/retention_policy_name` convention for InfluxQL compatibility. InfluxDB Clustered does not use DBRP mappings. The db and rp parameters are used to construct the target database name following this naming convention. + + **Auto-creation behavior:** InfluxDB Clustered requires databases to be created before writing data. The v1 `/write` API does not automatically create databases. If the specified + database does not exist, the write request will fail. + + Authentication: Requires a valid API token with _write_ permissions for the target database. 
+ + ### Related + + - [Write data to InfluxDB Clustered](/influxdb3/clustered/write-data/) + - [Use the InfluxDB v1 API with InfluxDB Clustered](/influxdb3/clustered/guides/api-compatibility/v1/) + - [Manage databases in InfluxDB Clustered](/influxdb3/clustered/admin/databases/) + - [InfluxQL DBRP naming convention in InfluxDB Clustered](/influxdb3/clustered/admin/databases/create/#influxql-dbrp-naming-convention) + - [Migrate data from InfluxDB v1 to InfluxDB Clustered](/influxdb3/clustered/guides/migrate-data/migrate-1x-to-clustered/) - in: query name: rp schema: @@ -136,6 +152,141 @@ paths: schema: $ref: '#/components/schemas/Error' /query: + get: + operationId: GetQueryV1 + tags: + - Query + summary: Query using the InfluxDB v1 HTTP API + parameters: + - $ref: '#/components/parameters/TraceSpan' + - $ref: '#/components/parameters/AuthUserV1' + - $ref: '#/components/parameters/AuthPassV1' + - in: header + name: Accept + schema: + type: string + description: Specifies how query results should be encoded in the response. **Note:** With `application/csv`, query results include epoch timestamps instead of RFC3339 timestamps. + default: application/json + enum: + - application/json + - application/csv + - text/csv + - application/x-msgpack + - in: header + name: Accept-Encoding + description: The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand. + schema: + type: string + description: Specifies that the query response in the body should be encoded with gzip or not encoded with identity. + default: identity + enum: + - gzip + - identity + - in: query + name: chunked + description: | + If true, the response is divided into chunks of size `chunk_size`. + schema: + type: boolean + default: false + - in: query + name: chunk_size + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. 
+ schema: + type: integer + default: 10000 + - in: query + name: db + schema: + type: string + required: true + description: The database to query from. + - in: query + name: pretty + description: | + If true, the JSON response is formatted in a human-readable format. + schema: + type: boolean + default: false + - in: query + name: q + description: Defines the InfluxQL query to run. + required: true + schema: + type: string + - in: query + name: rp + schema: + type: string + description: Retention policy name. + - name: epoch + description: | + Formats timestamps as unix (epoch) timestamps with the specified precision + instead of RFC3339 timestamps with nanosecond precision. + in: query + schema: + type: string + enum: + - h + - m + - s + - ms + - u + - µ + - ns + responses: + '200': + description: Query results + headers: + Content-Encoding: + description: The Content-Encoding entity header is used to compress the media-type. When present, its value indicates which encodings were applied to the entity-body + schema: + type: string + description: Specifies that the response in the body is encoded with gzip or not encoded with identity. + default: identity + enum: + - gzip + - identity + Trace-Id: + description: The Trace-Id header reports the request's trace ID, if one was generated. + schema: + type: string + description: Specifies the request's trace ID. 
+ content: + application/csv: + schema: + $ref: '#/components/schemas/InfluxQLCSVResponse' + text/csv: + schema: + $ref: '#/components/schemas/InfluxQLCSVResponse' + application/json: + schema: + $ref: '#/components/schemas/InfluxQLResponse' + examples: + influxql-chunk_size_2: + value: | + {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:55Z",90,"1"],["2016-05-19T18:37:56Z",90,"1"]],"partial":true}],"partial":true}]} + {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:57Z",90,"1"],["2016-05-19T18:37:58Z",90,"1"]]}]}]} + application/x-msgpack: + schema: + type: string + format: binary + '429': + description: Token is temporarily over quota. The Retry-After header describes when to try the read again. + headers: + Retry-After: + description: A non-negative decimal integer indicating the seconds to delay after the response is received. + schema: + type: integer + format: int32 + default: + description: Error processing query + content: + application/json: + schema: + $ref: '#/components/schemas/Error' post: operationId: PostQueryV1 tags: @@ -147,6 +298,64 @@ paths: text/plain: schema: type: string + application/json: + schema: + type: object + properties: + db: + type: string + description: Database to query. + rp: + description: | + The retention policy name for InfluxQL compatibility + + Optional parameter that, when combined with the db parameter, forms the complete database name to query. In InfluxDB Clustered, databases can be named using the + database_name/retention_policy_name convention for InfluxQL compatibility. 
+ + When a request specifies both `db` and `rp`, InfluxDB Clustered combines them as `db/rp` to target the database--for example: + + - If `db=mydb` and `rp=autogen`, the query targets the database named `mydb/autogen` + - If only `db=mydb` is provided (no `rp`), the query targets the database named `mydb` + + Unlike InfluxDB v1 and Cloud Serverless, InfluxDB Clustered does not use DBRP mappings or separate retention policy objects. This parameter exists solely for v1 API + compatibility and database naming conventions. + + Note: The retention policy name does not control data retention in InfluxDB Clustered. Data retention is determined by the database's _retention period_ setting. + + ### Related + + - [Use the v1 query API and InfluxQL to query data in InfluxDB Clustered](/influxdb3/clustered/query-data/execute-queries/influxdb-v1-api/) + - [Use the InfluxDB v1 API with InfluxDB Clustered](/influxdb3/clustered/guides/api-compatibility/v1/) + - [Manage databases in InfluxDB Clustered](/influxdb3/clustered/admin/databases/) + - [InfluxQL DBRP naming convention in InfluxDB Clustered](/influxdb3/clustered/admin/databases/create/#influxql-dbrp-naming-convention) + - [Migrate data from InfluxDB v1 to InfluxDB Clustered](/influxdb3/clustered/guides/migrate-data/migrate-1x-to-clustered/) + type: string + q: + description: | + Defines the InfluxQL query to run. + type: string + chunked: + description: | + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: | + A unix timestamp precision. 
+ type: string + enum: + - h + - m + - s + - ms + - u + - µ + - ns parameters: - $ref: '#/components/parameters/TraceSpan' - $ref: '#/components/parameters/AuthUserV1' diff --git a/api-docs/influxdb3/clustered/v2/ref.yml b/api-docs/influxdb3/clustered/v2/ref.yml index 09afff6d0..05507ea49 100644 --- a/api-docs/influxdb3/clustered/v2/ref.yml +++ b/api-docs/influxdb3/clustered/v2/ref.yml @@ -63,12 +63,14 @@ tags: name: API compatibility x-traitTag: true - description: | - Use one of the following schemes to authenticate to the InfluxDB API: + Depending on your workflow, use one of the following schemes to authenticate to the InfluxDB 3 API: - - [Bearer authentication](#section/Authentication/BearerAuthentication) - - [Token authentication](#section/Authentication/TokenAuthentication) - - [Basic authentication](#section/Authentication/BasicAuthentication) - - [Querystring authentication](#section/Authentication/QuerystringAuthentication) + | Authentication scheme | Works with | + |:-------------------|:-----------| + | [Bearer authentication](#section/Authentication/BearerAuthentication) | All endpoints | + | [Token authentication](#section/Authentication/TokenAuthentication) | v1, v2 endpoints | + | [Basic authentication](#section/Authentication/BasicAuthentication) | v1 endpoints | + | [Querystring authentication](#section/Authentication/QuerystringAuthentication) | v1 endpoints | name: Authentication x-traitTag: true @@ -1074,7 +1076,7 @@ components: Default is [`RFC3339` date/time format](/influxdb3/clustered/reference/glossary/#rfc3339-timestamp). To include nanoseconds in timestamps, use `RFC3339Nano`. 
- #### Example formatted date/time values + ### Example formatted date/time values | Format | Value | |:------------|:----------------------------| @@ -1955,12 +1957,15 @@ components: type: string securitySchemes: BasicAuthentication: + type: http + scheme: basic description: | - ### Basic authentication scheme + Use the `Authorization` header with the `Basic` scheme to authenticate v1 API requests. - Use the `Authorization` header with the `Basic` scheme to authenticate v1 API `/write` and `/query` requests. - When authenticating requests, InfluxDB 3 Clustered checks that the `password` part of the decoded credential is an authorized [database token](/influxdb3/clustered/admin/tokens/#database-tokens). - InfluxDB 3 Clustered ignores the `username` part of the decoded credential. + Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints in InfluxDB 3. + + When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an authorized token + and ignores the `username` part of the decoded credential. ### Syntax @@ -1968,61 +1973,57 @@ components: Authorization: Basic ``` - Replace the following: - - - **`[USERNAME]`**: an optional string value (ignored by InfluxDB 3 Clustered). - - **`DATABASE_TOKEN`**: a [database token](/influxdb3/clustered/admin/tokens/#database-tokens). - - Encode the `[USERNAME]:DATABASE_TOKEN` credential using base64 encoding, and then append the encoded string to the `Authorization: Basic` header. 
- ### Example - The following example shows how to use cURL with the `Basic` authentication scheme and a [database token](/influxdb3/clustered/admin/tokens/#database-tokens): - - ```sh - ####################################### - # Use Basic authentication with a database token - # to query the InfluxDB v1 HTTP API - ####################################### - # Use the --user option with `--user username:DATABASE_TOKEN` syntax - ####################################### - - curl --get "http://cluster-id.a.influxdb.io/query" \ + ```bash + curl "http://cluster-host.com/write?db=DATABASE_NAME&precision=s" \ --user "":"DATABASE_TOKEN" \ - --data-urlencode "db=DATABASE_NAME" \ - --data-urlencode "q=SELECT * FROM MEASUREMENT" + --header "Content-type: text/plain; charset=utf-8" \ + --data-binary 'home,room=kitchen temp=72 1641024000' ``` Replace the following: - **`DATABASE_NAME`**: your InfluxDB 3 Clustered database - - **`DATABASE_TOKEN`**: a [database token](/influxdb3/clustered/admin/tokens/#database-tokens) with sufficient permissions to the database - scheme: basic - type: http + - **`DATABASE_TOKEN`**: a database token with sufficient permissions to the database + + #### Related guides + + - [Authenticate v1 API requests](/influxdb3/clustered/guides/api-compatibility/v1/) + - [Manage tokens](/influxdb3/clustered/admin/tokens/) QuerystringAuthentication: type: apiKey in: query name: u=&p= description: | - Use the Querystring authentication - scheme with InfluxDB 1.x API parameters to provide credentials through the query string. + Use InfluxDB 1.x API parameters to provide credentials through the query string for v1 API requests. - ### Query string authentication + Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints. - In the URL, pass the `p` query parameter to authenticate `/write` and `/query` requests. 
- When authenticating requests, InfluxDB 3 Clustered checks that `p` (_password_) is an authorized database token and ignores the `u` (_username_) parameter. + When authenticating requests, InfluxDB 3 checks that the `p` (_password_) query parameter is an authorized token + and ignores the `u` (_username_) query parameter. ### Syntax ```http - https://cluster-id.a.influxdb.io/query/?[u=any]&p=DATABASE_TOKEN - https://cluster-id.a.influxdb.io/write/?[u=any]&p=DATABASE_TOKEN + https://cluster-host.com/query/?[u=any]&p=DATABASE_TOKEN + https://cluster-host.com/write/?[u=any]&p=DATABASE_TOKEN ``` - ### Example + ### Examples - The following example shows how to use cURL with query string authentication and a [database token](/influxdb3/clustered/admin/tokens/#database-tokens). + ```bash + curl "http://cluster-host.com/write?db=DATABASE_NAME&precision=s&p=DATABASE_TOKEN" \ + --header "Content-type: text/plain; charset=utf-8" \ + --data-binary 'home,room=kitchen temp=72 1641024000' + ``` - ```sh + Replace the following: + + - **`DATABASE_NAME`**: your InfluxDB 3 Clustered database + - **`DATABASE_TOKEN`**: a database token with sufficient permissions to the database + + ```bash ####################################### # Use an InfluxDB 1.x compatible username and password # to query the InfluxDB v1 HTTP API @@ -2031,7 +2032,7 @@ components: # ?p=DATABASE_TOKEN ####################################### - curl --get "https://cluster-id.a.influxdb.io/query" \ + curl --get "https://cluster-host.com/query" \ --data-urlencode "p=DATABASE_TOKEN" \ --data-urlencode "db=DATABASE_NAME" \ --data-urlencode "q=SELECT * FROM MEASUREMENT" @@ -2039,16 +2040,23 @@ components: Replace the following: - - **`DATABASE_NAME`**: your InfluxDB 3 Clustered database - - **`DATABASE_TOKEN`**: a [database token](/influxdb3/clustered/admin/tokens/#database-tokens) with sufficient permissions to the database + - **`DATABASE_NAME`**: the database to query + - **`DATABASE_TOKEN`**: a database token with 
sufficient permissions to the database + + #### Related guides + + - [Authenticate v1 API requests](/influxdb3/clustered/guides/api-compatibility/v1/) + - [Manage tokens](/influxdb3/clustered/admin/tokens/) BearerAuthentication: type: http scheme: bearer bearerFormat: JWT description: | - Use the OAuth Bearer authentication - scheme to authenticate to the InfluxDB API. + Use the OAuth Bearer authentication + scheme to provide an authorization token to InfluxDB 3. + + Bearer authentication works with all endpoints. In your API requests, send an `Authorization` header. For the header value, provide the word `Bearer` followed by a space and a database token. @@ -2057,29 +2065,20 @@ components: ### Syntax ```http - Authorization: Bearer INFLUX_TOKEN + Authorization: Bearer DATABASE_TOKEN ``` ### Example - ```sh - ######################################################## - # Use the Bearer token authentication scheme with /api/v2/write - # to write data. - ######################################################## - - curl --request post "https://cluster-id.a.influxdb.io/api/v2/write?bucket=DATABASE_NAME&precision=s" \ - --header "Authorization: Bearer DATABASE_TOKEN" \ - --data-binary 'home,room=kitchen temp=72 1463683075' + ```bash + curl http://cluster-host.com/api/v3/query_influxql \ + --header "Authorization: Bearer DATABASE_TOKEN" ``` - - For examples and more information, see the following: - - [Authenticate API requests](/influxdb3/clustered/primers/api/v2/#authenticate-api-requests) - - [Manage tokens](/influxdb3/clustered/admin/tokens/) TokenAuthentication: description: | - Use the Token authentication - scheme to authenticate to the InfluxDB API. + Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB 3. + + The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB 3. In your API requests, send an `Authorization` header. For the header value, provide the word `Token` followed by a space and a database token. 
@@ -2088,7 +2087,7 @@ components: ### Syntax ```http - Authorization: Token INFLUX_API_TOKEN + Authorization: Token DATABASE_TOKEN ``` ### Example @@ -2099,14 +2098,13 @@ components: # to write data. ######################################################## - curl --request post "https://cluster-id.a.influxdb.io/api/v2/write?bucket=DATABASE_NAME&precision=s" \ + curl --request post "https://cluster-host.com/api/v2/write?bucket=DATABASE_NAME&precision=s" \ --header "Authorization: Token DATABASE_TOKEN" \ --data-binary 'home,room=kitchen temp=72 1463683075' ``` ### Related guides - - [Authenticate API requests](/influxdb3/clustered/primers/api/v2/#authenticate-api-requests) - [Manage tokens](/influxdb3/clustered/admin/tokens/) in: header name: Authorization diff --git a/api-docs/influxdb3/core/.config.yml b/api-docs/influxdb3/core/.config.yml index 52ae681b6..14792e219 100644 --- a/api-docs/influxdb3/core/.config.yml +++ b/api-docs/influxdb3/core/.config.yml @@ -1,5 +1,5 @@ plugins: - - '../../openapi/plugins/docs-plugin.js' + - '../../openapi/plugins/docs-plugin.cjs' extends: - recommended - docs/all @@ -10,3 +10,7 @@ apis: root: v3/ref.yml x-influxdata-docs-aliases: - /influxdb3/core/api/ + - /influxdb3/core/api/v1/ + - /influxdb3/core/api/v2/ + - /influxdb3/core/api/v1-compatibility + - /influxdb3/core/api/v2-compatibility diff --git a/api-docs/influxdb3/core/v3/content/tag-groups.yml b/api-docs/influxdb3/core/v3/content/tag-groups.yml index 5289ddb33..364d5e794 100644 --- a/api-docs/influxdb3/core/v3/content/tag-groups.yml +++ b/api-docs/influxdb3/core/v3/content/tag-groups.yml @@ -2,11 +2,14 @@ tags: - Quick start - Authentication + - Cache data - Common parameters - Response codes - Compatibility endpoints - - Data I/O - - Databases + - Database - Processing engine - Server information - - Tables + - Table + - Token + - Query data + - Write data diff --git a/api-docs/influxdb3/core/v3/ref.yml b/api-docs/influxdb3/core/v3/ref.yml index 7c344f7d7..37844f59c 100644 
--- a/api-docs/influxdb3/core/v3/ref.yml +++ b/api-docs/influxdb3/core/v3/ref.yml @@ -39,12 +39,56 @@ servers: default: localhost:8181 description: InfluxDB 3 Core URL security: - - BearerAuth: [] + - BearerAuthentication: [] + - TokenAuthentication: [] + - BasicAuthentication: [] + - QuerystringAuthentication: [] tags: - name: Authentication description: | - During the initial Alpha phase, InfluxDB 3 Core does not require authentication. + Depending on your workflow, use one of the following schemes to authenticate to the InfluxDB 3 API: + + | Authentication scheme | Works with | + |:-------------------|:-----------| + | [Bearer authentication](#section/Authentication/BearerAuthentication) | All endpoints | + | [Token authentication](#section/Authentication/TokenAuthentication) | v1, v2 endpoints | + | [Basic authentication](#section/Authentication/BasicAuthentication) | v1 endpoints | + | [Querystring authentication](#section/Authentication/QuerystringAuthentication) | v1 endpoints | + x-traitTag: true + - name: Cache data + description: | + Manage the in-memory cache. + + #### Distinct Value Cache + + The Distinct Value Cache (DVC) lets you cache distinct + values of one or more columns in a table, improving the performance of + queries that return distinct tag and field values. + + The DVC is an in-memory cache that stores distinct values for specific columns + in a table. When you create a DVC, you can specify what columns' distinct + values to cache, the maximum number of distinct value combinations to cache, and + the maximum age of cached values. A DVC is associated with a table, which can + have multiple DVCs. + + #### Last value cache + + The Last Value Cache (LVC) lets you cache the most recent + values for specific fields in a table, improving the performance of queries that + return the most recent value of a field for specific series or the last N values + of a field.
+ + The LVC is an in-memory cache that stores the last N number of values for + specific fields of series in a table. When you create an LVC, you can specify + what fields to cache, what tags to use to identify each series, and the + number of values to cache for each unique series. + An LVC is associated with a table, which can have multiple LVCs. + + #### Related guides + + - [Manage the Distinct Value Cache](/influxdb3/core/admin/distinct-value-cache/) + - [Manage the Last Value Cache](/influxdb3/core/admin/last-value-cache/) - name: Compatibility endpoints description: | InfluxDB 3 provides compatibility endpoints for InfluxDB 1.x and InfluxDB 2.x workloads and clients. @@ -72,19 +116,8 @@ tags: ### Server information Server information endpoints such as `/health` and `metrics` are compatible with InfluxDB 1.x and InfluxDB 2.x clients. - - name: Data I/O - description: | - Write and query data - - #### Data flow in InfluxDB 3 Core - - 1. **Incoming writes**: The system validates incoming data and stores it in the write buffer (in memory). If the `no_sync` write option is enabled (`no_sync=true`), the server sends a response to acknowledge the write. - 2. **WAL flush**: Every second (default), the system flushes the write buffer to the Write-Ahead Log (WAL) for persistence in the Object store. If `no_sync=false` (default), the server sends a response to acknowledge the write. - 3. **Query availability**: After WAL persistence completes, data moves to the queryable buffer where it becomes available for queries. By default, the server keeps up to 900 WAL files (15 minutes of data) buffered. - 4. **Long-term storage in Parquet**: Every ten minutes (default), the system persists the oldest data from the queryable buffer to the Object store in Parquet format. InfluxDB keeps the remaining data (the most recent 5 minutes) in memory. - 5. 
**In-memory cache**: InfluxDB puts Parquet files into an in-memory cache so that queries against the most recently persisted data don't have to go to object storage. - - name: Databases - description: Create, read, update, and delete database and cache resources + - name: Database + description: Manage databases - description: | Most InfluxDB API endpoints require parameters in the request--for example, specifying the database to use. @@ -115,32 +148,42 @@ tags: description: | Manage Processing engine triggers, test plugins, and send requests to trigger On Request plugins. - InfluxDB 3 Core provides the InfluxDB 3 Processing engine, an embedded Python VM that can dynamically load and trigger Python plugins in response to events in your database. + InfluxDB 3 Core provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and trigger Python plugins in response to events in your database. Use Processing engine plugins and triggers to run code and perform tasks for different database events. - To get started with the Processing engine, see the [Processing engine and Python plugins](/influxdb3/core/processing-engine/) guide. + To get started with the processing engine, see the [Processing engine and Python plugins](/influxdb3/core/processing-engine/) guide. + - name: Query data + description: Query data using SQL or InfluxQL - name: Quick start description: | - 1. [Check the status](#section/Server-information) of the InfluxDB server. + 1. [Create an admin token](#section/Authentication) to authorize API requests. ```bash - curl "http://localhost:8181/health" + curl -X POST "http://localhost:8181/api/v3/configure/token/admin" + ``` + 2. [Check the status](#section/Server-information) of the InfluxDB server. + + ```bash + curl "http://localhost:8181/health" \ + --header "Authorization: Bearer ADMIN_TOKEN" ``` - 2. [Write data](#section/Compatibility-endpoints/Write-data) to InfluxDB. + 3. [Write data](#operation/PostWriteLP) to InfluxDB. 
```bash - curl "http://localhost:8181/api/v3/write_lp?db=sensors&precision=auto" \ + curl "http://localhost:8181/api/v3/write_lp?db=sensors&precision=auto" + --header "Authorization: Bearer ADMIN_TOKEN" \ --data-raw "home,room=Kitchen temp=72.0 home,room=Living\ room temp=71.5" ``` If all data is written, the response is `204 No Content`. - 3. [Query data](#section/Compatibility-endpoints/Query-data) from InfluxDB. + 4. [Query data](#operation/GetExecuteQuerySQL) from InfluxDB. ```bash curl -G "http://localhost:8181/api/v3/query_sql" \ + --header "Authorization: Bearer ADMIN_TOKEN" \ --data-urlencode "db=sensors" \ --data-urlencode "q=SELECT * FROM home WHERE room='Living room'" \ --data-urlencode "format=jsonl" @@ -156,8 +199,12 @@ tags: x-traitTag: true - name: Server information description: Retrieve server metrics, status, and version information - - name: Tables + - name: Table description: Manage table schemas and data + - name: Token + description: Manage tokens for authentication and authorization + - name: Write data + description: Write data to InfluxDB 3 paths: /write: post: @@ -170,9 +217,40 @@ paths: Use this endpoint to send data in [line protocol](https://docs.influxdata.com/influxdb3/core/reference/syntax/line-protocol/) format to InfluxDB. Use query parameters to specify options for writing data. + + #### Related + + - [Use compatibility APIs to write data](/influxdb3/core/write-data/http-api/compatibility-apis/) parameters: - $ref: '#/components/parameters/dbWriteParam' - $ref: '#/components/parameters/compatibilityPrecisionParam' + - $ref: '#/components/parameters/v1UsernameParam' + - $ref: '#/components/parameters/v1PasswordParam' + - name: rp + in: query + required: false + schema: + type: string + description: | + Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. + - name: consistency + in: query + required: false + schema: + type: string + description: | + Write consistency level. 
Ignored by InfluxDB 3. Provided for compatibility with InfluxDB 1.x clients. + - name: Authorization + in: header + required: false + schema: + type: string + description: | + Authorization header for token-based authentication. + Supported schemes: + - `Bearer AUTH_TOKEN` - OAuth bearer token scheme + - `Token AUTH_TOKEN` - InfluxDB v2 token scheme + - `Basic ` - Basic authentication (username is ignored) - name: Content-Type in: header description: | @@ -244,7 +322,10 @@ paths: description: Request entity too large. tags: - Compatibility endpoints - - Data I/O + - Write data + x-influxdata-guides: + - title: "Use compatibility APIs to write data" + href: "/influxdb3/core/write-data/http-api/compatibility-apis/" /api/v2/write: post: operationId: PostV2Write @@ -256,6 +337,10 @@ paths: Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to InfluxDB. Use query parameters to specify options for writing data. + + #### Related + + - [Use compatibility APIs to write data](/influxdb3/core/write-data/http-api/compatibility-apis/) parameters: - name: Content-Type in: header @@ -327,25 +412,36 @@ paths: description: Request entity too large. tags: - Compatibility endpoints - - Data I/O + - Write data + x-influxdata-guides: + - title: "Use compatibility APIs to write data" + href: "/influxdb3/core/write-data/http-api/compatibility-apis/" /api/v3/write_lp: post: operationId: PostWriteLP summary: Write line protocol description: | Writes line protocol to the specified database. + + This is the native InfluxDB 3 Core write endpoint that provides enhanced control + over write behavior with advanced parameters for high-performance and fault-tolerant operations. Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to InfluxDB. Use query parameters to specify options for writing data. 
+ + #### Features + + - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail + - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response times but sacrificing durability guarantees + - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) + + #### Related + + - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/core/write-data/http-api/v3-write-lp/) parameters: - $ref: '#/components/parameters/dbWriteParam' - $ref: '#/components/parameters/accept_partial' - - name: precision - in: query - required: true - schema: - $ref: '#/components/schemas/PrecisionWrite' - description: Precision of timestamps. + - $ref: '#/components/parameters/precisionParam' - name: no_sync in: query schema: @@ -386,8 +482,40 @@ paths: description: Request entity too large. '422': description: Unprocessable entity. + x-codeSamples: + - label: cURL - Basic write + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000000000" + - label: cURL - Write with millisecond precision + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ms" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000" + - label: cURL - Asynchronous write with partial acceptance + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&accept_partial=true&no_sync=true&precision=auto" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 + memory,host=server01 used=4096" + - label: cURL - Multiple measurements with 
tags + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ns" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01,region=us-west usage=85.2,load=0.75 1638360000000000000 + memory,host=server01,region=us-west used=4096,free=12288 1638360000000000000 + disk,host=server01,region=us-west,device=/dev/sda1 used=50.5,free=49.5 1638360000000000000" tags: - - Data I/O + - Write data /api/v3/query_sql: get: operationId: GetExecuteQuerySQL @@ -395,16 +523,8 @@ paths: description: Executes an SQL query to retrieve data from the specified database. parameters: - $ref: '#/components/parameters/db' - - name: q - in: query - required: true - schema: - type: string - - name: format - in: query - required: false - schema: - type: string + - $ref: '#/components/parameters/querySqlParam' + - $ref: '#/components/parameters/format' - $ref: '#/components/parameters/AcceptQueryHeader' - $ref: '#/components/parameters/ContentType' responses: @@ -446,7 +566,7 @@ paths: '422': description: Unprocessable entity. tags: - - Data I/O + - Query data post: operationId: PostExecuteQuerySQL summary: Execute SQL query @@ -485,7 +605,7 @@ paths: '422': description: Unprocessable entity. tags: - - Data I/O + - Query data /api/v3/query_influxql: get: operationId: GetExecuteInfluxQLQuery @@ -533,7 +653,7 @@ paths: '422': description: Unprocessable entity. tags: - - Data I/O + - Query data post: operationId: PostExecuteQueryInfluxQL summary: Execute InfluxQL query @@ -572,7 +692,7 @@ paths: '422': description: Unprocessable entity. tags: - - Data I/O + - Query data /query: get: operationId: GetV1ExecuteQuery @@ -582,6 +702,10 @@ paths: This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. Use query parameters to specify the database and the InfluxQL query. 
+ + #### Related + + - [Use the InfluxDB v1 HTTP query API and InfluxQL to query data](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) parameters: - name: Accept in: header @@ -640,6 +764,26 @@ paths: in: query schema: $ref: '#/components/schemas/EpochCompatibility' + - $ref: '#/components/parameters/v1UsernameParam' + - $ref: '#/components/parameters/v1PasswordParam' + - name: rp + in: query + required: false + schema: + type: string + description: | + Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. + - name: Authorization + in: header + required: false + schema: + type: string + description: | + Authorization header for token-based authentication. + Supported schemes: + - `Bearer AUTH_TOKEN` - OAuth bearer token scheme + - `Token AUTH_TOKEN` - InfluxDB v2 token scheme + - `Basic ` - Basic authentication (username is ignored) responses: '200': description: | @@ -678,12 +822,20 @@ paths: '422': description: Unprocessable entity. tags: - - Data I/O + - Query data - Compatibility endpoints + x-influxdata-guides: + - title: "Use the InfluxDB v1 HTTP query API and InfluxQL to query data" + href: "/influxdb3/core/query-data/execute-queries/influxdb-v1-api/" post: operationId: PostExecuteV1Query summary: Execute InfluxQL query (v1-compatible) - description: Executes an InfluxQL query to retrieve data from the specified database. + description: | + Executes an InfluxQL query to retrieve data from the specified database. + + #### Related + + - [Use the InfluxDB v1 HTTP query API and InfluxQL to query data](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) requestBody: content: application/json: @@ -789,8 +941,11 @@ paths: '422': description: Unprocessable entity. 
tags: - - Data I/O + - Query data - Compatibility endpoints + x-influxdata-guides: + - title: "Use the InfluxDB v1 HTTP query API and InfluxQL to query data" + href: "/influxdb3/core/query-data/execute-queries/influxdb-v1-api/" /health: get: operationId: GetHealth @@ -863,7 +1018,7 @@ paths: '404': description: Database not found. tags: - - Databases + - Database post: operationId: PostConfigureDatabase summary: Create a database @@ -884,15 +1039,31 @@ paths: '409': description: Database already exists. tags: - - Databases + - Database delete: operationId: DeleteConfigureDatabase summary: Delete a database description: | Soft deletes a database. - The database is scheduled for deletion and unavailable for querying. + The database is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. parameters: - $ref: '#/components/parameters/db' + - name: hard_delete_at + in: query + required: false + schema: + type: string + format: date-time + description: | + Schedule the database for hard deletion at the specified time. + If not provided, the database will be soft deleted. + Use ISO 8601 date-time format (for example, "2025-12-31T23:59:59Z"). + + #### Deleting a database cannot be undone + + Deleting a database is a destructive action. + Once a database is deleted, data stored in that database cannot be recovered. responses: '200': description: Success. Database deleted. @@ -901,7 +1072,7 @@ paths: '404': description: Database not found. tags: - - Databases + - Database /api/v3/configure/table: post: operationId: PostConfigureTable @@ -923,13 +1094,19 @@ paths: '404': description: Database not found. tags: - - Tables + - Table delete: operationId: DeleteConfigureTable summary: Delete a table description: | Soft deletes a table. - The table is scheduled for deletion and unavailable for querying. + The table is scheduled for deletion and unavailable for querying. 
+ Use the `hard_delete_at` parameter to schedule a hard deletion. + + #### Deleting a table cannot be undone + + Deleting a table is a destructive action. + Once a table is deleted, data stored in that table cannot be recovered. parameters: - $ref: '#/components/parameters/db' - name: table @@ -937,6 +1114,16 @@ paths: required: true schema: type: string + - name: hard_delete_at + in: query + required: false + schema: + type: string + format: date-time + description: | + Schedule the table for hard deletion at the specified time. + If not provided, the table will be soft deleted. + Use ISO 8601 format (for example, "2025-12-31T23:59:59Z"). responses: '200': description: Success (no content). The table has been deleted. @@ -945,14 +1132,15 @@ paths: '404': description: Table not found. tags: - - Tables + - Table /api/v3/configure/distinct_cache: post: operationId: PostConfigureDistinctCache summary: Create distinct cache description: Creates a distinct cache for a table. tags: - - Tables + - Cache data + - Table requestBody: required: true content: @@ -992,7 +1180,8 @@ paths: '409': description: Cache already exists. tags: - - Tables + - Cache data + - Table delete: operationId: DeleteConfigureLastCache summary: Delete last cache @@ -1019,20 +1208,110 @@ paths: '404': description: Cache not found. tags: - - Tables + - Cache data + - Table /api/v3/configure/processing_engine_trigger: post: operationId: PostConfigureProcessingEngineTrigger summary: Create processing engine trigger - description: Creates a new processing engine trigger. + description: | + Creates a processing engine trigger with the specified plugin file and trigger specification. 
+ + ### Related guides + + - [Processing engine and Python plugins](/influxdb3/core/plugins/) requestBody: required: true content: application/json: schema: $ref: '#/components/schemas/ProcessingEngineTriggerRequest' + examples: + schedule_cron: + summary: Schedule trigger using cron + description: | + In `"cron:CRON_EXPRESSION"`, `CRON_EXPRESSION` uses extended 6-field cron format. + The cron expression `0 0 6 * * 1-5` means the trigger will run at 6:00 AM every weekday (Monday to Friday). + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_cron_trigger + trigger_specification: cron:0 0 6 * * 1-5 + schedule_every: + summary: Schedule trigger using interval + description: | + In `"every:DURATION"`, `DURATION` specifies the interval between trigger executions. + The duration `1h` means the trigger will run every hour. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_trigger + trigger_specification: every:1h + schedule_every_seconds: + summary: Schedule trigger using seconds interval + description: | + Example of scheduling a trigger to run every 30 seconds. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_30s_trigger + trigger_specification: every:30s + schedule_every_minutes: + summary: Schedule trigger using minutes interval + description: | + Example of scheduling a trigger to run every 5 minutes. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_5m_trigger + trigger_specification: every:5m + all_tables: + summary: All tables trigger example + description: | + Trigger that fires on write events to any table in the database. + value: + db: mydb + plugin_filename: all_tables.py + trigger_name: all_tables_trigger + trigger_specification: all_tables + table_specific: + summary: Table-specific trigger example + description: | + Trigger that fires on write events to a specific table. 
+ value: + db: mydb + plugin_filename: table.py + trigger_name: table_trigger + trigger_specification: table:sensors + api_request: + summary: On-demand request trigger example + description: | + Creates an HTTP endpoint `/api/v3/engine/hello-world` for manual invocation. + value: + db: mydb + plugin_filename: request.py + trigger_name: hello_world_trigger + trigger_specification: request:hello-world + cron_friday_afternoon: + summary: Cron trigger for Friday afternoons + description: | + Example of a cron trigger that runs every Friday at 2:30 PM. + value: + db: reports + plugin_filename: weekly_report.py + trigger_name: friday_report_trigger + trigger_specification: cron:0 30 14 * * 5 + cron_monthly: + summary: Cron trigger for monthly execution + description: | + Example of a cron trigger that runs on the first day of every month at midnight. + value: + db: monthly_data + plugin_filename: monthly_cleanup.py + trigger_name: monthly_cleanup_trigger + trigger_specification: cron:0 0 0 1 * * responses: - '201': + '200': description: Success. Processing engine trigger created. '400': description: Bad request. @@ -1109,7 +1388,7 @@ paths: $ref: '#/components/schemas/ProcessingEngineTriggerRequest' responses: '200': - description: Success. The processing engine trigger has been enabled. + description: Success. The processing engine trigger is enabled. '400': description: Bad request. '401': @@ -1122,7 +1401,14 @@ paths: post: operationId: PostInstallPluginPackages summary: Install plugin packages - description: Installs packages for the plugin environment. + description: | + Installs the specified Python packages into the processing engine plugin environment. + + This endpoint is synchronous and blocks until the packages are installed. 
+ + ### Related guides + + - [Processing engine and Python plugins](/influxdb3/core/plugins/) parameters: - $ref: '#/components/parameters/ContentType' requestBody: @@ -1131,10 +1417,30 @@ paths: application/json: schema: type: object - additionalProperties: true + properties: + packages: + type: array + items: + type: string + description: | + A list of Python package names to install. + Can include version specifiers (e.g., "scipy==1.9.0"). + example: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests + required: + - packages + example: + packages: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests responses: '200': - description: Success. The packages have been installed. + description: Success. The packages are installed. '400': description: Bad request. '401': @@ -1145,7 +1451,15 @@ paths: post: operationId: PostInstallPluginRequirements summary: Install plugin requirements - description: Installs requirements for the plugin environment. + description: | + Installs requirements from a requirements file (also known as a "pip requirements file") into the processing engine plugin environment. + + This endpoint is synchronous and blocks until the requirements are installed. + + ### Related + + - [Processing engine and Python plugins](/influxdb3/core/plugins/) + - [Python requirements file format](https://pip.pypa.io/en/stable/reference/requirements-file-format/) parameters: - $ref: '#/components/parameters/ContentType' requestBody: @@ -1154,7 +1468,17 @@ paths: application/json: schema: type: object - additionalProperties: true + properties: + requirements_location: + type: string + description: | + The path to the requirements file containing Python packages to install. + Can be a relative path (relative to the plugin directory) or an absolute path. + example: requirements.txt + required: + - requirements_location + example: + requirements_location: requirements.txt responses: '200': description: Success. 
The requirements have been installed. @@ -1196,22 +1520,22 @@ paths: description: Plugin not enabled. tags: - Processing engine - /api/v3/engine/{plugin_path}: + /api/v3/engine/{request_path}: parameters: - - name: plugin_path + - name: request_path description: | - The path configured in the `trigger-spec` for the plugin. + The path configured in the request trigger specification for the plugin. For example, if you define a trigger with the following: - ``` - trigger-spec: "request:hello-world" + ```json + trigger_specification: "request:hello-world" ``` then, the HTTP API exposes the following plugin endpoint: ``` - /api/v3/engine/hello-world + /api/v3/engine/hello-world ``` in: path required: true @@ -1221,7 +1545,7 @@ paths: operationId: GetProcessingEnginePluginRequest summary: On Request processing engine plugin request description: | - Sends a request to invoke an _On Request_ processing engine plugin. + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. The request can include request headers, query string parameters, and a request body, which InfluxDB passes to the plugin. An On Request plugin implements the following signature: @@ -1248,7 +1572,7 @@ paths: operationId: PostProcessingEnginePluginRequest summary: On Request processing engine plugin request description: | - Sends a request to invoke an _On Request_ processing engine plugin. + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. The request can include request headers, query string parameters, and a request body, which InfluxDB passes to the plugin. An On Request plugin implements the following signature: @@ -1280,6 +1604,46 @@ paths: description: Processing failure. tags: - Processing engine + /api/v3/configure/token/admin: + post: + operationId: PostCreateAdminToken + summary: Create admin token + description: | + Creates an admin token. 
+ An admin token is a special type of token that has full access to all resources in the system. + responses: + '201': + description: | + Success. The admin token has been created. + The response body contains the token string and metadata. + content: + application/json: + schema: + $ref: '#/components/schemas/AdminTokenObject' + '401': + $ref: '#/components/responses/Unauthorized' + tags: + - Authentication + - Token + /api/v3/configure/token/admin/regenerate: + post: + operationId: PostRegenerateAdminToken + summary: Regenerate admin token + description: | + Regenerates an admin token and revokes the previous token with the same name. + parameters: [] + responses: + '201': + description: Success. The admin token has been regenerated. + content: + application/json: + schema: + $ref: '#/components/schemas/AdminTokenObject' + '401': + $ref: '#/components/responses/Unauthorized' + tags: + - Authentication + - Token components: parameters: AcceptQueryHeader: @@ -1337,7 +1701,6 @@ components: schema: type: string description: | - The name of the database. The name of the database. InfluxDB creates the database if it doesn't already exist, and then writes all points in the batch to the database. @@ -1392,6 +1755,25 @@ components: required: true schema: $ref: '#/components/schemas/Format' + v1UsernameParam: + name: u + in: query + required: false + schema: + type: string + description: | + Username for v1 compatibility authentication. + When using Basic authentication or query string authentication, InfluxDB 3 ignores this parameter but allows any arbitrary string for compatibility with InfluxDB 1.x clients. + v1PasswordParam: + name: p + in: query + required: false + schema: + type: string + description: | + Password for v1 compatibility authentication. + For query string authentication, pass an admin token. + InfluxDB 3 checks that the `p` value is an authorized token. 
requestBodies: lineProtocolRequestBody: required: true @@ -1416,6 +1798,29 @@ components: schema: $ref: '#/components/schemas/QueryRequestObject' schemas: + AdminTokenObject: + type: object + properties: + id: + type: integer + name: + type: string + token: + type: string + hash: + type: string + created_at: + type: string + format: date-time + expiry: + format: date-time + example: + id: 0 + name: _admin + token: apiv3_00xx0Xx0xx00XX0x0 + hash: 00xx0Xx0xx00XX0x0 + created_at: '2025-04-18T14:02:45.331Z' + expiry: null ContentEncoding: type: string enum: @@ -1457,13 +1862,10 @@ components: description: | Acknowledges a successful write without waiting for WAL persistence. - #### Data flow in InfluxDB 3 Core + #### Related - 1. **Incoming writes**: The system validates incoming data and stores it in the write buffer (in memory). If the `no_sync` write option is enabled (`no_sync=true`), the server sends a response to acknowledge the write. - 2. **WAL flush**: Every second (default), the system flushes the write buffer to the Write-Ahead Log (WAL) for persistence in the Object store. If `no_sync=false` (default), the server sends a response to acknowledge the write. - 3. **Query availability**: After WAL persistence completes, data moves to the queryable buffer where it becomes available for queries. By default, the server keeps up to 900 WAL files (15 minutes of data) buffered. - 4. **Long-term storage in Parquet**: Every ten minutes (default), the system persists the oldest data from the queryable buffer to the Object store in Parquet format. InfluxDB keeps the remaining data (the most recent 5 minutes) in memory. - 5. **In-memory cache**: InfluxDB puts Parquet files into an in-memory cache so that queries against the most recently persisted data don't have to go to object storage. 
+ - [Use the HTTP API and client libraries to write data](/influxdb3/core/write-data/api-client-libraries/) + - [Data durability](/influxdb3/core/reference/internals/durability/) PrecisionWriteCompatibility: enum: - ms @@ -1487,12 +1889,12 @@ components: QueryRequestObject: type: object properties: - database: + db: description: | The name of the database to query. - Required if the query (`query_str`) doesn't specify the database. + Required if the query (`q`) doesn't specify the database. type: string - query_str: + q: description: The query to execute. type: string format: @@ -1511,11 +1913,11 @@ components: type: object additionalProperties: true required: - - database - - query_str + - db + - q example: - database: mydb - query_str: SELECT * FROM mytable + db: mydb + q: SELECT * FROM mytable format: json params: {} CreateDatabaseRequest: @@ -1635,15 +2037,74 @@ components: type: string plugin_filename: type: string + description: | + The path and filename of the plugin to execute--for example, + `schedule.py` or `endpoints/report.py`. + The path can be absolute or relative to the `--plugins-dir` directory configured when starting InfluxDB 3. + + The plugin file must implement the trigger interface associated with the trigger's specification. trigger_name: type: string trigger_specification: type: string + description: | + Specifies when and how the processing engine trigger should be invoked. 
+ + ## Supported trigger specifications: + + ### Cron-based scheduling + Format: `cron:CRON_EXPRESSION` + + Uses extended (6-field) cron format (second minute hour day_of_month month day_of_week): + ``` + ┌───────────── second (0-59) + │ ┌───────────── minute (0-59) + │ │ ┌───────────── hour (0-23) + │ │ │ ┌───────────── day of month (1-31) + │ │ │ │ ┌───────────── month (1-12) + │ │ │ │ │ ┌───────────── day of week (0-6, Sunday=0) + │ │ │ │ │ │ + * * * * * * + ``` + Examples: + - `cron:0 0 6 * * 1-5` - Every weekday at 6:00 AM + - `cron:0 30 14 * * 5` - Every Friday at 2:30 PM + - `cron:0 0 0 1 * *` - First day of every month at midnight + + ### Interval-based scheduling + Format: `every:DURATION` + + Supported durations: `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `M` (months), `y` (years): + - `every:30s` - Every 30 seconds + - `every:5m` - Every 5 minutes + - `every:1h` - Every hour + - `every:1d` - Every day + - `every:1w` - Every week + - `every:1M` - Every month + - `every:1y` - Every year + + **Maximum interval**: 1 year + + ### Table-based triggers + - `all_tables` - Triggers on write events to any table in the database + - `table:TABLE_NAME` - Triggers on write events to a specific table + + ### On-demand triggers + Format: `request:REQUEST_PATH` + + Creates an HTTP endpoint `/api/v3/engine/REQUEST_PATH` for manual invocation: + - `request:hello-world` - Creates endpoint `/api/v3/engine/hello-world` + - `request:data-export` - Creates endpoint `/api/v3/engine/data-export` + pattern: ^(cron:[0-9 *,/-]+|every:[0-9]+[smhdwMy]|all_tables|table:[a-zA-Z_][a-zA-Z0-9_]*|request:[a-zA-Z0-9_-]+)$ + example: cron:0 0 6 * * 1-5 + trigger_arguments: + type: object + additionalProperties: true + description: Optional arguments passed to the plugin. + disabled: + type: boolean + default: false + description: Whether the trigger is disabled.
required: - db - plugin_filename @@ -1731,6 +2192,65 @@ components: - m - h type: string + UpdateDatabaseRequest: + type: object + properties: + retention_period: + type: string + description: | + The retention period for the database. Specifies how long data should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: "7d" + description: Request schema for updating database configuration. + UpdateTableRequest: + type: object + properties: + db: + type: string + description: The name of the database containing the table. + table: + type: string + description: The name of the table to update. + retention_period: + type: string + description: | + The retention period for the table. Specifies how long data in this table should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: "30d" + required: + - db + - table + description: Request schema for updating table configuration. + LicenseResponse: + type: object + properties: + license_type: + type: string + description: The type of license (for example, "enterprise", "trial"). + example: "enterprise" + expires_at: + type: string + format: date-time + description: The expiration date of the license in ISO 8601 format. + example: "2025-12-31T23:59:59Z" + features: + type: array + items: + type: string + description: List of features enabled by the license. + example: + - "clustering" + - "processing_engine" + - "advanced_auth" + status: + type: string + enum: + - "active" + - "expired" + - "invalid" + description: The current status of the license. + example: "active" + description: Response schema for license information. responses: Unauthorized: description: Unauthorized access. @@ -1762,31 +2282,166 @@ components: schema: $ref: '#/components/schemas/ErrorMessage' securitySchemes: - BearerAuth: + BasicAuthentication: + type: http + scheme: basic + description: | + Use the `Authorization` header with the `Basic` scheme to authenticate v1 API requests. 
+ + Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints in InfluxDB 3. + + When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an authorized token + and ignores the `username` part of the decoded credential. + + ### Example + + ```bash + curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s" \ + --user "":"AUTH_TOKEN" \ + --header "Content-type: text/plain; charset=utf-8" \ + --data-binary 'home,room=kitchen temp=72 1641024000' + ``` + + Replace the following: + + - **`DATABASE_NAME`**: your InfluxDB 3 Core database + - **`AUTH_TOKEN`**: an admin token + + #### Related guides + + - [Authenticate v1 API requests](/influxdb3/core/guides/api-compatibility/v1/) + - [Manage tokens](/influxdb3/core/admin/tokens/) + QuerystringAuthentication: + type: apiKey + in: query + name: u=&p= + description: | + Use InfluxDB 1.x API parameters to provide credentials through the query string for v1 API requests. + + Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints. + + When authenticating requests, InfluxDB 3 checks that the `p` (_password_) query parameter is an authorized token + and ignores the `u` (_username_) query parameter. 
+ + ### Syntax + + ```http + http://localhost:8181/query/?[u=any]&p=DATABASE_TOKEN + http://localhost:8181/write/?[u=any]&p=DATABASE_TOKEN + ``` + + ### Examples + + ```bash + curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s&p=AUTH_TOKEN" \ + --header "Content-type: text/plain; charset=utf-8" \ + --data-binary 'home,room=kitchen temp=72 1641024000' + ``` + + Replace the following: + + - **`DATABASE_NAME`**: your InfluxDB 3 Core database + - **`AUTH_TOKEN`**: an admin token + + ```bash + ####################################### + # Use an InfluxDB 1.x compatible username and password + # to query the InfluxDB v1 HTTP API + ####################################### + # Use authentication query parameters: + # ?p=DATABASE_TOKEN + ####################################### + + curl --get "http://localhost:8181/query" \ + --data-urlencode "p=AUTH_TOKEN" \ + --data-urlencode "db=DATABASE_NAME" \ + --data-urlencode "q=SELECT * FROM MEASUREMENT" + ``` + + Replace the following: + + - **`DATABASE_NAME`**: the database to query + - **`AUTH_TOKEN`**: an [admin token](/influxdb3/core/admin/tokens/) + + #### Related guides + + - [Authenticate v1 API requests](/influxdb3/core/guides/api-compatibility/v1/) + - [Manage tokens](/influxdb3/core/admin/tokens/) + BearerAuthentication: type: http scheme: bearer bearerFormat: JWT description: | - _During Alpha release, an API token is not required._ - A Bearer token for authentication. + Use the OAuth Bearer authentication + scheme to provide an authorization token to InfluxDB 3. - Provide the scheme and the API token in the `Authorization` header--for example: + Bearer authentication works with all endpoints. + + In your API requests, send an `Authorization` header. + For the header value, provide the word `Bearer` followed by a space and an admin token. 
+ + + ### Syntax + + ```http + Authorization: Bearer AUTH_TOKEN + ``` + + ### Example ```bash curl http://localhost:8181/api/v3/query_influxql \ - --header "Authorization: Bearer API_TOKEN" + --header "Authorization: Bearer AUTH_TOKEN" ``` + TokenAuthentication: + description: | + Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB 3. + + The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB 3. + + In your API requests, send an `Authorization` header. + For the header value, provide the word `Token` followed by a space and a database token. + The word `Token` is case-sensitive. + + ### Syntax + + ```http + Authorization: Token AUTH_TOKEN + ``` + + ### Example + + ```sh + ######################################################## + # Use the Token authentication scheme with /api/v2/write + # to write data. + ######################################################## + + curl --request post "http://localhost:8181/api/v2/write?bucket=DATABASE_NAME&precision=s" \ + --header "Authorization: Token AUTH_TOKEN" \ + --data-binary 'home,room=kitchen temp=72 1463683075' + ``` + + ### Related guides + + - [Manage tokens](/influxdb3/core/admin/tokens/) + in: header + name: Authorization + type: apiKey x-tagGroups: - name: Using the InfluxDB HTTP API tags: - Quick start - Authentication + - Cache data - Common parameters - Response codes - Compatibility endpoints - - Data I/O - - Databases + - Database - Processing engine - Server information - - Tables + - Table + - Token + - Query data + - Write data diff --git a/api-docs/influxdb3/enterprise/.config.yml b/api-docs/influxdb3/enterprise/.config.yml index 01fc29e46..4b8210b97 100644 --- a/api-docs/influxdb3/enterprise/.config.yml +++ b/api-docs/influxdb3/enterprise/.config.yml @@ -1,5 +1,5 @@ plugins: - - '../../openapi/plugins/docs-plugin.js' + - '../../openapi/plugins/docs-plugin.cjs' extends: - recommended - docs/all @@ -10,3 +10,7 @@ apis: root: v3/ref.yml 
x-influxdata-docs-aliases: - /influxdb3/enterprise/api/ + - /influxdb3/enterprise/api/v1/ + - /influxdb3/enterprise/v2/ + - /influxdb3/enterprise/v1-compatibility + - /influxdb3/enterprise/v2-compatibility \ No newline at end of file diff --git a/api-docs/influxdb3/enterprise/v3/content/tag-groups.yml b/api-docs/influxdb3/enterprise/v3/content/tag-groups.yml index 5289ddb33..364d5e794 100644 --- a/api-docs/influxdb3/enterprise/v3/content/tag-groups.yml +++ b/api-docs/influxdb3/enterprise/v3/content/tag-groups.yml @@ -2,11 +2,14 @@ tags: - Quick start - Authentication + - Cache data - Common parameters - Response codes - Compatibility endpoints - - Data I/O - - Databases + - Database - Processing engine - Server information - - Tables + - Table + - Token + - Query data + - Write data diff --git a/api-docs/influxdb3/enterprise/v3/ref.yml b/api-docs/influxdb3/enterprise/v3/ref.yml index 5738d28f7..d8baaec21 100644 --- a/api-docs/influxdb3/enterprise/v3/ref.yml +++ b/api-docs/influxdb3/enterprise/v3/ref.yml @@ -39,12 +39,56 @@ servers: default: localhost:8181 description: InfluxDB 3 Enterprise URL security: - - BearerAuth: [] + - BearerAuthentication: [] + - TokenAuthentication: [] + - BasicAuthentication: [] + - QuerystringAuthentication: [] tags: - name: Authentication description: | - During the initial Alpha phase, InfluxDB 3 Enterprise does not require authentication. 
+ Depending on your workflow, use one of the following schemes to authenticate to the InfluxDB 3 API: + + | Authentication scheme | Works with | + |:-------------------|:-----------| + | [Bearer authentication](#section/Authentication/BearerAuthentication) | All endpoints | + | [Token authentication](#section/Authentication/TokenAuthentication) | v1, v2 endpoints | + | [Basic authentication](#section/Authentication/BasicAuthentication) | v1 endpoints | + | [Querystring authentication](#section/Authentication/QuerystringAuthentication) | v1 endpoints | + x-traitTag: true + - name: Cache data + description: | + Manage the in-memory cache. + + #### Distinct Value Cache + + The Distinct Value Cache (DVC) lets you cache distinct + values of one or more columns in a table, improving the performance of + queries that return distinct tag and field values. + + The DVC is an in-memory cache that stores distinct values for specific columns + in a table. When you create an DVC, you can specify what columns' distinct + values to cache, the maximum number of distinct value combinations to cache, and + the maximum age of cached values. A DVC is associated with a table, which can + have multiple DVCs. + + #### Last value cache + + The Last Value Cache (LVC) lets you cache the most recent + values for specific fields in a table, improving the performance of queries that + return the most recent value of a field for specific series or the last N values + of a field. + + The LVC is an in-memory cache that stores the last N number of values for + specific fields of series in a table. When you create an LVC, you can specify + what fields to cache, what tags to use to identify each series, and the + number of values to cache for each unique series. + An LVC is associated with a table, which can have multiple LVCs. 
+ + #### Related guides + + - [Manage the Distinct Value Cache](/influxdb3/enterprise/admin/distinct-value-cache/) + - [Manage the Last Value Cache](/influxdb3/enterprise/admin/last-value-cache/) - name: Compatibility endpoints description: | InfluxDB 3 provides compatibility endpoints for InfluxDB 1.x and InfluxDB 2.x workloads and clients. @@ -72,19 +116,8 @@ tags: ### Server information Server information endpoints such as `/health` and `metrics` are compatible with InfluxDB 1.x and InfluxDB 2.x clients. - - name: Data I/O - description: | - Write and query data - - #### Data flow in InfluxDB 3 Enterprise - - 1. **Incoming writes**: The system validates incoming data and stores it in the write buffer (in memory). If the `no_sync` write option is enabled (`no_sync=true`), the server sends a response to acknowledge the write. - 2. **WAL flush**: Every second (default), the system flushes the write buffer to the Write-Ahead Log (WAL) for persistence in the Object store. If `no_sync=false` (default), the server sends a response to acknowledge the write. - 3. **Query availability**: After WAL persistence completes, data moves to the queryable buffer where it becomes available for queries. By default, the server keeps up to 900 WAL files (15 minutes of data) buffered. - 4. **Long-term storage in Parquet**: Every ten minutes (default), the system persists the oldest data from the queryable buffer to the Object store in Parquet format. InfluxDB keeps the remaining data (the most recent 5 minutes) in memory. - 5. **In-memory cache**: InfluxDB puts Parquet files into an in-memory cache so that queries against the most recently persisted data don't have to go to object storage. - - name: Databases - description: Create, read, update, and delete database and cache resources + - name: Database + description: Manage databases - description: | Most InfluxDB API endpoints require parameters in the request--for example, specifying the database to use. 
@@ -115,32 +148,42 @@ tags: description: | Manage Processing engine triggers, test plugins, and send requests to trigger On Request plugins. - InfluxDB 3 Enterprise provides the InfluxDB 3 Processing engine, an embedded Python VM that can dynamically load and trigger Python plugins in response to events in your database. + InfluxDB 3 Enterprise provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and trigger Python plugins in response to events in your database. Use Processing engine plugins and triggers to run code and perform tasks for different database events. - To get started with the Processing engine, see the [Processing engine and Python plugins](/influxdb3/enterprise/processing-engine/) guide. + To get started with the processing engine, see the [Processing engine and Python plugins](/influxdb3/enterprise/processing-engine/) guide. + - name: Query data + description: Query data using SQL or InfluxQL - name: Quick start description: | - 1. [Check the status](#section/Server-information) of the InfluxDB server. + 1. [Create an admin token](#section/Authentication) to authorize API requests. ```bash - curl "http://localhost:8181/health" + curl -X POST "http://localhost:8181/api/v3/configure/token/admin" + ``` + 2. [Check the status](#section/Server-information) of the InfluxDB server. + + ```bash + curl "http://localhost:8181/health" \ + --header "Authorization: Bearer ADMIN_TOKEN" ``` - 2. [Write data](#section/Compatibility-endpoints/Write-data) to InfluxDB. + 3. [Write data](#operation/PostWriteLP) to InfluxDB. ```bash - curl "http://localhost:8181/api/v3/write_lp?db=sensors&precision=auto" \ + curl "http://localhost:8181/api/v3/write_lp?db=sensors&precision=auto" \ + --header "Authorization: Bearer ADMIN_TOKEN" \ --data-raw "home,room=Kitchen temp=72.0 home,room=Living\ room temp=71.5" If all data is written, the response is `204 No Content`. - 3. 
[Query data](#section/Compatibility-endpoints/Query-data) from InfluxDB. + 4. [Query data](#operation/GetExecuteQuerySQL) from InfluxDB. ```bash curl -G "http://localhost:8181/api/v3/query_sql" \ + --header "Authorization: Bearer ADMIN_TOKEN" \ --data-urlencode "db=sensors" \ --data-urlencode "q=SELECT * FROM home WHERE room='Living room'" \ --data-urlencode "format=jsonl" @@ -156,8 +199,12 @@ tags: x-traitTag: true - name: Server information description: Retrieve server metrics, status, and version information - - name: Tables + - name: Table description: Manage table schemas and data + - name: Token + description: Manage tokens for authentication and authorization + - name: Write data + description: Write data to InfluxDB 3 paths: /write: post: @@ -170,9 +217,40 @@ paths: Use this endpoint to send data in [line protocol](https://docs.influxdata.com/influxdb3/enterprise/reference/syntax/line-protocol/) format to InfluxDB. Use query parameters to specify options for writing data. + + #### Related + + - [Use compatibility APIs to write data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) parameters: - $ref: '#/components/parameters/dbWriteParam' - $ref: '#/components/parameters/compatibilityPrecisionParam' + - $ref: '#/components/parameters/v1UsernameParam' + - $ref: '#/components/parameters/v1PasswordParam' + - name: rp + in: query + required: false + schema: + type: string + description: | + Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. + - name: consistency + in: query + required: false + schema: + type: string + description: | + Write consistency level. Ignored by InfluxDB 3. Provided for compatibility with InfluxDB 1.x clients. + - name: Authorization + in: header + required: false + schema: + type: string + description: | + Authorization header for token-based authentication. 
+ Supported schemes: + - `Bearer AUTH_TOKEN` - OAuth bearer token scheme + - `Token AUTH_TOKEN` - InfluxDB v2 token scheme + - `Basic ` - Basic authentication (username is ignored) - name: Content-Type in: header description: | @@ -244,7 +322,10 @@ paths: description: Request entity too large. tags: - Compatibility endpoints - - Data I/O + - Write data + x-influxdata-guides: + - title: "Use compatibility APIs to write data" + href: "/influxdb3/enterprise/write-data/http-api/compatibility-apis/" /api/v2/write: post: operationId: PostV2Write @@ -256,6 +337,10 @@ paths: Use this endpoint to send data in [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) format to InfluxDB. Use query parameters to specify options for writing data. + + #### Related + + - [Use compatibility APIs to write data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) parameters: - name: Content-Type in: header @@ -327,25 +412,36 @@ paths: description: Request entity too large. tags: - Compatibility endpoints - - Data I/O + - Write data + x-influxdata-guides: + - title: "Use compatibility APIs to write data" + href: "/influxdb3/enterprise/write-data/http-api/compatibility-apis/" /api/v3/write_lp: post: operationId: PostWriteLP summary: Write line protocol description: | Writes line protocol to the specified database. + + This is the native InfluxDB 3 Enterprise write endpoint that provides enhanced control + over write behavior with advanced parameters for high-performance and fault-tolerant operations. Use this endpoint to send data in [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) format to InfluxDB. Use query parameters to specify options for writing data. 
+ + #### Features + + - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail + - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response times but sacrificing durability guarantees + - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) + + #### Related + + - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/enterprise/write-data/http-api/v3-write-lp/) parameters: - $ref: '#/components/parameters/dbWriteParam' - $ref: '#/components/parameters/accept_partial' - - name: precision - in: query - required: true - schema: - $ref: '#/components/schemas/PrecisionWrite' - description: Precision of timestamps. + - $ref: '#/components/parameters/precisionParam' - name: no_sync in: query schema: @@ -386,8 +482,40 @@ paths: description: Request entity too large. '422': description: Unprocessable entity. + x-codeSamples: + - label: cURL - Basic write + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000000000" + - label: cURL - Write with millisecond precision + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ms" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000" + - label: cURL - Asynchronous write with partial acceptance + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&accept_partial=true&no_sync=true&precision=auto" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 + memory,host=server01 used=4096" + - label: cURL - Multiple measurements 
with tags + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ns" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01,region=us-west usage=85.2,load=0.75 1638360000000000000 + memory,host=server01,region=us-west used=4096,free=12288 1638360000000000000 + disk,host=server01,region=us-west,device=/dev/sda1 used=50.5,free=49.5 1638360000000000000" tags: - - Data I/O + - Write data /api/v3/query_sql: get: operationId: GetExecuteQuerySQL @@ -395,16 +523,8 @@ paths: description: Executes an SQL query to retrieve data from the specified database. parameters: - $ref: '#/components/parameters/db' - - name: q - in: query - required: true - schema: - type: string - - name: format - in: query - required: false - schema: - type: string + - $ref: '#/components/parameters/querySqlParam' + - $ref: '#/components/parameters/format' - $ref: '#/components/parameters/AcceptQueryHeader' - $ref: '#/components/parameters/ContentType' responses: @@ -446,7 +566,7 @@ paths: '422': description: Unprocessable entity. tags: - - Data I/O + - Query data post: operationId: PostExecuteQuerySQL summary: Execute SQL query @@ -485,7 +605,7 @@ paths: '422': description: Unprocessable entity. tags: - - Data I/O + - Query data /api/v3/query_influxql: get: operationId: GetExecuteInfluxQLQuery @@ -533,7 +653,7 @@ paths: '422': description: Unprocessable entity. tags: - - Data I/O + - Query data post: operationId: PostExecuteQueryInfluxQL summary: Execute InfluxQL query @@ -572,7 +692,7 @@ paths: '422': description: Unprocessable entity. tags: - - Data I/O + - Query data /query: get: operationId: GetV1ExecuteQuery @@ -582,6 +702,10 @@ paths: This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. Use query parameters to specify the database and the InfluxQL query. 
+ + #### Related + + - [Use the InfluxDB v1 HTTP query API and InfluxQL to query data](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/) parameters: - name: Accept in: header @@ -640,6 +764,26 @@ paths: in: query schema: $ref: '#/components/schemas/EpochCompatibility' + - $ref: '#/components/parameters/v1UsernameParam' + - $ref: '#/components/parameters/v1PasswordParam' + - name: rp + in: query + required: false + schema: + type: string + description: | + Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. + - name: Authorization + in: header + required: false + schema: + type: string + description: | + Authorization header for token-based authentication. + Supported schemes: + - `Bearer AUTH_TOKEN` - OAuth bearer token scheme + - `Token AUTH_TOKEN` - InfluxDB v2 token scheme + - `Basic ` - Basic authentication (username is ignored) responses: '200': description: | @@ -678,12 +822,20 @@ paths: '422': description: Unprocessable entity. tags: - - Data I/O + - Query data - Compatibility endpoints + x-influxdata-guides: + - title: "Use the InfluxDB v1 HTTP query API and InfluxQL to query data" + href: "/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/" post: operationId: PostExecuteV1Query summary: Execute InfluxQL query (v1-compatible) - description: Executes an InfluxQL query to retrieve data from the specified database. + description: | + Executes an InfluxQL query to retrieve data from the specified database. + + #### Related + + - [Use the InfluxDB v1 HTTP query API and InfluxQL to query data](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/) requestBody: content: application/json: @@ -789,8 +941,11 @@ paths: '422': description: Unprocessable entity. 
tags: - - Data I/O + - Query data - Compatibility endpoints + x-influxdata-guides: + - title: "Use the InfluxDB v1 HTTP query API and InfluxQL to query data" + href: "/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/" /health: get: operationId: GetHealth @@ -863,7 +1018,7 @@ paths: '404': description: Database not found. tags: - - Databases + - Database post: operationId: PostConfigureDatabase summary: Create a database @@ -884,15 +1039,31 @@ paths: '409': description: Database already exists. tags: - - Databases + - Database delete: operationId: DeleteConfigureDatabase summary: Delete a database description: | Soft deletes a database. - The database is scheduled for deletion and unavailable for querying. + The database is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. parameters: - $ref: '#/components/parameters/db' + - name: hard_delete_at + in: query + required: false + schema: + type: string + format: date-time + description: | + Schedule the database for hard deletion at the specified time. + If not provided, the database will be soft deleted. + Use ISO 8601 date-time format (for example, "2025-12-31T23:59:59Z"). + + #### Deleting a database cannot be undone + + Deleting a database is a destructive action. + Once a database is deleted, data stored in that database cannot be recovered. responses: '200': description: Success. Database deleted. @@ -901,7 +1072,7 @@ paths: '404': description: Database not found. tags: - - Databases + - Database /api/v3/configure/table: post: operationId: PostConfigureTable @@ -923,13 +1094,19 @@ paths: '404': description: Database not found. tags: - - Tables + - Table delete: operationId: DeleteConfigureTable summary: Delete a table description: | Soft deletes a table. - The table is scheduled for deletion and unavailable for querying. + The table is scheduled for deletion and unavailable for querying. 
+ Use the `hard_delete_at` parameter to schedule a hard deletion. + + #### Deleting a table cannot be undone + + Deleting a table is a destructive action. + Once a table is deleted, data stored in that table cannot be recovered. parameters: - $ref: '#/components/parameters/db' - name: table @@ -937,6 +1114,16 @@ paths: required: true schema: type: string + - name: hard_delete_at + in: query + required: false + schema: + type: string + format: date-time + description: | + Schedule the table for hard deletion at the specified time. + If not provided, the table will be soft deleted. + Use ISO 8601 format (for example, "2025-12-31T23:59:59Z"). responses: '200': description: Success (no content). The table has been deleted. @@ -945,14 +1132,86 @@ paths: '404': description: Table not found. tags: - - Tables + - Table + patch: + operationId: PatchConfigureTable + summary: Update a table + description: | + Updates table configuration, such as retention period. + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateTableRequest' + responses: + '200': + description: Success. The table has been updated. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Table not found. + tags: + - Table + /api/v3/configure/database/{db}: + patch: + operationId: PatchConfigureDatabase + summary: Update a database + description: | + Updates database configuration, such as retention period. + parameters: + - name: db + in: path + required: true + schema: + type: string + description: The name of the database to update. + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateDatabaseRequest' + responses: + '200': + description: Success. The database has been updated. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Database not found. 
+ tags: + - Database + /api/v3/show/license: + get: + operationId: GetShowLicense + summary: Show license information + description: | + Retrieves information about the current InfluxDB 3 Enterprise license. + responses: + '200': + description: Success. The response body contains license information. + content: + application/json: + schema: + $ref: '#/components/schemas/LicenseResponse' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + tags: + - Server information /api/v3/configure/distinct_cache: post: operationId: PostConfigureDistinctCache summary: Create distinct cache description: Creates a distinct cache for a table. tags: - - Tables + - Cache data + - Table requestBody: required: true content: @@ -992,7 +1251,8 @@ paths: '409': description: Cache already exists. tags: - - Tables + - Cache data + - Table delete: operationId: DeleteConfigureLastCache summary: Delete last cache @@ -1019,20 +1279,110 @@ paths: '404': description: Cache not found. tags: - - Tables + - Cache data + - Table /api/v3/configure/processing_engine_trigger: post: operationId: PostConfigureProcessingEngineTrigger summary: Create processing engine trigger - description: Creates a new processing engine trigger. + description: | + Creates a processing engine trigger with the specified plugin file and trigger specification. + + ### Related guides + + - [Processing engine and Python plugins](/influxdb3/enterprise/plugins/) requestBody: required: true content: application/json: schema: $ref: '#/components/schemas/ProcessingEngineTriggerRequest' + examples: + schedule_cron: + summary: Schedule trigger using cron + description: | + In `"cron:CRON_EXPRESSION"`, `CRON_EXPRESSION` uses extended 6-field cron format. + The cron expression `0 0 6 * * 1-5` means the trigger will run at 6:00 AM every weekday (Monday to Friday). 
+ value: + db: DATABASE_NAME + plugin_filename: schedule.py + trigger_name: schedule_cron_trigger + trigger_specification: cron:0 0 6 * * 1-5 + schedule_every: + summary: Schedule trigger using interval + description: | + In `"every:DURATION"`, `DURATION` specifies the interval between trigger executions. + The duration `1h` means the trigger will run every hour. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_trigger + trigger_specification: every:1h + schedule_every_seconds: + summary: Schedule trigger using seconds interval + description: | + Example of scheduling a trigger to run every 30 seconds. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_30s_trigger + trigger_specification: every:30s + schedule_every_minutes: + summary: Schedule trigger using minutes interval + description: | + Example of scheduling a trigger to run every 5 minutes. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_5m_trigger + trigger_specification: every:5m + all_tables: + summary: All tables trigger example + description: | + Trigger that fires on write events to any table in the database. + value: + db: mydb + plugin_filename: all_tables.py + trigger_name: all_tables_trigger + trigger_specification: all_tables + table_specific: + summary: Table-specific trigger example + description: | + Trigger that fires on write events to a specific table. + value: + db: mydb + plugin_filename: table.py + trigger_name: table_trigger + trigger_specification: table:sensors + api_request: + summary: On-demand request trigger example + description: | + Creates an HTTP endpoint `/api/v3/engine/hello-world` for manual invocation. 
+ value: + db: mydb + plugin_filename: request.py + trigger_name: hello_world_trigger + trigger_specification: request:hello-world + cron_friday_afternoon: + summary: Cron trigger for Friday afternoons + description: | + Example of a cron trigger that runs every Friday at 2:30 PM. + value: + db: reports + plugin_filename: weekly_report.py + trigger_name: friday_report_trigger + trigger_specification: cron:0 30 14 * * 5 + cron_monthly: + summary: Cron trigger for monthly execution + description: | + Example of a cron trigger that runs on the first day of every month at midnight. + value: + db: monthly_data + plugin_filename: monthly_cleanup.py + trigger_name: monthly_cleanup_trigger + trigger_specification: cron:0 0 0 1 * * responses: - '201': + '200': description: Success. Processing engine trigger created. '400': description: Bad request. @@ -1109,7 +1459,7 @@ paths: $ref: '#/components/schemas/ProcessingEngineTriggerRequest' responses: '200': - description: Success. The processing engine trigger has been enabled. + description: Success. The processing engine trigger is enabled. '400': description: Bad request. '401': @@ -1122,7 +1472,14 @@ paths: post: operationId: PostInstallPluginPackages summary: Install plugin packages - description: Installs packages for the plugin environment. + description: | + Installs the specified Python packages into the processing engine plugin environment. + + This endpoint is synchronous and blocks until the packages are installed. + + ### Related guides + + - [Processing engine and Python plugins](/influxdb3/enterprise/plugins/) parameters: - $ref: '#/components/parameters/ContentType' requestBody: @@ -1131,10 +1488,30 @@ paths: application/json: schema: type: object - additionalProperties: true + properties: + packages: + type: array + items: + type: string + description: | + A list of Python package names to install. + Can include version specifiers (e.g., "scipy==1.9.0"). 
+ example: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests + required: + - packages + example: + packages: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests responses: '200': - description: Success. The packages have been installed. + description: Success. The packages are installed. '400': description: Bad request. '401': @@ -1145,7 +1522,15 @@ paths: post: operationId: PostInstallPluginRequirements summary: Install plugin requirements - description: Installs requirements for the plugin environment. + description: | + Installs requirements from a requirements file (also known as a "pip requirements file") into the processing engine plugin environment. + + This endpoint is synchronous and blocks until the requirements are installed. + + ### Related + + - [Processing engine and Python plugins](/influxdb3/enterprise/plugins/) + - [Python requirements file format](https://pip.pypa.io/en/stable/reference/requirements-file-format/) parameters: - $ref: '#/components/parameters/ContentType' requestBody: @@ -1154,7 +1539,17 @@ paths: application/json: schema: type: object - additionalProperties: true + properties: + requirements_location: + type: string + description: | + The path to the requirements file containing Python packages to install. + Can be a relative path (relative to the plugin directory) or an absolute path. + example: requirements.txt + required: + - requirements_location + example: + requirements_location: requirements.txt responses: '200': description: Success. The requirements have been installed. @@ -1196,22 +1591,22 @@ paths: description: Plugin not enabled. tags: - Processing engine - /api/v3/engine/{plugin_path}: + /api/v3/engine/{request_path}: parameters: - - name: plugin_path + - name: request_path description: | - The path configured in the `trigger-spec` for the plugin. + The path configured in the request trigger specification for the plugin. 
For example, if you define a trigger with the following: - ``` - trigger-spec: "request:hello-world" + ```json + trigger_specification: "request:hello-world" ``` then, the HTTP API exposes the following plugin endpoint: ``` - /api/v3/engine/hello-world + /api/v3/engine/hello-world ``` in: path required: true @@ -1221,7 +1616,7 @@ paths: operationId: GetProcessingEnginePluginRequest summary: On Request processing engine plugin request description: | - Sends a request to invoke an _On Request_ processing engine plugin. + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. The request can include request headers, query string parameters, and a request body, which InfluxDB passes to the plugin. An On Request plugin implements the following signature: @@ -1248,7 +1643,7 @@ paths: operationId: PostProcessingEnginePluginRequest summary: On Request processing engine plugin request description: | - Sends a request to invoke an _On Request_ processing engine plugin. + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. The request can include request headers, query string parameters, and a request body, which InfluxDB passes to the plugin. An On Request plugin implements the following signature: @@ -1280,6 +1675,69 @@ paths: description: Processing failure. tags: - Processing engine + /api/v3/configure/enterprise/token: + post: + operationId: PostCreateResourceToken + summary: Create a resource token + description: | + Creates a resource (fine-grained permissions) token. + A resource token is a token that has access to specific resources in the system. + + This endpoint is only available in InfluxDB 3 Enterprise. + responses: + '201': + description: | + Success. The resource token has been created. + The response body contains the token string and metadata. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ResourceTokenObject' + '401': + $ref: '#/components/responses/Unauthorized' + tags: + - Authentication + - Token + /api/v3/configure/token/admin: + post: + operationId: PostCreateAdminToken + summary: Create admin token + description: | + Creates an admin token. + An admin token is a special type of token that has full access to all resources in the system. + responses: + '201': + description: | + Success. The admin token has been created. + The response body contains the token string and metadata. + content: + application/json: + schema: + $ref: '#/components/schemas/AdminTokenObject' + '401': + $ref: '#/components/responses/Unauthorized' + tags: + - Authentication + - Token + /api/v3/configure/token/admin/regenerate: + post: + operationId: PostRegenerateAdminToken + summary: Regenerate admin token + description: | + Regenerates an admin token and revokes the previous token with the same name. + parameters: [] + responses: + '201': + description: Success. The admin token has been regenerated. + content: + application/json: + schema: + $ref: '#/components/schemas/AdminTokenObject' + '401': + $ref: '#/components/responses/Unauthorized' + tags: + - Authentication + - Token components: parameters: AcceptQueryHeader: @@ -1337,7 +1795,6 @@ components: schema: type: string description: | - The name of the database. The name of the database. InfluxDB creates the database if it doesn't already exist, and then writes all points in the batch to the database. @@ -1392,6 +1849,25 @@ components: required: true schema: $ref: '#/components/schemas/Format' + v1UsernameParam: + name: u + in: query + required: false + schema: + type: string + description: | + Username for v1 compatibility authentication. + When using Basic authentication or query string authentication, InfluxDB 3 ignores this parameter but allows any arbitrary string for compatibility with InfluxDB 1.x clients. 
+ v1PasswordParam: + name: p + in: query + required: false + schema: + type: string + description: | + Password for v1 compatibility authentication. + For query string authentication, pass a database token with write permissions as this parameter. + InfluxDB 3 checks that the `p` value is an authorized token. requestBodies: lineProtocolRequestBody: required: true @@ -1416,6 +1892,67 @@ components: schema: $ref: '#/components/schemas/QueryRequestObject' schemas: + AdminTokenObject: + type: object + properties: + id: + type: integer + name: + type: string + token: + type: string + hash: + type: string + created_at: + type: string + format: date-time + expiry: + format: date-time + example: + id: 0 + name: _admin + token: apiv3_00xx0Xx0xx00XX0x0 + hash: 00xx0Xx0xx00XX0x0 + created_at: '2025-04-18T14:02:45.331Z' + expiry: null + ResourceTokenObject: + type: object + properties: + token_name: + type: string + permissions: + type: array + items: + type: object + properties: + resource_type: + type: string + enum: + - system + - db + resource_identifier: + type: array + items: + type: string + actions: + type: array + items: + type: string + enum: + - read + - write + expiry_secs: + type: integer + description: The expiration time in seconds. + example: + token_name: All system information + permissions: + - resource_type: system + resource_identifier: + - '*' + actions: + - read + expiry_secs: 300000 ContentEncoding: type: string enum: @@ -1457,13 +1994,10 @@ components: description: | Acknowledges a successful write without waiting for WAL persistence. - #### Data flow in InfluxDB 3 Enterprise + #### Related - 1. **Incoming writes**: The system validates incoming data and stores it in the write buffer (in memory). If the `no_sync` write option is enabled (`no_sync=true`), the server sends a response to acknowledge the write. - 2. **WAL flush**: Every second (default), the system flushes the write buffer to the Write-Ahead Log (WAL) for persistence in the Object store. 
If `no_sync=false` (default), the server sends a response to acknowledge the write. - 3. **Query availability**: After WAL persistence completes, data moves to the queryable buffer where it becomes available for queries. By default, the server keeps up to 900 WAL files (15 minutes of data) buffered. - 4. **Long-term storage in Parquet**: Every ten minutes (default), the system persists the oldest data from the queryable buffer to the Object store in Parquet format. InfluxDB keeps the remaining data (the most recent 5 minutes) in memory. - 5. **In-memory cache**: InfluxDB puts Parquet files into an in-memory cache so that queries against the most recently persisted data don't have to go to object storage. + - [Use the HTTP API and client libraries to write data](/influxdb3/enterprise/write-data/api-client-libraries/) + - [Data durability](/influxdb3/enterprise/reference/internals/durability/) PrecisionWriteCompatibility: enum: - ms @@ -1487,12 +2021,12 @@ components: QueryRequestObject: type: object properties: - database: + db: description: | The name of the database to query. - Required if the query (`query_str`) doesn't specify the database. + Required if the query (`q`) doesn't specify the database. type: string - query_str: + q: description: The query to execute. type: string format: @@ -1511,11 +2045,11 @@ components: type: object additionalProperties: true required: - - database - - query_str + - db + - q example: - database: mydb - query_str: SELECT * FROM mytable + db: mydb + q: SELECT * FROM mytable format: json params: {} CreateDatabaseRequest: @@ -1523,6 +2057,16 @@ components: properties: db: type: string + pattern: '^[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]$|^[a-zA-Z0-9]$' + description: |- + The database name. Database names cannot contain underscores (_). + Names must start and end with alphanumeric characters and can contain hyphens (-) in the middle. + retention_period: + type: string + description: |- + The retention period for the database. 
Specifies how long data should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: "7d" required: - db CreateTableRequest: @@ -1554,6 +2098,12 @@ components: required: - name - type + retention_period: + type: string + description: |- + The retention period for the table. Specifies how long data in this table should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: "30d" required: - db - table @@ -1635,15 +2185,73 @@ components: type: string plugin_filename: type: string + description: | + The path and filename of the plugin to execute--for example, + `schedule.py` or `endpoints/report.py`. + The path can be absolute or relative to the `--plugins-dir` directory configured when starting InfluxDB 3. + + The plugin file must implement the trigger interface associated with the trigger's specification. trigger_name: type: string trigger_specification: - type: string + description: | + Specifies when and how the processing engine trigger should be invoked. 
+ + ## Supported trigger specifications: + + ### Cron-based scheduling + Format: `cron:CRON_EXPRESSION` + + Uses extended (6-field) cron format (second minute hour day_of_month month day_of_week): + ``` + ┌───────────── second (0-59) + │ ┌───────────── minute (0-59) + │ │ ┌───────────── hour (0-23) + │ │ │ ┌───────────── day of month (1-31) + │ │ │ │ ┌───────────── month (1-12) + │ │ │ │ │ ┌───────────── day of week (0-6, Sunday=0) + │ │ │ │ │ │ + * * * * * * + ``` + Examples: + - `cron:0 0 6 * * 1-5` - Every weekday at 6:00 AM + - `cron:0 30 14 * * 5` - Every Friday at 2:30 PM + - `cron:0 0 0 1 * *` - First day of every month at midnight + + ### Interval-based scheduling + Format: `every:DURATION` + + Supported durations: `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `M` (months), `y` (years): + - `every:30s` - Every 30 seconds + - `every:5m` - Every 5 minutes + - `every:1h` - Every hour + - `every:1d` - Every day + - `every:1w` - Every week + - `every:1M` - Every month + - `every:1y` - Every year + + **Maximum interval**: 1 year + + ### Table-based triggers + - `all_tables` - Triggers on write events to any table in the database + - `table:TABLE_NAME` - Triggers on write events to a specific table + + ### On-demand triggers + Format: `request:REQUEST_PATH` + + Creates an HTTP endpoint `/api/v3/engine/REQUEST_PATH` for manual invocation: + - `request:hello-world` - Creates endpoint `/api/v3/engine/hello-world` + - `request:data-export` - Creates endpoint `/api/v3/engine/data-export` + pattern: ^(cron:[0-9 *,/-]+|every:[0-9]+[smhd]|all_tables|table:[a-zA-Z_][a-zA-Z0-9_]*|request:[a-zA-Z0-9_-]+)$ + example: cron:0 0 6 * * 1-5 trigger_arguments: type: object additionalProperties: true + description: Optional arguments passed to the plugin. disabled: type: boolean + default: false + description: Whether the trigger is disabled. 
required: - db - plugin_filename @@ -1731,6 +2339,65 @@ components: - m - h type: string + UpdateDatabaseRequest: + type: object + properties: + retention_period: + type: string + description: | + The retention period for the database. Specifies how long data should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: "7d" + description: Request schema for updating database configuration. + UpdateTableRequest: + type: object + properties: + db: + type: string + description: The name of the database containing the table. + table: + type: string + description: The name of the table to update. + retention_period: + type: string + description: | + The retention period for the table. Specifies how long data in this table should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: "30d" + required: + - db + - table + description: Request schema for updating table configuration. + LicenseResponse: + type: object + properties: + license_type: + type: string + description: The type of license (for example, "enterprise", "trial"). + example: "enterprise" + expires_at: + type: string + format: date-time + description: The expiration date of the license in ISO 8601 format. + example: "2025-12-31T23:59:59Z" + features: + type: array + items: + type: string + description: List of features enabled by the license. + example: + - "clustering" + - "processing_engine" + - "advanced_auth" + status: + type: string + enum: + - "active" + - "expired" + - "invalid" + description: The current status of the license. + example: "active" + description: Response schema for license information. responses: Unauthorized: description: Unauthorized access. @@ -1762,31 +2429,171 @@ components: schema: $ref: '#/components/schemas/ErrorMessage' securitySchemes: - BearerAuth: + BasicAuthentication: + type: http + scheme: basic + description: | + Use the `Authorization` header with the `Basic` scheme to authenticate v1 API requests. 
+ + Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints in InfluxDB 3. + + When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an authorized token + and ignores the `username` part of the decoded credential. + + ### Syntax + + ```http + Authorization: Basic + ``` + + ### Example + + ```bash + curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s" \ + --user "":"AUTH_TOKEN" \ + --header "Content-type: text/plain; charset=utf-8" \ + --data-binary 'home,room=kitchen temp=72 1641024000' + ``` + + Replace the following: + + - **`DATABASE_NAME`**: your InfluxDB 3 Enterprise database + - **`AUTH_TOKEN`**: an admin token or database token authorized for the database + + #### Related guides + + - [Authenticate v1 API requests](/influxdb3/enterprise/guides/api-compatibility/v1/) + - [Manage tokens](/influxdb3/enterprise/admin/tokens/) + QuerystringAuthentication: + type: apiKey + in: query + name: u=&p= + description: | + Use InfluxDB 1.x API parameters to provide credentials through the query string for v1 API requests. + + Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints. + + When authenticating requests, InfluxDB 3 checks that the `p` (_password_) query parameter is an authorized token + and ignores the `u` (_username_) query parameter. 
+ + ### Syntax + + ```http + https://localhost:8181/query/?[u=any]&p=AUTH_TOKEN + https://localhost:8181/write/?[u=any]&p=AUTH_TOKEN + ``` + + ### Examples + + ```bash + curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s&p=AUTH_TOKEN" \ + --header "Content-type: text/plain; charset=utf-8" \ + --data-binary 'home,room=kitchen temp=72 1641024000' + ``` + + Replace the following: + + - **`DATABASE_NAME`**: your InfluxDB 3 Enterprise database + - **`AUTH_TOKEN`**: an admin token or database token authorized for the database + + ```bash + ####################################### + # Use an InfluxDB 1.x compatible username and password + # to query the InfluxDB v1 HTTP API + ####################################### + # Use authentication query parameters: + # ?p=AUTH_TOKEN + ####################################### + + curl --get "http://localhost:8181/query" \ + --data-urlencode "p=AUTH_TOKEN" \ + --data-urlencode "db=DATABASE_NAME" \ + --data-urlencode "q=SELECT * FROM MEASUREMENT" + ``` + + Replace the following: + + - **`DATABASE_NAME`**: the database to query + - **`AUTH_TOKEN`**: a database token with sufficient permissions to the database + + #### Related guides + + - [Authenticate v1 API requests](/influxdb3/enterprise/guides/api-compatibility/v1/) + - [Manage tokens](/influxdb3/enterprise/admin/tokens/) + BearerAuthentication: type: http scheme: bearer bearerFormat: JWT description: | - _During Alpha release, an API token is not required._ - A Bearer token for authentication. + Use the OAuth Bearer authentication + scheme to provide an authorization token to InfluxDB 3. - Provide the scheme and the API token in the `Authorization` header--for example: + Bearer authentication works with all endpoints. + + In your API requests, send an `Authorization` header. + For the header value, provide the word `Bearer` followed by a space and a database token. 
+ + ### Syntax + + ```http + Authorization: Bearer AUTH_TOKEN + ``` + + ### Example ```bash curl http://localhost:8181/api/v3/query_influxql \ - --header "Authorization: Bearer API_TOKEN" + --header "Authorization: Bearer AUTH_TOKEN" ``` + TokenAuthentication: + description: | + Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB 3. + + The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB 3. + + In your API requests, send an `Authorization` header. + For the header value, provide the word `Token` followed by a space and a database token. + The word `Token` is case-sensitive. + + ### Syntax + + ```http + Authorization: Token AUTH_TOKEN + ``` + + ### Example + + ```sh + ######################################################## + # Use the Token authentication scheme with /api/v2/write + # to write data. + ######################################################## + + curl --request post "http://localhost:8181/api/v2/write?bucket=DATABASE_NAME&precision=s" \ + --header "Authorization: Token AUTH_TOKEN" \ + --data-binary 'home,room=kitchen temp=72 1463683075' + ``` + + ### Related guides + + - [Manage tokens](/influxdb3/enterprise/admin/tokens/) + in: header + name: Authorization + type: apiKey x-tagGroups: - name: Using the InfluxDB HTTP API tags: - Quick start - Authentication + - Cache data - Common parameters - Response codes - Compatibility endpoints - - Data I/O - - Databases + - Database - Processing engine - Server information - - Tables + - Table + - Token + - Query data + - Write data diff --git a/api-docs/openapi/plugins/decorators/paths/remove-private-paths.js b/api-docs/openapi/plugins/decorators/paths/remove-private-paths.cjs similarity index 100% rename from api-docs/openapi/plugins/decorators/paths/remove-private-paths.js rename to api-docs/openapi/plugins/decorators/paths/remove-private-paths.cjs diff --git a/api-docs/openapi/plugins/decorators/paths/strip-trailing-slash.js 
b/api-docs/openapi/plugins/decorators/paths/strip-trailing-slash.cjs similarity index 100% rename from api-docs/openapi/plugins/decorators/paths/strip-trailing-slash.js rename to api-docs/openapi/plugins/decorators/paths/strip-trailing-slash.cjs diff --git a/api-docs/openapi/plugins/decorators/paths/strip-version-prefix.js b/api-docs/openapi/plugins/decorators/paths/strip-version-prefix.cjs similarity index 100% rename from api-docs/openapi/plugins/decorators/paths/strip-version-prefix.js rename to api-docs/openapi/plugins/decorators/paths/strip-version-prefix.cjs diff --git a/api-docs/openapi/plugins/decorators/replace-shortcodes.js b/api-docs/openapi/plugins/decorators/replace-shortcodes.cjs similarity index 100% rename from api-docs/openapi/plugins/decorators/replace-shortcodes.js rename to api-docs/openapi/plugins/decorators/replace-shortcodes.cjs diff --git a/api-docs/openapi/plugins/decorators/replace-urls.js b/api-docs/openapi/plugins/decorators/replace-urls.cjs similarity index 100% rename from api-docs/openapi/plugins/decorators/replace-urls.js rename to api-docs/openapi/plugins/decorators/replace-urls.cjs diff --git a/api-docs/openapi/plugins/decorators/servers/delete-servers.js b/api-docs/openapi/plugins/decorators/servers/delete-servers.cjs similarity index 100% rename from api-docs/openapi/plugins/decorators/servers/delete-servers.js rename to api-docs/openapi/plugins/decorators/servers/delete-servers.cjs diff --git a/api-docs/openapi/plugins/decorators/servers/set-servers.js b/api-docs/openapi/plugins/decorators/servers/set-servers.cjs similarity index 100% rename from api-docs/openapi/plugins/decorators/servers/set-servers.js rename to api-docs/openapi/plugins/decorators/servers/set-servers.cjs diff --git a/api-docs/openapi/plugins/decorators/set-info.js b/api-docs/openapi/plugins/decorators/set-info.cjs similarity index 100% rename from api-docs/openapi/plugins/decorators/set-info.js rename to api-docs/openapi/plugins/decorators/set-info.cjs diff 
--git a/api-docs/openapi/plugins/decorators/tags/set-tag-groups.js b/api-docs/openapi/plugins/decorators/tags/set-tag-groups.cjs similarity index 99% rename from api-docs/openapi/plugins/decorators/tags/set-tag-groups.js rename to api-docs/openapi/plugins/decorators/tags/set-tag-groups.cjs index 40dbb6d4b..38a752859 100644 --- a/api-docs/openapi/plugins/decorators/tags/set-tag-groups.js +++ b/api-docs/openapi/plugins/decorators/tags/set-tag-groups.cjs @@ -1,6 +1,6 @@ module.exports = SetTagGroups; -const { collect, getName, sortName, isPresent } = require('../../helpers/content-helper.js') +const { collect, getName, sortName, isPresent } = require('../../helpers/content-helper.cjs') /** * Returns an object that defines handler functions for: * - Operation nodes diff --git a/api-docs/openapi/plugins/decorators/tags/set-tags.js b/api-docs/openapi/plugins/decorators/tags/set-tags.js deleted file mode 100644 index 7369eeea6..000000000 --- a/api-docs/openapi/plugins/decorators/tags/set-tags.js +++ /dev/null @@ -1,25 +0,0 @@ -module.exports = SetTags; - -const { tags } = require('../../../content/content') -/** - * Returns an object that defines handler functions for: - * - DefinitionRoot (the root openapi) node - * The DefinitionRoot handler, executed when - * the parser is leaving the root node, - * sets the root `tags` list to the provided `data`. - */ -/** @type {import('@redocly/openapi-cli').OasDecorator} */ -function SetTags() { - const data = tags(); - - return { - DefinitionRoot: { - /** Set tags from custom tags when visitor enters root. 
*/ - enter(root) { - if(data) { - root.tags = data; - } - } - } - } -}; diff --git a/api-docs/openapi/plugins/docs-content.js b/api-docs/openapi/plugins/docs-content.cjs similarity index 92% rename from api-docs/openapi/plugins/docs-content.js rename to api-docs/openapi/plugins/docs-content.cjs index 975b2ad6b..289ee6215 100644 --- a/api-docs/openapi/plugins/docs-content.js +++ b/api-docs/openapi/plugins/docs-content.cjs @@ -1,5 +1,5 @@ const path = require('path'); -const { toJSON } = require('./helpers/content-helper'); +const { toJSON } = require('./helpers/content-helper.cjs'); function getVersioned(filename) { const apiDocsRoot=path.resolve(process.env.API_DOCS_ROOT_PATH || process.cwd()); diff --git a/api-docs/openapi/plugins/docs-plugin.js b/api-docs/openapi/plugins/docs-plugin.cjs similarity index 80% rename from api-docs/openapi/plugins/docs-plugin.js rename to api-docs/openapi/plugins/docs-plugin.cjs index ebeacd7ac..1fba52bfc 100644 --- a/api-docs/openapi/plugins/docs-plugin.js +++ b/api-docs/openapi/plugins/docs-plugin.cjs @@ -1,14 +1,14 @@ -const {info, servers, tagGroups} = require('./docs-content'); -const ReportTags = require('./rules/report-tags'); -const ValidateServersUrl = require('./rules/validate-servers-url'); -const RemovePrivatePaths = require('./decorators/paths/remove-private-paths'); -const ReplaceShortcodes = require('./decorators/replace-shortcodes'); -const SetInfo = require('./decorators/set-info'); -const DeleteServers = require('./decorators/servers/delete-servers'); -const SetServers = require('./decorators/servers/set-servers'); -const SetTagGroups = require('./decorators/tags/set-tag-groups'); -const StripVersionPrefix = require('./decorators/paths/strip-version-prefix'); -const StripTrailingSlash = require('./decorators/paths/strip-trailing-slash'); +const {info, servers, tagGroups} = require('./docs-content.cjs'); +const ReportTags = require('./rules/report-tags.cjs'); +const ValidateServersUrl = 
require('./rules/validate-servers-url.cjs'); +const RemovePrivatePaths = require('./decorators/paths/remove-private-paths.cjs'); +const ReplaceShortcodes = require('./decorators/replace-shortcodes.cjs'); +const SetInfo = require('./decorators/set-info.cjs'); +const DeleteServers = require('./decorators/servers/delete-servers.cjs'); +const SetServers = require('./decorators/servers/set-servers.cjs'); +const SetTagGroups = require('./decorators/tags/set-tag-groups.cjs'); +const StripVersionPrefix = require('./decorators/paths/strip-version-prefix.cjs'); +const StripTrailingSlash = require('./decorators/paths/strip-trailing-slash.cjs'); const id = 'docs'; diff --git a/api-docs/openapi/plugins/helpers/content-helper.js b/api-docs/openapi/plugins/helpers/content-helper.cjs similarity index 100% rename from api-docs/openapi/plugins/helpers/content-helper.js rename to api-docs/openapi/plugins/helpers/content-helper.cjs diff --git a/api-docs/openapi/plugins/rules/report-tags.js b/api-docs/openapi/plugins/rules/report-tags.cjs similarity index 100% rename from api-docs/openapi/plugins/rules/report-tags.js rename to api-docs/openapi/plugins/rules/report-tags.cjs diff --git a/api-docs/openapi/plugins/rules/validate-servers-url.js b/api-docs/openapi/plugins/rules/validate-servers-url.cjs similarity index 100% rename from api-docs/openapi/plugins/rules/validate-servers-url.js rename to api-docs/openapi/plugins/rules/validate-servers-url.cjs diff --git a/assets/js/api-libs.js b/assets/js/api-libs.js index 17cb76c85..edfbdebe0 100644 --- a/assets/js/api-libs.js +++ b/assets/js/api-libs.js @@ -2,7 +2,7 @@ ///////////////// Preferred Client Library programming language /////////////// //////////////////////////////////////////////////////////////////////////////// import { activateTabs, updateBtnURLs } from './tabbed-content.js'; -import { getPreference, setPreference } from './local-storage.js'; +import { getPreference, setPreference } from './services/local-storage.js'; function 
getVisitedApiLib() { const path = window.location.pathname.match( diff --git a/assets/js/ask-ai.js b/assets/js/ask-ai.js index 292fb0a4c..f315711a2 100644 --- a/assets/js/ask-ai.js +++ b/assets/js/ask-ai.js @@ -8,29 +8,31 @@ function setUser(userid, email) { window[NAMESPACE] = { user: { uniqueClientId: userid, - email: email, - } - } + email: email, + }, + }; } // Initialize the chat widget -function initializeChat({onChatLoad, chatAttributes}) { - /* See https://docs.kapa.ai/integrations/website-widget/configuration for +function initializeChat({ onChatLoad, chatAttributes }) { + /* See https://docs.kapa.ai/integrations/website-widget/configuration for * available configuration options. * All values are strings. */ - // If you make changes to data attributes here, you also need to port the changes to the api-docs/template.hbs API reference template. + // If you make changes to data attributes here, you also need to + // port the changes to the api-docs/template.hbs API reference template. const requiredAttributes = { websiteId: 'a02bca75-1dd3-411e-95c0-79ee1139be4d', projectName: 'InfluxDB', projectColor: '#020a47', projectLogo: '/img/influx-logo-cubo-white.png', - } + }; const optionalAttributes = { - - modalDisclaimer: 'This AI can access [documentation for InfluxDB, clients, and related tools](https://docs.influxdata.com). Information you submit is used in accordance with our [Privacy Policy](https://www.influxdata.com/legal/privacy-policy/).', - modalExampleQuestions: 'Use Python to write data to InfluxDB 3,How do I query using SQL?,How do I use MQTT with Telegraf?', + modalDisclaimer: + 'This AI can access [documentation for InfluxDB, clients, and related tools](https://docs.influxdata.com). 
Information you submit is used in accordance with our [Privacy Policy](https://www.influxdata.com/legal/privacy-policy/).', + modalExampleQuestions: + 'Use Python to write data to InfluxDB 3,How do I query using SQL?,How do I use MQTT with Telegraf?', buttonHide: 'true', exampleQuestionButtonWidth: 'auto', modalOpenOnCommandK: 'true', @@ -52,28 +54,32 @@ function initializeChat({onChatLoad, chatAttributes}) { modalHeaderBorderBottom: 'none', modalTitleColor: '#fff', modalTitleFontSize: '1.25rem', - } + }; const scriptUrl = 'https://widget.kapa.ai/kapa-widget.bundle.js'; const script = document.createElement('script'); script.async = true; script.src = scriptUrl; - script.onload = function() { + script.onload = function () { onChatLoad(); window.influxdatadocs.AskAI = AskAI; }; - script.onerror = function() { + script.onerror = function () { console.error('Error loading AI chat widget script'); }; - const dataset = {...requiredAttributes, ...optionalAttributes, ...chatAttributes}; - Object.keys(dataset).forEach(key => { - // Assign dataset attributes from the object + const dataset = { + ...requiredAttributes, + ...optionalAttributes, + ...chatAttributes, + }; + Object.keys(dataset).forEach((key) => { + // Assign dataset attributes from the object script.dataset[key] = dataset[key]; }); // Check for an existing script element to remove - const oldScript= document.querySelector(`script[src="${scriptUrl}"]`); + const oldScript = document.querySelector(`script[src="${scriptUrl}"]`); if (oldScript) { oldScript.remove(); } @@ -82,22 +88,21 @@ function initializeChat({onChatLoad, chatAttributes}) { function getProductExampleQuestions() { const questions = productData?.product?.ai_sample_questions; - return questions?.join(',') || ''; + return questions?.join(',') || ''; } -/** +/** * chatParams: specify custom (for example, page-specific) attribute values for the chat, pass the dataset key-values (collected in ...chatParams). 
See https://docs.kapa.ai/integrations/website-widget/configuration for available configuration options. * onChatLoad: function to call when the chat widget has loaded * userid: optional, a unique user ID for the user (not currently used for public docs) -*/ + */ export default function AskAI({ userid, email, onChatLoad, ...chatParams }) { - const modalExampleQuestions = getProductExampleQuestions(); const chatAttributes = { ...(modalExampleQuestions && { modalExampleQuestions }), ...chatParams, - } - initializeChat({onChatLoad, chatAttributes}); + }; + initializeChat({ onChatLoad, chatAttributes }); if (userid) { setUser(userid, email); diff --git a/assets/js/code-controls.js b/assets/js/code-controls.js index ffdf02003..59ca50ec7 100644 --- a/assets/js/code-controls.js +++ b/assets/js/code-controls.js @@ -1,8 +1,9 @@ import $ from 'jquery'; +import { context } from './page-context.js'; function initialize() { var codeBlockSelector = '.article--content pre'; - var codeBlocks = $(codeBlockSelector); + var $codeBlocks = $(codeBlockSelector); var appendHTML = `
@@ -15,7 +16,7 @@ function initialize() { `; // Wrap all codeblocks with a new 'codeblock' div - $(codeBlocks).each(function () { + $codeBlocks.each(function () { $(this).wrap("
"); }); @@ -68,7 +69,94 @@ function initialize() { // Trigger copy failure state lifecycle $('.copy-code').click(function () { - let text = $(this).closest('.code-controls').prevAll('pre:has(code)')[0].innerText; + let codeElement = $(this) + .closest('.code-controls') + .prevAll('pre:has(code)')[0]; + + let text = codeElement.innerText; + + // Extract additional code block information + const codeBlockInfo = extractCodeBlockInfo(codeElement); + + // Add Google Analytics event tracking + const currentUrl = new URL(window.location.href); + + // Determine which tracking parameter to add based on product context + switch (context) { + case 'cloud': + currentUrl.searchParams.set('dl', 'cloud'); + break; + case 'core': + /** Track using the same value used by www.influxdata.com pages */ + currentUrl.searchParams.set('dl', 'oss3'); + break; + case 'enterprise': + /** Track using the same value used by www.influxdata.com pages */ + currentUrl.searchParams.set('dl', 'enterprise'); + break; + case 'serverless': + currentUrl.searchParams.set('dl', 'serverless'); + break; + case 'dedicated': + currentUrl.searchParams.set('dl', 'dedicated'); + break; + case 'clustered': + currentUrl.searchParams.set('dl', 'clustered'); + break; + case 'oss/enterprise': + currentUrl.searchParams.set('dl', 'oss'); + break; + case 'other': + default: + // No tracking parameter for other/unknown products + break; + } + + // Add code block specific tracking parameters + if (codeBlockInfo.language) { + currentUrl.searchParams.set('code_lang', codeBlockInfo.language); + } + if (codeBlockInfo.lineCount) { + currentUrl.searchParams.set('code_lines', codeBlockInfo.lineCount); + } + if (codeBlockInfo.hasPlaceholders) { + currentUrl.searchParams.set('has_placeholders', 'true'); + } + if (codeBlockInfo.blockType) { + currentUrl.searchParams.set('code_type', codeBlockInfo.blockType); + } + if (codeBlockInfo.sectionTitle) { + currentUrl.searchParams.set( + 'section', + 
encodeURIComponent(codeBlockInfo.sectionTitle) + ); + } + if (codeBlockInfo.firstLine) { + currentUrl.searchParams.set( + 'first_line', + encodeURIComponent(codeBlockInfo.firstLine.substring(0, 100)) + ); + } + + // Update browser history without triggering page reload + if (window.history && window.history.replaceState) { + window.history.replaceState(null, '', currentUrl.toString()); + } + + // Send custom Google Analytics event if gtag is available + if (typeof window.gtag !== 'undefined') { + window.gtag('event', 'code_copy', { + language: codeBlockInfo.language, + line_count: codeBlockInfo.lineCount, + has_placeholders: codeBlockInfo.hasPlaceholders, + dl: codeBlockInfo.dl || null, + section_title: codeBlockInfo.sectionTitle, + first_line: codeBlockInfo.firstLine + ? codeBlockInfo.firstLine.substring(0, 100) + : null, + product: context, + }); + } const copyContent = async () => { try { @@ -82,6 +170,71 @@ function initialize() { copyContent(); }); + /** + * Extract contextual information about a code block + * @param {HTMLElement} codeElement - The code block element + * @returns {Object} Information about the code block + */ + function extractCodeBlockInfo(codeElement) { + const codeTag = codeElement.querySelector('code'); + const info = { + language: null, + lineCount: 0, + hasPlaceholders: false, + blockType: 'code', + dl: null, // Download script type + sectionTitle: null, + firstLine: null, + }; + + // Extract language from class attribute + if (codeTag && codeTag.className) { + const langMatch = codeTag.className.match( + /language-(\w+)|hljs-(\w+)|(\w+)/ + ); + if (langMatch) { + info.language = langMatch[1] || langMatch[2] || langMatch[3]; + } + } + + // Count lines + const text = codeElement.innerText || ''; + const lines = text.split('\n'); + info.lineCount = lines.length; + + // Get first non-empty line + info.firstLine = lines.find((line) => line.trim() !== '') || null; + + // Check for placeholders (common patterns) + info.hasPlaceholders = + 
/\b[A-Z_]{2,}\b|\{\{[^}]+\}\}|\$\{[^}]+\}|<[^>]+>/.test(text); + + // Determine if this is a download script + if (text.includes('https://www.influxdata.com/d/install_influxdb3.sh')) { + if (text.includes('install_influxdb3.sh enterprise')) { + info.dl = 'enterprise'; + } else { + info.dl = 'oss3'; + } + } else if (text.includes('docker pull influxdb:3-enterprise')) { + info.dl = 'enterprise'; + } else if (text.includes('docker pull influxdb:3-core')) { + info.dl = 'oss3'; + } + + // Find nearest section heading + let element = codeElement; + while (element && element !== document.body) { + element = element.previousElementSibling || element.parentElement; + if (element && element.tagName && /^H[1-6]$/.test(element.tagName)) { + info.sectionTitle = element.textContent.trim(); + break; + } + } + + return info; + } + /////////////////////////////// FULL WINDOW CODE /////////////////////////////// /* @@ -90,7 +243,10 @@ Disable scrolling on the body. Disable user selection on everything but the fullscreen codeblock. 
*/ $('.fullscreen-toggle').click(function () { - var code = $(this).closest('.code-controls').prevAll('pre:has(code)').clone(); + var code = $(this) + .closest('.code-controls') + .prevAll('pre:has(code)') + .clone(); $('#fullscreen-code-placeholder').replaceWith(code[0]); $('body').css('overflow', 'hidden'); diff --git a/assets/js/code-placeholders.js b/assets/js/code-placeholders.js index 3912df97d..596364819 100644 --- a/assets/js/code-placeholders.js +++ b/assets/js/code-placeholders.js @@ -1,30 +1,52 @@ -const placeholderWrapper = '.code-placeholder-wrapper'; +import $ from 'jquery'; + const placeholderElement = 'var.code-placeholder'; const editIcon = ""; // When clicking a placeholder, append the edit input -function handleClick(element) { - $(element).on('click', function() { +function handleClick($element) { + const $placeholder = $($element).find(placeholderElement); + $placeholder.on('click', function() { var placeholderData = $(this)[0].dataset; - var placeholderID = placeholderData.codeVar; + var placeholderID = placeholderData.codeVarEscaped; var placeholderValue = placeholderData.codeVarValue; - var placeholderInputWrapper = $('
'); - var placeholderInput = ``; - $(this).before(placeholderInputWrapper) - $(this).siblings('.code-input-wrapper').append(placeholderInput); - $(`input#${placeholderID}`).width(`${placeholderValue.length}ch`); - $(`input#${placeholderID}`).focus().select(); - $(this).css('opacity', 0); + const placeholderInput = document.createElement('input'); + placeholderInput.setAttribute('class', 'placeholder-edit'); + placeholderInput.setAttribute('data-id', placeholderID); + placeholderInput.setAttribute('data-code-var-escaped', placeholderID); + placeholderInput.setAttribute('value', placeholderValue); + placeholderInput.setAttribute('spellcheck', 'false'); + + placeholderInput.addEventListener('blur', function() { + submitPlaceholder($(this)); + } + ); + placeholderInput.addEventListener('input', function() { + updateInputWidth($(this)); + } + ); + placeholderInput.addEventListener('keydown', function(event) { + closeOnEnter($(this)[0], event); + } + ); + + const placeholderInputWrapper = $('
'); + $placeholder.before(placeholderInputWrapper) + $placeholder.siblings('.code-input-wrapper').append(placeholderInput); + $(`input[data-code-var-escaped="${placeholderID}"]`).width(`${placeholderValue.length}ch`); + document.querySelector(`input[data-code-var-escaped="${placeholderID}"]`).focus(); + document.querySelector(`input[data-code-var-escaped="${placeholderID}"]`).select(); + $placeholder.css('opacity', 0); }); } function submitPlaceholder(placeholderInput) { - var placeholderID = placeholderInput.attr('id'); + var placeholderID = placeholderInput.attr('data-code-var-escaped'); var placeholderValue = placeholderInput[0].value; - var placeholderInput = $(`input.placeholder-edit#${placeholderID}`); + placeholderInput = $(`input.placeholder-edit[data-id="${placeholderID}"]`); - $(`*[data-code-var='${placeholderID}']`).each(function() { + $(`*[data-code-var="${placeholderID}"]`).each(function() { $(this).attr('data-code-var-value', placeholderValue); $(this).html(placeholderValue + editIcon); $(this).css('opacity', 1); @@ -44,13 +66,7 @@ function closeOnEnter(input, event) { } } -function CodePlaceholder({element}) { - handleClick(element); -} - -$(function() { - const codePlaceholders = $(placeholderElement); - codePlaceholders.each(function() { - CodePlaceholder({element: this}); - }); -}); \ No newline at end of file +export default function CodePlaceholder({ component }) { + const $component = $(component); + handleClick($component); +} \ No newline at end of file diff --git a/assets/js/components/diagram.js b/assets/js/components/diagram.js new file mode 100644 index 000000000..17f07dbe5 --- /dev/null +++ b/assets/js/components/diagram.js @@ -0,0 +1,78 @@ +// Memoize the mermaid module import +let mermaidPromise = null; + +export default function Diagram({ component }) { + // Import mermaid.js module (memoized) + if (!mermaidPromise) { + mermaidPromise = import('mermaid'); + } + mermaidPromise + .then(({ default: mermaid }) => { + // Configure mermaid 
with InfluxData theming + mermaid.initialize({ + startOnLoad: false, // We'll manually call run() + theme: document.body.classList.contains('dark-theme') + ? 'dark' + : 'default', + themeVariables: { + fontFamily: 'Proxima Nova', + fontSize: '16px', + lineColor: '#22ADF6', + primaryColor: '#22ADF6', + primaryTextColor: '#545454', + secondaryColor: '#05CE78', + tertiaryColor: '#f4f5f5', + }, + securityLevel: 'loose', // Required for interactive diagrams + logLevel: 'error', + }); + + // Process the specific diagram component + try { + mermaid.run({ nodes: [component] }); + } catch (error) { + console.error('Mermaid diagram rendering error:', error); + } + + // Store reference to mermaid for theme switching + if (!window.mermaidInstances) { + window.mermaidInstances = new Map(); + } + window.mermaidInstances.set(component, mermaid); + }) + .catch((error) => { + console.error('Failed to load Mermaid library:', error); + }); + + // Listen for theme changes to refresh diagrams + const observer = new MutationObserver((mutations) => { + mutations.forEach((mutation) => { + if ( + mutation.attributeName === 'class' && + document.body.classList.contains('dark-theme') !== window.isDarkTheme + ) { + window.isDarkTheme = document.body.classList.contains('dark-theme'); + + // Reload this specific diagram with new theme + if (window.mermaidInstances?.has(component)) { + const mermaid = window.mermaidInstances.get(component); + mermaid.initialize({ + theme: window.isDarkTheme ? 
'dark' : 'default', + }); + mermaid.run({ nodes: [component] }); + } + } + }); + }); + + // Watch for theme changes on body element + observer.observe(document.body, { attributes: true }); + + // Return cleanup function to be called when component is destroyed + return () => { + observer.disconnect(); + if (window.mermaidInstances?.has(component)) { + window.mermaidInstances.delete(component); + } + }; +} diff --git a/assets/js/components/doc-search.js b/assets/js/components/doc-search.js new file mode 100644 index 000000000..52e1b1f65 --- /dev/null +++ b/assets/js/components/doc-search.js @@ -0,0 +1,180 @@ +/** + * DocSearch component for InfluxData documentation + * Handles asynchronous loading and initialization of Algolia DocSearch + */ +const debug = false; // Set to true for debugging output + +export default function DocSearch({ component }) { + // Store configuration from component data attributes + const config = { + apiKey: component.getAttribute('data-api-key'), + appId: component.getAttribute('data-app-id'), + indexName: component.getAttribute('data-index-name'), + inputSelector: component.getAttribute('data-input-selector'), + searchTag: component.getAttribute('data-search-tag'), + includeFlux: component.getAttribute('data-include-flux') === 'true', + includeResources: + component.getAttribute('data-include-resources') === 'true', + debug: component.getAttribute('data-debug') === 'true', + }; + + // Initialize global object to track DocSearch state + window.InfluxDocs = window.InfluxDocs || {}; + window.InfluxDocs.search = { + initialized: false, + options: config, + }; + + // Load DocSearch asynchronously + function loadDocSearch() { + if (debug) { + console.log('Loading DocSearch script...'); + } + const script = document.createElement('script'); + script.src = + 'https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.js'; + script.async = true; + script.onload = initializeDocSearch; + document.body.appendChild(script); + } + + // 
Initialize DocSearch after script loads + function initializeDocSearch() { + if (debug) { + console.log('Initializing DocSearch...'); + } + const multiVersion = ['influxdb']; + + // Use object-based lookups instead of conditionals for version and product names + // These can be replaced with data from productData in the future + + // Version display name mappings + const versionDisplayNames = { + cloud: 'Cloud (TSM)', + core: 'Core', + enterprise: 'Enterprise', + 'cloud-serverless': 'Cloud Serverless', + 'cloud-dedicated': 'Cloud Dedicated', + clustered: 'Clustered', + explorer: 'Explorer', + }; + + // Product display name mappings + const productDisplayNames = { + influxdb: 'InfluxDB', + influxdb3: 'InfluxDB 3', + explorer: 'InfluxDB 3 Explorer', + enterprise_influxdb: 'InfluxDB Enterprise', + flux: 'Flux', + telegraf: 'Telegraf', + chronograf: 'Chronograf', + kapacitor: 'Kapacitor', + platform: 'InfluxData Platform', + resources: 'Additional Resources', + }; + + // Initialize DocSearch with configuration + window.docsearch({ + apiKey: config.apiKey, + appId: config.appId, + indexName: config.indexName, + inputSelector: config.inputSelector, + debug: config.debug, + transformData: function (hits) { + // Format version using object lookup instead of if-else chain + function fmtVersion(version, productKey) { + if (version == null) { + return ''; + } else if (versionDisplayNames[version]) { + return versionDisplayNames[version]; + } else if (multiVersion.includes(productKey)) { + return version; + } else { + return ''; + } + } + + hits.map((hit) => { + const pathData = new URL(hit.url).pathname + .split('/') + .filter((n) => n); + const product = productDisplayNames[pathData[0]] || pathData[0]; + const version = fmtVersion(pathData[1], pathData[0]); + + hit.product = product; + hit.version = version; + hit.hierarchy.lvl0 = + hit.hierarchy.lvl0 + + ` ${product} ${version}`; + hit._highlightResult.hierarchy.lvl0.value = + hit._highlightResult.hierarchy.lvl0.value + + ` 
${product} ${version}`; + }); + return hits; + }, + algoliaOptions: { + hitsPerPage: 10, + facetFilters: buildFacetFilters(config), + }, + autocompleteOptions: { + templates: { + header: + '
Search all InfluxData content ', + empty: + '

Not finding what you\'re looking for?

Search all InfluxData content
', + }, + }, + }); + + // Mark DocSearch as initialized + window.InfluxDocs.search.initialized = true; + + // Dispatch event for other components to know DocSearch is ready + window.dispatchEvent(new CustomEvent('docsearch-initialized')); + } + + /** + * Helper function to build facet filters based on config + * - Uses nested arrays for AND conditions + * - Includes space after colon in filter expressions + */ + function buildFacetFilters(config) { + if (!config.searchTag) { + return ['latest:true']; + } else if (config.includeFlux) { + // Return a nested array to match original template structure + // Note the space after each colon + return [ + [ + 'searchTag: ' + config.searchTag, + 'flux:true', + 'resources: ' + config.includeResources, + ], + ]; + } else { + // Return a nested array to match original template structure + // Note the space after each colon + return [ + [ + 'searchTag: ' + config.searchTag, + 'resources: ' + config.includeResources, + ], + ]; + } + } + + // Load DocSearch when page is idle or after a slight delay + if ('requestIdleCallback' in window) { + requestIdleCallback(loadDocSearch); + } else { + setTimeout(loadDocSearch, 500); + } + + // Return cleanup function + return function cleanup() { + // Clean up any event listeners if needed + if (debug) { + console.log('DocSearch component cleanup'); + } + }; +} diff --git a/assets/js/components/sidebar-search.js b/assets/js/components/sidebar-search.js new file mode 100644 index 000000000..f3d09fbe4 --- /dev/null +++ b/assets/js/components/sidebar-search.js @@ -0,0 +1,6 @@ +import SearchInteractions from '../utils/search-interactions.js'; + +export default function SidebarSearch({ component }) { + const searchInput = component.querySelector('.sidebar--search-field'); + SearchInteractions({ searchInput }); +} diff --git a/assets/js/custom-timestamps.js b/assets/js/custom-timestamps.js index c9e32838b..3cde0a6ad 100644 --- a/assets/js/custom-timestamps.js +++ b/assets/js/custom-timestamps.js @@ 
-1,7 +1,7 @@ import $ from 'jquery'; import { Datepicker } from 'vanillajs-datepicker'; import { toggleModal } from './modals.js'; -import * as localStorage from './local-storage.js'; +import * as localStorage from './services/local-storage.js'; // Placeholder start date used in InfluxDB custom timestamps const defaultStartDate = '2022-01-01'; @@ -53,65 +53,65 @@ function timeToUnixSeconds(time) { return unixSeconds; } - // Default time values in getting started sample data - const defaultTimes = [ - { - rfc3339: `${defaultStartDate}T08:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T08:00:00Z`), - }, // 1641024000 - { - rfc3339: `${defaultStartDate}T09:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T09:00:00Z`), - }, // 1641027600 - { - rfc3339: `${defaultStartDate}T10:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T10:00:00Z`), - }, // 1641031200 - { - rfc3339: `${defaultStartDate}T11:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T11:00:00Z`), - }, // 1641034800 - { - rfc3339: `${defaultStartDate}T12:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T12:00:00Z`), - }, // 1641038400 - { - rfc3339: `${defaultStartDate}T13:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T13:00:00Z`), - }, // 1641042000 - { - rfc3339: `${defaultStartDate}T14:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T14:00:00Z`), - }, // 1641045600 - { - rfc3339: `${defaultStartDate}T15:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T15:00:00Z`), - }, // 1641049200 - { - rfc3339: `${defaultStartDate}T16:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T16:00:00Z`), - }, // 1641052800 - { - rfc3339: `${defaultStartDate}T17:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T17:00:00Z`), - }, // 1641056400 - { - rfc3339: `${defaultStartDate}T18:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T18:00:00Z`), - }, // 1641060000 - { - rfc3339: `${defaultStartDate}T19:00:00Z`, - unix: 
timeToUnixSeconds(`${defaultStartDate}T19:00:00Z`), - }, // 1641063600 - { - rfc3339: `${defaultStartDate}T20:00:00Z`, - unix: timeToUnixSeconds(`${defaultStartDate}T20:00:00Z`), - }, // 1641067200 - ]; +// Default time values in getting started sample data +const defaultTimes = [ + { + rfc3339: `${defaultStartDate}T08:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T08:00:00Z`), + }, // 1641024000 + { + rfc3339: `${defaultStartDate}T09:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T09:00:00Z`), + }, // 1641027600 + { + rfc3339: `${defaultStartDate}T10:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T10:00:00Z`), + }, // 1641031200 + { + rfc3339: `${defaultStartDate}T11:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T11:00:00Z`), + }, // 1641034800 + { + rfc3339: `${defaultStartDate}T12:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T12:00:00Z`), + }, // 1641038400 + { + rfc3339: `${defaultStartDate}T13:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T13:00:00Z`), + }, // 1641042000 + { + rfc3339: `${defaultStartDate}T14:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T14:00:00Z`), + }, // 1641045600 + { + rfc3339: `${defaultStartDate}T15:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T15:00:00Z`), + }, // 1641049200 + { + rfc3339: `${defaultStartDate}T16:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T16:00:00Z`), + }, // 1641052800 + { + rfc3339: `${defaultStartDate}T17:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T17:00:00Z`), + }, // 1641056400 + { + rfc3339: `${defaultStartDate}T18:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T18:00:00Z`), + }, // 1641060000 + { + rfc3339: `${defaultStartDate}T19:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T19:00:00Z`), + }, // 1641063600 + { + rfc3339: `${defaultStartDate}T20:00:00Z`, + unix: timeToUnixSeconds(`${defaultStartDate}T20:00:00Z`), + }, // 1641067200 +]; -function updateTimestamps (newStartDate, 
seedTimes=defaultTimes) { +function updateTimestamps(newStartDate, seedTimes = defaultTimes) { // Update the times array with replacement times - const times = seedTimes.map(x => { + const times = seedTimes.map((x) => { var newStartTimestamp = x.rfc3339.replace(/^.*T/, newStartDate + 'T'); return { @@ -178,7 +178,7 @@ function updateTimestamps (newStartDate, seedTimes=defaultTimes) { /////////////////////// MODAL INTERACTIONS / DATE PICKER /////////////////////// -function CustomTimeTrigger({component}) { +function CustomTimeTrigger({ component }) { const $component = $(component); $component .find('a[data-action="open"]:first') @@ -212,7 +212,7 @@ function CustomTimeTrigger({component}) { if (newDate != undefined) { newDate = formatDate(newDate); - + // Update the last updated timestamps with the new date // and reassign the updated times. updatedTimes = updateTimestamps(newDate, updatedTimes); diff --git a/assets/js/datetime.js b/assets/js/datetime.js index ec0f8ee2b..7c0261416 100644 --- a/assets/js/datetime.js +++ b/assets/js/datetime.js @@ -1,30 +1,54 @@ -const monthNames = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]; -var date = new Date() -var currentTimestamp = date.toISOString().replace(/^(.*)(\.\d+)(Z)/, '$1$3') // 2023-01-01T12:34:56Z -var currentTime = date.toISOString().replace(/(^.*T)(.*)(Z)/, '$2') + '084216' // 12:34:56.000084216 +import $ from 'jquery'; -function currentDate(offset=0, trimTime=false) { - outputDate = new Date(date) - outputDate.setDate(outputDate.getDate() + offset) +var date = new Date(); +var currentTimestamp = date.toISOString().replace(/^(.*)(\.\d+)(Z)/, '$1$3'); // 2023-01-01T12:34:56Z + +// Microsecond offset appended to the current time string for formatting purposes +const MICROSECOND_OFFSET = '084216'; + +var currentTime = + date.toISOString().replace(/(^.*T)(.*)(Z)/, '$2') + MICROSECOND_OFFSET; // 12:34:56.000084216 +function currentDate(offset 
= 0, trimTime = false) { + let outputDate = new Date(date); + outputDate.setDate(outputDate.getDate() + offset); if (trimTime) { - return outputDate.toISOString().replace(/T.*$/, '') // 2023-01-01 + return outputDate.toISOString().replace(/T.*$/, ''); // 2023-01-01 } else { - return outputDate.toISOString().replace(/T.*$/, 'T00:00:00Z') // 2023-01-01T00:00:00Z + return outputDate.toISOString().replace(/T.*$/, 'T00:00:00Z'); // 2023-01-01T00:00:00Z } } function enterpriseEOLDate() { - var inTwoYears = date.setFullYear(date.getFullYear() + 2) - earliestEOL = new Date(inTwoYears) - return `${monthNames[earliestEOL.getMonth()]} ${earliestEOL.getDate()}, ${earliestEOL.getFullYear()}` + const monthNames = [ + 'January', + 'February', + 'March', + 'April', + 'May', + 'June', + 'July', + 'August', + 'September', + 'October', + 'November', + 'December', + ]; + var inTwoYears = new Date(date); + inTwoYears.setFullYear(inTwoYears.getFullYear() + 2); + let earliestEOL = new Date(inTwoYears); + return `${monthNames[earliestEOL.getMonth()]} ${earliestEOL.getDate()}, ${earliestEOL.getFullYear()}`; } -$('span.current-timestamp').text(currentTimestamp) -$('span.current-time').text(currentTime) -$('span.enterprise-eol-date').text(enterpriseEOLDate) -$('span.current-date').each(function() { - var dayOffset = parseInt($(this).attr("offset")) - var trimTime = $(this).attr("trim-time") === "true" - $(this).text(currentDate(dayOffset, trimTime)) -}) +function initialize() { + $('span.current-timestamp').text(currentTimestamp); + $('span.current-time').text(currentTime); + $('span.enterprise-eol-date').text(enterpriseEOLDate()); + $('span.current-date').each(function () { + var dayOffset = parseInt($(this).attr('offset')); + var trimTime = $(this).attr('trim-time') === 'true'; + $(this).text(currentDate(dayOffset, trimTime)); + }); +} + +export { initialize }; diff --git a/assets/js/feature-callouts.js b/assets/js/feature-callouts.js index 253b09b13..a3ad28d94 100644 --- 
a/assets/js/feature-callouts.js +++ b/assets/js/feature-callouts.js @@ -2,37 +2,24 @@ This feature is designed to callout new features added to the documentation CSS is required for the callout bubble to determine look and position, but the element must have the `callout` class and a unique id. - Callouts are treated as notifications and use the notification cookie API in - assets/js/cookies.js. + Callouts are treated as notifications and use the LocalStorage notification API. */ +import $ from 'jquery'; +import * as LocalStorageAPI from './services/local-storage.js'; + // Get notification ID -function getCalloutID (el) { +function getCalloutID(el) { return $(el).attr('id'); } -// Hide a callout and update the cookie with the viewed callout -function hideCallout (calloutID) { - if (!window.LocalStorageAPI.notificationIsRead(calloutID)) { - window.LocalStorageAPI.setNotificationAsRead(calloutID, 'callout'); - $(`#${calloutID}`).fadeOut(200); +// Show the url feature callouts on page load +export default function FeatureCallout({ component }) { + const calloutID = getCalloutID($(component)); + + if (!LocalStorageAPI.notificationIsRead(calloutID, 'callout')) { + $(`#${calloutID}.feature-callout`) + .fadeIn(300) + .removeClass('start-position'); } } - -// Show the url feature callouts on page load -$(document).ready(function () { - $('.feature-callout').each(function () { - const calloutID = getCalloutID($(this)); - - if (!window.LocalStorageAPI.notificationIsRead(calloutID, 'callout')) { - $(`#${calloutID}.feature-callout`) - .fadeIn(300) - .removeClass('start-position'); - } - }); -}); - -// Hide the InfluxDB URL selector callout -// $('button.url-trigger, #influxdb-url-selector .close').click(function () { -// hideCallout('influxdb-url-selector'); -// }); diff --git a/assets/js/flux-group-keys.js b/assets/js/flux-group-keys.js index 80ab46b70..60ed99b0b 100644 --- a/assets/js/flux-group-keys.js +++ b/assets/js/flux-group-keys.js @@ -1,49 +1,148 @@ -var tablesElement 
= $("#flux-group-keys-demo #grouped-tables") +import $ from 'jquery'; // Sample data let data = [ [ - { _time: "2021-01-01T00:00:00Z", _measurement: "example", loc: "rm1", sensorID: "A123", _field: "temp", _value: 110.3 }, - { _time: "2021-01-01T00:01:00Z", _measurement: "example", loc: "rm1", sensorID: "A123", _field: "temp", _value: 112.5 }, - { _time: "2021-01-01T00:02:00Z", _measurement: "example", loc: "rm1", sensorID: "A123", _field: "temp", _value: 111.9 } + { + _time: '2021-01-01T00:00:00Z', + _measurement: 'example', + loc: 'rm1', + sensorID: 'A123', + _field: 'temp', + _value: 110.3, + }, + { + _time: '2021-01-01T00:01:00Z', + _measurement: 'example', + loc: 'rm1', + sensorID: 'A123', + _field: 'temp', + _value: 112.5, + }, + { + _time: '2021-01-01T00:02:00Z', + _measurement: 'example', + loc: 'rm1', + sensorID: 'A123', + _field: 'temp', + _value: 111.9, + }, ], [ - { _time: "2021-01-01T00:00:00Z", _measurement: "example", loc: "rm1", sensorID: "A123", _field: "hum", _value: 73.4 }, - { _time: "2021-01-01T00:01:00Z", _measurement: "example", loc: "rm1", sensorID: "A123", _field: "hum", _value: 73.7 }, - { _time: "2021-01-01T00:02:00Z", _measurement: "example", loc: "rm1", sensorID: "A123", _field: "hum", _value: 75.1 } + { + _time: '2021-01-01T00:00:00Z', + _measurement: 'example', + loc: 'rm1', + sensorID: 'A123', + _field: 'hum', + _value: 73.4, + }, + { + _time: '2021-01-01T00:01:00Z', + _measurement: 'example', + loc: 'rm1', + sensorID: 'A123', + _field: 'hum', + _value: 73.7, + }, + { + _time: '2021-01-01T00:02:00Z', + _measurement: 'example', + loc: 'rm1', + sensorID: 'A123', + _field: 'hum', + _value: 75.1, + }, ], [ - { _time: "2021-01-01T00:00:00Z", _measurement: "example", loc: "rm2", sensorID: "B456", _field: "temp", _value: 108.2 }, - { _time: "2021-01-01T00:01:00Z", _measurement: "example", loc: "rm2", sensorID: "B456", _field: "temp", _value: 108.5 }, - { _time: "2021-01-01T00:02:00Z", _measurement: "example", loc: "rm2", sensorID: "B456", 
_field: "temp", _value: 109.6 } + { + _time: '2021-01-01T00:00:00Z', + _measurement: 'example', + loc: 'rm2', + sensorID: 'B456', + _field: 'temp', + _value: 108.2, + }, + { + _time: '2021-01-01T00:01:00Z', + _measurement: 'example', + loc: 'rm2', + sensorID: 'B456', + _field: 'temp', + _value: 108.5, + }, + { + _time: '2021-01-01T00:02:00Z', + _measurement: 'example', + loc: 'rm2', + sensorID: 'B456', + _field: 'temp', + _value: 109.6, + }, ], [ - { _time: "2021-01-01T00:00:00Z", _measurement: "example", loc: "rm2", sensorID: "B456", _field: "hum", _value: 71.8 }, - { _time: "2021-01-01T00:01:00Z", _measurement: "example", loc: "rm2", sensorID: "B456", _field: "hum", _value: 72.3 }, - { _time: "2021-01-01T00:02:00Z", _measurement: "example", loc: "rm2", sensorID: "B456", _field: "hum", _value: 72.1 } - ] -] + { + _time: '2021-01-01T00:00:00Z', + _measurement: 'example', + loc: 'rm2', + sensorID: 'B456', + _field: 'hum', + _value: 71.8, + }, + { + _time: '2021-01-01T00:01:00Z', + _measurement: 'example', + loc: 'rm2', + sensorID: 'B456', + _field: 'hum', + _value: 72.3, + }, + { + _time: '2021-01-01T00:02:00Z', + _measurement: 'example', + loc: 'rm2', + sensorID: 'B456', + _field: 'hum', + _value: 72.1, + }, + ], +]; // Default group key -let groupKey = ["_measurement", "loc", "sensorID", "_field"] +let groupKey = ['_measurement', 'loc', 'sensorID', '_field']; + +export default function FluxGroupKeysDemo({ component }) { + $('.column-list label').click(function () { + toggleCheckbox($(this)); + groupKey = getChecked(component); + groupData(); + buildGroupExample(component); + }); + + // Group and render tables on load + groupData(); +} // Build a table group (group key and table) using an array of objects function buildTable(inputData) { - // Build the group key string function wrapString(column, value) { - var stringColumns = ["_measurement", "loc", "sensorID", "_field"] + var stringColumns = ['_measurement', 'loc', 'sensorID', '_field']; if 
(stringColumns.includes(column)) { - return '"' + value + '"' + return '"' + value + '"'; } else { - return value + return value; } } - var groupKeyString = "Group key instance = [" + (groupKey.map(column => column + ": " + wrapString(column, (inputData[0])[column])) ).join(", ") + "]"; - var groupKeyLabel = document.createElement("p"); - groupKeyLabel.className = "table-group-key" - groupKeyLabel.innerHTML = groupKeyString - + var groupKeyString = + 'Group key instance = [' + + groupKey + .map((column) => column + ': ' + wrapString(column, inputData[0][column])) + .join(', ') + + ']'; + var groupKeyLabel = document.createElement('p'); + groupKeyLabel.className = 'table-group-key'; + groupKeyLabel.innerHTML = groupKeyString; // Extract column headers var columns = []; @@ -54,56 +153,57 @@ function buildTable(inputData) { } } } - + // Create the table element - var table = document.createElement("table"); - + const table = document.createElement('table'); + // Create the table header for (let i = 0; i < columns.length; i++) { var header = table.createTHead(); - var th = document.createElement("th"); + var th = document.createElement('th'); th.innerHTML = columns[i]; if (groupKey.includes(columns[i])) { - th.className = "grouped-by"; + th.className = 'grouped-by'; } header.appendChild(th); } // Add inputData to the HTML table for (let i = 0; i < inputData.length; i++) { - tr = table.insertRow(-1); + let tr = table.insertRow(-1); for (let j = 0; j < columns.length; j++) { var td = tr.insertCell(-1); td.innerHTML = inputData[i][columns[j]]; // Highlight the value if column is part of the group key if (groupKey.includes(columns[j])) { - td.className = "grouped-by"; + td.className = 'grouped-by'; } } } // Create a table group with group key and table - var tableGroup = document.createElement("div"); - tableGroup.innerHTML += groupKeyLabel.outerHTML + table.outerHTML + var tableGroup = document.createElement('div'); + tableGroup.innerHTML += groupKeyLabel.outerHTML + 
table.outerHTML; - return tableGroup + return tableGroup; } // Clear and rebuild all HTML tables function buildTables(data) { - existingTables = tablesElement[0] + let tablesElement = $('#flux-group-keys-demo #grouped-tables'); + let existingTables = tablesElement[0]; while (existingTables.firstChild) { existingTables.removeChild(existingTables.firstChild); } for (let i = 0; i < data.length; i++) { - var table = buildTable(data[i]) + var table = buildTable(data[i]); tablesElement.append(table); } } // Group data based on the group key and output new tables function groupData() { - let groupedData = data.flat() + let groupedData = data.flat(); function groupBy(array, f) { var groups = {}; @@ -114,20 +214,19 @@ function groupData() { }); return Object.keys(groups).map(function (group) { return groups[group]; - }) + }); } groupedData = groupBy(groupedData, function (r) { - return groupKey.map(v => r[v]); + return groupKey.map((v) => r[v]); }); buildTables(groupedData); } -// Get selected column names -var checkboxes = $("input[type=checkbox]"); - -function getChecked() { +function getChecked(component) { + // Get selected column names + var checkboxes = $(component).find('input[type=checkbox]'); var checked = []; for (var i = 0; i < checkboxes.length; i++) { var checkbox = checkboxes[i]; @@ -141,17 +240,12 @@ function toggleCheckbox(element) { } // Build example group function -function buildGroupExample() { - var columnCollection = getChecked().map(i => '"' + i + '"').join(", ") - $("pre#group-by-example")[0].innerHTML = "data\n |> group(columns: [" + columnCollection + "])"; +function buildGroupExample(component) { + var columnCollection = getChecked(component) + .map((i) => '"' + i + '"') + .join(', '); + $('pre#group-by-example')[0].innerHTML = + "data\n |> group(columns: [" + + columnCollection + + '])'; } - -$(".column-list label").click(function () { - toggleCheckbox($(this)) - groupKey = getChecked(); - groupData(); - buildGroupExample(); -}); - -// Group and 
render tables on load -groupData() diff --git a/assets/js/home-interactions.js b/assets/js/home-interactions.js deleted file mode 100644 index a90df14cd..000000000 --- a/assets/js/home-interactions.js +++ /dev/null @@ -1,22 +0,0 @@ -$('.exp-btn').click(function() { - var targetBtnElement = $(this).parent() - $('.exp-btn > p', targetBtnElement).fadeOut(100); - setTimeout(function() { - $('.exp-btn-links', targetBtnElement).fadeIn(200) - $('.exp-btn', targetBtnElement).addClass('open'); - $('.close-btn', targetBtnElement).fadeIn(200); - }, 100); -}) - -$('.close-btn').click(function() { - var targetBtnElement = $(this).parent().parent() - $('.exp-btn-links', targetBtnElement).fadeOut(100) - $('.exp-btn', targetBtnElement).removeClass('open'); - $(this).fadeOut(100); - setTimeout(function() { - $('p', targetBtnElement).fadeIn(100); - }, 100); -}) - -/////////////////////////////// EXPANDING BUTTONS ////////////////////////////// - diff --git a/assets/js/influxdb-url.js b/assets/js/influxdb-url.js index bed47eb94..e0f5d34d8 100644 --- a/assets/js/influxdb-url.js +++ b/assets/js/influxdb-url.js @@ -3,7 +3,6 @@ ///////////////////////// INFLUXDB URL PREFERENCE ///////////////////////////// //////////////////////////////////////////////////////////////////////////////// */ -import * as pageParams from '@params'; import { DEFAULT_STORAGE_URLS, getPreference, @@ -12,15 +11,18 @@ import { removeInfluxDBUrl, getInfluxDBUrl, getInfluxDBUrls, -} from './local-storage.js'; +} from './services/local-storage.js'; import $ from 'jquery'; import { context as PRODUCT_CONTEXT, referrerHost } from './page-context.js'; +import { influxdbUrls } from './services/influxdb-urls.js'; import { delay } from './helpers.js'; import { toggleModal } from './modals.js'; let CLOUD_URLS = []; -if (pageParams && pageParams.influxdb_urls) { - CLOUD_URLS = Object.values(pageParams.influxdb_urls.cloud.providers).flatMap((provider) => provider.regions?.map((region) => region.url)); +if 
(influxdbUrls?.cloud) { + CLOUD_URLS = Object.values(influxdbUrls.cloud.providers).flatMap((provider) => + provider.regions?.map((region) => region.url) + ); } export { CLOUD_URLS }; @@ -28,7 +30,7 @@ export function InfluxDBUrl() { const UNIQUE_URL_PRODUCTS = ['dedicated', 'clustered']; const IS_UNIQUE_URL_PRODUCT = UNIQUE_URL_PRODUCTS.includes(PRODUCT_CONTEXT); - // Add actual cloud URLs as needed + // Add actual cloud URLs as needed const elementSelector = '.article--content pre:not(.preserve)'; ///////////////////// Stored preference management /////////////////////// @@ -118,11 +120,12 @@ export function InfluxDBUrl() { }); } - // Retrieve the currently selected URLs from the urls local storage object. - function getUrls() { - const { cloud, oss, core, enterprise, serverless, dedicated, clustered } = getInfluxDBUrls(); - return { oss, cloud, core, enterprise, serverless, dedicated, clustered }; -} + // Retrieve the currently selected URLs from the urls local storage object. + function getUrls() { + const { cloud, oss, core, enterprise, serverless, dedicated, clustered } = + getInfluxDBUrls(); + return { oss, cloud, core, enterprise, serverless, dedicated, clustered }; + } // Retrieve the previously selected URLs from the from the urls local storage object. // This is used to update URLs whenever you switch between browser tabs. 
@@ -289,15 +292,17 @@ export function InfluxDBUrl() { } // Append the URL selector button to each codeblock containing a placeholder URL - function appendUrlSelector(urls={ - cloud: '', - oss: '', - core: '', - enterprise: '', - serverless: '', - dedicated: '', - clustered: '', - }) { + function appendUrlSelector( + urls = { + cloud: '', + oss: '', + core: '', + enterprise: '', + serverless: '', + dedicated: '', + clustered: '', + } + ) { const appendToUrls = Object.values(urls); const getBtnText = (context) => { @@ -315,7 +320,7 @@ export function InfluxDBUrl() { return contextText[context]; }; - appendToUrls.forEach(function (url) { + appendToUrls.forEach(function (url) { $(elementSelector).each(function () { var code = $(this).html(); if (code.includes(url)) { @@ -330,20 +335,32 @@ export function InfluxDBUrl() { }); } -//////////////////////////////////////////////////////////////////////////// -////////////////// Initialize InfluxDB URL interactions //////////////////// -//////////////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////////////////////////// + ////////////////// Initialize InfluxDB URL interactions //////////////////// + //////////////////////////////////////////////////////////////////////////// // Add the preserve tag to code blocks that shouldn't be updated addPreserve(); - const { cloud, oss, core, enterprise, serverless, dedicated, clustered } = DEFAULT_STORAGE_URLS; + const { cloud, oss, core, enterprise, serverless, dedicated, clustered } = + DEFAULT_STORAGE_URLS; // Append URL selector buttons to code blocks - appendUrlSelector({ cloud, oss, core, enterprise, serverless, dedicated, clustered }); + appendUrlSelector({ + cloud, + oss, + core, + enterprise, + serverless, + dedicated, + clustered, + }); // Update URLs on load - updateUrls({ cloud, oss, core, enterprise, serverless, dedicated, clustered }, getUrls()); + updateUrls( + { cloud, oss, core, enterprise, 
serverless, dedicated, clustered }, + getUrls() + ); // Set active radio button on page load setRadioButtons(getUrls()); diff --git a/assets/js/keybindings.js b/assets/js/keybindings.js index 6c8f2fcbe..50ee3a683 100644 --- a/assets/js/keybindings.js +++ b/assets/js/keybindings.js @@ -1,41 +1,58 @@ -// Dynamically update keybindings or hotkeys -function getPlatform() { - if (/Mac/.test(navigator.platform)) { - return "osx" - } else if (/Win/.test(navigator.platform)) { - return "win" - } else if (/Linux/.test(navigator.platform)) { - return "linux" - } else { - return "other" - } +import { getPlatform } from './utils/user-agent-platform.js'; +import $ from 'jquery'; + +/** + * Adds OS-specific class to component + * @param {string} osClass - OS-specific class to add + * @param {Object} options - Component options + * @param {jQuery} options.$component - jQuery element reference + */ +function addOSClass(osClass, { $component }) { + $component.addClass(osClass); } -const platform = getPlatform() +/** + * Updates keybinding display based on detected platform + * @param {Object} options - Component options + * @param {jQuery} options.$component - jQuery element reference + * @param {string} options.platform - Detected platform + */ +function updateKeyBindings({ $component, platform }) { + const osx = $component.data('osx'); + const linux = $component.data('linux'); + const win = $component.data('win'); -function addOSClass(osClass) { - $('.keybinding').addClass(osClass) -} + let keybind; -function updateKeyBindings() { - $('.keybinding').each(function() { - var osx = $(this).data("osx") - var linux = $(this).data("linux") - var win = $(this).data("win") - - if (platform === "other") { - if (win != linux) { - var keybind = '' + osx + ' for macOS, ' + linux + ' for Linux, and ' + win + ' for Windows'; - } else { - var keybind = '' + linux + ' for Linux and Windows and ' + osx + ' for macOS'; - } + if (platform === 'other') { + if (win !== linux) { + keybind = + `${osx} 
for macOS, ` + + `${linux} for Linux, ` + + `and ${win} for Windows`; } else { - var keybind = '' + $(this).data(platform) + '' + keybind = + `${linux} for Linux and Windows and ` + + `${osx} for macOS`; } + } else { + keybind = `${$component.data(platform)}`; + } - $(this).html(keybind) - }) + $component.html(keybind); } -addOSClass(platform) -updateKeyBindings() +/** + * Initialize and render platform-specific keybindings + * @param {Object} options - Component options + * @param {HTMLElement} options.component - DOM element + * @returns {void} + */ +export default function KeyBinding({ component }) { + // Initialize keybindings + const platform = getPlatform(); + const $component = $(component); + + addOSClass(platform, { $component }); + updateKeyBindings({ $component, platform }); +} diff --git a/assets/js/list-filters.js b/assets/js/list-filters.js index 7b008dcb6..28c818507 100644 --- a/assets/js/list-filters.js +++ b/assets/js/list-filters.js @@ -1,11 +1,15 @@ +import $ from 'jquery'; + // Count tag elements function countTag(tag) { - return $(".visible[data-tags*='" + tag + "']").length + return $(".visible[data-tags*='" + tag + "']").length; } -function getFilterCounts() { - $('#list-filters label').each(function() { - var tagName = $('input', this).attr('name').replace(/[\W/]+/, "-"); +function getFilterCounts($labels) { + $labels.each(function () { + var tagName = $('input', this) + .attr('name') + .replace(/[\W/]+/, '-'); var tagCount = countTag(tagName); $(this).attr('data-count', '(' + tagCount + ')'); if (tagCount <= 0) { @@ -13,38 +17,58 @@ function getFilterCounts() { } else { $(this).fadeTo(400, 1.0); } - }) + }); } -// Get initial filter count on page load -getFilterCounts() +/** TODO: Include the data source value in the as an additional attribute + * in the HTML and pass it into the component, which would let us use selectors + * for only the source items and let us have more than one + * list filter component per page without conflicts */ 
+export default function ListFilters({ component }) { + const $component = $(component); + const $labels = $component.find('label'); + const $inputs = $component.find('input'); -$("#list-filters input").click(function() { + getFilterCounts($labels); - // List of tags to hide - var tagArray = $("#list-filters input:checkbox:checked").map(function(){ - return $(this).attr('name').replace(/[\W]+/, "-"); - }).get(); + $inputs.click(function () { + // List of tags to hide + var tagArray = $component + .find('input:checkbox:checked') + .map(function () { + return $(this).attr('name').replace(/[\W]+/, '-'); + }) + .get(); - // List of tags to restore - var restoreArray = $("#list-filters input:checkbox:not(:checked)").map(function(){ - return $(this).attr('name').replace(/[\W]+/, "-"); - }).get(); + // List of tags to restore + var restoreArray = $component + .find('input:checkbox:not(:checked)') + .map(function () { + return $(this).attr('name').replace(/[\W]+/, '-'); + }) + .get(); - // Actions for filter select - if ( $(this).is(':checked') ) { - $.each( tagArray, function( index, value ) { - $(".filter-item.visible:not([data-tags~='" + value + "'])").removeClass('visible').fadeOut() - }) - } else { - $.each( restoreArray, function( index, value ) { - $(".filter-item:not(.visible)[data-tags~='" + value + "']").addClass('visible').fadeIn() - }) - $.each( tagArray, function( index, value ) { - $(".filter-item.visible:not([data-tags~='" + value + "'])").removeClass('visible').hide() - }) - } + // Actions for filter select + if ($(this).is(':checked')) { + $.each(tagArray, function (index, value) { + $(".filter-item.visible:not([data-tags~='" + value + "'])") + .removeClass('visible') + .fadeOut(); + }); + } else { + $.each(restoreArray, function (index, value) { + $(".filter-item:not(.visible)[data-tags~='" + value + "']") + .addClass('visible') + .fadeIn(); + }); + $.each(tagArray, function (index, value) { + $(".filter-item.visible:not([data-tags~='" + value + "'])") + 
.removeClass('visible') + .hide(); + }); + } - // Refresh filter count - getFilterCounts() -}); + // Refresh filter count + getFilterCounts($labels); + }); +} diff --git a/assets/js/main.js b/assets/js/main.js index c1b95d1bc..ca99dff48 100644 --- a/assets/js/main.js +++ b/assets/js/main.js @@ -1,35 +1,24 @@ // assets/js/main.js -// If you need to pass parameters from the calling Hugo page, you can import them here like so: -// import * as pageParams from '@params'; +// Import dependencies that we still need to load in the global scope +import $ from 'jquery'; /** Import modules that are not components. * TODO: Refactor these into single-purpose component modules. */ -// import * as codeblocksPreferences from './api-libs.js'; -// import * as datetime from './datetime.js'; -// import * as featureCallouts from './feature-callouts.js'; import * as apiLibs from './api-libs.js'; import * as codeControls from './code-controls.js'; import * as contentInteractions from './content-interactions.js'; +import * as datetime from './datetime.js'; import { delay } from './helpers.js'; import { InfluxDBUrl } from './influxdb-url.js'; -import * as localStorage from './local-storage.js'; +import * as localStorage from './services/local-storage.js'; import * as modals from './modals.js'; import * as notifications from './notifications.js'; import * as pageContext from './page-context.js'; import * as pageFeedback from './page-feedback.js'; import * as tabbedContent from './tabbed-content.js'; import * as v3Wayfinding from './v3-wayfinding.js'; -// import * as homeInteractions from './home-interactions.js'; -// import { getUrls, getReferrerHost, InfluxDBUrl } from './influxdb-url.js'; -// import * as keybindings from './keybindings.js'; -// import * as listFilters from './list-filters.js'; -// import { Modal } from './modal.js'; -// import { showNotifications } from './notifications.js'; -// import ReleaseTOC from './release-toc.js'; -// import * as scroll from './scroll.js'; -// 
import { TabbedContent } from './tabbed-content.js'; /** Import component modules * The component pattern organizes JavaScript, CSS, and HTML for a specific UI element or interaction: @@ -39,107 +28,151 @@ import * as v3Wayfinding from './v3-wayfinding.js'; * The JavaScript is ideally a single-purpose module that exports a single default function to initialize the component and handle any component interactions. */ import AskAITrigger from './ask-ai-trigger.js'; +import CodePlaceholder from './code-placeholders.js'; import { CustomTimeTrigger } from './custom-timestamps.js'; +import Diagram from './components/diagram.js'; +import DocSearch from './components/doc-search.js'; +import FeatureCallout from './feature-callouts.js'; +import FluxGroupKeysDemo from './flux-group-keys.js'; +import FluxInfluxDBVersionsTrigger from './flux-influxdb-versions.js'; +import KeyBinding from './keybindings.js'; +import ListFilters from './list-filters.js'; +import ProductSelector from './version-selector.js'; +import ReleaseToc from './release-toc.js'; import { SearchButton } from './search-button.js'; +import SidebarSearch from './components/sidebar-search.js'; import { SidebarToggle } from './sidebar-toggle.js'; import Theme from './theme.js'; import ThemeSwitch from './theme-switch.js'; -// import CodeControls from './code-controls.js'; -// import ContentInteractions from './content-interactions.js'; -// import CustomTimestamps from './custom-timestamps.js'; -// import Diagram from './Diagram.js'; -// import FluxGroupKeysExample from './FluxGroupKeysExample.js'; -import FluxInfluxDBVersionsTrigger from './flux-influxdb-versions.js'; -// import PageFeedback from './page-feedback.js'; -// import SearchInput from './SearchInput.js'; -// import Sidebar from './Sidebar.js'; -// import V3Wayfinding from './v3-wayfinding.js'; -// import VersionSelector from './VersionSelector.js'; -// Expose libraries and components within a namespaced object (for backwards compatibility or testing) -// 
Expose libraries and components within a namespaced object (for backwards compatibility or testing) +/** + * Component Registry + * A central registry that maps component names to their constructor functions. + * Add new components to this registry as they are created or migrated from non-component modules. + * This allows for: + * 1. Automatic component initialization based on data-component attributes + * 2. Centralized component management + * 3. Easy addition/removal of components + * 4. Simplified testing and debugging + */ +const componentRegistry = { + 'ask-ai-trigger': AskAITrigger, + 'code-placeholder': CodePlaceholder, + 'custom-time-trigger': CustomTimeTrigger, + diagram: Diagram, + 'doc-search': DocSearch, + 'feature-callout': FeatureCallout, + 'flux-group-keys-demo': FluxGroupKeysDemo, + 'flux-influxdb-versions-trigger': FluxInfluxDBVersionsTrigger, + keybinding: KeyBinding, + 'list-filters': ListFilters, + 'product-selector': ProductSelector, + 'release-toc': ReleaseToc, + 'search-button': SearchButton, + 'sidebar-search': SidebarSearch, + 'sidebar-toggle': SidebarToggle, + theme: Theme, + 'theme-switch': ThemeSwitch, +}; - - -document.addEventListener('DOMContentLoaded', function () { +/** + * Initialize global namespace for documentation JavaScript + * Exposes core modules for debugging, testing, and backwards compatibility + */ +function initGlobals() { if (typeof window.influxdatadocs === 'undefined') { window.influxdatadocs = {}; } - // Expose modules to the global object for debugging, testing, and backwards compatibility for non-ES6 modules. 
+ // Expose modules to the global object for debugging, testing, and backwards compatibility window.influxdatadocs.delay = delay; window.influxdatadocs.localStorage = window.LocalStorageAPI = localStorage; window.influxdatadocs.pageContext = pageContext; window.influxdatadocs.toggleModal = modals.toggleModal; + window.influxdatadocs.componentRegistry = componentRegistry; - // On content loaded, initialize (not-component-ready) UI interaction modules - // To differentiate these from component-ready modules, these modules typically export an initialize function that wraps UI interactions and event listeners. + // Re-export jQuery to global namespace for legacy scripts + if (typeof window.jQuery === 'undefined') { + window.jQuery = window.$ = $; + } + + return window.influxdatadocs; +} + +/** + * Initialize components based on data-component attributes + * @param {Object} globals - The global influxdatadocs namespace + */ +function initComponents(globals) { + const components = document.querySelectorAll('[data-component]'); + + components.forEach((component) => { + const componentName = component.getAttribute('data-component'); + const ComponentConstructor = componentRegistry[componentName]; + + if (ComponentConstructor) { + // Initialize the component and store its instance in the global namespace + try { + const instance = ComponentConstructor({ component }); + globals[componentName] = ComponentConstructor; + + // Optionally store component instances for future reference + if (!globals.instances) { + globals.instances = {}; + } + + if (!globals.instances[componentName]) { + globals.instances[componentName] = []; + } + + globals.instances[componentName].push({ + element: component, + instance, + }); + } catch (error) { + console.error( + `Error initializing component "${componentName}":`, + error + ); + } + } else { + console.warn(`Unknown component: "${componentName}"`); + } + }); +} + +/** + * Initialize all non-component modules + */ +function initModules() { 
modals.initialize(); apiLibs.initialize(); codeControls.initialize(); contentInteractions.initialize(); + datetime.initialize(); InfluxDBUrl(); notifications.initialize(); pageFeedback.initialize(); tabbedContent.initialize(); v3Wayfinding.initialize(); +} - /** Initialize components - Component Structure: Each component is structured as a jQuery anonymous function that listens for the document ready state. - Initialization in main.js: Each component is called in main.js inside a jQuery document ready function to ensure they are initialized when the document is ready. - Note: These components should *not* be called directly in the HTML. - */ - const components = document.querySelectorAll('[data-component]'); - components.forEach((component) => { - const componentName = component.getAttribute('data-component'); - switch (componentName) { - case 'ask-ai-trigger': - AskAITrigger({ component }); - window.influxdatadocs[componentName] = AskAITrigger; - break; - case 'custom-time-trigger': - CustomTimeTrigger({ component }); - window.influxdatadocs[componentName] = CustomTimeTrigger; - break; - case 'flux-influxdb-versions-trigger': - FluxInfluxDBVersionsTrigger({ component }); - window.influxdatadocs[componentName] = FluxInfluxDBVersionsTrigger; - break; - case 'search-button': - SearchButton({ component }); - window.influxdatadocs[componentName] = SearchButton; - break; - case 'sidebar-toggle': - SidebarToggle({ component }); - window.influxdatadocs[componentName] = SidebarToggle; - break; - case 'theme': - Theme({ component }); - window.influxdatadocs[componentName] = Theme; - break; - // CodeControls(); - // ContentInteractions(); - // CustomTimestamps(); - // Diagram(); - // FluxGroupKeysExample(); - // FluxInfluxDBVersionsModal(); - // InfluxDBUrl(); - // Modal(); - // PageFeedback(); - // ReleaseTOC(); - // SearchInput(); - // showNotifications(); - // Sidebar(); - // TabbedContent(); - // ThemeSwitch({}); - // V3Wayfinding(); - // VersionSelector(); - case 
'theme-switch': - ThemeSwitch({ component }); - window.influxdatadocs[componentName] = ThemeSwitch; - break; - default: - console.warn(`Unknown component: ${componentName}`); - } - }); -}); +/** + * Main initialization function + */ +function init() { + // Initialize global namespace and expose core modules + const globals = initGlobals(); + + // Initialize non-component UI modules + initModules(); + + // Initialize components from registry + initComponents(globals); +} + +// Initialize everything when the DOM is ready +document.addEventListener('DOMContentLoaded', init); + +// Export public API +export { initGlobals, componentRegistry }; diff --git a/assets/js/page-context.js b/assets/js/page-context.js index 4903e9a14..6779fbca2 100644 --- a/assets/js/page-context.js +++ b/assets/js/page-context.js @@ -1,34 +1,80 @@ /** This module retrieves browser context information and site data for the * current page, version, and product. */ -import { products, influxdb_urls } from '@params'; - -const safeProducts = products || {}; -const safeUrls = influxdb_urls || {}; +import { products } from './services/influxdata-products.js'; +import { influxdbUrls } from './services/influxdb-urls.js'; function getCurrentProductData() { const path = window.location.pathname; const mappings = [ - { pattern: /\/influxdb\/cloud\//, product: safeProducts.cloud, urls: safeUrls.influxdb_cloud }, - { pattern: /\/influxdb3\/core/, product: safeProducts.influxdb3_core, urls: safeUrls.core }, - { pattern: /\/influxdb3\/enterprise/, product: safeProducts.influxdb3_enterprise, urls: safeUrls.enterprise }, - { pattern: /\/influxdb3\/cloud-serverless/, product: safeProducts.influxdb3_cloud_serverless, urls: safeUrls.cloud }, - { pattern: /\/influxdb3\/cloud-dedicated/, product: safeProducts.influxdb3_cloud_dedicated, urls: safeUrls.dedicated }, - { pattern: /\/influxdb3\/clustered/, product: safeProducts.influxdb3_clustered, urls: safeUrls.clustered }, - { pattern: /\/enterprise_v1\//, product: 
safeProducts.enterprise_influxdb, urls: safeUrls.oss }, - { pattern: /\/influxdb.*v1\//, product: safeProducts.influxdb, urls: safeUrls.oss }, - { pattern: /\/influxdb.*v2\//, product: safeProducts.influxdb, urls: safeUrls.oss }, - { pattern: /\/kapacitor\//, product: safeProducts.kapacitor, urls: safeUrls.oss }, - { pattern: /\/telegraf\//, product: safeProducts.telegraf, urls: safeUrls.oss }, - { pattern: /\/chronograf\//, product: safeProducts.chronograf, urls: safeUrls.oss }, - { pattern: /\/flux\//, product: safeProducts.flux, urls: safeUrls.oss }, + { + pattern: /\/influxdb\/cloud\//, + product: products.cloud, + urls: influxdbUrls.influxdb_cloud, + }, + { + pattern: /\/influxdb3\/core/, + product: products.influxdb3_core, + urls: influxdbUrls.core, + }, + { + pattern: /\/influxdb3\/enterprise/, + product: products.influxdb3_enterprise, + urls: influxdbUrls.enterprise, + }, + { + pattern: /\/influxdb3\/cloud-serverless/, + product: products.influxdb3_cloud_serverless, + urls: influxdbUrls.cloud, + }, + { + pattern: /\/influxdb3\/cloud-dedicated/, + product: products.influxdb3_cloud_dedicated, + urls: influxdbUrls.dedicated, + }, + { + pattern: /\/influxdb3\/clustered/, + product: products.influxdb3_clustered, + urls: influxdbUrls.clustered, + }, + { + pattern: /\/enterprise_v1\//, + product: products.enterprise_influxdb, + urls: influxdbUrls.oss, + }, + { + pattern: /\/influxdb.*v1\//, + product: products.influxdb, + urls: influxdbUrls.oss, + }, + { + pattern: /\/influxdb.*v2\//, + product: products.influxdb, + urls: influxdbUrls.oss, + }, + { + pattern: /\/kapacitor\//, + product: products.kapacitor, + urls: influxdbUrls.oss, + }, + { + pattern: /\/telegraf\//, + product: products.telegraf, + urls: influxdbUrls.oss, + }, + { + pattern: /\/chronograf\//, + product: products.chronograf, + urls: influxdbUrls.oss, + }, + { pattern: /\/flux\//, product: products.flux, urls: influxdbUrls.oss }, ]; for (const { pattern, product, urls } of mappings) { if 
(pattern.test(path)) { - return { - product: product || 'unknown', - urls: urls || {} + return { + product: product || 'unknown', + urls: urls || {}, }; } } @@ -36,7 +82,8 @@ function getCurrentProductData() { return { product: 'other', urls: {} }; } -// Return the page context (cloud, serverless, oss/enterprise, dedicated, clustered, other) +// Return the page context +// (cloud, serverless, oss/enterprise, dedicated, clustered, other) function getContext() { if (/\/influxdb\/cloud\//.test(window.location.pathname)) { return 'cloud'; @@ -78,8 +125,12 @@ const context = getContext(), protocol = location.protocol, referrer = document.referrer === '' ? 'direct' : document.referrer, referrerHost = getReferrerHost(), - // TODO: Verify this still does what we want since the addition of InfluxDB 3 naming and the Core and Enterprise versions. - version = (/^v\d/.test(pathArr[1]) || pathArr[1]?.includes('cloud') ? pathArr[1].replace(/^v/, '') : "n/a") + // TODO: Verify this works since the addition of InfluxDB 3 naming + // and the Core and Enterprise versions. + version = + /^v\d/.test(pathArr[1]) || pathArr[1]?.includes('cloud') + ? pathArr[1].replace(/^v/, '') + : 'n/a'; export { context, @@ -92,4 +143,4 @@ export { referrer, referrerHost, version, -}; \ No newline at end of file +}; diff --git a/assets/js/release-toc.js b/assets/js/release-toc.js index 42858fccc..9e02ec5c4 100644 --- a/assets/js/release-toc.js +++ b/assets/js/release-toc.js @@ -1,26 +1,67 @@ /////////////////////////// Table of Contents Script /////////////////////////// /* - * This script is used to generate a table of contents for the - * release notes pages. -*/ + * This script is used to generate a table of contents for the + * release notes pages. 
+ */ +export default function ReleaseToc({ component }) { + // Get all h2 elements that are not checkpoint-releases + const releases = Array.from(document.querySelectorAll('h2')).filter( + (el) => !el.id.match(/checkpoint-releases/) + ); -// Use jQuery filter to get an array of all the *release* h2 elements -const releases = $('h2').filter( - (_i, el) => !el.id.match(/checkpoint-releases/) -); + // Extract data about each release from the array of releases + const releaseData = releases.map((el) => ({ + name: el.textContent, + id: el.id, + class: el.getAttribute('class'), + date: el.getAttribute('date'), + })); -// Extract data about each release from the array of releases -releaseData = releases.map((_i, el) => ({ - name: el.textContent, - id: el.id, - class: el.getAttribute('class'), - date: el.getAttribute('date') -})); + // Build the release table of contents + const releaseTocUl = component.querySelector('#release-toc ul'); + releaseData.forEach((release) => { + releaseTocUl.appendChild(getReleaseItem(release)); + }); + + /* + * This script is used to expand the release notes table of contents by the + * number specified in the `show` attribute of `ul.release-list`. + * Once all the release items are visible, the "Show More" button is hidden. + */ + const showMoreBtn = component.querySelector('.show-more'); + if (showMoreBtn) { + showMoreBtn.addEventListener('click', function () { + const itemHeight = 1.885; // Item height in rem + const releaseNum = releaseData.length; + const maxHeight = releaseNum * itemHeight; + const releaseList = document.getElementById('release-list'); + const releaseIncrement = Number(releaseList.getAttribute('show')); + const currentHeightMatch = releaseList.style.height.match(/\d+\.?\d+/); + const currentHeight = currentHeightMatch + ? Number(currentHeightMatch[0]) + : 0; + const potentialHeight = currentHeight + releaseIncrement * itemHeight; + const newHeight = + potentialHeight > maxHeight ? 
maxHeight : potentialHeight; + + releaseList.style.height = `${newHeight}rem`; + + if (newHeight >= maxHeight) { + // Simple fade out + showMoreBtn.style.transition = 'opacity 0.1s'; + showMoreBtn.style.opacity = 0; + setTimeout(() => { + showMoreBtn.style.display = 'none'; + }, 100); + } + }); + } +} // Use release data to generate a list item for each release -getReleaseItem = (releaseData) => { - var li = document.createElement("li"); +function getReleaseItem(releaseData) { + const li = document.createElement('li'); if (releaseData.class !== null) { li.className = releaseData.class; } @@ -28,31 +69,3 @@ getReleaseItem = (releaseData) => { li.setAttribute('date', releaseData.date); return li; } - -// Use jQuery each to build the release table of contents -releaseData.each((_i, release) => { - $('#release-toc ul')[0].appendChild(getReleaseItem(release)); -}); - -/* - * This script is used to expand the release notes table of contents by the - * number specified in the `show` attribute of `ul.release-list`. - * Once all the release items are visible, the "Show More" button is hidden. -*/ -$('#release-toc .show-more').click(function () { - const itemHeight = 1.885; // Item height in rem - const releaseNum = releaseData.length; - const maxHeight = releaseNum * itemHeight; - const releaseIncrement = Number($('#release-list')[0].getAttribute('show')); - const currentHeight = Number( - $('#release-list')[0].style.height.match(/\d+\.?\d+/)[0] - ); - const potentialHeight = currentHeight + releaseIncrement * itemHeight; - const newHeight = potentialHeight > maxHeight ? 
maxHeight : potentialHeight; - - $('#release-list')[0].style.height = `${newHeight}rem`; - - if (newHeight >= maxHeight) { - $('#release-toc .show-more').fadeOut(100); - } -}); diff --git a/assets/js/search-interactions.js b/assets/js/search-interactions.js deleted file mode 100644 index 4f8fdd8ac..000000000 --- a/assets/js/search-interactions.js +++ /dev/null @@ -1,10 +0,0 @@ -// Fade content wrapper when focusing on search input -$('#algolia-search-input').focus(function() { - $('.content-wrapper').fadeTo(300, .35); -}) - -// Hide search dropdown when leaving search input -$('#algolia-search-input').blur(function() { - $('.content-wrapper').fadeTo(200, 1); - $('.ds-dropdown-menu').hide(); -}) diff --git a/assets/js/services/influxdata-products.js b/assets/js/services/influxdata-products.js new file mode 100644 index 000000000..eecd8aa89 --- /dev/null +++ b/assets/js/services/influxdata-products.js @@ -0,0 +1,3 @@ +import { products as productsParam } from '@params'; + +export const products = productsParam || {}; diff --git a/assets/js/services/influxdb-urls.js b/assets/js/services/influxdb-urls.js new file mode 100644 index 000000000..1d31ff67f --- /dev/null +++ b/assets/js/services/influxdb-urls.js @@ -0,0 +1,3 @@ +import { influxdb_urls as influxdbUrlsParam } from '@params'; + +export const influxdbUrls = influxdbUrlsParam || {}; diff --git a/assets/js/local-storage.js b/assets/js/services/local-storage.js similarity index 93% rename from assets/js/local-storage.js rename to assets/js/services/local-storage.js index 103685f4d..8efccde12 100644 --- a/assets/js/local-storage.js +++ b/assets/js/services/local-storage.js @@ -10,7 +10,8 @@ - messages: Messages (data/notifications.yaml) that have been seen (array) - callouts: Feature callouts that have been seen (array) */ -import * as pageParams from '@params'; + +import { influxdbUrls } from './influxdb-urls.js'; // Prefix for all InfluxData docs local storage const storagePrefix = 'influxdata_docs_'; @@ -82,14 
+83,12 @@ function getPreferences() { //////////// MANAGE INFLUXDATA DOCS URLS IN LOCAL STORAGE ////////////////////// //////////////////////////////////////////////////////////////////////////////// - const defaultUrls = {}; -// Guard against pageParams being null/undefined and safely access nested properties -if (pageParams && pageParams.influxdb_urls) { - Object.entries(pageParams.influxdb_urls).forEach(([product, {providers}]) => { - defaultUrls[product] = providers.filter(provider => provider.name === 'Default')[0]?.regions[0]?.url; - }); -} +Object.entries(influxdbUrls).forEach(([product, { providers }]) => { + defaultUrls[product] = + providers.filter((provider) => provider.name === 'Default')[0]?.regions[0] + ?.url || 'https://cloud2.influxdata.com'; +}); export const DEFAULT_STORAGE_URLS = { oss: defaultUrls.oss, @@ -177,7 +176,10 @@ const defaultNotificationsObj = { function getNotifications() { // Initialize notifications data if it doesn't already exist if (localStorage.getItem(notificationStorageKey) === null) { - initializeStorageItem('notifications', JSON.stringify(defaultNotificationsObj)); + initializeStorageItem( + 'notifications', + JSON.stringify(defaultNotificationsObj) + ); } // Retrieve and parse the notifications data as JSON @@ -221,7 +223,10 @@ function setNotificationAsRead(notificationID, notificationType) { readNotifications.push(notificationID); notificationsObj[notificationType + 's'] = readNotifications; - localStorage.setItem(notificationStorageKey, JSON.stringify(notificationsObj)); + localStorage.setItem( + notificationStorageKey, + JSON.stringify(notificationsObj) + ); } // Export functions as a module and make the file backwards compatible for non-module environments until all remaining dependent scripts are ported to modules diff --git a/assets/js/sidebar-toggle.js b/assets/js/sidebar-toggle.js index 4db64db79..49af74008 100644 --- a/assets/js/sidebar-toggle.js +++ b/assets/js/sidebar-toggle.js @@ -3,7 +3,7 @@ 
http://www.thesitewizard.com/javascripts/change-style-sheets.shtml */ -import * as localStorage from './local-storage.js'; +import * as localStorage from './services/local-storage.js'; // *** TO BE CUSTOMISED *** var sidebar_state_preference_name = 'sidebar_state'; diff --git a/assets/js/theme-switch.js b/assets/js/theme-switch.js index 4c97c3108..c7de8552d 100644 --- a/assets/js/theme-switch.js +++ b/assets/js/theme-switch.js @@ -1,20 +1,21 @@ import Theme from './theme.js'; export default function ThemeSwitch({ component }) { - if ( component == undefined) { + if (component === undefined) { component = document; } - component.querySelectorAll(`.theme-switch-light`).forEach((button) => { - button.addEventListener('click', function(event) { + + component.querySelectorAll('.theme-switch-light').forEach((button) => { + button.addEventListener('click', function (event) { event.preventDefault(); - Theme({ style: 'light-theme' }); + Theme({ component, style: 'light-theme' }); }); }); - component.querySelectorAll(`.theme-switch-dark`).forEach((button) => { - button.addEventListener('click', function(event) { + component.querySelectorAll('.theme-switch-dark').forEach((button) => { + button.addEventListener('click', function (event) { event.preventDefault(); - Theme({ style: 'dark-theme' }); + Theme({ component, style: 'dark-theme' }); }); }); } diff --git a/assets/js/theme.js b/assets/js/theme.js index 92a6c190e..8588d44a9 100644 --- a/assets/js/theme.js +++ b/assets/js/theme.js @@ -1,4 +1,4 @@ -import { getPreference, setPreference } from './local-storage.js'; +import { getPreference, setPreference } from './services/local-storage.js'; const PROPS = { style_preference_name: 'theme', @@ -6,19 +6,22 @@ const PROPS = { style_domain: 'docs.influxdata.com', }; -function getPreferredTheme () { +function getPreferredTheme() { return `${getPreference(PROPS.style_preference_name)}-theme`; } function switchStyle({ styles_element, css_title }) { // Disable all other theme 
stylesheets - styles_element.querySelectorAll('link[rel*="stylesheet"][title*="theme"]') - .forEach(function (link) { - link.disabled = true; - }); + styles_element + .querySelectorAll('link[rel*="stylesheet"][title*="theme"]') + .forEach(function (link) { + link.disabled = true; + }); // Enable the stylesheet with the specified title - const link = styles_element.querySelector(`link[rel*="stylesheet"][title="${css_title}"]`); + const link = styles_element.querySelector( + `link[rel*="stylesheet"][title="${css_title}"]` + ); link && (link.disabled = false); setPreference(PROPS.style_preference_name, css_title.replace(/-theme/, '')); @@ -38,5 +41,4 @@ export default function Theme({ component, style }) { if (component.dataset?.themeCallback === 'setVisibility') { setVisibility(component); } - } diff --git a/assets/js/utils/debug-helpers.js b/assets/js/utils/debug-helpers.js new file mode 100644 index 000000000..08433e2cf --- /dev/null +++ b/assets/js/utils/debug-helpers.js @@ -0,0 +1,38 @@ +/** + * Helper functions for debugging without source maps + * Example usage: + * In your code, you can use these functions like this: + * ```javascript + * import { debugLog, debugBreak, debugInspect } from './debug-helpers.js'; + * + * const data = debugInspect(someData, 'Data'); + * debugLog('Processing data', 'myFunction'); + * + * function processData() { + * // Add a breakpoint that works with DevTools + * debugBreak(); + * + * // Your existing code... + * } + * ``` + * + * @fileoverview DEVELOPMENT USE ONLY - Functions should not be committed to production + */ + +/* eslint-disable no-debugger */ +/* eslint-disable-next-line */ +// NOTE: These functions are detected by ESLint rules to prevent committing debug code + +export function debugLog(message, context = '') { + const contextStr = context ? 
`[${context}]` : ''; + console.log(`DEBUG${contextStr}: ${message}`); +} + +export function debugBreak() { + debugger; +} + +export function debugInspect(value, label = 'Inspect') { + console.log(`DEBUG[${label}]:`, value); + return value; +} diff --git a/assets/js/utils/search-interactions.js b/assets/js/utils/search-interactions.js new file mode 100644 index 000000000..6a73b2535 --- /dev/null +++ b/assets/js/utils/search-interactions.js @@ -0,0 +1,107 @@ +/** + * Manages search interactions for DocSearch integration + * Uses MutationObserver to watch for dropdown creation + */ +export default function SearchInteractions({ searchInput }) { + const contentWrapper = document.querySelector('.content-wrapper'); + let observer = null; + let dropdownObserver = null; + let dropdownMenu = null; + const debug = false; // Set to true for debugging logs + + // Fade content wrapper when focusing on search input + function handleFocus() { + contentWrapper.style.opacity = '0.35'; + contentWrapper.style.transition = 'opacity 300ms'; + } + + // Hide search dropdown when leaving search input + function handleBlur(event) { + // Only process blur if not clicking within dropdown + const relatedTarget = event.relatedTarget; + if ( + relatedTarget && + (relatedTarget.closest('.algolia-autocomplete') || + relatedTarget.closest('.ds-dropdown-menu')) + ) { + return; + } + + contentWrapper.style.opacity = '1'; + contentWrapper.style.transition = 'opacity 200ms'; + + // Hide dropdown if it exists + if (dropdownMenu) { + dropdownMenu.style.display = 'none'; + } + } + + // Add event listeners + searchInput.addEventListener('focus', handleFocus); + searchInput.addEventListener('blur', handleBlur); + + // Use MutationObserver to detect when dropdown is added to the DOM + observer = new MutationObserver((mutations) => { + for (const mutation of mutations) { + if (mutation.type === 'childList') { + const newDropdown = document.querySelector( + '.ds-dropdown-menu:not([data-monitored])' + ); + if 
(newDropdown) { + // Save reference to dropdown + dropdownMenu = newDropdown; + newDropdown.setAttribute('data-monitored', 'true'); + + // Monitor dropdown removal/display changes + dropdownObserver = new MutationObserver((dropdownMutations) => { + for (const dropdownMutation of dropdownMutations) { + if (debug) { + if ( + dropdownMutation.type === 'attributes' && + dropdownMutation.attributeName === 'style' + ) { + console.log( + 'Dropdown style changed:', + dropdownMenu.style.display + ); + } + } + } + }); + + // Observe changes to dropdown attributes (like style) + dropdownObserver.observe(dropdownMenu, { + attributes: true, + attributeFilter: ['style'], + }); + + // Add event listeners to keep dropdown open when interacted with + dropdownMenu.addEventListener('mousedown', (e) => { + // Prevent blur on searchInput when clicking in dropdown + e.preventDefault(); + }); + } + } + } + }); + + // Start observing the document body for dropdown creation + observer.observe(document.body, { + childList: true, + subtree: true, + }); + + // Return cleanup function + return function cleanup() { + searchInput.removeEventListener('focus', handleFocus); + searchInput.removeEventListener('blur', handleBlur); + + if (observer) { + observer.disconnect(); + } + + if (dropdownObserver) { + dropdownObserver.disconnect(); + } + }; +} diff --git a/assets/js/utils/user-agent-platform.js b/assets/js/utils/user-agent-platform.js new file mode 100644 index 000000000..803f1bdf9 --- /dev/null +++ b/assets/js/utils/user-agent-platform.js @@ -0,0 +1,35 @@ +/** + * Platform detection utility functions + * Provides methods for detecting user's operating system + */ + +/** + * Detects user's operating system using modern techniques + * Falls back to userAgent parsing when newer APIs aren't available + * @returns {string} Operating system identifier ("osx", "win", "linux", or "other") + */ +export function getPlatform() { + // Try to use modern User-Agent Client Hints API first (Chrome 89+, Edge 
89+) + if (navigator.userAgentData && navigator.userAgentData.platform) { + const platform = navigator.userAgentData.platform.toLowerCase(); + + if (platform.includes('mac')) return 'osx'; + if (platform.includes('win')) return 'win'; + if (platform.includes('linux')) return 'linux'; + } + + // Fall back to userAgent string parsing + const userAgent = navigator.userAgent.toLowerCase(); + + if ( + userAgent.includes('mac') || + userAgent.includes('iphone') || + userAgent.includes('ipad') + ) + return 'osx'; + if (userAgent.includes('win')) return 'win'; + if (userAgent.includes('linux') || userAgent.includes('android')) + return 'linux'; + + return 'other'; +} diff --git a/assets/js/v3-wayfinding.js b/assets/js/v3-wayfinding.js index b50c58f7f..761a19044 100644 --- a/assets/js/v3-wayfinding.js +++ b/assets/js/v3-wayfinding.js @@ -1,6 +1,14 @@ import { CLOUD_URLS } from './influxdb-url.js'; -import * as localStorage from './local-storage.js'; -import { context, host, hostname, path, protocol, referrer, referrerHost } from './page-context.js'; +import * as localStorage from './services/local-storage.js'; +import { + context, + host, + hostname, + path, + protocol, + referrer, + referrerHost, +} from './page-context.js'; /** * Builds a referrer whitelist array that includes the current page host and all @@ -69,8 +77,6 @@ function setWayfindingInputState() { } function submitWayfindingData(engine, action) { - - // Build lp using page data and engine data const lp = `ioxwayfinding,host=${hostname},path=${path},referrer=${referrer},engine=${engine} action="${action}"`; @@ -81,10 +87,7 @@ function submitWayfindingData(engine, action) { 'https://j32dswat7l.execute-api.us-east-1.amazonaws.com/prod/wayfinding' ); xhr.setRequestHeader('X-Requested-With', 'XMLHttpRequest'); - xhr.setRequestHeader( - 'Access-Control-Allow-Origin', - `${protocol}//${host}` - ); + xhr.setRequestHeader('Access-Control-Allow-Origin', `${protocol}//${host}`); xhr.setRequestHeader('Content-Type', 
'text/plain; charset=utf-8'); xhr.setRequestHeader('Accept', 'application/json'); xhr.send(lp); diff --git a/assets/js/version-selector.js b/assets/js/version-selector.js index 51fa52c53..7d9161c87 100644 --- a/assets/js/version-selector.js +++ b/assets/js/version-selector.js @@ -1,19 +1,21 @@ -// Select the product dropdown and dropdown items -const productDropdown = document.querySelector("#product-dropdown"); -const dropdownItems = document.querySelector("#dropdown-items"); +export default function ProductSelector({ component }) { + // Select the product dropdown and dropdown items + const productDropdown = component.querySelector('#product-dropdown'); + const dropdownItems = component.querySelector('#dropdown-items'); -// Expand the menu on click -if (productDropdown) { - productDropdown.addEventListener("click", function() { - productDropdown.classList.toggle("open"); - dropdownItems.classList.toggle("open"); + // Expand the menu on click + if (productDropdown) { + productDropdown.addEventListener('click', function () { + productDropdown.classList.toggle('open'); + dropdownItems.classList.toggle('open'); + }); + } + + // Close the dropdown by clicking anywhere else + document.addEventListener('click', function (e) { + // Check if the click was outside of the '.product-list' container + if (!e.target.closest('.product-list')) { + dropdownItems.classList.remove('open'); + } }); } - -// Close the dropdown by clicking anywhere else -document.addEventListener("click", function(e) { - // Check if the click was outside of the '.product-list' container - if (!e.target.closest('.product-list')) { - dropdownItems.classList.remove("open"); - } -}); diff --git a/assets/jsconfig.json b/assets/jsconfig.json index 377218ccb..4ad710c10 100644 --- a/assets/jsconfig.json +++ b/assets/jsconfig.json @@ -3,7 +3,8 @@ "baseUrl": ".", "paths": { "*": [ - "*" + "*", + "../node_modules/*" ] } } diff --git a/assets/styles/layouts/_datetime.scss b/assets/styles/layouts/_datetime.scss new 
file mode 100644 index 000000000..dc8f20bdf --- /dev/null +++ b/assets/styles/layouts/_datetime.scss @@ -0,0 +1,18 @@ +/* + Datetime Components + ---------------------------------------------- +*/ + +.current-timestamp, +.current-date, +.current-time, +.enterprise-eol-date { + color: $current-timestamp-color; + display: inline-block; + font-family: $proxima; + white-space: nowrap; +} + +.nowrap { + white-space: nowrap; +} \ No newline at end of file diff --git a/assets/styles/layouts/_homepage.scss b/assets/styles/layouts/_homepage.scss index 387c4b6be..ca92588e9 100644 --- a/assets/styles/layouts/_homepage.scss +++ b/assets/styles/layouts/_homepage.scss @@ -105,7 +105,7 @@ .product { padding: 0 1rem; display: flex; - flex: 1 1 50%; + flex: 1 1 33%; flex-direction: column; justify-content: space-between; max-width: 33%; @@ -118,11 +118,10 @@ line-height: 1.5rem; color: rgba($article-text, .7); } - } - &.beta { - .product-info h3::after { - content: "beta"; + h3[state] { + &::after { + content: attr(state); margin-left: .5rem; font-size: 1rem; padding: .25em .5em .25em .4em; @@ -132,6 +131,8 @@ font-style: italic; vertical-align: middle; } + + } } ul.product-links { @@ -227,6 +228,30 @@ background: $article-bg; } + .categories { + display: flex; + flex-direction: row; + flex-wrap: wrap; + // margin: 0 -1rem; + width: calc(100% + 2rem); + + .category { + &.full-width { + width: 100%; + } + &.two-thirds { + width: 66.66%; + .product { max-width: 50%; } + } + &.one-third { + width: 33.33%; + .product { + max-width: 100%; + } + } + } + } + .category-head{ margin: 1rem 0 2rem; &::after { @@ -234,6 +259,7 @@ display: block; border-top: 1px solid $article-hr; margin-top: -1.15rem; + width: calc(100% - 2rem); } } } @@ -441,6 +467,16 @@ ul {margin-bottom: 0;} } } + .categories .category { + &.two-thirds { + width: 100%; + .product { max-width: 100%; } + } + &.one-third { + width: 100%; + .product { max-width: 100%; } + } + } } #telegraf { flex-direction: column; diff --git 
a/assets/styles/layouts/_notifications.scss b/assets/styles/layouts/_notifications.scss index 00034186a..e7418ff82 100644 --- a/assets/styles/layouts/_notifications.scss +++ b/assets/styles/layouts/_notifications.scss @@ -99,6 +99,26 @@ pre { background: rgba($r-basalt, .35); } } + &.ga-announcement { + background-image: url('/svgs/influxdb3-ga-background.svg'); + background-size: cover; + a:hover { color: $br-dark-blue; } + code { color: $gr-gypsy; background: rgba($gr-gypsy, .25); } + pre { background: rgba($gr-gypsy, .25); } + + h3 {font-size: 1.4rem !important;} + .notification-slug { font-size: 1.15rem; + .btn { + display: inline-block; + background: $g20-white; + color: $br-dark-blue; + padding: .5rem 1rem; + border-radius: $radius * 2; + font-size: 1rem; + } + } + } + //////////// Basic HTML element styles for notification content //////////// h1,h2,h3,h4,h5,h6 { @@ -156,6 +176,16 @@ } .show::before {content: "Show more"} } + + .title-tag { + padding: .15rem .45rem; + text-transform: uppercase; + font-size: .85rem; + border-radius: $radius * 2; + font-family: $code; + background: $br-dark-blue; + } + .title-tag + h3 {margin-top: .75rem;} } } diff --git a/assets/styles/layouts/article/_blocks.scss b/assets/styles/layouts/article/_blocks.scss index 62b205491..c7250749d 100644 --- a/assets/styles/layouts/article/_blocks.scss +++ b/assets/styles/layouts/article/_blocks.scss @@ -97,4 +97,4 @@ blockquote { "blocks/important", "blocks/warning", "blocks/caution", - "blocks/beta"; + "blocks/special-state"; diff --git a/assets/styles/layouts/article/_diagrams.scss b/assets/styles/layouts/article/_diagrams.scss index 4e3c1694e..f6c3e1b07 100644 --- a/assets/styles/layouts/article/_diagrams.scss +++ b/assets/styles/layouts/article/_diagrams.scss @@ -16,6 +16,10 @@ background: $article-code-bg !important; font-size: .85em; font-weight: $medium; + + p { + background: $article-bg !important; + } } .node { diff --git a/assets/styles/layouts/article/_feedback.scss 
b/assets/styles/layouts/article/_feedback.scss index 7578943ee..aba35b825 100644 --- a/assets/styles/layouts/article/_feedback.scss +++ b/assets/styles/layouts/article/_feedback.scss @@ -15,27 +15,48 @@ padding-right: 2rem; ul { - display: flex; - flex-wrap: wrap; margin-bottom: 1.25rem; padding: 0; list-style: none; - li {display: inline-block} - a { - margin-right: 1.5rem; color: $article-heading; + font-weight: $medium; + position: relative; + + &::after { + content: "\e90a"; + font-family: 'icomoon-v4'; + font-weight: bold; + font-size: 1.3rem; + display: inline-block; + position: absolute; + @include gradient($grad-burningDusk); + background-clip: text; + -webkit-text-fill-color: transparent; + right: 0; + transform: translateX(.25rem); + opacity: 0; + transition: transform .2s, opacity .2s; + } &:hover { - color: $article-link; - border-radius: calc($radius * 1.5); + &::after {transform: translateX(1.5rem); opacity: 1;} + } + + &.discord:before { + content: url('/svgs/discord.svg'); + display: inline-block; + height: 1.1rem; + width: 1.25rem; + vertical-align: top; + margin: 2px .65rem 0 0; } &.community:before { content: "\e900"; color: $article-heading; - margin: 0 .5rem 0 -.25rem; + margin-right: .75rem; font-size: 1.2rem; font-family: 'icomoon-v2'; vertical-align: middle; @@ -46,7 +67,16 @@ height: 1.1rem; width: 1.1rem; vertical-align: text-top; - margin-right: .5rem; + margin-right: .8rem; + } + + &.reddit:before { + content: url('/svgs/reddit.svg'); + display: inline-block; + height: 1.1rem; + width: 1.2rem; + vertical-align: top; + margin: 2px .75rem 0 0; } } } diff --git a/assets/styles/layouts/article/_pagination-btns.scss b/assets/styles/layouts/article/_pagination-btns.scss index 7f44860f7..c069bc8ae 100644 --- a/assets/styles/layouts/article/_pagination-btns.scss +++ b/assets/styles/layouts/article/_pagination-btns.scss @@ -34,5 +34,10 @@ vertical-align: middle; } } + + // Remove max-width when only one button is present + &:only-child { + 
max-width: none; + } } } diff --git a/assets/styles/layouts/article/blocks/_beta.scss b/assets/styles/layouts/article/blocks/_special-state.scss similarity index 97% rename from assets/styles/layouts/article/blocks/_beta.scss rename to assets/styles/layouts/article/blocks/_special-state.scss index 7c6636b94..0717952cd 100644 --- a/assets/styles/layouts/article/blocks/_beta.scss +++ b/assets/styles/layouts/article/blocks/_special-state.scss @@ -1,10 +1,10 @@ -.block.beta { +.block.special-state { @include gradient($grad-burningDusk); padding: 4px; border: none; border-radius: 25px !important; - .beta-content { + .state-content { background: $article-bg; border-radius: 21px; padding: calc(1.65rem - 4px) calc(2rem - 4px) calc(.1rem + 4px) calc(2rem - 4px); @@ -36,12 +36,12 @@ padding: 0; margin: -1rem 0 1.5rem 2rem; list-style: none; - + a { color: $article-heading; font-weight: $medium; position: relative; - + &.discord:before { content: url('/svgs/discord.svg'); display: inline-block; @@ -50,7 +50,7 @@ vertical-align: top; margin: 2px .65rem 0 0; } - + &.community:before { content: "\e900"; color: $article-heading; @@ -59,7 +59,7 @@ font-family: 'icomoon-v2'; vertical-align: middle; } - + &.slack:before { content: url('/svgs/slack.svg'); display: inline-block; @@ -68,7 +68,7 @@ vertical-align: text-top; margin-right: .65rem; } - + &.reddit:before { content: url('/svgs/reddit.svg'); display: inline-block; @@ -77,7 +77,7 @@ vertical-align: top; margin: 2px .65rem 0 0; } - + &::after { content: "\e90a"; font-family: 'icomoon-v4'; @@ -93,7 +93,7 @@ opacity: 0; transition: transform .2s, opacity .2s; } - + &:hover { &::after {transform: translateX(1.5rem); opacity: 1;} } diff --git a/assets/styles/styles-default.scss b/assets/styles/styles-default.scss index 1e8b162f9..5fd3eed2d 100644 --- a/assets/styles/styles-default.scss +++ b/assets/styles/styles-default.scss @@ -23,6 +23,7 @@ "layouts/syntax-highlighting", "layouts/algolia-search-overrides", "layouts/landing", + 
"layouts/datetime", "layouts/error-page", "layouts/footer-widgets", "layouts/modals", diff --git a/assets/styles/themes/_theme-dark.scss b/assets/styles/themes/_theme-dark.scss index b46051152..800740cf1 100644 --- a/assets/styles/themes/_theme-dark.scss +++ b/assets/styles/themes/_theme-dark.scss @@ -203,6 +203,12 @@ $article-btn-text-hover: $g20-white; $article-nav-icon-bg: $g5-pepper; $article-nav-acct-bg: $g3-castle; +// Datetime shortcode colors +$current-timestamp-color: $g15-platinum; +$current-date-color: $g15-platinum; +$current-time-color: $g15-platinum; +$enterprise-eol-date-color: $g15-platinum; + // Error Page Colors $error-page-btn: $b-pool; $error-page-btn-text: $g20-white; diff --git a/assets/styles/themes/_theme-light.scss b/assets/styles/themes/_theme-light.scss index c19e91ab2..eb9e530f3 100644 --- a/assets/styles/themes/_theme-light.scss +++ b/assets/styles/themes/_theme-light.scss @@ -203,6 +203,12 @@ $article-btn-text-hover: $g20-white !default; $article-nav-icon-bg: $g6-smoke !default; $article-nav-acct-bg: $g5-pepper !default; +// Datetime Colors +$current-timestamp-color: $article-text !default; +$current-date-color: $article-text !default; +$current-time-color: $article-text !default; +$enterprise-eol-date-color: $article-text !default; + // Error Page Colors $error-page-btn: $b-pool !default; $error-page-btn-text: $g20-white !default; diff --git a/broken_links_report.json b/broken_links_report.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/broken_links_report.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/compose.yaml b/compose.yaml index 1b51376e5..ea11e03cb 100644 --- a/compose.yaml +++ b/compose.yaml @@ -1,6 +1,7 @@ # This is a Docker Compose file for the InfluxData documentation site. ## Run documentation tests for code samples. name: influxdata-docs +# Configure your credentials in the following secrets files. 
secrets: influxdb2-admin-username: file: ~/.env.influxdb2-admin-username @@ -8,6 +9,10 @@ secrets: file: ~/.env.influxdb2-admin-password influxdb2-admin-token: file: ~/.env.influxdb2-admin-token + influxdb3-core-admin-token: + file: ~/.env.influxdb3-core-admin-token + influxdb3-enterprise-admin-token: + file: ~/.env.influxdb3-enterprise-admin-token services: local-dev: build: @@ -17,7 +22,7 @@ services: RUN apk add --no-cache curl openssl command: hugo server --bind 0.0.0.0 healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:1313/influxdb/cloud-dedicated/"] + test: ["CMD", "curl", "-f", "http://localhost:1313/influxdb3/cloud-dedicated/"] interval: 1m timeout: 10s retries: 2 @@ -101,9 +106,9 @@ services: command: # In the command, pass file paths to test. # The container preprocesses the files for testing and runs the tests. - - content/influxdb/cloud-dedicated/**/*.md + - content/influxdb3/cloud-dedicated/**/*.md environment: - - CONTENT_PATH=content/influxdb/cloud-dedicated + - CONTENT_PATH=content/influxdb3/cloud-dedicated profiles: - test - v3 @@ -120,12 +125,12 @@ services: source: ./test/shared target: /shared - type: bind - source: ./content/influxdb/cloud-dedicated/.env.test + source: ./content/influxdb3/cloud-dedicated/.env.test target: /app/.env.test read_only: true - # The following mount assumes your influxctl configuration file is located at ./content/influxdb/cloud-dedicated/config.toml. + # The following mount assumes your influxctl configuration file is located at ./content/influxdb3/cloud-dedicated/config.toml. - type: bind - source: ./content/influxdb/cloud-dedicated/config.toml + source: ./content/influxdb3/cloud-dedicated/config.toml target: /root/.config/influxctl/config.toml read_only: true # In your code samples, use `/app/data/` or `data/` to access sample data files from the `static/downloads` directory. @@ -156,9 +161,9 @@ services: command: # In the command, pass file paths to test. 
# The container preprocesses the files for testing and runs the tests. - - content/influxdb/cloud-serverless/**/*.md + - content/influxdb3/cloud-serverless/**/*.md environment: - - CONTENT_PATH=content/influxdb/cloud-serverless + - CONTENT_PATH=content/influxdb3/cloud-serverless profiles: - test - v3 @@ -175,7 +180,7 @@ services: source: ./test/shared target: /shared - type: bind - source: ./content/influxdb/cloud-serverless/.env.test + source: ./content/influxdb3/cloud-serverless/.env.test target: /app/.env.test read_only: true # In your code samples, use `/app/data/` or `data/` to access sample data files from the `static/downloads` directory. @@ -206,9 +211,9 @@ services: command: # In the command, pass file paths to test. # The container preprocesses the files for testing and runs the tests. - - content/influxdb/clustered/**/*.md + - content/influxdb3/clustered/**/*.md environment: - - CONTENT_PATH=content/influxdb/clustered + - CONTENT_PATH=content/influxdb3/clustered profiles: - test - v3 @@ -225,12 +230,12 @@ services: source: ./test/shared target: /shared - type: bind - source: ./content/influxdb/clustered/.env.test + source: ./content/influxdb3/clustered/.env.test target: /app/.env.test read_only: true - # The following mount assumes your influxctl configuration file is located at ./content/influxdb/clustered/config.toml. + # The following mount assumes your influxctl configuration file is located at ./content/influxdb3/clustered/config.toml. - type: bind - source: ./content/influxdb/clustered/config.toml + source: ./content/influxdb3/clustered/config.toml target: /root/.config/influxctl/config.toml read_only: true # In your code samples, use `/app/data/` or `data/` to access sample data files from the `static/downloads` directory. 
@@ -301,15 +306,59 @@ services: working_dir: /app influxdb3-core: container_name: influxdb3-core - image: quay.io/influxdb/influxdb3-core:latest + image: influxdb:3-core + # Set variables (except your auth token) for Core in the .env.3core file. + env_file: + - .env.3core + ports: + - 8282:8181 + command: + - influxdb3 + - serve + - --node-id=node0 + - --log-filter=debug + - --object-store=file + - --data-dir=/var/lib/influxdb3/data + - --plugin-dir=/var/lib/influxdb3/plugins + volumes: + - type: bind + source: test/.influxdb3/core/data + target: /var/lib/influxdb3/data + - type: bind + source: test/.influxdb3/core/plugins + target: /var/lib/influxdb3/plugins + environment: + - INFLUXDB3_AUTH_TOKEN=/run/secrets/influxdb3-core-admin-token + secrets: + - influxdb3-core-admin-token + influxdb3-enterprise: + container_name: influxdb3-enterprise + image: influxdb:3-enterprise + # Set license email and other variables (except your auth token) for Enterprise in the .env.3ent file. + env_file: + - .env.3ent ports: - 8181:8181 command: + - influxdb3 - serve - - --node-id=sensors_node0 + - --node-id=node0 + - --cluster-id=cluster0 - --log-filter=debug - --object-store=file - - --data-dir=/var/lib/influxdb3 + - --data-dir=/var/lib/influxdb3/data + - --plugin-dir=/var/lib/influxdb3/plugins + environment: + - INFLUXDB3_AUTH_TOKEN=/run/secrets/influxdb3-enterprise-admin-token + volumes: + - type: bind + source: test/.influxdb3/enterprise/data + target: /var/lib/influxdb3/data + - type: bind + source: test/.influxdb3/enterprise/plugins + target: /var/lib/influxdb3/plugins + secrets: + - influxdb3-enterprise-admin-token telegraf-pytest: container_name: telegraf-pytest image: influxdata/docs-pytest @@ -448,6 +497,9 @@ services: - type: bind source: ./content target: /app/content + - type: bind + source: ./CONTRIBUTING.md + target: /app/CONTRIBUTING.md volumes: test-content: cloud-tmp: diff --git a/hugo.yml b/config/_default/hugo.yml similarity index 57% rename from hugo.yml rename 
to config/_default/hugo.yml index cb4775438..b98cf11f7 100644 --- a/hugo.yml +++ b/config/_default/hugo.yml @@ -1,4 +1,4 @@ -baseURL: 'https://docs.influxdata.com/' +baseURL: https://docs.influxdata.com/ languageCode: en-us title: InfluxDB Documentation @@ -49,8 +49,52 @@ privacy: youtube: disable: false privacyEnhanced: true + outputFormats: json: mediaType: application/json baseName: pages isPlainText: true + +# Asset processing configuration for development +build: + # Ensure Hugo correctly processes JavaScript modules + jsConfig: + nodeEnv: "development" +# Development asset processing + writeStats: false + useResourceCacheWhen: "fallback" + noJSConfigInAssets: false + +# Asset processing configuration +assetDir: "assets" + +module: + mounts: + - source: assets + target: assets + - source: node_modules + target: assets/node_modules + +# Environment parameters +params: + env: development + environment: development + +# Configure the server for development +server: + port: 1313 + baseURL: 'http://localhost:1313/' + watchChanges: true + disableLiveReload: false + +# Ignore specific warning logs +ignoreLogs: + - warning-goldmark-raw-html + +# Disable minification for development +minify: + disableJS: true + disableCSS: true + disableHTML: true + minifyOutput: false diff --git a/config/production/config.yml b/config/production/config.yml new file mode 100644 index 000000000..da574daff --- /dev/null +++ b/config/production/config.yml @@ -0,0 +1,40 @@ +# Production overrides for CI/CD builds +baseURL: 'https://docs.influxdata.com/' + +# Production environment parameters +params: + env: production + environment: production + +# Enable minification for production +minify: + disableJS: false + disableCSS: false + disableHTML: false + minifyOutput: true + +# Production asset processing +build: + writeStats: false + useResourceCacheWhen: "fallback" + buildOptions: + sourcemap: false + target: "es2015" + +# Asset processing configuration +assetDir: "assets" + +# Mount 
assets for production +module: + mounts: + - source: assets + target: assets + - source: node_modules + target: assets/node_modules + +# Disable development server settings +server: {} + +# Suppress the warning mentioned in the error +ignoreLogs: + - 'warning-goldmark-raw-html' \ No newline at end of file diff --git a/config/production/hugo.yml b/config/production/hugo.yml new file mode 100644 index 000000000..bd5911c96 --- /dev/null +++ b/config/production/hugo.yml @@ -0,0 +1,17 @@ +build: + writeStats: false + useResourceCacheWhen: "fallback" + buildOptions: + sourcemap: false + target: "es2015" +minify: + disableJS: false + disableCSS: false + disableHTML: false + minifyOutput: true +params: + env: production + environment: production +server: { + disableLiveReload: true +} \ No newline at end of file diff --git a/config/staging/hugo.yml b/config/staging/hugo.yml new file mode 100644 index 000000000..7d22ffb17 --- /dev/null +++ b/config/staging/hugo.yml @@ -0,0 +1,19 @@ +baseURL: https://test2.docs.influxdata.com/ +build: + writeStats: false + useResourceCacheWhen: "fallback" + buildOptions: + sourcemap: false + target: "es2015" +minify: + disableJS: false + disableCSS: false + disableHTML: false + minifyOutput: true +params: + env: staging + environment: staging +server: { + disableLiveReload: true +} + \ No newline at end of file diff --git a/content/chronograf/v1/about_the_project/release-notes.md b/content/chronograf/v1/about_the_project/release-notes.md index ed15f4b61..eae49effd 100644 --- a/content/chronograf/v1/about_the_project/release-notes.md +++ b/content/chronograf/v1/about_the_project/release-notes.md @@ -10,6 +10,22 @@ aliases: - /chronograf/v1/about_the_project/release-notes-changelog/ --- +## v1.10.8 {date="2025-08-15"} + +### Bug Fixes + +- Fix missing retention policies on the Databases page. + +## v1.10.7 {date="2025-04-15"} + +### Bug Fixes + +- Fix Hosts page loading. + +### Dependency updates + +- Upgrade Go to 1.23.8. 
+ ## v1.10.6 {date="2024-12-16"} ### Bug Fixes diff --git a/content/enterprise_influxdb/v1/about-the-project/release-notes.md b/content/enterprise_influxdb/v1/about-the-project/release-notes.md index 6edaa9e4d..b6db183f6 100644 --- a/content/enterprise_influxdb/v1/about-the-project/release-notes.md +++ b/content/enterprise_influxdb/v1/about-the-project/release-notes.md @@ -1,5 +1,5 @@ --- -title: InfluxDB Enterprise 1.11 release notes +title: InfluxDB Enterprise v1 release notes description: > Important changes and what's new in each version InfluxDB Enterprise. menu: @@ -7,20 +7,112 @@ menu: name: Release notes weight: 10 parent: About the project +alt_links: + v1: /influxdb/v1/about_the_project/release-notes/ --- -{{% note %}} -#### InfluxDB Enterprise and FIPS-compliance +## v1.12.x {date="TBD"} -**InfluxDB Enterprise 1.11+** introduces builds that are compliant with -[Federal Information Processing Standards (FIPS)](https://www.nist.gov/standardsgov/compliance-faqs-federal-information-processing-standards-fips) -and adhere to a strict set of security standards. Both standard and FIPS-compliant -InfluxDB Enterprise builds are available. For more information, see -[FIPS-compliant InfluxDB Enterprise builds](/enterprise_influxdb/v1/introduction/installation/fips-compliant/). -{{% /note %}} +> [!Important] +> #### Pre-release documentation +> +> This release is not yet available. [**v{{% latest-patch %}}**](#v1118) is the latest InfluxDB Enterprise v1 release. + +> [!Important] +> #### Upgrade meta nodes first +> +> When upgrading to InfluxDB Enterprise 1.12.1+, upgrade meta nodes before +> upgrading data nodes. + +## Features + +- Add additional log output when using + [`influx_inspect buildtsi`](/enterprise_influxdb/v1/tools/influx_inspect/#buildtsi) to + rebuild the TSI index. + + +- Use [`influx_inspect export`](/enterprise_influxdb/v1/tools/influx_inspect/#export) with + `-tsmfile` option to + export a single TSM file. 
+- Add `-m` flag to the [`influxd-ctl show-shards` command](/enterprise_influxdb/v1/tools/influxd-ctl/show-shards/) + to output inconsistent shards. +- Allow the specification of a write window for retention policies. +- Add `fluxQueryRespBytes` metric to the `/debug/vars` metrics endpoint. +- Log whenever meta gossip times exceed expiration. + + +- Add `query-log-path` configuration option to data nodes. +- Add `aggressive-points-per-block` configuration option to prevent TSM files from not getting fully compacted. +- Log TLS configuration settings on startup. +- Check for TLS certificate and private key permissions. +- Add a warning if the TLS certificate is expired. +- Add authentication to the Raft portal and add the following related _data_ + node configuration options: + + + - `[meta].raft-portal-auth-required` + - `[meta].raft-dialer-auth-required` +- Improve error handling. +- InfluxQL updates: + - Delete series by retention policy. + + + + - Allow retention policies to discard writes that fall within their range, but + outside of `FUTURE LIMIT` and `PAST LIMIT`. + +## Bug fixes + +- Log rejected writes to subscriptions. +- Update `xxhash` and avoid `stringtoslicebyte` in the cache. +- Prevent a panic when a shard group has no shards. +- Fix file handle leaks in `Compactor.write`. +- Ensure fields in memory match the fields on disk. +- Ensure temporary files are removed after failed compactions. +- Do not panic on invalid multiple subqueries. +- Update the `/shard-status` API to return the correct result and use a + consistent "idleness" definition for shards. + +## Other + +- Update Go to 1.23.5. +- Upgrade Flux to v0.196.1. +- Upgrade InfluxQL to v1.4.1. +- Various other dependency updates. 
+ +--- + +> [!Note] +> #### InfluxDB Enterprise and FIPS-compliance +> +> **InfluxDB Enterprise 1.11+** introduces builds that are compliant with +> [Federal Information Processing Standards (FIPS)](https://www.nist.gov/standardsgov/compliance-faqs-federal-information-processing-standards-fips) +> and adhere to a strict set of security standards. Both standard and FIPS-compliant +> InfluxDB Enterprise builds are available. For more information, see +> [FIPS-compliant InfluxDB Enterprise builds](/enterprise_influxdb/v1/introduction/installation/fips-compliant/). ## v1.11.8 {date="2024-11-15"} +### Features + +- Add a startup logger to InfluxDB Enterprise data nodes. + ### Bug Fixes - Strip double quotes from measurement names in the [`/api/v2/delete` compatibility @@ -28,6 +120,8 @@ InfluxDB Enterprise builds are available. For more information, see string comparisons (e.g. to allow special characters in measurement names). - Enable SHA256 for FIPS RPMs. +--- + ## v1.11.7 {date="2024-09-19"} ### Bug Fixes @@ -79,14 +173,13 @@ InfluxDB Enterprise builds are available. For more information, see ## v1.11.5 {date="2024-02-14"} -{{% note %}} -#### Upgrading from InfluxDB Enterprise v1.11.3 - -If upgrading from InfluxDB Enterprise v1.11.3+ to {{< latest-patch >}}, you can -now configure whether or not InfluxDB compacts series files on startup using the -[`compact-series-file` configuration option](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#compact-series-file) -in your [InfluxDB Enterprise data node configuration file](/enterprise_influxdb/v1/administration/configure/config-data-nodes/). 
-{{% /note %}} +> [!Note] +> #### Upgrading from InfluxDB Enterprise v1.11.3 +> +> If upgrading from InfluxDB Enterprise v1.11.3+ to {{< latest-patch >}}, you can +> now configure whether or not InfluxDB compacts series files on startup using the +> [`compact-series-file` configuration option](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#compact-series-file) +> in your [InfluxDB Enterprise data node configuration file](/enterprise_influxdb/v1/administration/configure/config-data-nodes/). ### Bug Fixes @@ -101,29 +194,28 @@ in your [InfluxDB Enterprise data node configuration file](/enterprise_influxdb/ ## v1.11.4 {date="2023-12-14"} -{{% note %}} -#### Series file compaction - -With InfluxDB Enterprise v1.11.4+, InfluxDB can be configured to optionally -[compact series files](/enterprise_influxdb/v1/tools/influx_inspect/#--compact-series-file-) -before data nodes are started. -Series files are stored in `_series` directories inside the -[InfluxDB data directory](/enterprise_influxdb/v1/concepts/file-system-layout/#data-node-file-system-layout). -Default: `/var/lib/data//_series`. - -To compact series files on startup, set the [`compact-series-file` configuration option](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#compact-series-file) -to `true` in your [InfluxDB Enterprise data node configuration file](/enterprise_influxdb/v1/administration/configure/config-data-nodes/). - -- If any series files are corrupt, the `influx_inspect` or `influxd` processes on - the data node may fail to start. In both cases, delete the series file - directories before restarting the database. InfluxDB automatically - regenerates the necessary series directories and files when restarting. -- To check if series files are corrupt before starting the database, run the - [`influx_inspect verify-seriesfile` command](/enterprise_influxdb/v1/tools/influx_inspect/#verify-seriesfile) - while the database is off-line. 
-- If series files are large (20+ gigabytes), it may be faster to delete the - series file directories before starting the database. -{{% /note %}} +> [!Note] +> #### Series file compaction +> +> With InfluxDB Enterprise v1.11.4+, InfluxDB can be configured to optionally +> [compact series files](/enterprise_influxdb/v1/tools/influx_inspect/#--compact-series-file-) +> before data nodes are started. +> Series files are stored in `_series` directories inside the +> [InfluxDB data directory](/enterprise_influxdb/v1/concepts/file-system-layout/#data-node-file-system-layout). +> Default: `/var/lib/data//_series`. +> +> To compact series files on startup, set the [`compact-series-file` configuration option](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#compact-series-file) +> to `true` in your [InfluxDB Enterprise data node configuration file](/enterprise_influxdb/v1/administration/configure/config-data-nodes/). +> +> - If any series files are corrupt, the `influx_inspect` or `influxd` processes on +> the data node may fail to start. In both cases, delete the series file +> directories before restarting the database. InfluxDB automatically +> regenerates the necessary series directories and files when restarting. +> - To check if series files are corrupt before starting the database, run the +> [`influx_inspect verify-seriesfile` command](/enterprise_influxdb/v1/tools/influx_inspect/#verify-seriesfile) +> while the database is off-line. +> - If series files are large (20+ gigabytes), it may be faster to delete the +> series file directories before starting the database. ### Bug Fixes @@ -448,8 +540,10 @@ An edge case regression was introduced into this version that may cause a consta ## v1.9.6 {date="2022-02-16"} -{{% note %}} InfluxDB Enterprise offerings are no longer available on AWS, Azure, and GCP marketplaces. 
Please [contact Sales](https://www.influxdata.com/contact-sales/) to request an license key to [install InfluxDB Enterprise in your own environment](/enterprise_influxdb/v1/introduction/installation/). -{{% /note %}} +> [!Note] +> InfluxDB Enterprise offerings are no longer available on AWS, Azure, and GCP +> marketplaces. Please [contact Sales](https://www.influxdata.com/contact-sales/) +> to request a license key to [install InfluxDB Enterprise in your own environment](/enterprise_influxdb/v1/introduction/installation/). ### Features @@ -495,10 +589,9 @@ An edge case regression was introduced into this version that may cause a consta ## v1.9.5 {date="2021-10-11"} -{{% note %}} -InfluxDB Enterprise 1.9.4 was not released. -Changes below are included in InfluxDB Enterprise 1.9.5. -{{% /note %}} +> [!Note] +> InfluxDB Enterprise 1.9.4 was not released. +> Changes below are included in InfluxDB Enterprise 1.9.5. ### Features @@ -581,7 +674,7 @@ in that there is no corresponding InfluxDB OSS release. ### Features - Upgrade to Go 1.15.10. -- Support user-defined *node labels*. +- Support user-defined _node labels_. Node labels let you assign arbitrary key-value pairs to meta and data nodes in a cluster. For instance, an operator might want to label nodes with the availability zone in which they're located. - Improve performance of `SHOW SERIES CARDINALITY` and `SHOW SERIES CARDINALITY from ` InfluxQL queries. @@ -646,10 +739,9 @@ in that there is no corresponding InfluxDB OSS release. Instead, use [`inch`](https://github.com/influxdata/inch) or [`influx-stress`](https://github.com/influxdata/influx-stress) (not to be confused with `influx_stress`). -{{% note %}} -**Note:** InfluxDB Enterprise 1.9.0 and 1.9.1 were not released. -Bug fixes intended for 1.9.0 and 1.9.1 were rolled into InfluxDB Enterprise 1.9.2.
+> Bug fixes intended for 1.9.0 and 1.9.1 were rolled into InfluxDB Enterprise 1.9.2. --- @@ -756,11 +848,15 @@ For details on changes incorporated from the InfluxDB OSS release, see ### Features -#### **Back up meta data only** +#### Back up meta data only -- Add option to back up **meta data only** (users, roles, databases, continuous queries, and retention policies) using the new `-strategy` flag and `only meta` option: `influx ctl backup -strategy only meta `. +- Add option to back up **meta data only** (users, roles, databases, continuous + queries, and retention policies) using the new `-strategy` flag and `only meta` + option: `influx ctl backup -strategy only meta `. - > **Note:** To restore a meta data backup, use the `restore -full` command and specify your backup manifest: `influxd-ctl restore -full `. + > [!Note] + > To restore a meta data backup, use the `restore -full` command and specify + > your backup manifest: `influxd-ctl restore -full `. For more information, see [Perform a metastore only backup](/enterprise_influxdb/v1/administration/backup-and-restore/#perform-a-metastore-only-backup). @@ -1007,7 +1103,10 @@ The following summarizes the expected settings for proper configuration of JWT a `""`. - A long pass phrase is recommended for better security. ->**Note:** To provide encrypted internode communication, you must enable HTTPS. Although the JWT signature is encrypted, the the payload of a JWT token is encoded, but is not encrypted. +> [!Note] +> To provide encrypted internode communication, you must enable HTTPS. Although +> the JWT signature is encrypted, the payload of a JWT token is encoded, but +> is not encrypted.
### Bug fixes diff --git a/content/enterprise_influxdb/v1/administration/configure/config-data-nodes.md b/content/enterprise_influxdb/v1/administration/configure/config-data-nodes.md index c45d343f7..6295ac3d5 100644 --- a/content/enterprise_influxdb/v1/administration/configure/config-data-nodes.md +++ b/content/enterprise_influxdb/v1/administration/configure/config-data-nodes.md @@ -259,6 +259,29 @@ For detailed configuration information, see [`meta.ensure-fips`](/enterprise_inf Environment variable: `INFLUXDB_META_ENSURE_FIPS` +#### raft-portal-auth-required {metadata="v1.12.0+"} + +Default is `false`. + +Require Raft clients to authenticate with server using the +[`meta-internal-shared-secret`](#meta-internal-shared-secret). +This requires that all meta nodes are running InfluxDB Enterprise v1.12.0+ and +are configured with the correct `meta-internal-shared-secret`. + +Environment variable: `INFLUXDB_META_RAFT_PORTAL_AUTH_REQUIRED` + +#### raft-dialer-auth-required {metadata="v1.12.0+"} + +Default is `false`. + +Require Raft servers to authenticate Raft clients using the +[`meta-internal-shared-secret`](#meta-internal-shared-secret). +This requires that all meta nodes are running InfluxDB Enterprise v1.12.0+, have +`raft-portal-auth-required=true`, and are configured with the correct +`meta-internal-shared-secret`. + +Environment variable: `INFLUXDB_META_RAFT_DIALER_AUTH_REQUIRED` + ----- ## Data settings @@ -303,7 +326,9 @@ Very useful for troubleshooting, but will log any sensitive data contained withi Environment variable: `INFLUXDB_DATA_QUERY_LOG_ENABLED` -#### query-log-path +#### query-log-path {metadata="v1.12.0+"} + +Default is `""`. An absolute path to the query log file. The default is `""` (queries aren't logged to a file). @@ -326,6 +351,8 @@ The following is an example of a `logrotate` configuration: } ``` +Environment variable: `INFLUXDB_DATA_QUERY_LOG_PATH` +--> #### wal-fsync-delay Default is `"0s"`. 
@@ -422,6 +449,16 @@ The duration at which to compact all TSM and TSI files in a shard if it has not Environment variable: `INFLUXDB_DATA_COMPACT_FULL_WRITE_COLD_DURATION` +#### aggressive-points-per-block {metadata="v1.12.0+"} + +Default is `10000`. + +The number of points per block to use during aggressive compaction. There are +certain cases where TSM files do not get fully compacted. This adjusts an +internal parameter to help ensure these files do get fully compacted. + +Environment variable: `INFLUXDB_DATA_AGGRESSIVE_POINTS_PER_BLOCK` + #### index-version Default is `"inmem"`. diff --git a/content/enterprise_influxdb/v1/administration/monitor/logs.md b/content/enterprise_influxdb/v1/administration/monitor/logs.md index 0efdb8947..871de19ab 100644 --- a/content/enterprise_influxdb/v1/administration/monitor/logs.md +++ b/content/enterprise_influxdb/v1/administration/monitor/logs.md @@ -120,13 +120,13 @@ You can view the file [here](https://github.com/influxdb/influxdb/blob/master/sc InfluxDB 1.5 introduces the option to log HTTP request traffic separately from the other InfluxDB log output. When HTTP request logging is enabled, the HTTP logs are intermingled by default with internal InfluxDB logging. By redirecting the HTTP request log entries to a separate file, both log files are easier to read, monitor, and debug. -See [Redirecting HTTP request logging](/enterprise_influxdb/v1/administration/logs/#redirecting-http-access-logging) in the InfluxDB OSS documentation. +For more information, see the [InfluxDB OSS v1 HTTP access logging documentation](/influxdb/v1/administration/logs/#http-access-logging). ## Structured logging With InfluxDB 1.5, structured logging is supported and enable machine-readable and more developer-friendly log output formats. 
The two new structured log formats, `logfmt` and `json`, provide easier filtering and searching with external tools and simplifies integration of InfluxDB logs with Splunk, Papertrail, Elasticsearch, and other third party tools. -See [Structured logging](/enterprise_influxdb/v1/administration/logs/#structured-logging) in the InfluxDB OSS documentation. +For more information, see the [InfluxDB OSS v1 structured logging documentation](/influxdb/v1/administration/logs/#structured-logging). ## Tracing diff --git a/content/enterprise_influxdb/v1/concepts/schema_and_data_layout.md b/content/enterprise_influxdb/v1/concepts/schema_and_data_layout.md index 43c781460..c60e8ccef 100644 --- a/content/enterprise_influxdb/v1/concepts/schema_and_data_layout.md +++ b/content/enterprise_influxdb/v1/concepts/schema_and_data_layout.md @@ -22,7 +22,7 @@ We recommend the following design guidelines for most use cases: Your queries should guide what data you store in [tags](/enterprise_influxdb/v1/concepts/glossary/#tag) and what you store in [fields](/enterprise_influxdb/v1/concepts/glossary/#field) : -- Store commonly queried and grouping ([`group()`](/flux/v0.x/stdlib/universe/group) or [`GROUP BY`](/enterprise_influxdb/v1/query_language/explore-data/#group-by-tags)) metadata in tags. +- Store commonly queried and grouping ([`group()`](/flux/v0/stdlib/universe/group) or [`GROUP BY`](/enterprise_influxdb/v1/query_language/explore-data/#group-by-tags)) metadata in tags. - Store data in fields if each data point contains a different value. - Store numeric values as fields ([tag values](/enterprise_influxdb/v1/concepts/glossary/#tag-value) only support string values). 
diff --git a/content/enterprise_influxdb/v1/flux/flux-vs-influxql.md b/content/enterprise_influxdb/v1/flux/flux-vs-influxql.md index 98df88877..fca04a286 100644 --- a/content/enterprise_influxdb/v1/flux/flux-vs-influxql.md +++ b/content/enterprise_influxdb/v1/flux/flux-vs-influxql.md @@ -8,6 +8,7 @@ menu: weight: 5 --- + Flux is an alternative to [InfluxQL](/enterprise_influxdb/v1/query_language/) and other SQL-like query languages for querying and analyzing data. Flux uses functional language patterns making it incredibly powerful, flexible, and able to overcome many of the limitations of InfluxQL. This article outlines many of the tasks possible with Flux but not InfluxQL and provides information about Flux and InfluxQL parity. @@ -278,13 +279,11 @@ from(bucket: "geo/autogen") ## InfluxQL and Flux parity -Flux is working towards complete parity with InfluxQL and new functions are being added to that end. + The table below shows InfluxQL statements, clauses, and functions along with their equivalent Flux functions. 
_For a complete list of Flux functions, [view all Flux functions](/flux/v0/stdlib/all-functions)._ -### InfluxQL and Flux parity - | InfluxQL | Flux Functions | | :------------------------------------------------------------------------------------------------------------------------------------------ | :----------------------------------------------------------------------------------------------------------------------------------------------------------------- | | [SELECT](/enterprise_influxdb/v1/query_language/explore-data/#the-basic-select-statement) | [filter()](/flux/v0/stdlib/universe/filter/) | diff --git a/content/enterprise_influxdb/v1/introduction/installation/_index.md b/content/enterprise_influxdb/v1/introduction/installation/_index.md index 1998a5f8d..772703fbf 100644 --- a/content/enterprise_influxdb/v1/introduction/installation/_index.md +++ b/content/enterprise_influxdb/v1/introduction/installation/_index.md @@ -11,6 +11,10 @@ menu: name: Install weight: 103 parent: Introduction +related: + - /enterprise_influxdb/v1/introduction/installation/docker/ + - /enterprise_influxdb/v1/introduction/installation/single-server/ + - /enterprise_influxdb/v1/introduction/installation/fips-compliant/ --- Complete the following steps to install an InfluxDB Enterprise cluster in your own environment: @@ -19,8 +23,4 @@ Complete the following steps to install an InfluxDB Enterprise cluster in your o 2. [Install InfluxDB data nodes](/enterprise_influxdb/v1/introduction/installation/data_node_installation/) 3. [Install Chronograf](/enterprise_influxdb/v1/introduction/installation/chrono_install/) -{{< influxdbu title="Installing InfluxDB Enterprise" summary="Learn about InfluxDB architecture and how to install InfluxDB Enterprise with step-by-step instructions." 
action="Take the course" link="https://university.influxdata.com/courses/installing-influxdb-enterprise-tutorial/" >}} - -#### Other installation options -- [Install InfluxDB Enterprise on a single server](/enterprise_influxdb/v1/introduction/installation/single-server/) -- [Federal Information Processing Standards (FIPS)-compliant InfluxDB Enterprise](/enterprise_influxdb/v1/introduction/installation/fips-compliant/) \ No newline at end of file +{{< influxdbu title="Installing InfluxDB Enterprise" summary="Learn about InfluxDB architecture and how to install InfluxDB Enterprise with step-by-step instructions." action="Take the course" link="https://university.influxdata.com/courses/installing-influxdb-enterprise-tutorial/" >}} \ No newline at end of file diff --git a/content/enterprise_influxdb/v1/introduction/installation/data_node_installation.md b/content/enterprise_influxdb/v1/introduction/installation/data_node_installation.md index 57771f105..08aeb0437 100644 --- a/content/enterprise_influxdb/v1/introduction/installation/data_node_installation.md +++ b/content/enterprise_influxdb/v1/introduction/installation/data_node_installation.md @@ -193,7 +193,7 @@ For added security, follow these steps to verify the signature of your InfluxDB 1. Download and import InfluxData's public key: ```sh - curl -s https://repos.influxdata.com/influxdata-archive_compat.key | gpg --import + curl -s https://repos.influxdata.com/influxdata-archive.key | gpg --import ``` 2. Download the signature file for the release by adding `.asc` to the download URL. For example: @@ -224,7 +224,7 @@ wget https://dl.influxdata.com/enterprise/releases/fips/influxdb-data-{{< latest The output from this command should include the following: ```sh - gpg: Good signature from "InfluxDB Packaging Service " [unknown] + gpg: Good signature from "InfluxData Package Signing Key " [unknown] ``` {{% /expand %}} {{< /expand-wrapper >}} @@ -327,7 +327,7 @@ influxdb 2706 0.2 7.0 571008 35376 ? 
Sl 15:37 0:16 /usr/bin/influx ``` If you do not see the expected output, the process is either not launching or is exiting prematurely. -Check the [logs](/enterprise_influxdb/v1/administration/logs/) +Check the [logs](/enterprise_influxdb/v1/administration/monitor/logs/) for error messages and verify the previous setup steps are complete. If you see the expected output, repeat for the remaining data nodes. @@ -395,6 +395,10 @@ to the cluster. {{% /expand %}} {{< /expand-wrapper >}} +## Docker installation + +For Docker-based installations, see [Install and run InfluxDB v1 Enterprise with Docker](/enterprise_influxdb/v1/introduction/installation/docker/) for complete instructions on setting up data nodes using Docker images. + ## Next steps Once your data nodes are part of your cluster, do the following: diff --git a/content/enterprise_influxdb/v1/introduction/installation/docker/_index.md b/content/enterprise_influxdb/v1/introduction/installation/docker/_index.md new file mode 100644 index 000000000..a1c6c40be --- /dev/null +++ b/content/enterprise_influxdb/v1/introduction/installation/docker/_index.md @@ -0,0 +1,259 @@ +--- +title: Install and run InfluxDB v1 Enterprise with Docker +description: Install and run InfluxDB v1 Enterprise using Docker images for meta nodes and data nodes. +menu: + enterprise_influxdb_v1: + name: Install with Docker + weight: 30 + parent: Install +related: + - /enterprise_influxdb/v1/introduction/installation/docker/docker-troubleshooting/ +alt_links: + core: /influxdb3/core/get-started/setup/ + enterprise: /influxdb3/enterprise/get-started/setup/ + v1: /influxdb/v1/introduction/install/docker/ + v2: /influxdb/v2/install/use-docker-compose/ +--- + +InfluxDB v1 Enterprise provides Docker images for both meta nodes and data nodes to simplify cluster deployment and management. +Using Docker allows you to quickly set up and run InfluxDB Enterprise clusters with consistent configurations. 
+
+> [!Important]
+> #### Enterprise license required
+> You must have a valid license to run InfluxDB Enterprise.
+> Contact InfluxData for licensing information or obtain a 14-day demo license via the [InfluxDB Enterprise portal](https://portal.influxdata.com/users/new).
+
+- [Docker image variants](#docker-image-variants)
+- [Requirements](#requirements)
+- [Set up an InfluxDB Enterprise cluster with Docker](#set-up-an-influxdb-enterprise-cluster-with-docker)
+- [Configuration options](#configuration-options)
+- [Exposing ports](#exposing-ports)
+- [Persistent data storage](#persistent-data-storage)
+- [Next steps](#next-steps)
+
+## Docker image variants
+
+InfluxDB Enterprise provides two specialized Docker images:
+
+- **`influxdb:meta`**: Enterprise meta node package for clustering
+- **`influxdb:data`**: Enterprise data node package for clustering
+
+## Requirements
+
+- [Docker](https://docs.docker.com/get-docker/) installed and running
+- Valid [InfluxData license key](#enterprise-license-required)
+- Network connectivity between nodes
+- At least 3 meta nodes (odd number recommended)
+- At least 2 data nodes
+
+## Set up an InfluxDB Enterprise cluster with Docker
+
+1. [Create a Docker network](#create-a-docker-network)
+2. [Start meta nodes](#start-meta-nodes)
+3. [Configure meta nodes to know each other](#configure-meta-nodes-to-know-each-other)
+4. [Start data nodes](#start-data-nodes)
+5. [Add data nodes to the cluster](#add-data-nodes-to-the-cluster)
+6. [Verify the cluster](#verify-the-cluster)
+7. [Stop and restart InfluxDB v1 Enterprise Containers](#stop-and-restart-influxdb-v1-enterprise-containers)
+
+### Create a Docker network
+
+Create a custom Docker network to allow communication between meta and data nodes:
+
+```bash
+docker network create influxdb
+```
+
+### Start meta nodes
+
+Start three meta nodes using the `influxdb:meta` image. 
+Each meta node requires a unique hostname and the Enterprise license key: + +```bash +# Start first meta node +docker run -d \ + --name=influxdb-meta-0 \ + --network=influxdb \ + -h influxdb-meta-0 \ + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key \ + influxdb:meta + +# Start second meta node +docker run -d \ + --name=influxdb-meta-1 \ + --network=influxdb \ + -h influxdb-meta-1 \ + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key \ + influxdb:meta + +# Start third meta node +docker run -d \ + --name=influxdb-meta-2 \ + --network=influxdb \ + -h influxdb-meta-2 \ + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key \ + influxdb:meta +``` + +### Configure meta nodes to know each other + +From the first meta node, add the other meta nodes to the cluster: + +```bash +# Add the second meta node +docker exec influxdb-meta-0 \ + influxd-ctl add-meta influxdb-meta-1:8091 + +# Add the third meta node +docker exec influxdb-meta-0 \ + influxd-ctl add-meta influxdb-meta-2:8091 +``` + +### Start data nodes + +Start two or more data nodes using the `influxdb:data` image: + +```bash +# Start first data node +docker run -d \ + --name=influxdb-data-0 \ + --network=influxdb \ + -h influxdb-data-0 \ + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key \ + influxdb:data + +# Start second data node +docker run -d \ + --name=influxdb-data-1 \ + --network=influxdb \ + -h influxdb-data-1 \ + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key \ + influxdb:data +``` + +### Add data nodes to the cluster + +From the first meta node, register each data node with the cluster: + +```bash +# Add first data node +docker exec influxdb-meta-0 \ + influxd-ctl add-data influxdb-data-0:8088 + +# Add second data node +docker exec influxdb-meta-0 \ + influxd-ctl add-data influxdb-data-1:8088 +``` + +### Verify the cluster + +Check that all nodes are properly added to the cluster: + +```bash +docker exec influxdb-meta-0 influxd-ctl show +``` + +Expected output: +``` +Data Nodes +========== 
+ID TCP Address Version +4 influxdb-data-0:8088 1.x.x-cX.X.X +5 influxdb-data-1:8088 1.x.x-cX.X.X + +Meta Nodes +========== +TCP Address Version +influxdb-meta-0:8091 1.x.x-cX.X.X +influxdb-meta-1:8091 1.x.x-cX.X.X +influxdb-meta-2:8091 1.x.x-cX.X.X +``` + +## Configuration options + +### Using environment variables + +You can configure {{% product-name %}} using environment variables with the format `INFLUXDB_
_`. + +Common environment variables: +- `INFLUXDB_REPORTING_DISABLED=true` +- `INFLUXDB_META_DIR=/path/to/metadir` +- `INFLUXDB_ENTERPRISE_REGISTRATION_ENABLED=true` +- `INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key` + +For all available environment variables, see how to [Configure Enterprise](/enterprise_influxdb/v1/administration/configure/). + +### Using configuration files + +You can also mount custom configuration files: + +```bash +# Mount custom meta configuration +docker run -d \ + --name=influxdb-meta-0 \ + --network=influxdb \ + -h influxdb-meta-0 \ + -v /path/to/influxdb-meta.conf:/etc/influxdb/influxdb-meta.conf \ + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key \ + influxdb:meta + +# Mount custom data configuration +docker run -d \ + --name=influxdb-data-0 \ + --network=influxdb \ + -h influxdb-data-0 \ + -v /path/to/influxdb.conf:/etc/influxdb/influxdb.conf \ + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key \ + influxdb:data +``` + +## Exposing ports + +To access your InfluxDB Enterprise cluster from outside Docker, expose the necessary ports: + +```bash +# Data node with HTTP API port exposed +docker run -d \ + --name=influxdb-data-0 \ + --network=influxdb \ + -h influxdb-data-0 \ + -p 8086:8086 \ + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key \ + influxdb:data +``` + +## Persistent data storage + +To persist data beyond container lifecycles, mount volumes: + +```bash +# Meta node with persistent storage +docker run -d \ + --name=influxdb-meta-0 \ + --network=influxdb \ + -h influxdb-meta-0 \ + -v influxdb-meta-0-data:/var/lib/influxdb \ + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key \ + influxdb:meta + +# Data node with persistent storage +docker run -d \ + --name=influxdb-data-0 \ + --network=influxdb \ + -h influxdb-data-0 \ + -v influxdb-data-0-data:/var/lib/influxdb \ + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key \ + influxdb:data +``` + +## Next steps + +Once your InfluxDB Enterprise cluster is running: + +1. 
[Set up authentication and authorization](/enterprise_influxdb/v1/administration/configure/security/authentication/) for your cluster. +2. [Enable TLS encryption](/enterprise_influxdb/v1/guides/enable-tls/) for secure communication. +3. [Install and set up Chronograf](/enterprise_influxdb/v1/introduction/installation/chrono_install) for cluster management and visualization. +4. Configure your load balancer to send client traffic to data nodes. For more information, see [Data node installation](/enterprise_influxdb/v1/introduction/installation/data_node_installation/). +5. [Monitor your cluster](/enterprise_influxdb/v1/administration/monitor/) for performance and reliability. +6. [Write data with the InfluxDB API](/enterprise_influxdb/v1/guides/write_data/). +7. [Query data with the InfluxDB API](/enterprise_influxdb/v1/guides/query_data/). diff --git a/content/enterprise_influxdb/v1/introduction/installation/docker/docker-troubleshooting.md b/content/enterprise_influxdb/v1/introduction/installation/docker/docker-troubleshooting.md new file mode 100644 index 000000000..959a84ea4 --- /dev/null +++ b/content/enterprise_influxdb/v1/introduction/installation/docker/docker-troubleshooting.md @@ -0,0 +1,226 @@ +--- +title: Docker troubleshooting for InfluxDB v1 Enterprise +description: Common Docker-specific issues and solutions for InfluxDB v1 Enterprise deployments. +menu: + enterprise_influxdb_v1: + name: Docker troubleshooting + weight: 35 + parent: Install with Docker +related: + - /enterprise_influxdb/v1/introduction/installation/docker/ + - /enterprise_influxdb/v1/troubleshooting/ + - /enterprise_influxdb/v1/administration/monitor/logs/ +--- + +This guide covers common Docker-specific issues and solutions when running InfluxDB v1 Enterprise in containers. + +## Common Docker issues + +### License key issues + +#### Problem: Container fails to start with license error + +**Symptoms:** +``` +license key verification failed +``` + +**Solution:** +1. 
Verify your license key is valid and not expired +2. Ensure the license key environment variable is set correctly: + ```bash + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-actual-license-key + ``` +3. If nodes cannot reach `portal.influxdata.com`, use a license file instead: + ```bash + -v /path/to/license.json:/etc/influxdb/license.json + -e INFLUXDB_ENTERPRISE_LICENSE_PATH=/etc/influxdb/license.json + ``` + +### Network connectivity issues + +#### Problem: Nodes cannot communicate with each other + +**Symptoms:** +- Meta nodes fail to join cluster +- Data nodes cannot connect to meta nodes +- `influxd-ctl show` shows missing nodes + +**Solution:** +1. Ensure all containers are on the same Docker network: + ```bash + docker network create influxdb + # Add --network=influxdb to all container runs + ``` +2. Use container hostnames consistently: + ```bash + # Use hostname (-h) that matches container name + -h influxdb-meta-0 --name=influxdb-meta-0 + ``` +3. Verify network connectivity between containers: + ```bash + docker exec influxdb-meta-0 ping influxdb-meta-1 + ``` + +#### Problem: Cannot access InfluxDB from host machine + +**Symptoms:** +- Connection refused when trying to connect to InfluxDB API +- Client tools cannot reach the database + +**Solution:** +Expose the HTTP API port (8086) when starting data nodes: +```bash +docker run -d \ + --name=influxdb-data-0 \ + --network=influxdb \ + -h influxdb-data-0 \ + -p 8086:8086 \ + -e INFLUXDB_ENTERPRISE_LICENSE_KEY=your-license-key \ + influxdb:data +``` + +### Configuration issues + +#### Problem: Custom configuration not being applied + +**Symptoms:** +- Environment variables ignored +- Configuration file changes not taking effect + +**Solution:** +1. For environment variables, use the correct format `INFLUXDB_$SECTION_$NAME`: + ```bash + # Correct + -e INFLUXDB_REPORTING_DISABLED=true + -e INFLUXDB_META_DIR=/custom/meta/dir + + # Incorrect + -e REPORTING_DISABLED=true + ``` + +2. 
For configuration files, ensure proper mounting: + ```bash + # Mount config file correctly + -v /host/path/influxdb.conf:/etc/influxdb/influxdb.conf + ``` + +3. Verify file permissions on mounted configuration files: + ```bash + # Config files should be readable by influxdb user (uid 1000) + chown 1000:1000 /host/path/influxdb.conf + chmod 644 /host/path/influxdb.conf + ``` + +### Data persistence issues + +#### Problem: Data lost when container restarts + +**Symptoms:** +- Databases and data disappear after container restart +- Cluster state not preserved + +**Solution:** +Mount data directories as volumes: +```bash +# For meta nodes +-v influxdb-meta-0-data:/var/lib/influxdb + +# For data nodes +-v influxdb-data-0-data:/var/lib/influxdb +``` + +### Resource and performance issues + +#### Problem: Containers running out of memory + +**Symptoms:** +- Containers being killed by Docker +- OOMKilled status in `docker ps` + +**Solution:** +1. Increase memory limits: + ```bash + --memory=4g --memory-swap=8g + ``` + +2. Monitor memory usage: + ```bash + docker stats influxdb-data-0 + ``` + +3. Optimize InfluxDB configuration for available resources. + +#### Problem: Poor performance in containerized environment + +**Solution:** +1. Ensure adequate CPU and memory allocation +2. Use appropriate Docker storage drivers +3. 
Consider host networking for high-throughput scenarios: + ```bash + --network=host + ``` + +## Debugging commands + +### Check container logs +```bash +# View container logs +docker logs influxdb-meta-0 +docker logs influxdb-data-0 + +# Follow logs in real-time +docker logs -f influxdb-meta-0 +``` + +### Verify cluster status +```bash +# Check cluster status from any meta node +docker exec influxdb-meta-0 influxd-ctl show + +# Check individual node status +docker exec influxdb-meta-0 influxd-ctl show-shards +``` + +### Network troubleshooting +```bash +# Test connectivity between containers +docker exec influxdb-meta-0 ping influxdb-data-0 +docker exec influxdb-meta-0 telnet influxdb-data-0 8088 + +# Check which ports are listening +docker exec influxdb-meta-0 netstat -tlnp +``` + +### Configuration verification +```bash +# Check effective configuration +docker exec influxdb-meta-0 cat /etc/influxdb/influxdb-meta.conf +docker exec influxdb-data-0 cat /etc/influxdb/influxdb.conf + +# Verify environment variables +docker exec influxdb-meta-0 env | grep INFLUXDB +``` + +## Best practices for Docker deployments + +1. **Use specific image tags** instead of `latest` for production deployments +2. **Implement health checks** to monitor container status +3. **Use Docker Compose** for complex multi-container setups +4. **Mount volumes** for data persistence +5. **Set resource limits** to prevent resource exhaustion +6. **Use secrets management** for license keys in production +7. **Implement proper logging** and monitoring +8. **Regular backups** of data volumes + +## Getting additional help + +If you continue to experience issues: + +1. Check the [general troubleshooting guide](/enterprise_influxdb/v1/troubleshooting/) +2. Review [InfluxDB Enterprise logs](/enterprise_influxdb/v1/administration/monitor/logs/) +3. 
Contact [InfluxData support](https://support.influxdata.com/) with: + - Docker version and configuration + - Container logs + - Cluster status output + - Network configuration details diff --git a/content/enterprise_influxdb/v1/introduction/installation/meta_node_installation.md b/content/enterprise_influxdb/v1/introduction/installation/meta_node_installation.md index 36ec1fe85..7e627b448 100644 --- a/content/enterprise_influxdb/v1/introduction/installation/meta_node_installation.md +++ b/content/enterprise_influxdb/v1/introduction/installation/meta_node_installation.md @@ -200,7 +200,7 @@ For added security, follow these steps to verify the signature of your InfluxDB 1. Download and import InfluxData's public key: ```sh - curl -s https://repos.influxdata.com/influxdata-archive_compat.key | gpg --import + curl -s https://repos.influxdata.com/influxdata-archive.key | gpg --import ``` 2. Download the signature file for the release by adding `.asc` to the download URL. @@ -232,7 +232,7 @@ wget https://dl.influxdata.com/enterprise/releases/fips/influxdb-meta-{{< latest The output from this command should include the following: ```sh - gpg: Good signature from "InfluxDB Packaging Service " [unknown] + gpg: Good signature from "InfluxData Package Signing Key " [unknown] ``` {{% /expand %}} {{< /expand-wrapper >}} @@ -365,6 +365,10 @@ the cluster._ {{% /expand %}} {{< /expand-wrapper >}} +## Docker installation + +For Docker-based installations, see [Install and run InfluxDB v1 Enterprise with Docker](/enterprise_influxdb/v1/introduction/installation/docker/) for complete instructions on setting up meta nodes using Docker images. + After your meta nodes are part of your cluster, [install data nodes](/enterprise_influxdb/v1/introduction/installation/data_node_installation/). 
diff --git a/content/enterprise_influxdb/v1/introduction/installation/single-server.md b/content/enterprise_influxdb/v1/introduction/installation/single-server.md index 3ebb48701..0b3ef3554 100644 --- a/content/enterprise_influxdb/v1/introduction/installation/single-server.md +++ b/content/enterprise_influxdb/v1/introduction/installation/single-server.md @@ -111,7 +111,7 @@ InfluxDB Enterprise meta service download with `gpg`. 1. Download and import InfluxData's public key: ```sh - curl -s https://repos.influxdata.com/influxdata-archive_compat.key | gpg --import + curl -s https://repos.influxdata.com/influxdata-archive.key | gpg --import ``` 2. Download the signature file for the release by adding `.asc` to the download URL. @@ -130,7 +130,7 @@ InfluxDB Enterprise meta service download with `gpg`. The output from this command should include the following: ``` - gpg: Good signature from "InfluxDB Packaging Service " [unknown] + gpg: Good signature from "InfluxData Package Signing Key " [unknown] ``` {{% /expand %}} {{< /expand-wrapper >}} @@ -356,7 +356,7 @@ InfluxDB Enterprise data service download with `gpg`. 1. Download and import InfluxData's public key: ```sh - curl -s https://repos.influxdata.com/influxdata-archive_compat.key | gpg --import + curl -s https://repos.influxdata.com/influxdata-archive.key | gpg --import ``` 2. Download the signature file for the release by adding `.asc` to the download URL. @@ -375,7 +375,7 @@ InfluxDB Enterprise data service download with `gpg`. The output from this command should include the following: ``` - gpg: Good signature from "InfluxDB Packaging Service " [unknown] + gpg: Good signature from "InfluxData Package Signing Key " [unknown] ``` {{% /expand %}} {{< /expand-wrapper >}} @@ -475,7 +475,7 @@ sudo systemctl start influxdb ``` If you do not see the expected output, the process is either not launching or is exiting prematurely. 
- Check the [logs](/enterprise_influxdb/v1/administration/logs/) + Check the [logs](/enterprise_influxdb/v1/administration/monitor/logs/) for error messages and verify the previous setup steps are complete. 5. **Use `influxd-ctl` to add the data process to the InfluxDB Enterprise "cluster"**: @@ -542,9 +542,7 @@ For Chronograf installation instructions, see [Install Chronograf](/chronograf/v1/introduction/installation/). ## Next steps -- Add more users if necessary. - See [Manage users and permissions](/enterprise_influxdb/v1/administration/manage/users-and-permissions/) - for more information. -- [Enable TLS](/enterprise_influxdb/v1/guides/enable-tls/). -- [Write data with the InfluxDB API](/enterprise_influxdb/v1/guides/write_data/). -- [Query data with the InfluxDB API](/enterprise_influxdb/v1/guides/query_data/). +- For information about adding users, see [Manage users and permissions](/enterprise_influxdb/v1/administration/manage/users-and-permissions/) +- [Enable TLS](/enterprise_influxdb/v1/guides/enable-tls/) +- [Write data with the InfluxDB API](/enterprise_influxdb/v1/guides/write_data/) +- [Query data with the InfluxDB API](/enterprise_influxdb/v1/guides/query_data/) diff --git a/content/enterprise_influxdb/v1/query_language/manage-database.md b/content/enterprise_influxdb/v1/query_language/manage-database.md index 73d5ce7fc..4a1f09b7a 100644 --- a/content/enterprise_influxdb/v1/query_language/manage-database.md +++ b/content/enterprise_influxdb/v1/query_language/manage-database.md @@ -62,17 +62,22 @@ Creates a new database. #### Syntax ```sql -CREATE DATABASE [WITH [DURATION ] [REPLICATION ] [SHARD DURATION ] [NAME ]] +CREATE DATABASE [WITH [DURATION ] [REPLICATION ] [SHARD DURATION ] [PAST LIMIT ] [FUTURE LIMIT ] [NAME ]] ``` #### Description of syntax `CREATE DATABASE` requires a database [name](/enterprise_influxdb/v1/troubleshooting/frequently-asked-questions/#what-words-and-characters-should-i-avoid-when-writing-data-to-influxdb). 
-The `WITH`, `DURATION`, `REPLICATION`, `SHARD DURATION`, and `NAME` clauses are optional and create a single [retention policy](/enterprise_influxdb/v1/concepts/glossary/#retention-policy-rp) associated with the created database.
-If you do not specify one of the clauses after `WITH`, the relevant behavior defaults to the `autogen` retention policy settings.
+The `WITH`, `DURATION`, `REPLICATION`, `SHARD DURATION`, `PAST LIMIT`,
+`FUTURE LIMIT`, and `NAME` clauses are optional and create a single
+[retention policy](/enterprise_influxdb/v1/concepts/glossary/#retention-policy-rp)
+associated with the created database.
+If you do not specify one of the clauses after `WITH`, the relevant behavior
+defaults to the `autogen` retention policy settings.
 The created retention policy automatically serves as the database's default retention policy.
-For more information about those clauses, see [Retention Policy Management](/enterprise_influxdb/v1/query_language/manage-database/#retention-policy-management).
+For more information about those clauses, see
+[Retention Policy Management](/enterprise_influxdb/v1/query_language/manage-database/#retention-policy-management).
 A successful `CREATE DATABASE` query returns an empty result.
 If you attempt to create a database that already exists, InfluxDB does nothing and does not return an error.
 @@ -122,21 +127,25 @@ The `DROP SERIES` query deletes all points from a [series](/enterprise_influxdb/
The query takes the following form, where you must specify either the `FROM` clause or the `WHERE` clause: + ```sql DROP SERIES FROM WHERE ='' ``` Drop all series from a single measurement: + ```sql > DROP SERIES FROM "h2o_feet" ``` Drop series with a specific tag pair from a single measurement: + ```sql > DROP SERIES FROM "h2o_feet" WHERE "location" = 'santa_monica' ``` Drop all points in the series that have a specific tag pair from all measurements in the database: + ```sql > DROP SERIES WHERE "location" = 'santa_monica' ``` @@ -152,35 +161,48 @@ Unlike You must include either the `FROM` clause, the `WHERE` clause, or both: -``` +```sql DELETE FROM WHERE [=''] | [