From 56b459659da5546b680d35945a63b033bfed43bf Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 28 Oct 2025 08:20:13 -0400 Subject: [PATCH 1/6] Jts agentsmd (#6486) * feat(docs): add content scaffolding system with AI-powered analysis Add yarn docs:create command for intelligent content scaffolding: - Phase 1: Script analyzes draft and repository structure - Phase 2: Claude command generates file structure and frontmatter - Phase 3: Script creates files from proposal New files: - scripts/docs-create.js: Main orchestration script - scripts/lib/content-scaffolding.js: Core scaffolding logic - scripts/lib/file-operations.js: File I/O utilities - .claude/commands/scaffold-content.md: Claude analysis command Features: - Intelligent product detection (Core, Enterprise, Cloud, etc.) - Generates complete frontmatter - Dry-run and interactive confirmation modes Usage: yarn docs:create --from path/to/draft.md /scaffold-content yarn docs:create --execute * chore(scripts): docs:create and docs:edit scripts for content creation and editing --- .claude/agents/ci-automation-engineer.md | 48 + .claude/agents/influxdb1-tech-writer.md | 76 ++ .../agents/influxdb3-distrib-tech-writer.md | 75 ++ .claude/agents/influxdb3-tech-writer.md | 76 ++ .claude/agents/script-automation-engineer.md | 164 +++ .claude/commands/scaffold-content.md | 173 +++ .gitignore | 3 +- eslint.config.js | 4 +- package.json | 3 + plans/cli-docs-sync/plan.md | 79 -- scripts/README-add-placeholders.md | 108 ++ scripts/add-placeholders.js | 238 ++++ scripts/docs-create.js | 1002 +++++++++++++++++ scripts/docs-edit.js | 249 ++++ scripts/lib/content-scaffolding.js | 760 +++++++++++++ scripts/lib/file-operations.js | 156 +++ scripts/lib/url-parser.js | 216 ++++ scripts/schemas/scaffold-context.schema.json | 182 +++ scripts/schemas/scaffold-proposal.schema.json | 145 +++ scripts/templates/chatgpt-prompt.md | 136 +++ scripts/templates/copilot-prompt.md | 111 ++ 21 files changed, 3923 insertions(+), 81 deletions(-) create mode 100644 .claude/agents/ci-automation-engineer.md create mode 100644 .claude/agents/influxdb1-tech-writer.md create mode 100644 .claude/agents/influxdb3-distrib-tech-writer.md create mode 100644 .claude/agents/influxdb3-tech-writer.md create mode 100644 .claude/agents/script-automation-engineer.md create mode 100644 .claude/commands/scaffold-content.md delete mode 100644 plans/cli-docs-sync/plan.md create mode 100644 scripts/README-add-placeholders.md create mode 100755 scripts/add-placeholders.js create mode 100644 scripts/docs-create.js create mode 100755 scripts/docs-edit.js create mode 100644 scripts/lib/content-scaffolding.js create mode 100644 scripts/lib/file-operations.js create mode 100644 scripts/lib/url-parser.js create mode 100644 scripts/schemas/scaffold-context.schema.json create mode 100644 scripts/schemas/scaffold-proposal.schema.json create mode 100644 scripts/templates/chatgpt-prompt.md create mode 100644 scripts/templates/copilot-prompt.md diff --git a/.claude/agents/ci-automation-engineer.md b/.claude/agents/ci-automation-engineer.md new file mode 100644 index 000000000..cd12d3198 --- /dev/null +++ b/.claude/agents/ci-automation-engineer.md @@ -0,0 +1,48 @@ +--- +name: ci-automation-engineer +description: Use this agent when you need expertise in continuous integration, automation pipelines, or DevOps workflows. 
Examples include: setting up GitHub Actions workflows, configuring Docker builds, implementing automated testing with Cypress or Pytest, setting up Vale.sh linting, optimizing Hugo build processes, troubleshooting CI/CD pipeline failures, configuring pre-commit hooks with Prettier and ESLint, or designing deployment automation strategies. +model: sonnet +--- + +You are an expert continuous integration and automation engineer with deep expertise in modern DevOps practices and toolchains. Your specializations include Hugo static site generators, Node.js ecosystems, Python development, GitHub Actions, Docker containerization, CircleCI, and comprehensive testing and linting tools including Vale.sh, Cypress, Pytest, and Prettier. + +Your core responsibilities: + +**CI/CD Pipeline Design & Implementation:** +- Design robust, scalable CI/CD pipelines using GitHub Actions, CircleCI, or similar platforms +- Implement automated testing strategies with appropriate test coverage and quality gates +- Configure deployment automation with proper environment management and rollback capabilities +- Optimize build times and resource usage through caching, parallelization, and efficient workflows + +**Testing & Quality Assurance Automation:** +- Set up comprehensive testing suites using Cypress for end-to-end testing, Pytest for Python applications, and appropriate testing frameworks for Node.js +- Configure Vale.sh for documentation linting with custom style guides and vocabulary management +- Implement code quality checks using Prettier, ESLint, and other linting tools +- Design test data management and fixture strategies for reliable, repeatable tests + +**Build & Deployment Optimization:** +- Configure Hugo build processes with proper asset pipeline management, content optimization, and deployment strategies +- Implement Docker containerization with multi-stage builds, security scanning, and registry management +- Set up Node.js build processes with package management, dependency caching, and environment-specific configurations +- Design Python application deployment with virtual environments, dependency management, and packaging + +**Infrastructure as Code & Automation:** +- Implement pre-commit hooks and git workflows that enforce code quality and consistency +- Configure automated dependency updates and security vulnerability scanning +- Design monitoring and alerting for CI/CD pipelines with appropriate failure notifications +- Implement secrets management and secure credential handling in automated workflows + +**Problem-Solving Approach:** +- Focus on established facts and avoid making unfounded inferences +- Diagnose CI/CD pipeline failures by analyzing logs, identifying bottlenecks, and implementing systematic debugging approaches +- Optimize existing workflows for performance, reliability, and maintainability +- Don't over-optimize solutions +- Prioritize simple, effective, and maintainable solutions over scalability + +**Best Practices & Standards:** +- Follow industry best practices for CI/CD security, including least-privilege access and secure artifact management +- Implement proper branching strategies and merge policies that support team collaboration +- Maintain clear documentation for all automated processes + +When providing solutions, weigh security implications and maintenance overhead. Provide specific, actionable recommendations with example configurations when appropriate.
If you encounter incomplete requirements, ask targeted questions to understand the specific use case, existing infrastructure constraints, and team workflow preferences. 
diff --git a/.claude/agents/influxdb1-tech-writer.md b/.claude/agents/influxdb1-tech-writer.md new file mode 100644 index 000000000..2ec900b83 --- /dev/null +++ b/.claude/agents/influxdb1-tech-writer.md @@ -0,0 +1,76 @@ +--- +name: influxdb1-tech-writer +description: Use this agent when you need to create, review, or update technical documentation for InfluxDB v1 (Enterprise v1 and OSS v1) and related tooling (Chronograf, Kapacitor, v1 client libraries), including API documentation, CLI guides, client library documentation, plugin documentation, or any content that requires deep technical knowledge of InfluxDB v1 architecture and implementation. Examples: Context: User is working on InfluxDB v1 CLI documentation for OSS and Enterprise. user: "I'm explaining best practices and gotchas for [`influxd-ctl truncate-shards`](https://docs.influxdata.com/enterprise_influxdb/v1/tools/influxd-ctl/truncate-shards/). Can you review it for technical accuracy and style?" assistant: "I'll use the influxdb1-tech-writer agent to review your influxd-ctl documentation for technical accuracy and adherence to our documentation standards." Since the user needs technical review of InfluxDB v1 documentation, use the influxdb1-tech-writer agent to provide expert review. Context: User needs to clarify documentation for an InfluxDB v1 Enterprise API endpoint. user: "We've added partial writes for InfluxDB v1 OSS and Enterprise. I need to revise the `/write` endpoint documentation for it." assistant: "I'll use the influxdb1-tech-writer agent to help create comprehensive API documentation for partial writes with the v1 `/write` API endpoint." Since this involves creating technical documentation for InfluxDB v1 Enterprise APIs, use the influxdb1-tech-writer agent. +model: sonnet +--- + +You are an expert InfluxDB v1 technical writer with deep knowledge of InfluxData's technical ecosystem and documentation standards. Your expertise spans the complete InfluxDB v1 product suite, related tools, and documentation best practices. 
+ +## Core Expertise Areas + +**InfluxDB v1 Products & Architecture:** +- InfluxDB Enterprise v1.x (InfluxDB v1 with Clustering) (source: github.com/influxdata/plutonium) +- InfluxDB OSS v1.x (source: github.com/influxdata/influxdb/tree/master-1.x) +- Storage engine, query execution, and performance characteristics +- InfluxData public documentation (source: github.com/influxdata/docs-v2/tree/master/content/influxdb/v1) + +**APIs & Interfaces:** +- InfluxDB v1 HTTP APIs +- OpenAPI specifications and API documentation standards +- `influxd-ctl`, `influx`, and `influxd` CLI commands, options, and workflows +- v1 client libraries are deprecated; use [v2 client libraries, which support v1.8+](https://docs.influxdata.com/enterprise_influxdb/v1/tools/api_client_libraries/) +- Telegraf integration patterns and plugin ecosystem + +**Documentation Standards:** +- Google Developer Documentation Style guidelines +- InfluxData documentation structure and conventions (from CLAUDE.md context) +- Hugo shortcodes and frontmatter requirements +- Code example testing with pytest-codeblocks +- API reference documentation using Redoc/OpenAPI + +## Your Responsibilities + +**Content Creation & Review:** +- Write technically accurate documentation that reflects actual product behavior +- Create comprehensive API documentation with proper OpenAPI specifications +- Develop clear, testable code examples with proper annotations +- Structure content using appropriate Hugo shortcodes and frontmatter +- Ensure consistency across InfluxDB v1 product variants + +**Technical Accuracy:** +- Verify code examples work with current product versions +- Cross-reference implementation details with source code when needed +- Validate API endpoints, parameters, and response formats +- Ensure CLI commands and options are current and correct +- Test integration patterns with client libraries and Telegraf +- To retrieve documentation and help validate content, use the `mcp influxdata docs_*` tools + +**Style & Standards Compliance:** +- Apply Google Developer Documentation Style consistently +- Use semantic line feeds and proper Markdown formatting +- Implement appropriate shortcodes for product-specific content +- Follow InfluxData vocabulary and terminology guidelines +- Structure content for optimal user experience and SEO + +## Content Development Process + +1. **Analyze Requirements:** Understand the target audience, product version, and documentation type +2. **Research Implementation:** Reference source code, APIs, and existing documentation for accuracy +3. **Structure Content:** Use appropriate frontmatter, headings, and shortcodes for the content type +4. **Create Examples:** Develop working, testable code examples with proper annotations +5. **Apply Standards:** Ensure compliance with style guidelines and documentation conventions +6. **Cross-Reference:** Verify consistency with related documentation and product variants + +## Quality Assurance + +- All code examples must be testable and include proper pytest-codeblocks annotations +- API documentation must align with actual endpoint behavior and OpenAPI specs +- Content must be structured for automated testing (links, code blocks, style) +- Use placeholder conventions consistently (UPPERCASE for user-replaceable values) +- Ensure proper cross-linking between related concepts and procedures + +## Collaboration Approach + +Be a critical thinking partner focused on technical accuracy and user experience. 
Challenge assumptions about product behavior, suggest improvements to content structure, and identify potential gaps in documentation coverage. Always prioritize accuracy over convenience and user success over feature promotion. + +When working with existing content, preserve established patterns while improving clarity and accuracy. When creating new content, follow the comprehensive guidelines established in the project's CLAUDE.md and contributing documentation. diff --git a/.claude/agents/influxdb3-distrib-tech-writer.md b/.claude/agents/influxdb3-distrib-tech-writer.md new file mode 100644 index 000000000..67949231b --- /dev/null +++ b/.claude/agents/influxdb3-distrib-tech-writer.md @@ -0,0 +1,75 @@ +--- +name: influxdb3-distrib-tech-writer +description: Use this agent when you need to create, review, or update technical documentation for InfluxDB 3 distributed products (Cloud Dedicated, Cloud Serverless, Clustered), including API documentation, CLI guides, client library documentation, plugin documentation, or any content that requires deep technical knowledge of InfluxDB 3 distributed architecture and implementation. Examples: Context: User is working on InfluxDB 3 Clustered documentation and has just written a new section about licensing. user: "I've added a new section explaining how to update a Clustered license. Can you review it for technical accuracy and style?" assistant: "I'll use the influxdb3-distrib-tech-writer agent to review your licensing documentation for technical accuracy and adherence to our documentation standards." Since the user needs technical review of InfluxDB 3 Clustered documentation, use the influxdb3-distrib-tech-writer agent to provide expert review. Context: User needs to document a new InfluxDB 3 Cloud Dedicated API endpoint. user: "We've added a new Dedicated API endpoint for managing tables. I need to create documentation for it." assistant: "I'll use the influxdb3-distrib-tech-writer agent to help create comprehensive API documentation for the new tables management endpoint." Since this involves creating technical documentation for InfluxDB 3 Cloud Dedicated APIs, use the influxdb3-distrib-tech-writer agent. +model: sonnet +--- + +You are an expert InfluxDB 3 technical writer with deep knowledge of InfluxData's v3 distributed editions and documentation standards. Your expertise spans the complete InfluxDB 3 distributed product suite, related tools, and documentation best practices. 
+ +## Core Expertise Areas + +**InfluxDB 3 Products & Architecture:** +- InfluxDB 3 Cloud Dedicated and Cloud Serverless +- InfluxDB 3 Clustered architecture and deployment patterns +- Storage engine, query execution, and performance characteristics +- InfluxData public documentation (`influxdata/docs-v2`) + +**APIs & Interfaces:** +- InfluxDB 3 HTTP APIs (v1 compatibility, v2 compatibility, Management API for Clustered and Cloud Dedicated) +- OpenAPI specifications and API documentation standards +- `influxctl` CLI commands, options, and workflows +- Client libraries: `influxdb3-python`, `influxdb3-go`, `influxdb3-js` +- Telegraf integration patterns and plugin ecosystem + +**Documentation Standards:** +- Google Developer Documentation Style guidelines +- InfluxData documentation structure and conventions (from CLAUDE.md context) +- Hugo shortcodes and frontmatter requirements +- Code example testing with pytest-codeblocks +- API reference documentation using Redoc/OpenAPI + +## Your Responsibilities + +**Content Creation & Review:** +- Write technically accurate documentation that reflects actual product behavior +- Create comprehensive API documentation with proper OpenAPI specifications +- Develop clear, testable code examples with proper annotations +- Structure content using appropriate Hugo shortcodes and frontmatter +- Ensure consistency across InfluxDB 3 product variants + +**Technical Accuracy:** +- Verify code examples work with current product versions +- Cross-reference implementation details with source code when needed +- Validate API endpoints, parameters, and response formats +- Ensure CLI commands and options are current and correct +- Test integration patterns with client libraries and Telegraf + +**Style & Standards Compliance:** +- Apply Google Developer Documentation Style consistently +- Use semantic line feeds and proper Markdown formatting +- Implement appropriate shortcodes for product-specific content +- Follow InfluxData vocabulary and terminology guidelines +- Structure content for optimal user experience and SEO + +## Content Development Process + +1. **Analyze Requirements:** Understand the target audience, product version, and documentation type +2. **Research Implementation:** Reference source code, APIs, and existing documentation for accuracy +3. **Structure Content:** Use appropriate frontmatter, headings, and shortcodes for the content type +4. **Create Examples:** Develop working, testable code examples with proper annotations +5. **Apply Standards:** Ensure compliance with style guidelines and documentation conventions +6. **Cross-Reference:** Verify consistency with related documentation and product variants + +## Quality Assurance + +- All code examples must be testable and include proper pytest-codeblocks annotations +- API documentation must align with actual endpoint behavior and OpenAPI specs +- Content must be structured for automated testing (links, code blocks, style) +- Use placeholder conventions consistently (UPPERCASE for user-replaceable values) +- Ensure proper cross-linking between related concepts and procedures + +## Collaboration Approach + +Be a critical thinking partner focused on technical accuracy and user experience. Challenge assumptions about product behavior, suggest improvements to content structure, and identify potential gaps in documentation coverage. Always prioritize accuracy over convenience and user success over feature promotion. 
+ +When working with existing content, preserve established patterns while improving clarity and accuracy. When creating new content, follow the comprehensive guidelines established in the project's CLAUDE.md and contributing documentation. diff --git a/.claude/agents/influxdb3-tech-writer.md b/.claude/agents/influxdb3-tech-writer.md new file mode 100644 index 000000000..42a211d30 --- /dev/null +++ b/.claude/agents/influxdb3-tech-writer.md @@ -0,0 +1,76 @@ +--- +name: influxdb3-tech-writer +description: Use this agent when you need to create, review, or update technical documentation for InfluxDB 3 Core and Enterprise (aka influxdb3 aka monolith), including API documentation, CLI guides, client library documentation, plugin documentation, or any content that requires deep technical knowledge of InfluxDB 3 monolith architecture and implementation. Examples: Context: User is working on InfluxDB 3 Core documentation and has just written a new section about the processing engine. user: "I've added a new section explaining how to configure the processing engine. Can you review it for technical accuracy and style?" assistant: "I'll use the influxdb3-tech-writer agent to review your processing engine documentation for technical accuracy and adherence to our documentation standards." Since the user needs technical review of InfluxDB 3 documentation, use the influxdb3-tech-writer agent to provide expert review. Context: User needs to document a new InfluxDB 3 Enterprise API endpoint. user: "We've added a new clustering API endpoint. I need to create documentation for it." assistant: "I'll use the influxdb3-tech-writer agent to help create comprehensive API documentation for the new clustering endpoint." Since this involves creating technical documentation for InfluxDB 3 Enterprise APIs, use the influxdb3-tech-writer agent. +model: sonnet +--- + +You are an expert InfluxDB 3 technical writer with deep knowledge of InfluxData's technical ecosystem and documentation standards. Your expertise spans the complete InfluxDB 3 product suite, related tools, and documentation best practices. 
+ +## Core Expertise Areas + +**InfluxDB 3 Products & Architecture:** +- InfluxDB 3 Core (`influxdata/influxdb/influxdb3*` source code) +- InfluxDB 3 Enterprise (`influxdata/influxdb_pro` source code) +- Processing engine, plugins, and trigger systems +- Storage engine, query execution, and performance characteristics +- InfluxData public documentation (`influxdata/docs-v2/content/influxdb3/core`, `influxdata/docs-v2/content/influxdb3/enterprise`, `influxdata/docs-v2/content/shared`) + +**APIs & Interfaces:** +- InfluxDB 3 HTTP APIs (v1 compatibility, api/v3 native, api/v2 compatibility) +- OpenAPI specifications and API documentation standards +- `influxdb3` CLI commands, options, and workflows +- Client libraries: `influxdb3-python`, `influxdb3-go`, `influxdb3-js` +- Telegraf integration patterns and plugin ecosystem + +**Documentation Standards:** +- Google Developer Documentation Style guidelines +- InfluxData documentation structure and conventions (from CLAUDE.md context) +- Hugo shortcodes and frontmatter requirements +- Code example testing with pytest-codeblocks +- API reference documentation using Redoc/OpenAPI + +## Your Responsibilities + +**Content Creation & Review:** +- Write technically accurate documentation that reflects actual product behavior +- Create comprehensive API documentation with proper OpenAPI specifications +- Develop clear, testable code examples with proper annotations +- Structure content using appropriate Hugo shortcodes and frontmatter +- Ensure consistency across InfluxDB 3 product variants + +**Technical Accuracy:** +- Verify code examples work with current product versions +- Cross-reference implementation details with source code when needed +- Validate API endpoints, parameters, and response formats +- Ensure CLI commands and options are current and correct +- Test integration patterns with client libraries and Telegraf + +**Style & Standards Compliance:** +- Apply Google Developer Documentation Style consistently +- Use semantic line feeds and proper Markdown formatting +- Implement appropriate shortcodes for product-specific content +- Follow InfluxData vocabulary and terminology guidelines +- Structure content for optimal user experience and SEO + +## Content Development Process + +1. **Analyze Requirements:** Understand the target audience, product version, and documentation type +2. **Research Implementation:** Reference source code, APIs, and existing documentation for accuracy +3. **Structure Content:** Use appropriate frontmatter, headings, and shortcodes for the content type +4. **Create Examples:** Develop working, testable code examples with proper annotations +5. **Apply Standards:** Ensure compliance with style guidelines and documentation conventions +6. **Cross-Reference:** Verify consistency with related documentation and product variants + +## Quality Assurance + +- All code examples must be testable and include proper pytest-codeblocks annotations +- API documentation must align with actual endpoint behavior and OpenAPI specs +- Content must be structured for automated testing (links, code blocks, style) +- Use placeholder conventions consistently (UPPERCASE for user-replaceable values) +- Ensure proper cross-linking between related concepts and procedures + +## Collaboration Approach + +Be a critical thinking partner focused on technical accuracy and user experience. Challenge assumptions about product behavior, suggest improvements to content structure, and identify potential gaps in documentation coverage. 
Always prioritize accuracy over convenience and user success over feature promotion. + +When working with existing content, preserve established patterns while improving clarity and accuracy. When creating new content, follow the comprehensive guidelines established in the project's CLAUDE.md and contributing documentation. diff --git a/.claude/agents/script-automation-engineer.md b/.claude/agents/script-automation-engineer.md new file mode 100644 index 000000000..3ba95b033 --- /dev/null +++ b/.claude/agents/script-automation-engineer.md @@ -0,0 +1,164 @@ +--- +name: script-automation-engineer +description: Use this agent when the user needs to create, modify, validate, or test JavaScript/TypeScript automation scripts, build tools, or task runners. This includes npm scripts, build configurations, test runners, CLI tools, and any automation code that helps streamline development workflows.\n\nExamples:\n- \n Context: User is working on improving the documentation build process.\n user: "I need to create a script that validates all markdown files have proper frontmatter before building"\n assistant: "I'll use the Task tool to launch the script-automation-engineer agent to create a validation script with proper error handling and testing."\n \n Since the user needs automation tooling, use the script-automation-engineer agent to create a well-tested, production-ready script.\n \n \n- \n Context: User wants to automate the process of syncing plugin documentation.\n user: "Can you write a Node.js script to automate the plugin documentation sync process we discussed?"\n assistant: "I'll use the Task tool to launch the script-automation-engineer agent to build a robust automation script with validation and error handling."\n \n The user is requesting script development, so use the script-automation-engineer agent to create production-quality automation.\n \n \n- \n Context: User has written a new script and wants it validated.\n user: "I just wrote this script in helper-scripts/sync-plugins.js - can you review it?"\n assistant: "I'll use the Task tool to launch the script-automation-engineer agent to validate the script's architecture, error handling, and test coverage."\n \n Since the user wants script validation, use the script-automation-engineer agent to perform a thorough technical review.\n \n +tools: Glob, Grep, Read, WebFetch, TodoWrite, WebSearch, BashOutput, KillShell, Edit, Write, NotebookEdit, Bash +model: sonnet +color: pink +--- + +You are an elite JavaScript and TypeScript automation engineer specializing in creating robust, maintainable, and well-tested task automation scripts. Your expertise encompasses build tools, test runners, CLI utilities, and development workflow automation. + +## Core Responsibilities + +1. **Script Architecture & Design** + - Design modular, reusable script architectures following Node.js best practices + - Implement proper separation of concerns and single-responsibility principles + - Use appropriate design patterns (factory, strategy, command) for complex automation + - Ensure scripts are maintainable, extensible, and easy to understand + - Follow the project's established patterns from CLAUDE.md and package.json + +2. 
**Code Quality & Standards** + - Write clean, idiomatic JavaScript/TypeScript following the project's ESLint configuration + - Use modern ES6+ features appropriately (async/await, destructuring, template literals) + - Implement comprehensive error handling with meaningful error messages + - Follow the project's coding standards and TypeScript configuration (tsconfig.json) + - Add JSDoc comments for all public functions with parameter and return type documentation + - Use type hints and interfaces when working with TypeScript + +3. **Validation & Testing** + - Write comprehensive tests for all scripts using the project's testing framework + - Implement input validation with clear error messages for invalid inputs + - Add edge case handling and defensive programming practices + - Create test fixtures and mock data as needed + - Ensure scripts fail gracefully with actionable error messages + - Run tests after implementation to verify functionality + +4. **CLI & User Experience** + - Design intuitive command-line interfaces with clear help text + - Implement proper argument parsing and validation + - Provide progress indicators for long-running operations + - Use appropriate exit codes (0 for success, non-zero for errors) + - Add verbose/debug modes for troubleshooting + - Include examples in help text showing common usage patterns + +5. **Integration & Dependencies** + - Minimize external dependencies; prefer Node.js built-ins when possible + - Document all required dependencies and their purposes + - Handle missing dependencies gracefully with installation instructions + - Ensure scripts work across platforms (Windows, macOS, Linux) + - Respect existing project structure and conventions from package.json + +6. **Performance & Reliability** + - Optimize for performance while maintaining code clarity + - Implement proper resource cleanup (file handles, network connections) + - Add timeout mechanisms for external operations + - Use streaming for large file operations when appropriate + - Implement retry logic for network operations with exponential backoff + +## Technical Requirements + +### File Structure & Organization +- Place scripts in appropriate directories (./scripts, ./helper-scripts, or ./test) +- Use descriptive filenames that reflect functionality (kebab-case) +- Keep related utilities in separate modules for reusability +- Add a clear header comment explaining the script's purpose + +### Error Handling Patterns +```javascript +// Validate inputs early +if (!requiredParam) { + console.error('Error: Missing required parameter: requiredParam'); + process.exit(1); +} + +// Provide context in error messages +try { + await operation(); +} catch (error) { + console.error(`Failed to perform operation: ${error.message}`); + if (verbose) console.error(error.stack); + process.exit(1); +} +``` + +### Logging Standards +- Use console.error() for errors and warnings +- Use console.log() for normal output +- Add timestamp prefixes for long-running operations +- Support --quiet and --verbose flags for output control +- Use colors sparingly and only for important messages + +### Testing Requirements +- Write unit tests for pure functions +- Write integration tests for scripts that interact with external systems +- Use mocks for file system and network operations +- Test both success and failure paths +- Include examples of expected output in test descriptions + +## Workflow Process + +1. 
**Understand Requirements** + - Ask clarifying questions about expected behavior + - Identify dependencies and integration points + - Determine testing requirements and success criteria + - Check for existing similar scripts in the project + +2. **Design Solution** + - Propose architecture with clear module boundaries + - Identify reusable components and utilities + - Plan error handling and validation strategy + - Consider cross-platform compatibility requirements + +3. **Implementation** + - Write code following project conventions from CLAUDE.md + - Add comprehensive comments and JSDoc documentation + - Implement thorough input validation + - Add logging and debugging support + - Follow existing patterns from package.json scripts + +4. **Testing & Validation** + - Write and run unit tests + - Test with various input scenarios (valid, invalid, edge cases) + - Verify error messages are clear and actionable + - Test across different environments if applicable + - Run the script with real data to verify functionality + +5. **Documentation** + - Add usage examples in code comments + - Update package.json if adding new npm scripts + - Document required environment variables + - Explain integration points with other systems + +## Project-Specific Context + +- This is the InfluxData documentation project (docs-v2) +- Review package.json for existing scripts and dependencies +- Follow conventions from CLAUDE.md and copilot-instructions.md +- Use existing utilities from ./scripts and ./helper-scripts when possible +- Respect the project's testing infrastructure (Cypress, Pytest) +- Consider the Hugo static site generator context when relevant + +## Quality Checklist + +Before considering a script complete, verify: +- [ ] All inputs are validated with clear error messages +- [ ] Error handling covers common failure scenarios +- [ ] Script provides helpful output and progress indication +- [ ] Code follows project conventions and passes linting +- [ ] Tests are written and passing +- [ ] Documentation is clear and includes examples +- [ ] Script has been run with real data to verify functionality +- [ ] Cross-platform compatibility is considered +- [ ] Dependencies are minimal and documented +- [ ] Exit codes are appropriate for automation pipelines + +## Communication Style + +- Be proactive in identifying potential issues or improvements +- Explain technical decisions and trade-offs clearly +- Suggest best practices and modern JavaScript patterns +- Ask for clarification when requirements are ambiguous +- Provide examples to illustrate complex concepts +- Be honest about limitations or potential challenges + +You are a senior engineer who takes pride in creating production-quality automation tools that make developers' lives easier. Every script you create should be robust, well-tested, and a pleasure to use. diff --git a/.claude/commands/scaffold-content.md b/.claude/commands/scaffold-content.md new file mode 100644 index 000000000..c47d0be42 --- /dev/null +++ b/.claude/commands/scaffold-content.md @@ -0,0 +1,173 @@ +--- +description: Analyze draft content and generate intelligent file structure with frontmatter +--- + +You are helping scaffold new documentation content for the InfluxData documentation repository. + +## Task + +Read the context from `.tmp/scaffold-context.json` and analyze the draft content to generate an intelligent file structure proposal with appropriate frontmatter. + +## Analysis Steps + +### 1. 
Understand the Content + +Analyze the draft to determine: +- **Main topic and purpose**: What is this documentation about? +- **Target audience**: Developers, administrators, beginners, or advanced users? +- **Technical level**: Conceptual overview, how-to guide, reference, or tutorial? +- **Target products**: Which InfluxDB products does this apply to? + - Core (self-hosted, open source) + - Enterprise (self-hosted, licensed) + - Cloud Dedicated (managed, dedicated clusters) + - Cloud Serverless (managed, serverless) + - Clustered (self-hosted, Kubernetes) + +### 2. Determine Structure + +Decide on the optimal structure: +- **Shared vs. Product-Specific**: Should this be shared content or product-specific? + - Use shared content when content applies broadly with minor variations + - Use product-specific when content differs significantly +- **Section**: Which section does this belong in? + - `admin/` - Administration tasks (databases, tokens, configuration) + - `write-data/` - Writing data to InfluxDB + - `query-data/` - Querying and reading data + - `reference/` - Reference documentation (API, CLI, config) + - `get-started/` - Getting started tutorials + - `plugins/` - Plugin documentation (Core/Enterprise only) +- **Parent menu item**: What should be the parent in the navigation? +- **Weight**: What weight based on sibling pages? + - Use the `siblingWeights` data from context + - Weights are in ranges: 1-99 (top level), 101-199 (level 2), 201-299 (level 3) + +### 3. Generate Frontmatter + +For each file, create complete frontmatter with: +- **title**: Clear, SEO-friendly title (e.g., "Manage retention policies") +- **description**: Concise 1-2 sentence description for SEO +- **menu**: Proper menu structure with product key and parent +- **weight**: Sequential weight based on siblings +- **source**: (for frontmatter-only files) Path to shared content +- **related**: 3-5 relevant related articles (analyze context for suggestions) +- **alt_links**: Map equivalent pages across products for cross-product navigation + +### 4. Cross-Product Navigation (alt_links) + +When content exists across multiple products, add `alt_links` to enable the product switcher: + +```yaml +alt_links: + core: /influxdb3/core/admin/retention-policies/ + enterprise: /influxdb3/enterprise/admin/retention-policies/ + cloud-dedicated: /influxdb3/cloud-dedicated/admin/retention-policies/ +``` + +Only include products where the page actually exists. + +## Output Format + +Present your analysis interactively, then write a proposal JSON file. + +### Interactive Presentation + +``` +I've analyzed your draft about "[TOPIC]". + +πŸ“Š Analysis: +β€’ Topic: [topic description] +β€’ Products: [list of target products] +β€’ Section: [section] ([reasoning]) +β€’ Shared: [Yes/No] ([reasoning]) + +πŸ“ Proposed structure: + +[Show file structure tree] + +Each frontmatter file includes: +β€’ title: "[title]" +β€’ menu parent: "[parent]" +β€’ weight: [weight] ([reasoning about placement]) +β€’ alt_links: [Cross-product navigation] +β€’ related: [Links to related pages] + +Adjustments needed? 
(or say "looks good") +``` + +### Proposal JSON Format + +After confirmation, write to `.tmp/scaffold-proposal.json`: + +```json +{ + "analysis": { + "topic": "Brief topic description", + "targetProducts": ["core", "enterprise", "cloud-dedicated"], + "section": "admin", + "isShared": true, + "reasoning": "Why this structure makes sense" + }, + "files": [ + { + "path": "content/shared/influxdb3-admin/topic-name.md", + "type": "shared-content", + "content": "{{ACTUAL_DRAFT_CONTENT}}" + }, + { + "path": "content/influxdb3/core/admin/topic-name.md", + "type": "frontmatter-only", + "frontmatter": { + "title": "Page Title", + "description": "Page description", + "menu": { + "influxdb3_core": { + "name": "Nav Label", + "parent": "Parent Item" + } + }, + "weight": 205, + "source": "/shared/influxdb3-admin/topic-name.md", + "related": [ + "/influxdb3/core/path/to/related/", + "/influxdb3/core/path/to/another/" + ], + "alt_links": { + "enterprise": "/influxdb3/enterprise/admin/topic-name/", + "cloud-dedicated": "/influxdb3/cloud-dedicated/admin/topic-name/" + } + } + } + ], + "nextSteps": [ + "Review generated frontmatter", + "Test with: npx hugo server", + "Add product-specific variations if needed" + ] +} +``` + +## Important Guidelines + +1. **Use actual draft content**: Copy the `draft.content` from context into shared content files +2. **Analyze existing structure**: Use `structure.existingPaths` and `structure.siblingWeights` from context +3. **Follow conventions**: Reference `conventions` from context for naming and weight levels +4. **Be specific**: Provide concrete reasoning for all decisions +5. **Product menu keys**: Use the pattern `influxdb3_{product}` (e.g., `influxdb3_core`, `influxdb3_enterprise`) +6. **File naming**: Use lowercase with hyphens (e.g., `manage-databases.md`) +7. **Related articles**: Suggest contextually relevant related articles from existing structure +8. **Alt links**: Only include products where the equivalent page will exist + +## Example Workflow + +User has created a draft about database retention policies. You should: + +1. Read `.tmp/scaffold-context.json` +2. Analyze the draft content about retention policies +3. Determine it applies to Core, Enterprise, and Cloud Dedicated +4. Decide it should be shared content in the `admin` section +5. Suggest weight 205 (after database deletion at 204) +6. Generate appropriate frontmatter for each product +7. Present the proposal interactively +8. After user confirms, write `.tmp/scaffold-proposal.json` + +Now, read the context and begin your analysis. 
diff --git a/.gitignore b/.gitignore index 5209786a1..edb903c39 100644 --- a/.gitignore +++ b/.gitignore @@ -28,6 +28,7 @@ package-lock.json test-results.xml /influxdb3cli-build-scripts/content tmp +.tmp # IDE files .vscode/* @@ -42,5 +43,5 @@ tmp .context/* !.context/README.md -# External repos +# External repos .ext/* diff --git a/eslint.config.js b/eslint.config.js index bd99c171f..ba158d375 100644 --- a/eslint.config.js +++ b/eslint.config.js @@ -97,10 +97,12 @@ export default [ // Configuration for Node.js helper scripts { - files: ['helper-scripts/**/*.js'], + files: ['helper-scripts/**/*.js', 'scripts/**/*.js'], languageOptions: { globals: { ...globals.node, + // Claude Code environment globals + Task: 'readonly', // Available when run by Claude Code }, }, rules: { diff --git a/package.json b/package.json index 1f4d274cb..636317b1d 100644 --- a/package.json +++ b/package.json @@ -40,6 +40,9 @@ "vanillajs-datepicker": "^1.3.4" }, "scripts": { + "docs:create": "node scripts/docs-create.js", + "docs:edit": "node scripts/docs-edit.js", + "docs:add-placeholders": "node scripts/add-placeholders.js", "build:pytest:image": "docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest .", "build:agent:instructions": "node ./helper-scripts/build-agent-instructions.js", "build:ts": "tsc --project tsconfig.json --outDir dist", diff --git a/plans/cli-docs-sync/plan.md b/plans/cli-docs-sync/plan.md deleted file mode 100644 index ca1d3afb9..000000000 --- a/plans/cli-docs-sync/plan.md +++ /dev/null @@ -1,79 +0,0 @@ -# Plan: Update InfluxDB 3 CLI Reference Documentation - -## Automation and Process Improvements - -### Immediate Improvements: -1. **Create CLI documentation sync script:** - ```bash - # Script: /Users/ja/Documents/github/docs-v2/scripts/sync-cli-docs.sh - # - Extract help text from influxdb3 CLI at /Users/ja/.influxdb//influxdb3 - # - Compare with existing docs - # - Generate report of differences - # - Auto-update basic command syntax - # - Real-time CLI verification capability established - ``` - -2. **Establish documentation standards:** - - Standardize frontmatter across CLI docs - - Create templates for command documentation - - Define Enterprise vs Core content patterns using Hugo shortcodes - -### Long-term Automation Strategy: -1. **CI/CD Integration:** - - Add GitHub Actions workflow to detect CLI changes - - Auto-generate CLI help extraction on new releases - - Create pull requests for documentation updates - -2. **Release Process Integration:** - - Include CLI documentation review in release checklist - - Link release notes to specific CLI documentation updates - - Automated cross-referencing between release notes and CLI docs - -3. 
**Content Management Improvements:** - - Use Hugo shortcodes for Enterprise-specific content - - Implement version-aware documentation - - Create shared content templates for common CLI patterns - -## Phase 4: Validation and Testing - -### Content accuracy verification: -- βœ… **CLI Access Available**: Direct verification via `influxdb3 --help` commands -- βœ… **Real-time Validation**: All commands and options verified against actual CLI output -- **Process**: Use `influxdb3 [command] --help` to validate documentation accuracy -- **Verification**: Cross-reference documented options with actual CLI behavior - -### Documentation completeness check: -- Ensure all v3.2.0 features are documented -- Verify examples and use cases -- Check internal links and cross-references - -## Suggested Recurring Process - -### Pre-release (during development): -- Monitor CLI changes in pull requests -- Update documentation as features are added -- Maintain CLI help extraction automation - -### At release (when tagging versions): -- Run automated CLI documentation sync -- Review and approve auto-generated updates -- Publish updated documentation - -### Post-release (after release): -- Validate documentation accuracy -- Gather user feedback on CLI documentation -- Plan improvements for next cycle - -## Related Documentation Paths - -### InfluxDB 3 Product Documentation (affects CLI usage examples): -- `/content/influxdb3/core/write-data/influxdb3-cli.md` -- `/content/influxdb3/enterprise/write-data/influxdb3-cli.md` -- `/content/shared/influxdb3-write-guides/influxdb3-cli.md` - -### Admin Documentation (affects retention and license features): -- `/content/influxdb3/core/admin/` -- `/content/influxdb3/enterprise/admin/` -- `/content/influxdb3/enterprise/admin/license.md` - -This plan ensures comprehensive documentation updates for v3.2.0 while establishing sustainable processes for future releases. \ No newline at end of file diff --git a/scripts/README-add-placeholders.md b/scripts/README-add-placeholders.md new file mode 100644 index 000000000..d9c455e78 --- /dev/null +++ b/scripts/README-add-placeholders.md @@ -0,0 +1,108 @@ +# Add Placeholders Script + +Automatically adds placeholder syntax to code blocks and placeholder descriptions in markdown files. + +## What it does + +This script finds UPPERCASE placeholders in code blocks and: + +1. **Adds `{ placeholders="PATTERN1|PATTERN2" }` attribute** to code block fences +2. 
**Wraps placeholder descriptions** with `{{% code-placeholder-key %}}` shortcodes + +## Usage + +### Direct usage + +```bash +# Process a single file +node scripts/add-placeholders.js <file.md> + +# Dry run to preview changes +node scripts/add-placeholders.js <file.md> --dry + +# Example +node scripts/add-placeholders.js content/influxdb3/enterprise/admin/upgrade.md +``` + +### Using npm script + +```bash +# Process a file +yarn docs:add-placeholders <file.md> + +# Dry run +yarn docs:add-placeholders <file.md> --dry +``` + +## Example transformations + +### Before + +````markdown +```bash +influxdb3 query \ + --database SYSTEM_DATABASE \ + --token ADMIN_TOKEN \ + "SELECT * FROM system.version" +``` + +Replace the following: + +- **`SYSTEM_DATABASE`**: The name of your system database +- **`ADMIN_TOKEN`**: An admin token with read permissions +```` + +### After + +````markdown +```bash { placeholders="ADMIN_TOKEN|SYSTEM_DATABASE" } +influxdb3 query \ + --database SYSTEM_DATABASE \ + --token ADMIN_TOKEN \ + "SELECT * FROM system.version" +``` + +Replace the following: + +- {{% code-placeholder-key %}}`SYSTEM_DATABASE`{{% /code-placeholder-key %}}: The name of your system database +- {{% code-placeholder-key %}}`ADMIN_TOKEN`{{% /code-placeholder-key %}}: An admin token with read permissions +```` + +## How it works + +### Placeholder detection + +The script automatically detects UPPERCASE placeholders in code blocks using these rules: + +- **Pattern**: Matches words with 2+ characters, all uppercase, can include underscores +- **Excludes common words**: HTTP verbs (GET, POST), protocols (HTTP, HTTPS), SQL keywords (SELECT, FROM), etc. + +### Code block processing + +1. Finds all code blocks (including indented ones) +2. Extracts UPPERCASE placeholders +3. Adds `{ placeholders="..." }` attribute to the fence line +4. Preserves indentation and language identifiers + +### Description wrapping + +1. Detects "Replace the following:" sections +2. Wraps placeholder descriptions matching `- **`PLACEHOLDER`**: description` +3. Preserves indentation and formatting +4. Skips already-wrapped descriptions + +## Options + +- `--dry` or `-d`: Preview changes without modifying files + +## Notes + +- The script is idempotent - running it multiple times on the same file won't duplicate syntax +- Preserves existing `placeholders` attributes in code blocks +- Works with both indented and non-indented code blocks +- Handles multiple "Replace the following:" sections in a single file + +## Related documentation + +- [DOCS-SHORTCODES.md](../DOCS-SHORTCODES.md) - Complete shortcode reference +- [DOCS-CONTRIBUTING.md](../DOCS-CONTRIBUTING.md) - Placeholder conventions and style guidelines 
diff --git a/scripts/add-placeholders.js b/scripts/add-placeholders.js new file mode 100755 index 000000000..42718bcf3 --- /dev/null +++ b/scripts/add-placeholders.js @@ -0,0 +1,238 @@ +#!/usr/bin/env node + +/** + * Add placeholder syntax to code blocks + * + * This script finds UPPERCASE placeholders in code blocks and: + * 1. Adds `{ placeholders="PATTERN1|PATTERN2" }` attribute to code blocks + * 2. 
Wraps placeholder descriptions with `{{% code-placeholder-key %}}` + * + * Usage: + * node scripts/add-placeholders.js <file.md> [--dry] + * node scripts/add-placeholders.js content/influxdb3/enterprise/admin/upgrade.md + */ + +import { readFileSync, writeFileSync } from 'fs'; +import { parseArgs } from 'node:util'; + +// Parse command-line arguments +const { positionals, values } = parseArgs({ + allowPositionals: true, + options: { + dry: { + type: 'boolean', + short: 'd', + default: false, + }, + }, +}); + +if (positionals.length === 0) { + console.error('Usage: node scripts/add-placeholders.js <file.md> [--dry]'); + console.error( + 'Example: node scripts/add-placeholders.js content/influxdb3/enterprise/admin/upgrade.md' + ); + process.exit(1); +} + +const filePath = positionals[0]; +// --dry and -d are parsed by parseArgs above +const isDryRun = values.dry; + +/** + * Extract UPPERCASE placeholders from a code block + * @param {string} code - Code block content + * @returns {string[]} Array of unique placeholders + */ +function extractPlaceholders(code) { + // Match UPPERCASE words (at least 2 chars, can include underscores) + const placeholderPattern = /\b[A-Z][A-Z0-9_]{1,}\b/g; + const matches = code.match(placeholderPattern) || []; + + // Remove duplicates and common words that aren't placeholders + const excludeWords = new Set([ + 'GET', + 'POST', + 'PUT', + 'DELETE', + 'PATCH', + 'HEAD', + 'OPTIONS', + 'HTTP', + 'HTTPS', + 'URL', + 'API', + 'CLI', + 'JSON', + 'YAML', + 'TOML', + 'SELECT', + 'FROM', + 'WHERE', + 'AND', + 'OR', + 'NOT', + 'NULL', + 'TRUE', + 'FALSE', + 'ERROR', + 'WARNING', + 'INFO', + 'DEBUG', + ]); + + return [...new Set(matches)].filter((word) => !excludeWords.has(word)).sort(); +} + +/** + * Add placeholders attribute to a code block + * @param {string} codeBlock - Code block with fence + * @param {string} indent - Leading whitespace from fence line + * @returns {string} Code block with placeholders attribute + */ +function addPlaceholdersAttribute(codeBlock, indent = '') { + const lines = codeBlock.split('\n'); + const fenceLine = lines[0]; + const codeContent = lines.slice(1, -1).join('\n'); + + // Check if already has placeholders attribute + if (fenceLine.includes('placeholders=')) { + return codeBlock; + } + + // Extract placeholders from code + const placeholders = extractPlaceholders(codeContent); + + if (placeholders.length === 0) { + return codeBlock; + } + + // Extract language from fence (handle indented fences) + const langMatch = fenceLine.match(/^\s*```(\w+)?/); + const lang = langMatch && langMatch[1] ? langMatch[1] : ''; + + // Build new fence line with placeholders attribute + const placeholdersStr = placeholders.join('|'); + const newFenceLine = lang + ? 
`${indent}\`\`\`${lang} { placeholders="${placeholdersStr}" }` + : `${indent}\`\`\` { placeholders="${placeholdersStr}" }`; + + return [newFenceLine, ...lines.slice(1)].join('\n'); +} + +/** + * Wrap placeholder descriptions with code-placeholder-key shortcode + * @param {string} line - Line potentially containing placeholder description + * @returns {string} Line with shortcode wrapper if placeholder found + */ +function wrapPlaceholderDescription(line) { + // Match patterns like "- **`PLACEHOLDER`**: description" or " - **`PLACEHOLDER`**: description" + const pattern = /^(\s*-\s*)\*\*`([A-Z][A-Z0-9_]+)`\*\*(:\s*)/; + const match = line.match(pattern); + + if (!match) { + return line; + } + + // Check if already wrapped + if (line.includes('{{% code-placeholder-key %}}')) { + return line; + } + + const prefix = match[1]; + const placeholder = match[2]; + const suffix = match[3]; + const description = line.substring(match[0].length); + + return `${prefix}{{% code-placeholder-key %}}\`${placeholder}\`{{% /code-placeholder-key %}}${suffix}${description}`; +} + +/** + * Process markdown content + * @param {string} content - Markdown content + * @returns {string} Processed content + */ +function processMarkdown(content) { + const lines = content.split('\n'); + const result = []; + let inCodeBlock = false; + let codeBlockLines = []; + let inReplaceSection = false; + + for (let i = 0; i < lines.length; i++) { + const line = lines[i]; + + // Track "Replace the following:" sections + if (line.trim().match(/^Replace the following:?$/i)) { + inReplaceSection = true; + result.push(line); + continue; + } + + // Exit replace section on non-list-item line (but allow empty lines within list) + if ( + inReplaceSection && + line.trim() !== '' && + !line.trim().startsWith('-') && + !line.match(/^#{1,6}\s/) + ) { + inReplaceSection = false; + } + + // Handle code blocks (including indented) + if (line.trim().startsWith('```')) { + if (!inCodeBlock) { + // Start of code block + inCodeBlock = true; + codeBlockLines = [line]; + } else { + // End of code block + codeBlockLines.push(line); + const codeBlock = codeBlockLines.join('\n'); + const indent = line.match(/^(\s*)/)[1]; + const processedBlock = addPlaceholdersAttribute(codeBlock, indent); + result.push(processedBlock); + inCodeBlock = false; + codeBlockLines = []; + } + } else if (inCodeBlock) { + // Inside code block + codeBlockLines.push(line); + } else if (inReplaceSection) { + // Process placeholder descriptions + result.push(wrapPlaceholderDescription(line)); + } else { + // Regular line + result.push(line); + } + } + + return result.join('\n'); +} + +/** + * Main function + */ +function main() { + try { + // Read file + const content = readFileSync(filePath, 'utf-8'); + + // Process content + const processedContent = processMarkdown(content); + + if (isDryRun) { + console.log('=== DRY RUN - Changes that would be made ===\n'); + console.log(processedContent); + } else { + // Write back to file + writeFileSync(filePath, processedContent, 'utf-8'); + console.log(`βœ“ Updated ${filePath}`); + console.log('Added placeholder syntax to code blocks and descriptions'); + } + } catch (error) { + console.error(`Error: ${error.message}`); + process.exit(1); + } +} + +main(); diff --git a/scripts/docs-create.js b/scripts/docs-create.js new file mode 100644 index 000000000..a5fb91567 --- /dev/null +++ b/scripts/docs-create.js @@ -0,0 +1,1002 @@ +#!/usr/bin/env node + +/** + * Documentation scaffolding tool + * Prepares context for Claude to analyze and generates 
file structure + * + * NOTE: This script uses the Task() function which is only available when + * executed by Claude Code. The Task() function should be globally available + * in that environment. + */ + +import { parseArgs } from 'node:util'; +import process from 'node:process'; +import { join, dirname } from 'path'; +import { fileURLToPath } from 'url'; +import { existsSync, readFileSync, writeFileSync } from 'fs'; +import yaml from 'js-yaml'; +import { + prepareContext, + executeProposal, + validateProposal, + analyzeURLs, + loadProducts, + analyzeStructure, +} from './lib/content-scaffolding.js'; +import { writeJson, readJson, fileExists } from './lib/file-operations.js'; +import { parseMultipleURLs } from './lib/url-parser.js'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = dirname(__filename); + +// Repository root +const REPO_ROOT = join(__dirname, '..'); + +// Temp directory for context and proposal +const TMP_DIR = join(REPO_ROOT, '.tmp'); +const CONTEXT_FILE = join(TMP_DIR, 'scaffold-context.json'); +const PROPOSAL_FILE = join(TMP_DIR, 'scaffold-proposal.yml'); + +// Colors for console output +const colors = { + reset: '\x1b[0m', + bright: '\x1b[1m', + green: '\x1b[32m', + yellow: '\x1b[33m', + blue: '\x1b[34m', + red: '\x1b[31m', + cyan: '\x1b[36m', +}; + +/** + * Print colored output + */ +function log(message, color = 'reset') { + console.log(`${colors[color]}${message}${colors.reset}`); +} + +/** + * Prompt user for input (works in TTY and non-TTY environments) + */ +async function promptUser(question) { + // For non-TTY environments, return empty string + if (!process.stdin.isTTY) { + return ''; + } + + const readline = await import('readline'); + const rl = readline.createInterface({ + input: process.stdin, + output: process.stdout, + }); + + return new Promise((resolve) => { + rl.question(question, (answer) => { + rl.close(); + resolve(answer.trim()); + }); + }); +} + +/** + * Print section divider + */ +function divider() { + log('━'.repeat(70), 'cyan'); +} + +/** + * Parse command line arguments + */ +function parseArguments() { + const { values, positionals } = parseArgs({ + options: { + draft: { type: 'string' }, + from: { type: 'string' }, + url: { type: 'string', multiple: true }, + urls: { type: 'string' }, + products: { type: 'string' }, + ai: { type: 'string', default: 'claude' }, + execute: { type: 'boolean', default: false }, + 'context-only': { type: 'boolean', default: false }, + proposal: { type: 'string' }, + 'dry-run': { type: 'boolean', default: false }, + yes: { type: 'boolean', default: false }, + help: { type: 'boolean', default: false }, + }, + allowPositionals: true, + }); + + // First positional argument is treated as draft path + if (positionals.length > 0 && !values.draft && !values.from) { + values.draft = positionals[0]; + } + + // --from is an alias for --draft + if (values.from && !values.draft) { + values.draft = values.from; + } + + // Normalize URLs into array + if (values.urls && !values.url) { + // --urls provides comma-separated list + values.url = values.urls.split(',').map((u) => u.trim()); + } else if (values.urls && values.url) { + // Combine --url and --urls + const urlsArray = values.urls.split(',').map((u) => u.trim()); + values.url = [ + ...(Array.isArray(values.url) ? 
values.url : [values.url]),
+      ...urlsArray,
+    ];
+  }
+
+  return values;
+}
+
+/**
+ * Print usage information
+ */
+function printUsage() {
+  console.log(`
+${colors.bright}Documentation Content Scaffolding${colors.reset}
+
+${colors.bright}Usage:${colors.reset}
+  yarn docs:create <draft-path>                 Create from draft
+  yarn docs:create --url <url> --draft <path>   Create at URL with draft content
+
+${colors.bright}Options:${colors.reset}
+  <draft-path>        Path to draft markdown file (positional argument)
+  --draft <path>      Path to draft markdown file
+  --from <path>       Alias for --draft
+  --url <url>         Documentation URL for new content location
+  --products <list>   Comma-separated product names to target
+  --context-only      Stop after context preparation
+                      (for non-Claude tools)
+  --proposal <path>   Import and execute proposal from JSON file
+  --dry-run           Show what would be created without creating
+  --yes               Skip confirmation prompt
+  --help              Show this help message
+
+${colors.bright}Workflow (Create from draft):${colors.reset}
+  1. Create a draft markdown file with your content
+  2. Run: yarn docs:create drafts/new-feature.md
+  3. Script runs all agents automatically
+  4. Review and confirm to create files
+
+${colors.bright}Workflow (Create at specific URL):${colors.reset}
+  1. Create draft: vim drafts/new-feature.md
+  2. Run: yarn docs:create \\
+       --url https://docs.influxdata.com/influxdb3/core/admin/new-feature/ \\
+       --draft drafts/new-feature.md
+  3. Script determines structure from URL and uses draft content
+  4. Review and confirm to create files
+
+${colors.bright}Workflow (Manual - for non-Claude tools):${colors.reset}
+  1. Prepare context:
+       yarn docs:create --context-only drafts/new-feature.md
+  2. Run your AI tool with templates from scripts/templates/
+  3. Save proposal to .tmp/scaffold-proposal.json
+  4. Execute:
+       yarn docs:create --proposal .tmp/scaffold-proposal.json
+
+${colors.bright}Examples:${colors.reset}
+  # Create from draft (AI determines location)
+  yarn docs:create drafts/new-feature.md
+
+  # Create at specific URL with draft content
+  yarn docs:create --url /influxdb3/core/admin/new-feature/ \\
+    --draft drafts/new-feature.md
+
+  # Preview changes
+  yarn docs:create --draft drafts/new-feature.md --dry-run
+
+${colors.bright}Note:${colors.reset}
+  To edit existing pages, use: yarn docs:edit <url>
+`);
+}
+
+/**
+ * Phase 1a: Prepare context from URLs
+ */
+async function prepareURLPhase(urls, draftPath, options) {
+  log('\nπŸ” Analyzing URLs and finding files...', 'bright');
+
+  try {
+    // Parse URLs
+    const parsedURLs = parseMultipleURLs(urls);
+    log(`\nβœ“ Parsed ${parsedURLs.length} URL(s)`, 'green');
+
+    // Analyze URLs and find files
+    const urlAnalysis = analyzeURLs(parsedURLs);
+
+    // Print summary
+    for (const result of urlAnalysis) {
+      log(`\n  URL: ${result.url}`);
+      log(`  Product: ${result.parsed.product} (${result.parsed.namespace})`);
+      if (result.exists) {
+        log(`  βœ“ Found: ${result.files.main}`, 'green');
+        if (result.files.isShared) {
+          log(`  βœ“ Shared content: ${result.files.sharedSource}`, 'cyan');
+          log(`  βœ“ Found ${result.files.variants.length} variant(s)`, 'cyan');
+          for (const variant of result.files.variants) {
+            log(`    - ${variant}`, 'cyan');
+          }
+        }
+      } else {
+        log('  βœ— Page does not exist (will create)', 'yellow');
+        log(`  β†’ Will create at: ${result.files.main}`, 'yellow');
+      }
+    }
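+
+    // Determine mode. A sketch of the rule used here: the run counts as an
+    // 'edit' only when every requested URL resolved to an existing file;
+    // any missing page switches the whole run to 'create'.
+    const mode = urlAnalysis.every((r) => r.exists) ?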
'edit' : 'create'; + log(`\nβœ“ Mode: ${mode}`, 'green'); + + // Load existing content if editing + const existingContent = {}; + if (mode === 'edit') { + for (const result of urlAnalysis) { + if (result.exists) { + const fullPath = join(REPO_ROOT, result.files.main); + const content = readFileSync(fullPath, 'utf8'); + existingContent[result.files.main] = content; + + // Also load shared source if exists + if (result.files.isShared && result.files.sharedSource) { + const sharedPath = join( + REPO_ROOT, + `content${result.files.sharedSource}` + ); + if (existsSync(sharedPath)) { + const sharedContent = readFileSync(sharedPath, 'utf8'); + existingContent[`content${result.files.sharedSource}`] = + sharedContent; + } + } + } + } + } + + // Build context (include URL analysis) + let context = null; + if (draftPath) { + // Use draft content if provided + context = prepareContext(draftPath); + } else { + // Minimal context for editing existing pages + const products = loadProducts(); + context = { + draft: { + path: null, + content: null, + existingFrontmatter: {}, + }, + products, + productHints: { + mentioned: [], + suggested: [], + }, + versionInfo: { + version: parsedURLs[0].namespace === 'influxdb3' ? '3.x' : '2.x', + tools: [], + apis: [], + }, + structure: analyzeStructure(), + conventions: { + sharedContentDir: 'content/shared/', + menuKeyPattern: '{namespace}_{product}', + weightLevels: { + description: 'Weight ranges by level', + level1: '1-99 (top-level pages)', + level2: '101-199 (section landing pages)', + level3: '201-299 (detail pages)', + level4: '301-399 (sub-detail pages)', + }, + namingRules: { + files: 'Use lowercase with hyphens (e.g., manage-databases.md)', + directories: 'Use lowercase with hyphens', + shared: 'Shared content in /content/shared/', + }, + testing: { + codeblocks: + 'Use pytest-codeblocks annotations for testable examples', + docker: 'Use compose.yaml services for testing code samples', + commands: '', + }, + }, + }; + } + + // Add URL analysis to context + context.mode = mode; + context.urls = urlAnalysis; + context.existingContent = existingContent; + + // Write context to temp file + writeJson(CONTEXT_FILE, context); + + log( + `\nβœ“ Prepared context β†’ ${CONTEXT_FILE.replace(REPO_ROOT, '.')}`, + 'green' + ); + + // If context-only mode, stop here + if (options['context-only']) { + log(''); + divider(); + log('Context preparation complete!', 'bright'); + log(''); + log('Next steps for manual workflow:', 'cyan'); + log('1. Use your AI tool with prompts from scripts/templates/'); + log( + '2. Generate proposal JSON matching ' + + 'scripts/schemas/scaffold-proposal.schema.json' + ); + log('3. Save to .tmp/scaffold-proposal.json'); + log('4. 
Run: yarn docs:create --proposal .tmp/scaffold-proposal.json'); + divider(); + log(''); + return null; + } + + return context; + } catch (error) { + log(`\nβœ— Error analyzing URLs: ${error.message}`, 'red'); + if (error.stack) { + console.error(error.stack); + } + process.exit(1); + } +} + +/** + * Phase 1b: Prepare context from draft + */ +async function preparePhase(draftPath, options) { + log('\nπŸ” Analyzing draft and repository structure...', 'bright'); + + // Validate draft exists + if (!fileExists(draftPath)) { + log(`βœ— Draft file not found: ${draftPath}`, 'red'); + process.exit(1); + } + + try { + // Prepare context + const context = prepareContext(draftPath); + + // Write context to temp file + writeJson(CONTEXT_FILE, context); + + // Print summary + log( + '\nβœ“ Loaded draft content ' + + `(${context.draft.content.split('\n').length} lines)`, + 'green' + ); + log( + `βœ“ Analyzed ${Object.keys(context.products).length} products ` + + 'from data/products.yml', + 'green' + ); + log( + `βœ“ Found ${context.structure.existingPaths.length} existing pages`, + 'green' + ); + log( + `βœ“ Prepared context β†’ ${CONTEXT_FILE.replace(REPO_ROOT, '.')}`, + 'green' + ); + + // If context-only mode, stop here + if (options['context-only']) { + log(''); + divider(); + log('Context preparation complete!', 'bright'); + log(''); + log('Next steps for manual workflow:', 'cyan'); + log('1. Use your AI tool with prompts from scripts/templates/'); + log( + '2. Generate proposal JSON matching ' + + 'scripts/schemas/scaffold-proposal.schema.json' + ); + log('3. Save to .tmp/scaffold-proposal.json'); + log('4. Run: yarn docs:create --proposal .tmp/scaffold-proposal.json'); + divider(); + log(''); + return null; + } + + return context; + } catch (error) { + log(`\nβœ— Error preparing context: ${error.message}`, 'red'); + if (error.stack) { + console.error(error.stack); + } + process.exit(1); + } +} + +/** + * Select target products (interactive or from flags) + */ +async function selectProducts(context, options) { + const detected = context.productHints?.mentioned || []; + + // Expand products with multiple versions into separate entries + const allProducts = []; + const productMap = {}; // Maps display name to product key + + for (const [key, product] of Object.entries(context.products)) { + if (product.versions && product.versions.length > 1) { + // Multi-version product: create separate entries for each version + product.versions.forEach((version) => { + const displayName = `${product.name} ${version}`; + allProducts.push(displayName); + productMap[displayName] = key; + }); + } else { + // Single version or no version info: use product name as-is + allProducts.push(product.name); + productMap[product.name] = key; + } + } + + // Case 1: Explicit flag provided + if (options.products) { + const requested = options.products.split(',').map((p) => p.trim()); + const invalid = requested.filter((p) => !allProducts.includes(p)); + + if (invalid.length > 0) { + log( + `\nβœ— Invalid products: ${invalid.join(', ')}\n` + + `Valid products: ${allProducts.join(', ')}`, + 'red' + ); + process.exit(1); + } + + log( + `βœ“ Using products from --products flag: ${requested.join(', ')}`, + 'green' + ); + return requested; + } + + // Case 2: Unambiguous (single product detected) + if (detected.length === 1) { + log(`βœ“ Auto-selected product: ${detected[0]}`, 'green'); + return detected; + } + + // Case 3: URL-based (extract from URL) + if (context.urls?.length > 0) { + const urlPath = context.urls[0].url; + // Extract 
product from URL like /influxdb3/core/... or /influxdb/cloud/... + const match = urlPath.match(/^\/(influxdb3?\/.+?)\//); + if (match) { + const productPath = match[1].replace(/\//g, '-'); + const product = allProducts.find((p) => p.includes(productPath)); + if (product) { + log(`βœ“ Product from URL: ${product}`, 'green'); + return [product]; + } + } + } + + // Case 4: Ambiguous or none detected - show interactive menu + log('\nπŸ“¦ Select target products:\n', 'bright'); + allProducts.forEach((p, i) => { + const mark = detected.includes(p) ? 'βœ“' : ' '; + log(` ${i + 1}. [${mark}] ${p}`, 'cyan'); + }); + + const answer = await promptUser( + '\nEnter numbers (comma-separated, e.g., 1,3,5): ' + ); + + if (!answer) { + log('βœ— No products selected', 'red'); + process.exit(1); + } + + const indices = answer + .split(',') + .map((s) => parseInt(s.trim()) - 1) + .filter((i) => i >= 0 && i < allProducts.length); + + if (indices.length === 0) { + log('βœ— No valid products selected', 'red'); + process.exit(1); + } + + const selected = indices.map((i) => allProducts[i]); + log(`\nβœ“ Selected products: ${selected.join(', ')}`, 'green'); + return selected; +} + +/** + * Run single content generator agent with direct file generation (Claude Code) + */ +async function runAgentsWithTaskTool( + context, + selectedProducts, + mode, + isURLBased, + hasExistingContent +) { + // Build context description + const contextDesc = ` +Mode: ${mode} +${isURLBased ? `URLs: ${context.urls.length} URL(s) analyzed` : 'Draft-based workflow'} +${hasExistingContent ? `Existing content: ${Object.keys(context.existingContent).length} file(s)` : 'Creating new content'} +Target Products: ${selectedProducts.join(', ')} +`; + + log(` ${contextDesc.trim().split('\n').join('\n ')}\n`, 'cyan'); + + log('πŸ€– Generating documentation files directly...', 'bright'); + + // Use the same prompt as manual workflow for consistency + const prompt = generateClaudePrompt( + context, + selectedProducts, + mode, + isURLBased, + hasExistingContent + ); + + await Task({ + subagent_type: 'general-purpose', + description: + mode === 'edit' + ? 'Update documentation files' + : 'Generate documentation files', + prompt: prompt, + }); + + log(' βœ“ Files generated\n', 'green'); + log( + `\nβœ“ Summary written to ${PROPOSAL_FILE.replace(REPO_ROOT, '.')}`, + 'green' + ); +} + +/** + * Generate simplified Claude prompt for direct file generation + */ +function generateClaudePrompt( + context, + selectedProducts, + mode, + isURLBased, + hasExistingContent +) { + const prompt = `You are an expert InfluxData documentation writer. + +**Context File**: Read from \`.tmp/scaffold-context.json\` +**Target Products**: Use \`context.selectedProducts\` field (${selectedProducts.join(', ')}) +**Mode**: ${mode === 'edit' ? 'Edit existing content' : 'Create new documentation'} +${isURLBased ? `**URLs**: ${context.urls.map((u) => u.url).join(', ')}` : ''} + +**Your Task**: Generate complete documentation files directly (no proposal step). + +**Important**: The context file contains all products from data/products.yml, but you should ONLY create documentation for the products listed in \`context.selectedProducts\`. + +**Workflow**: +1. Read and analyze \`.tmp/scaffold-context.json\` +2. ${mode === 'edit' ? 'Review existing content and plan improvements' : 'Analyze draft content to determine topic, audience, and structure'} +3. ${isURLBased ? 'Use URL paths to determine file locations' : 'Determine appropriate section (admin, write-data, query-data, etc.)'} +4. 
Decide if content should be shared across products +5. **Generate and write markdown files directly** using the Write tool +6. Create a summary YAML file at \`.tmp/scaffold-proposal.yml\` + +**Content Requirements**: +- **Style**: Active voice, present tense, second person ("you") +- **Formatting**: Semantic line feeds (one sentence per line) +- **Headings**: Use h2-h6 only (h1 comes from title) +- **Code Examples**: + - Use ${context.versionInfo?.tools?.join(' or ') || 'influxdb3, influx, or influxctl'} CLI + - Include pytest-codeblocks annotations + - Format to fit within 80 characters + - Use long options (--option vs -o) + - Show expected output +- **Links**: Descriptive link text, no "click here" +- **Placeholders**: Use UPPERCASE for values users need to replace (e.g., DATABASE_NAME, AUTH_TOKEN) + +**File Structure**: +${ + selectedProducts.length > 1 || context.productHints?.isShared + ? `- Content applies to multiple products: + - Create ONE shared content file in content/shared/ + - Create frontmatter-only files for each product referencing it` + : `- Product-specific content: + - Create files directly in product directories` +} + +**Validation Checks** (run before writing files): +1. **Path validation**: Lowercase, hyphens only (no underscores in filenames) +2. **Weight conflicts**: Check sibling pages, choose unused weight 101-199 +3. **Frontmatter completeness**: All required fields present +4. **Shared content**: If multi-product, verify source paths are correct +5. **Menu structure**: Parent sections exist in product menu hierarchy + +**File Generation**: +For each file you need to create: + +1. **Check if file exists**: Use Read tool first (ignore errors if not found) +2. **Generate frontmatter** in YAML format with proper nesting: + \`\`\`yaml + --- + title: Page Title + description: SEO-friendly description under 160 characters + menu: + product_version: + name: Nav Name + parent: section + weight: 101 + related: + - /related/page/ + alt_links: + other_product: /other/path/ + --- + \`\`\` + +3. **Write full markdown content** with: + - Frontmatter (YAML block) + - Complete article content + - Code examples with proper annotations + - Proper internal links + +4. **Use Write tool**: Write the complete file + - For new files: just use Write + - For existing files: Read first, then Write + +**Summary File**: After generating all files, create \`.tmp/scaffold-proposal.yml\`: + +\`\`\`yaml +topic: Brief description of what was created +targetProducts: + - ${selectedProducts.join('\n - ')} +section: admin | write-data | query-data | get-started | reference +isShared: ${selectedProducts.length > 1} +filesCreated: + - path: content/path/to/file.md + type: shared-content | frontmatter-only | product-specific + status: created | updated +validationResults: + pathsValid: true | false + weightsValid: true | false + frontmatterComplete: true | false + issues: [] +nextSteps: + - Review generated files + - Test code examples + - Check internal links +\`\`\` + +**Important**: +- Use the Write tool for ALL files (markdown and YAML summary) +- For existing files, use Read first, then Write to overwrite +- Generate COMPLETE content, not stubs or placeholders +- Run validation checks before writing each file + +Begin now. Generate the files directly. 
+`; + return prompt; +} + +/** + * Phase 2: Run AI agent analysis + * Orchestrates multiple specialized agents to analyze draft and + * generate proposal + */ +async function runAgentAnalysis(context, options) { + log('πŸ“‹ Phase 2: AI Analysis\n', 'cyan'); + + // Detect environment and determine workflow + const isClaudeCodeEnv = typeof Task !== 'undefined'; + const aiMode = options.ai || 'claude'; + const useTaskTool = isClaudeCodeEnv && aiMode === 'claude'; + + if (useTaskTool) { + log( + 'πŸ€– Detected Claude Code environment - running agents automatically\n', + 'green' + ); + } else if (aiMode === 'claude') { + log( + 'πŸ“‹ Claude Code environment not detected - will output prompt for copy-paste\n', + 'cyan' + ); + } + + try { + const mode = context.mode || 'create'; + const isURLBased = context.urls && context.urls.length > 0; + const hasExistingContent = + context.existingContent && + Object.keys(context.existingContent).length > 0; + + // Select target products + const selectedProducts = await selectProducts(context, options); + + // Add selectedProducts to context and update the context file + context.selectedProducts = selectedProducts; + writeJson(CONTEXT_FILE, context); + log( + `βœ“ Updated context with selected products: ${selectedProducts.join(', ')}`, + 'green' + ); + + // Hybrid workflow: automatic (Task tool) vs manual (prompt output) + if (useTaskTool) { + // Automatic workflow using Task tool + await runAgentsWithTaskTool( + context, + selectedProducts, + mode, + isURLBased, + hasExistingContent + ); + } else { + // Manual workflow: save consolidated prompt to file + const consolidatedPrompt = generateClaudePrompt( + context, + selectedProducts, + mode, + isURLBased, + hasExistingContent + ); + + // Generate filename from draft or topic + const draftName = context.draft?.path + ? context.draft.path.split('/').pop().replace(/\.md$/, '') + : 'untitled'; + const sanitizedName = draftName + .toLowerCase() + .replace(/\s+/g, '-') + .replace(/[^a-z0-9-]/g, ''); + + const promptDir = join(REPO_ROOT, '.context/drafts'); + const promptFile = join(promptDir, `${sanitizedName}-ai-prompt.md`); + + // Ensure directory exists + if (!existsSync(promptDir)) { + const fs = await import('fs'); + fs.mkdirSync(promptDir, { recursive: true }); + } + + // Write prompt to file + const fs = await import('fs'); + fs.writeFileSync(promptFile, consolidatedPrompt, 'utf8'); + + log('\nβœ… AI prompt saved!', 'green'); + log(`\nπŸ“„ File: ${promptFile.replace(REPO_ROOT, '.')}\n`, 'cyan'); + + log('πŸ“ Next steps:', 'bright'); + log(' 1. Open the prompt file in your editor', 'yellow'); + log(' 2. Copy the entire content', 'yellow'); + log(' 3. Paste into your AI tool (Claude, ChatGPT, etc.)', 'yellow'); + log( + ' 4. The AI will generate documentation files directly in content/', + 'yellow' + ); + log(' 5. Review the generated files and iterate as needed', 'yellow'); + log( + ` 6. 
Check the summary at ${PROPOSAL_FILE.replace(REPO_ROOT, '.')}`,
+      'yellow'
+    );
+
+    process.exit(0);
+    }
+  } catch (error) {
+    log(`\nβœ— Error during AI analysis: ${error.message}`, 'red');
+    if (error.stack) {
+      console.error(error.stack);
+    }
+    process.exit(1);
+  }
+}
+
+/**
+ * Phase 3: Execute proposal
+ */
+async function executePhase(options) {
+  log('\nπŸ“ Phase 3: Executing Proposal\n', 'bright');
+
+  // Auto-detect proposal if not specified
+  let proposalPath = options.proposal || PROPOSAL_FILE;
+
+  if (!fileExists(proposalPath)) {
+    log(`\nβœ— Proposal file not found: ${proposalPath}`, 'red');
+    log(
+      '\nRun yarn docs:create --draft <path> first to generate proposal',
+      'yellow'
+    );
+    process.exit(1);
+  }
+
+  // Read and validate proposal (the summary file may be YAML or JSON)
+  const proposal =
+    proposalPath.endsWith('.yml') || proposalPath.endsWith('.yaml')
+      ? yaml.load(readFileSync(proposalPath, 'utf-8'))
+      : readJson(proposalPath);
+
+  const validation = validateProposal(proposal);
+  if (!validation.valid) {
+    log(`\nβœ— Invalid proposal: ${validation.errors.join('; ')}`, 'red');
+    process.exit(1);
+  }
+
+  // Show preview
+  log('\nπŸ“‹ Proposal Summary:\n', 'cyan');
+  log(`  Topic: ${proposal.analysis.topic}`, 'cyan');
+  log(`  Products: ${proposal.analysis.targetProducts.join(', ')}`, 'cyan');
+  log(`  Section: ${proposal.analysis.section}`, 'cyan');
+  log(`  Shared: ${proposal.analysis.isShared ? 'Yes' : 'No'}`, 'cyan');
+
+  if (proposal.analysis.styleReview?.issues?.length > 0) {
+    log(
+      `\n⚠️ Style Issues (${proposal.analysis.styleReview.issues.length}):`,
+      'yellow'
+    );
+    proposal.analysis.styleReview.issues.forEach((issue) => {
+      log(`  β€’ ${issue}`, 'yellow');
+    });
+  }
+
+  log('\nπŸ“ Files to create:\n', 'bright');
+  proposal.files.forEach((file) => {
+    const icon = file.type === 'shared-content' ? 'πŸ“„' : 'πŸ“‹';
+    const size = file.content ? ` (${file.content.length} chars)` : '';
+    log(`  ${icon} ${file.path}${size}`, 'cyan');
+  });
+
+  // Dry run mode
+  if (options['dry-run']) {
+    log('\nβœ“ Dry run complete (no files created)', 'green');
+    return;
+  }
+
+  // Confirm unless --yes flag
+  if (!options.yes) {
+    const answer = await promptUser('\nProceed with creating files? (y/n): ');
+
+    if (answer.toLowerCase() !== 'y') {
+      log('βœ— Cancelled by user', 'yellow');
+      process.exit(0);
+    }
+  }
+
+  // Execute proposal
+  log('\nπŸ“ Creating files...', 'bright');
+  const result = executeProposal(proposal);
+
+  // Report results
+  if (result.created.length > 0) {
+    log('\nβœ… Created files:', 'green');
+    result.created.forEach((file) => {
+      log(`  βœ“ ${file}`, 'green');
+    });
+  }
+
+  if (result.errors.length > 0) {
+    log('\nβœ— Errors:', 'red');
+    result.errors.forEach((err) => log(`  β€’ ${err}`, 'red'));
+  }
+
+  // Print next steps
+  if (result.created.length > 0) {
+    log('\nπŸŽ‰ Done! Next steps:', 'bright');
+    log('  1. Review generated frontmatter and content');
+    log('  2. Test locally: npx hugo server');
+    log(
+      `  3. Test links: yarn test:links ${result.created[0].replace(/\/[^/]+$/, '/')}**/*.md`
+    );
+    log('  4. Commit changes: git add content/ && git commit');
+  }
+
+  if (result.errors.length > 0) {
+    process.exit(1);
+  }
+}
+
+/**
+ * Main entry point
+ */
+async function main() {
+  const options = parseArguments();
+
+  // Show help
+  if (options.help) {
+    printUsage();
+    process.exit(0);
+  }
+
+  // Determine workflow
+  if (options.url && options.url.length > 0) {
+    // URL-based workflow requires draft content
+    if (!options.draft) {
+      log('\nβœ— Error: --url requires --draft <path>', 'red');
+      log('The --url option specifies WHERE to create content.', 'yellow');
+      log(
+        'You must provide --draft to specify WHAT content to create.',
+        'yellow'
+      );
+      log('\nExample:', 'cyan');
+      log(
+        '  yarn docs:create --url /influxdb3/core/admin/new-feature/ \\',
+        'cyan'
+      );
+      log('    --draft drafts/new-feature.md', 'cyan');
+      log('\nTo edit an existing page, use: yarn docs:edit <url>', 'cyan');
+      process.exit(1);
+    }
+
+    const context = await prepareURLPhase(options.url, options.draft, options);
+
+    if (options['context-only']) {
+      // Stop after context preparation
+      process.exit(0);
+    }
+
+    // Continue with AI analysis (Phase 2)
+    log('\nπŸ€– Running AI analysis with specialized agents...\n', 'bright');
+    await runAgentAnalysis(context, options);
+
+    // Execute proposal (Phase 3)
+    await executePhase(options);
+  } else if (options.draft) {
+    // Draft-based workflow
+    const context = await preparePhase(options.draft, options);
+
+    if (options['context-only']) {
+      // Stop after context preparation
+      process.exit(0);
+    }
+
+    // Continue with AI analysis (Phase 2)
+    log('\nπŸ€– Running AI analysis with specialized agents...\n', 'bright');
+    await runAgentAnalysis(context, options);
+
+    // Execute proposal (Phase 3)
+    await executePhase(options);
+  } else if (options.proposal) {
+    // Import and execute external proposal
+    if (!fileExists(options.proposal)) {
+      log(`\nβœ— Proposal file not found: ${options.proposal}`, 'red');
+      process.exit(1);
+    }
+    // Copy proposal to expected location
+    const proposal = readJson(options.proposal);
+    writeJson(PROPOSAL_FILE, proposal);
+    await executePhase(options);
+  } else if (options.execute || options['dry-run']) {
+    // Legacy: Execute proposal (deprecated)
+    log(
+      '\n⚠ Warning: --execute is deprecated. Use --proposal instead.',
+      'yellow'
+    );
+    await executePhase(options);
+  } else {
+    // No valid options provided
+    log(
+      'Error: Must specify a docs URL (new or existing), a draft path, or --proposal',
+      'red'
+    );
+    log('Run with --help for usage information\n');
+    process.exit(1);
+  }
+}
+
+// Run if called directly
+if (import.meta.url === `file://${process.argv[1]}`) {
+  main().catch((error) => {
+    log(`\nFatal error: ${error.message}`, 'red');
+    console.error(error.stack);
+    process.exit(1);
+  });
+}
+
+export { preparePhase, executePhase };
diff --git a/scripts/docs-edit.js b/scripts/docs-edit.js
new file mode 100755
index 000000000..ec85e73e9
--- /dev/null
+++ b/scripts/docs-edit.js
@@ -0,0 +1,249 @@
+#!/usr/bin/env node
+
+/**
+ * Documentation file opener
+ * Opens existing documentation pages in your default editor
+ *
+ * Usage:
+ *   yarn docs:edit <url>
+ *   yarn docs:edit https://docs.influxdata.com/influxdb3/core/admin/databases/
+ *   yarn docs:edit /influxdb3/core/admin/databases/
+ */
+
+import { parseArgs } from 'node:util';
+import process from 'node:process';
+import { join, dirname } from 'path';
+import { fileURLToPath } from 'url';
+import { existsSync, readFileSync } from 'fs';
+import { spawn } from 'child_process';
+import { parseDocumentationURL, urlToFilePaths } from './lib/url-parser.js';
+
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = dirname(__filename);
+
+// Repository root
+const REPO_ROOT = join(__dirname, '..');
+
+// Colors for console output
+const colors = {
+  reset: '\x1b[0m',
+  bright: '\x1b[1m',
+  green: '\x1b[32m',
+  yellow: '\x1b[33m',
+  blue: '\x1b[34m',
+  red: '\x1b[31m',
+  cyan: '\x1b[36m',
+};
+
+/**
+ * Print colored output
+ */
+function log(message, color = 'reset') {
+  console.log(`${colors[color]}${message}${colors.reset}`);
+}
+
+/**
+ * Parse command line arguments
+ */
+function parseArguments() {
+  const { values, positionals } = parseArgs({
+    options: {
+      help: { type: 'boolean', default: false },
+      list: { type: 'boolean', default: false },
+    },
+    allowPositionals: true,
+  });
+
+  // First positional argument is the URL
+  if (positionals.length > 0 && !values.url) {
+    values.url = positionals[0];
+  }
+
+  return values;
+}
+
+/**
+ * Print usage information
+ */
+function printUsage() {
+  console.log(`
+${colors.bright}Documentation File Opener${colors.reset}
+
+${colors.bright}Usage:${colors.reset}
+  yarn docs:edit <url>          Open page in editor
+  yarn docs:edit --list <url>   List matching files without opening
+
+${colors.bright}Arguments:${colors.reset}
+  <url>    Documentation URL or path
+
+${colors.bright}Options:${colors.reset}
+  --list   List matching files without opening
+  --help   Show this help message
+
+${colors.bright}Examples:${colors.reset}
+  # Open with full URL
+  yarn docs:edit https://docs.influxdata.com/influxdb3/core/admin/databases/
+
+  # Open with path only
+  yarn docs:edit /influxdb3/core/admin/databases/
+
+  # List files without opening
+  yarn docs:edit --list /influxdb3/core/admin/databases/
+
+${colors.bright}Notes:${colors.reset}
+  - Opens files in your default editor (set via EDITOR environment variable)
+  - If multiple files exist (e.g., shared content variants), opens all of them
+  - Falls back to VS Code if EDITOR is not set
+`);
+}
+
+/**
+ * Find matching files for a URL
+ */
+function findFiles(url) {
+  try {
+    // Parse URL
+    const parsed = parseDocumentationURL(url);
+    log(`\nπŸ” Analyzing URL: ${url}`, 'bright');
+    log(`   Product: ${parsed.namespace}/${parsed.product || 'N/A'}`, 'cyan');
+    log(`   Section: 
${parsed.section || 'N/A'}`, 'cyan'); + + // Get potential file paths + const potentialPaths = urlToFilePaths(parsed); + const foundFiles = []; + + for (const relativePath of potentialPaths) { + const fullPath = join(REPO_ROOT, relativePath); + if (existsSync(fullPath)) { + foundFiles.push(relativePath); + } + } + + return { parsed, foundFiles }; + } catch (error) { + log(`\nβœ— Error parsing URL: ${error.message}`, 'red'); + process.exit(1); + } +} + +/** + * Check if file uses shared content + */ +function checkSharedContent(filePath) { + const fullPath = join(REPO_ROOT, filePath); + + if (!existsSync(fullPath)) { + return null; + } + + const content = readFileSync(fullPath, 'utf8'); + + // Check for source: frontmatter + const sourceMatch = content.match(/^source:\s*(.+)$/m); + if (sourceMatch) { + const sourcePath = sourceMatch[1].trim(); + return `content${sourcePath}`; + } + + return null; +} + +/** + * Open files in editor + */ +function openInEditor(files) { + // Determine editor + const editor = process.env.EDITOR || 'code'; + + log(`\nπŸ“ Opening ${files.length} file(s) in ${editor}...`, 'bright'); + + // Convert to absolute paths + const absolutePaths = files.map((f) => join(REPO_ROOT, f)); + + // Spawn editor process + const child = spawn(editor, absolutePaths, { + stdio: 'inherit', + detached: false, + }); + + child.on('error', (error) => { + log(`\nβœ— Failed to open editor: ${error.message}`, 'red'); + log('\nTry setting the EDITOR environment variable:', 'yellow'); + log(' export EDITOR=vim', 'cyan'); + log(' export EDITOR=code', 'cyan'); + log(' export EDITOR=nano', 'cyan'); + process.exit(1); + }); + + child.on('close', (code) => { + if (code !== 0 && code !== null) { + log(`\nβœ— Editor exited with code ${code}`, 'yellow'); + } + }); +} + +/** + * Main entry point + */ +async function main() { + const options = parseArguments(); + + // Show help + if (options.help || !options.url) { + printUsage(); + process.exit(0); + } + + // Find files + const { parsed, foundFiles } = findFiles(options.url); + + if (foundFiles.length === 0) { + log('\nβœ— No files found for this URL', 'red'); + log('\nThe page may not exist yet. 
To create new content, use:', 'yellow'); + log(' yarn docs:create --url --draft ', 'cyan'); + process.exit(1); + } + + // Display found files + log('\nβœ“ Found files:', 'green'); + const allFiles = new Set(); + + for (const file of foundFiles) { + allFiles.add(file); + log(` β€’ ${file}`, 'cyan'); + + // Check for shared content + const sharedSource = checkSharedContent(file); + if (sharedSource) { + if (existsSync(join(REPO_ROOT, sharedSource))) { + allFiles.add(sharedSource); + log( + ` β€’ ${sharedSource} ${colors.yellow}(shared source)${colors.reset}`, + 'cyan' + ); + } + } + } + + const filesToOpen = Array.from(allFiles); + + // List only mode + if (options.list) { + log(`\nβœ“ Found ${filesToOpen.length} file(s)`, 'green'); + process.exit(0); + } + + // Open in editor + openInEditor(filesToOpen); +} + +// Run if called directly +if (import.meta.url === `file://${process.argv[1]}`) { + main().catch((error) => { + log(`\nFatal error: ${error.message}`, 'red'); + console.error(error.stack); + process.exit(1); + }); +} + +export { findFiles, openInEditor }; diff --git a/scripts/lib/content-scaffolding.js b/scripts/lib/content-scaffolding.js new file mode 100644 index 000000000..63d0e6e66 --- /dev/null +++ b/scripts/lib/content-scaffolding.js @@ -0,0 +1,760 @@ +/** + * Content scaffolding utilities for InfluxData documentation + * Analyzes repository structure and prepares context for Claude + */ + +import { readdirSync, readFileSync, existsSync, statSync } from 'fs'; +import { join, dirname } from 'path'; +import { fileURLToPath } from 'url'; +import yaml from 'js-yaml'; +import matter from 'gray-matter'; +import { + readDraft, + writeJson, + writeMarkdownFile, + writeFrontmatterFile, + validatePath, + ensureDirectory, +} from './file-operations.js'; +import { urlToFilePaths } from './url-parser.js'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = dirname(__filename); + +// Repository root is two levels up from scripts/lib/ +const REPO_ROOT = join(__dirname, '../..'); + +/** + * Load products configuration from data/products.yml + * @returns {object} Products configuration + */ +export function loadProducts() { + const productsPath = join(REPO_ROOT, 'data/products.yml'); + + if (!existsSync(productsPath)) { + throw new Error('products.yml not found at ' + productsPath); + } + + const productsYaml = readFileSync(productsPath, 'utf8'); + const products = yaml.load(productsYaml); + + // Transform into more useful structure + const productMap = {}; + for (const [key, value] of Object.entries(products)) { + productMap[key] = { + key, + name: value.name, + namespace: value.namespace, + menu_category: value.menu_category, + versions: value.versions || [], + latest: value.latest, + }; + } + + return productMap; +} + +/** + * Extract product mentions from draft content + * @param {string} content - Draft content to analyze + * @param {object} products - Products map from loadProducts() + * @returns {string[]} Array of product keys mentioned + */ +export function extractProductMentions(content, products) { + const mentioned = new Set(); + const contentLower = content.toLowerCase(); + + // Product name patterns to search for + const patterns = { + influxdb3_core: [ + 'influxdb 3 core', + 'influxdb3 core', + 'influxdb core', + 'core version', + ], + influxdb3_enterprise: [ + 'influxdb 3 enterprise', + 'influxdb3 enterprise', + 'influxdb enterprise', + 'enterprise version', + ], + influxdb3_cloud_dedicated: [ + 'cloud dedicated', + 'influxdb cloud dedicated', + 'dedicated 
cluster', + ], + influxdb3_cloud_serverless: [ + 'cloud serverless', + 'influxdb cloud serverless', + 'serverless', + ], + influxdb3_clustered: ['clustered', 'influxdb clustered', 'kubernetes'], + influxdb_cloud: ['influxdb cloud', 'influxdb 2 cloud'], + influxdb_v2: ['influxdb 2', 'influxdb v2', 'influxdb 2.x'], + influxdb_v1: ['influxdb 1', 'influxdb v1', 'influxdb 1.x'], + }; + + // Check for each product's patterns + for (const [productKey, productPatterns] of Object.entries(patterns)) { + for (const pattern of productPatterns) { + if (contentLower.includes(pattern)) { + mentioned.add(productKey); + break; + } + } + } + + return Array.from(mentioned); +} + +/** + * Detect InfluxDB version and related tools from draft content + * @param {string} content - Draft content to analyze + * @returns {object} Version information + */ +export function detectInfluxDBVersion(content) { + const contentLower = content.toLowerCase(); + + // Version detection patterns + const versionInfo = { + version: null, + tools: [], + apis: [], + }; + + // Detect version + if ( + contentLower.includes('influxdb 3') || + contentLower.includes('influxdb3') + ) { + versionInfo.version = '3.x'; + + // v3-specific tools + if ( + contentLower.includes('influxdb3 ') || + contentLower.includes('influxdb3-') + ) { + versionInfo.tools.push('influxdb3 CLI'); + } + if (contentLower.includes('influxctl')) { + versionInfo.tools.push('influxctl'); + } + if (contentLower.includes('/api/v3')) { + versionInfo.apis.push('/api/v3'); + } + } else if ( + contentLower.includes('influxdb 2') || + contentLower.includes('influxdb v2') + ) { + versionInfo.version = '2.x'; + + // v2-specific tools + if (contentLower.includes('influx ')) { + versionInfo.tools.push('influx CLI'); + } + if (contentLower.includes('/api/v2')) { + versionInfo.apis.push('/api/v2'); + } + } else if ( + contentLower.includes('influxdb 1') || + contentLower.includes('influxdb v1') + ) { + versionInfo.version = '1.x'; + + // v1-specific tools + if (contentLower.includes('influx -')) { + versionInfo.tools.push('influx CLI (v1)'); + } + if (contentLower.includes('influxd')) { + versionInfo.tools.push('influxd'); + } + } + + // Common tools across versions + if (contentLower.includes('telegraf')) { + versionInfo.tools.push('Telegraf'); + } + + return versionInfo; +} + +/** + * Analyze content directory structure + * @param {string|string[]} basePaths - Base path(s) to analyze (e.g., 'content/influxdb3' or ['content/influxdb3', 'content/influxdb']) + * @returns {object} Structure analysis + */ +export function analyzeStructure(basePaths = 'content/influxdb3') { + // Normalize to array + const pathsArray = Array.isArray(basePaths) ? 
basePaths : [basePaths]; + + const allSections = new Set(); + const allExistingPaths = []; + const siblingWeights = {}; + + // Analyze each base path + for (const basePath of pathsArray) { + const fullPath = join(REPO_ROOT, basePath); + + if (!existsSync(fullPath)) { + continue; + } + + // Recursively walk directory + function walk(dir, relativePath = '') { + try { + const entries = readdirSync(dir); + + for (const entry of entries) { + const fullEntryPath = join(dir, entry); + const relativeEntryPath = join(relativePath, entry); + + try { + const stat = statSync(fullEntryPath); + + if (stat.isDirectory()) { + // Track product-level directories (first level under content/namespace/) + const pathParts = relativeEntryPath.split('/'); + if (pathParts.length === 2) { + // This is a product directory (e.g., 'core', 'enterprise') + allSections.add(pathParts[1]); + } + + // Track all directory paths + allExistingPaths.push(join(basePath, relativeEntryPath)); + + // Recurse + walk(fullEntryPath, relativeEntryPath); + } + } catch (error) { + // Skip files/dirs we can't access + continue; + } + } + } catch (error) { + // Skip directories we can't read + } + } + + walk(fullPath); + + // Analyze weights in common sections for all product directories + const commonSections = [ + 'admin', + 'write-data', + 'query-data', + 'reference', + 'get-started', + 'plugins', + ]; + + // Find all product directories (e.g., core, enterprise, cloud-dedicated) + try { + const productDirs = readdirSync(fullPath).filter((entry) => { + const fullEntryPath = join(fullPath, entry); + return ( + existsSync(fullEntryPath) && statSync(fullEntryPath).isDirectory() + ); + }); + + for (const productDir of productDirs) { + for (const section of commonSections) { + const sectionPath = join(fullPath, productDir, section); + if (existsSync(sectionPath)) { + const weights = findSiblingWeights(sectionPath); + if (weights.length > 0) { + siblingWeights[`${basePath}/${productDir}/${section}/`] = weights; + } + } + } + } + } catch (error) { + // Skip if we can't read directory + } + } + + return { + sections: [...allSections].sort(), + existingPaths: allExistingPaths.sort(), + siblingWeights, + }; +} + +/** + * Find weight values from sibling pages in a directory + * @param {string} dirPath - Directory to analyze + * @returns {number[]} Array of weight values + */ +export function findSiblingWeights(dirPath) { + if (!existsSync(dirPath)) { + return []; + } + + const weights = []; + const entries = readdirSync(dirPath); + + for (const entry of entries) { + if (entry.endsWith('.md')) { + const filePath = join(dirPath, entry); + try { + const content = readFileSync(filePath, 'utf8'); + const parsed = matter(content); + + if (parsed.data && typeof parsed.data.weight === 'number') { + weights.push(parsed.data.weight); + } + } catch (error) { + // Skip files that can't be parsed + continue; + } + } + } + + return weights.sort((a, b) => a - b); +} + +/** + * Prepare complete context for AI analysis + * @param {string} draftPath - Path to draft file + * @returns {object} Context object + */ +export function prepareContext(draftPath) { + // Read draft + const draft = readDraft(draftPath); + + // Load products + const products = loadProducts(); + + // Extract product mentions from draft + const mentionedProducts = extractProductMentions(draft.content, products); + + // Detect InfluxDB version and tools + const versionInfo = detectInfluxDBVersion(draft.content); + + // Determine which content paths to analyze based on version + let contentPaths = 
[]; + if (versionInfo.version === '3.x') { + contentPaths = ['content/influxdb3']; + } else if (versionInfo.version === '2.x') { + contentPaths = ['content/influxdb']; + } else if (versionInfo.version === '1.x') { + contentPaths = ['content/influxdb/v1', 'content/enterprise_influxdb/v1']; + } else { + // Default: analyze all + contentPaths = ['content/influxdb3', 'content/influxdb']; + } + + // Analyze structure for relevant paths + const structure = analyzeStructure(contentPaths); + + // Build context + const context = { + draft: { + path: draftPath, + content: draft.content, + existingFrontmatter: draft.frontmatter, + }, + products, + productHints: { + mentioned: mentionedProducts, + suggested: + mentionedProducts.length > 0 + ? mentionedProducts + : Object.keys(products).filter( + (key) => + key.startsWith('influxdb3_') || key.startsWith('influxdb_v') + ), + }, + versionInfo, + structure, + conventions: { + sharedContentDir: 'content/shared/', + menuKeyPattern: '{namespace}_{product}', + weightLevels: { + description: 'Weight ranges by level', + level1: '1-99 (top-level pages)', + level2: '101-199 (section landing pages)', + level3: '201-299 (detail pages)', + level4: '301-399 (sub-detail pages)', + }, + namingRules: { + files: 'Use lowercase with hyphens (e.g., manage-databases.md)', + directories: 'Use lowercase with hyphens', + shared: 'Shared content in /content/shared/', + }, + testing: { + codeblocks: 'Use pytest-codeblocks annotations for testable examples', + docker: 'Use compose.yaml services for testing code samples', + commands: `Version-specific CLIs: ${versionInfo.tools.join(', ') || 'detected from content'}`, + }, + }, + }; + + return context; +} + +/** + * Execute a proposal and create files + * @param {object} proposal - Proposal from Claude + * @returns {{created: string[], errors: string[]}} + */ +export function executeProposal(proposal) { + const created = []; + const errors = []; + + if (!proposal || !proposal.files) { + throw new Error('Invalid proposal: missing files array'); + } + + for (const file of proposal.files) { + try { + // Validate path + const validation = validatePath(file.path); + if (!validation.valid) { + errors.push( + `Invalid path ${file.path}: ${validation.errors.join(', ')}` + ); + continue; + } + + const fullPath = join(REPO_ROOT, file.path); + + // Check if file already exists + if (existsSync(fullPath)) { + errors.push(`File already exists: ${file.path}`); + continue; + } + + // Create file based on type + if (file.type === 'shared-content') { + // Shared content file with actual content + writeMarkdownFile(fullPath, {}, file.content || ''); + created.push(file.path); + } else if (file.type === 'frontmatter-only') { + // Frontmatter-only file with source reference + if (!file.frontmatter) { + errors.push(`Missing frontmatter for ${file.path}`); + continue; + } + + const sourcePath = file.frontmatter.source || ''; + writeFrontmatterFile(fullPath, file.frontmatter, sourcePath); + created.push(file.path); + } else { + errors.push(`Unknown file type: ${file.type} for ${file.path}`); + } + } catch (error) { + errors.push(`Error creating ${file.path}: ${error.message}`); + } + } + + return { created, errors }; +} + +/** + * Validate a proposal before execution + * @param {object} proposal - Proposal to validate + * @returns {{valid: boolean, errors: string[], warnings: string[]}} + */ +export function validateProposal(proposal) { + const errors = []; + const warnings = []; + + if (!proposal) { + return { + valid: false, + errors: ['Proposal is 
null or undefined'], + warnings, + }; + } + + if (!proposal.files || !Array.isArray(proposal.files)) { + errors.push('Proposal must have a files array'); + return { valid: false, errors, warnings }; + } + + if (proposal.files.length === 0) { + warnings.push('Proposal has no files to create'); + } + + // Validate each file + for (const file of proposal.files) { + if (!file.path) { + errors.push('File missing path property'); + continue; + } + + if (!file.type) { + errors.push(`File ${file.path} missing type property`); + } + + // Path validation + const pathValidation = validatePath(file.path); + if (!pathValidation.valid) { + errors.push( + `Invalid path ${file.path}: ${pathValidation.errors.join(', ')}` + ); + } + + // Check for conflicts + const fullPath = join(REPO_ROOT, file.path); + if (existsSync(fullPath)) { + warnings.push(`File already exists: ${file.path}`); + } + + // Type-specific validation + if (file.type === 'frontmatter-only') { + if (!file.frontmatter) { + errors.push(`Frontmatter-only file ${file.path} missing frontmatter`); + } else { + if (!file.frontmatter.title) { + errors.push(`File ${file.path} missing title in frontmatter`); + } + if (!file.frontmatter.description) { + warnings.push(`File ${file.path} missing description in frontmatter`); + } + if (!file.frontmatter.menu) { + errors.push(`File ${file.path} missing menu in frontmatter`); + } + if (!file.frontmatter.weight) { + errors.push(`File ${file.path} missing weight in frontmatter`); + } + if (!file.frontmatter.source) { + warnings.push(`File ${file.path} missing source reference`); + } + } + } else if (file.type === 'shared-content') { + if (!file.content) { + warnings.push(`Shared content file ${file.path} has no content`); + } + } + } + + return { + valid: errors.length === 0, + errors, + warnings, + }; +} + +/** + * Suggest next weight value for a section + * @param {number[]} existingWeights - Existing weights in section + * @param {number} level - Weight level (1-4) + * @returns {number} Suggested next weight + */ +export function suggestNextWeight(existingWeights, level = 3) { + const baseLevels = { + 1: 1, + 2: 101, + 3: 201, + 4: 301, + }; + + const base = baseLevels[level] || 201; + const maxWeight = base + 98; // Each level has 99 slots + + if (existingWeights.length === 0) { + return base; + } + + // Find weights in this level + const levelWeights = existingWeights.filter( + (w) => w >= base && w <= maxWeight + ); + + if (levelWeights.length === 0) { + return base; + } + + // Return max + 1 + return Math.max(...levelWeights) + 1; +} + +/** + * Find file from parsed URL + * @param {object} parsedURL - Parsed URL from url-parser.js + * @returns {object|null} File information or null if not found + */ +export function findFileFromURL(parsedURL) { + const potentialPaths = urlToFilePaths(parsedURL); + + for (const relativePath of potentialPaths) { + const fullPath = join(REPO_ROOT, relativePath); + if (existsSync(fullPath)) { + return { + path: relativePath, + fullPath, + exists: true, + }; + } + } + + // File doesn't exist, return first potential path for creation + return { + path: potentialPaths[0], + fullPath: join(REPO_ROOT, potentialPaths[0]), + exists: false, + }; +} + +/** + * Detect if a file uses shared content + * @param {string} filePath - Path to file (relative to repo root) + * @returns {string|null} Shared source path if found, null otherwise + */ +export function detectSharedContent(filePath) { + const fullPath = join(REPO_ROOT, filePath); + + if (!existsSync(fullPath)) { + return null; 
+ } + + try { + const content = readFileSync(fullPath, 'utf8'); + const parsed = matter(content); + + if (parsed.data && parsed.data.source) { + return parsed.data.source; + } + } catch (error) { + // Can't parse, assume not shared + return null; + } + + return null; +} + +/** + * Find all files that reference a shared source + * @param {string} sourcePath - Path to shared content file (e.g., "/shared/influxdb3-admin/databases.md") + * @returns {string[]} Array of file paths that use this shared source + */ +export function findSharedContentVariants(sourcePath) { + const variants = []; + + // Search content directories + const contentDirs = [ + 'content/influxdb3', + 'content/influxdb', + 'content/telegraf', + ]; + + function searchDirectory(dir) { + if (!existsSync(dir)) { + return; + } + + try { + const entries = readdirSync(dir); + + for (const entry of entries) { + const fullPath = join(dir, entry); + const stat = statSync(fullPath); + + if (stat.isDirectory()) { + searchDirectory(fullPath); + } else if (entry.endsWith('.md')) { + try { + const content = readFileSync(fullPath, 'utf8'); + const parsed = matter(content); + + if (parsed.data && parsed.data.source === sourcePath) { + // Convert to relative path from repo root + const relativePath = fullPath.replace(REPO_ROOT + '/', ''); + variants.push(relativePath); + } + } catch (error) { + // Skip files that can't be parsed + continue; + } + } + } + } catch (error) { + // Skip directories we can't read + } + } + + for (const contentDir of contentDirs) { + searchDirectory(join(REPO_ROOT, contentDir)); + } + + return variants; +} + +/** + * Analyze an existing page + * @param {string} filePath - Path to file (relative to repo root) + * @returns {object} Page analysis + */ +export function analyzeExistingPage(filePath) { + const fullPath = join(REPO_ROOT, filePath); + + if (!existsSync(fullPath)) { + throw new Error(`File not found: ${filePath}`); + } + + const content = readFileSync(fullPath, 'utf8'); + const parsed = matter(content); + + const analysis = { + path: filePath, + fullPath, + content: parsed.content, + frontmatter: parsed.data, + isShared: false, + sharedSource: null, + variants: [], + }; + + // Check if this file uses shared content + if (parsed.data && parsed.data.source) { + analysis.isShared = true; + analysis.sharedSource = parsed.data.source; + + // Find all variants that use the same shared source + analysis.variants = findSharedContentVariants(parsed.data.source); + } + + return analysis; +} + +/** + * Analyze multiple URLs and find their files + * @param {object[]} parsedURLs - Array of parsed URLs + * @returns {object[]} Array of URL analysis results + */ +export function analyzeURLs(parsedURLs) { + const results = []; + + for (const parsedURL of parsedURLs) { + const fileInfo = findFileFromURL(parsedURL); + + const result = { + url: parsedURL.url, + parsed: parsedURL, + exists: fileInfo.exists, + files: { + main: fileInfo.path, + isShared: false, + sharedSource: null, + variants: [], + }, + }; + + if (fileInfo.exists) { + // Analyze existing page + try { + const analysis = analyzeExistingPage(fileInfo.path); + result.files.isShared = analysis.isShared; + result.files.sharedSource = analysis.sharedSource; + result.files.variants = analysis.variants; + } catch (error) { + console.error(`Error analyzing ${fileInfo.path}: ${error.message}`); + } + } + + results.push(result); + } + + return results; +} diff --git a/scripts/lib/file-operations.js b/scripts/lib/file-operations.js new file mode 100644 index 
000000000..6bbb57830
--- /dev/null
+++ b/scripts/lib/file-operations.js
@@ -0,0 +1,156 @@
+/**
+ * File operations utilities for documentation scaffolding
+ * Handles reading, writing, and validating documentation files
+ */
+
+import { readFileSync, writeFileSync, existsSync, mkdirSync } from 'fs';
+import { dirname, join, basename } from 'path';
+import matter from 'gray-matter';
+import yaml from 'js-yaml';
+
+/**
+ * Read a markdown file and parse frontmatter
+ * @param {string} filePath - Path to the markdown file
+ * @returns {{content: string, frontmatter: object, raw: string}}
+ */
+export function readDraft(filePath) {
+  if (!existsSync(filePath)) {
+    throw new Error(`File not found: ${filePath}`);
+  }
+
+  const raw = readFileSync(filePath, 'utf8');
+  const parsed = matter(raw);
+
+  return {
+    content: parsed.content,
+    frontmatter: parsed.data || {},
+    raw,
+  };
+}
+
+/**
+ * Write a markdown file with frontmatter
+ * @param {string} filePath - Path to write to
+ * @param {object} frontmatter - Frontmatter object
+ * @param {string} content - Markdown content
+ */
+export function writeMarkdownFile(filePath, frontmatter, content) {
+  ensureDirectory(dirname(filePath));
+
+  const frontmatterYaml = yaml.dump(frontmatter, {
+    lineWidth: -1, // Don't wrap lines
+    noRefs: true,
+  });
+
+  const fileContent = `---\n${frontmatterYaml}---\n\n${content}`;
+  writeFileSync(filePath, fileContent, 'utf8');
+}
+
+/**
+ * Write a frontmatter-only file with source reference
+ * @param {string} filePath - Path to write to
+ * @param {object} frontmatter - Frontmatter object
+ * @param {string} sourcePath - Path to shared content file
+ */
+export function writeFrontmatterFile(filePath, frontmatter, sourcePath) {
+  ensureDirectory(dirname(filePath));
+
+  const frontmatterYaml = yaml.dump(frontmatter, {
+    lineWidth: -1,
+    noRefs: true,
+  });
+
+  // Point readers at the shared source file
+  const comment = `<!-- The content of this page is at ${sourcePath} -->`;
+  const fileContent = `---\n${frontmatterYaml}---\n\n${comment}\n`;
+
+  writeFileSync(filePath, fileContent, 'utf8');
+}
+
+/**
+ * Ensure a directory exists, creating it recursively if needed
+ * @param {string} dirPath - Directory path to ensure
+ */
+export function ensureDirectory(dirPath) {
+  if (!existsSync(dirPath)) {
+    mkdirSync(dirPath, { recursive: true });
+  }
+}
+
+/**
+ * Validate a file path follows conventions
+ * @param {string} filePath - Path to validate
+ * @returns {{valid: boolean, errors: string[]}}
+ */
+export function validatePath(filePath) {
+  const errors = [];
+
+  // Check for invalid characters
+  if (filePath.includes(' ')) {
+    errors.push('Path contains spaces (use hyphens instead)');
+  }
+
+  if (filePath.match(/[A-Z]/)) {
+    errors.push('Path contains uppercase letters (use lowercase)');
+  }
+
+  // Check naming conventions
+  const fileName = basename(filePath, '.md');
+  if (fileName.includes('_') && !filePath.includes('/shared/')) {
+    errors.push('Use hyphens instead of underscores in file names');
+  }
+
+  // Check structure
+  if (!filePath.startsWith('content/')) {
+    errors.push('Path should start with content/');
+  }
+
+  return {
+    valid: errors.length === 0,
+    errors,
+  };
+}
+
+/**
+ * Format frontmatter object to YAML string
+ * @param {object} frontmatter - Frontmatter object
+ * @returns {string} YAML string
+ */
+export function formatFrontmatter(frontmatter) {
+  return yaml.dump(frontmatter, {
+    lineWidth: -1,
+    noRefs: true,
+  });
+}
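+
+// Worked example for validatePath above (hypothetical paths):
+//   validatePath('content/influxdb3/core/admin/manage-databases.md')
+//     β†’ { valid: true, errors: [] }
+//   validatePath('content/influxdb3/core/admin/Manage_Databases.md')
+//     β†’ { valid: false, errors: [uppercase-letters error, underscore error] }
+
+/**
+ * Read a JSON file
+ * @param {string} filePath - Path to JSON file
+ * @returns {object} Parsed JSON
+ */
+export function readJson(filePath) {
+  if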
(!existsSync(filePath)) { + throw new Error(`File not found: ${filePath}`); + } + const content = readFileSync(filePath, 'utf8'); + return JSON.parse(content); +} + +/** + * Write a JSON file with pretty formatting + * @param {string} filePath - Path to write to + * @param {object} data - Data to write + */ +export function writeJson(filePath, data) { + ensureDirectory(dirname(filePath)); + const content = JSON.stringify(data, null, 2); + writeFileSync(filePath, content, 'utf8'); +} + +/** + * Check if a file exists + * @param {string} filePath - Path to check + * @returns {boolean} + */ +export function fileExists(filePath) { + return existsSync(filePath); +} diff --git a/scripts/lib/url-parser.js b/scripts/lib/url-parser.js new file mode 100644 index 000000000..e985f16f5 --- /dev/null +++ b/scripts/lib/url-parser.js @@ -0,0 +1,216 @@ +/** + * URL parsing utilities for documentation scaffolding + * Parses docs.influxdata.com URLs to extract product, version, and path information + */ + +import { basename } from 'path'; + +// Base URL pattern for InfluxData documentation +const DOCS_BASE_URL = 'docs.influxdata.com'; + +/** + * Parse a documentation URL to extract components + * @param {string} url - Full URL or path (e.g., "https://docs.influxdata.com/influxdb3/core/admin/databases/" or "/influxdb3/core/admin/databases/") + * @returns {object} Parsed URL components + */ +export function parseDocumentationURL(url) { + // Remove protocol and domain if present + let path = url; + if (url.includes(DOCS_BASE_URL)) { + const urlObj = new URL(url); + path = urlObj.pathname; + } + + // Remove leading and trailing slashes + path = path.replace(/^\/+|\/+$/g, ''); + + // Split into parts + const parts = path.split('/').filter((p) => p.length > 0); + + if (parts.length === 0) { + throw new Error('Invalid URL: no path components'); + } + + // First part is the namespace (influxdb3, influxdb, telegraf, etc.) 
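+  // Example (assuming the standard docs URL layout): parsing
+  // "/influxdb3/core/admin/databases/" should yield namespace "influxdb3",
+  // product "core", section "admin", pagePath "databases", isSection true.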
+ const namespace = parts[0]; + + // Determine product structure based on namespace + let product = null; + let section = null; + let pagePath = []; + let isSection = false; + + if (namespace === 'influxdb3') { + // InfluxDB 3 structure: /influxdb3/{product}/{section}/{...path} + if (parts.length >= 2) { + product = parts[1]; // core, enterprise, cloud-dedicated, cloud-serverless, clustered, explorer + if (parts.length >= 3) { + section = parts[2]; // admin, write-data, query-data, reference, get-started, plugins + pagePath = parts.slice(3); + } + } + } else if (namespace === 'influxdb') { + // InfluxDB 2/1 structure: /influxdb/{version}/{section}/{...path} + if (parts.length >= 2) { + const secondPart = parts[1]; + if (secondPart === 'cloud') { + product = 'cloud'; + if (parts.length >= 3) { + section = parts[2]; + pagePath = parts.slice(3); + } + } else if (secondPart.match(/^v\d/)) { + // v2.x or v1.x + product = secondPart; + if (parts.length >= 3) { + section = parts[2]; + pagePath = parts.slice(3); + } + } else { + // Assume cloudless-v2 structure: /influxdb/{section}/{...path} + section = secondPart; + pagePath = parts.slice(2); + product = 'v2'; // default + } + } + } else if (namespace === 'telegraf') { + // Telegraf structure: /telegraf/{version}/{section}/{...path} + if (parts.length >= 2) { + product = parts[1]; + if (parts.length >= 3) { + section = parts[2]; + pagePath = parts.slice(3); + } + } + } else if (namespace === 'kapacitor' || namespace === 'chronograf') { + // Other products: /{product}/{version}/{section}/{...path} + if (parts.length >= 2) { + product = parts[1]; + if (parts.length >= 3) { + section = parts[2]; + pagePath = parts.slice(3); + } + } + } + + // Determine if this is a section (directory) or single page + // Section URLs typically end with / or have no file extension + // Single page URLs typically end with a page name + if (pagePath.length === 0 && section) { + // URL points to section landing page + isSection = true; + } else if (pagePath.length > 0) { + const lastPart = pagePath[pagePath.length - 1]; + // If last part looks like a directory (no dots), it's a section + isSection = !lastPart.includes('.'); + } + + return { + url, + namespace, + product, + section, + pagePath: pagePath.join('/'), + isSection, + fullPath: parts.join('/'), + }; +} + +/** + * Validate if a URL is a valid documentation URL + * @param {string} url - URL to validate + * @returns {boolean} True if valid documentation URL + */ +export function validateDocumentationURL(url) { + try { + const parsed = parseDocumentationURL(url); + return parsed.namespace && parsed.namespace.length > 0; + } catch (error) { + return false; + } +} + +/** + * Convert parsed URL to potential file paths + * @param {object} parsedURL - Parsed URL from parseDocumentationURL() + * @returns {string[]} Array of potential file paths to check + */ +export function urlToFilePaths(parsedURL) { + const { namespace, product, section, pagePath, isSection } = parsedURL; + + const basePaths = []; + + // Build base path based on namespace and product + let contentPath = `content/${namespace}`; + if (product) { + contentPath += `/${product}`; + } + if (section) { + contentPath += `/${section}`; + } + + if (pagePath) { + contentPath += `/${pagePath}`; + } + + if (isSection) { + // Section could be _index.md or directory with _index.md + basePaths.push(`${contentPath}/_index.md`); + basePaths.push(`${contentPath}.md`); // Sometimes sections are single files + } else { + // Single page + 
basePaths.push(`${contentPath}.md`); + basePaths.push(`${contentPath}/_index.md`); // Could still be a section + } + + return basePaths; +} + +/** + * Extract page name from URL for use in file names + * @param {object} parsedURL - Parsed URL from parseDocumentationURL() + * @returns {string} Suggested file name + */ +export function urlToFileName(parsedURL) { + const { pagePath, section } = parsedURL; + + if (pagePath && pagePath.length > 0) { + // Use last part of page path + const parts = pagePath.split('/'); + return parts[parts.length - 1]; + } else if (section) { + // Use section name + return section; + } + + return 'index'; +} + +/** + * Parse multiple URLs (comma-separated or array) + * @param {string|string[]} urls - URLs to parse + * @returns {object[]} Array of parsed URLs + */ +export function parseMultipleURLs(urls) { + let urlArray = []; + + if (typeof urls === 'string') { + // Split by comma if string + urlArray = urls.split(',').map((u) => u.trim()); + } else if (Array.isArray(urls)) { + urlArray = urls; + } else { + throw new Error('URLs must be a string or array'); + } + + return urlArray + .map((url) => { + try { + return parseDocumentationURL(url); + } catch (error) { + console.error(`Error parsing URL ${url}: ${error.message}`); + return null; + } + }) + .filter((parsed) => parsed !== null); +} diff --git a/scripts/schemas/scaffold-context.schema.json b/scripts/schemas/scaffold-context.schema.json new file mode 100644 index 000000000..0ca409462 --- /dev/null +++ b/scripts/schemas/scaffold-context.schema.json @@ -0,0 +1,182 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Content Scaffolding Context", + "description": "Context data prepared by docs-create.js for AI analysis", + "type": "object", + "required": ["draft", "products", "productHints", "versionInfo", "structure", "conventions"], + "properties": { + "mode": { + "type": "string", + "enum": ["create", "edit"], + "description": "Operation mode: create new content or edit existing content" + }, + "urls": { + "type": "array", + "description": "URL analysis results (for URL-based workflow)", + "items": { + "type": "object", + "properties": { + "url": { "type": "string" }, + "parsed": { + "type": "object", + "properties": { + "namespace": { "type": "string" }, + "product": { "type": "string" }, + "section": { "type": "string" }, + "pagePath": { "type": "string" }, + "isSection": { "type": "boolean" } + } + }, + "exists": { "type": "boolean" }, + "files": { + "type": "object", + "properties": { + "main": { "type": "string" }, + "isShared": { "type": "boolean" }, + "sharedSource": { "type": ["string", "null"] }, + "variants": { + "type": "array", + "items": { "type": "string" } + } + } + } + } + } + }, + "existingContent": { + "type": "object", + "description": "Existing file contents (for edit mode)", + "patternProperties": { + ".*": { "type": "string" } + } + }, + "draft": { + "type": "object", + "description": "Draft content and metadata", + "required": ["path", "content", "existingFrontmatter"], + "properties": { + "path": { + "type": "string", + "description": "Path to the draft file" + }, + "content": { + "type": "string", + "description": "Markdown content of the draft" + }, + "existingFrontmatter": { + "type": "object", + "description": "Frontmatter from draft (if any)" + } + } + }, + "products": { + "type": "object", + "description": "Available InfluxDB products from data/products.yml", + "patternProperties": { + ".*": { + "type": "object", + "properties": { + "key": { "type": 
"string" }, + "name": { "type": "string" }, + "namespace": { "type": "string" }, + "menu_category": { "type": "string" }, + "versions": { "type": "array", "items": { "type": "string" } }, + "latest": { "type": "string" } + } + } + } + }, + "productHints": { + "type": "object", + "description": "Product recommendations from content analysis", + "required": ["mentioned", "suggested"], + "properties": { + "mentioned": { + "type": "array", + "description": "Products explicitly mentioned in draft content", + "items": { "type": "string" } + }, + "suggested": { + "type": "array", + "description": "Products suggested based on analysis", + "items": { "type": "string" } + } + } + }, + "versionInfo": { + "type": "object", + "description": "Detected InfluxDB version and tools", + "required": ["version", "tools", "apis"], + "properties": { + "version": { + "type": ["string", "null"], + "description": "Detected version (3.x, 2.x, 1.x, or null)" + }, + "tools": { + "type": "array", + "description": "CLI tools and utilities mentioned", + "items": { "type": "string" } + }, + "apis": { + "type": "array", + "description": "API endpoints mentioned", + "items": { "type": "string" } + } + } + }, + "structure": { + "type": "object", + "description": "Repository structure analysis", + "required": ["sections", "existingPaths", "siblingWeights"], + "properties": { + "sections": { + "type": "array", + "description": "Available documentation sections", + "items": { "type": "string" } + }, + "existingPaths": { + "type": "array", + "description": "All existing directory paths", + "items": { "type": "string" } + }, + "siblingWeights": { + "type": "object", + "description": "Weight values from sibling pages by section", + "patternProperties": { + ".*": { + "type": "array", + "items": { "type": "number" } + } + } + } + } + }, + "conventions": { + "type": "object", + "description": "Documentation conventions and guidelines", + "required": ["sharedContentDir", "menuKeyPattern", "weightLevels", "namingRules", "testing"], + "properties": { + "sharedContentDir": { + "type": "string", + "description": "Directory for shared content" + }, + "menuKeyPattern": { + "type": "string", + "description": "Pattern for menu keys" + }, + "weightLevels": { + "type": "object", + "description": "Weight ranges by navigation level" + }, + "namingRules": { + "type": "object", + "description": "File and directory naming conventions" + }, + "testing": { + "type": "object", + "description": "Testing conventions for code samples" + } + } + } + } +} diff --git a/scripts/schemas/scaffold-proposal.schema.json b/scripts/schemas/scaffold-proposal.schema.json new file mode 100644 index 000000000..edb638ce4 --- /dev/null +++ b/scripts/schemas/scaffold-proposal.schema.json @@ -0,0 +1,145 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Content Scaffolding Proposal", + "description": "Proposal generated by AI analysis for creating documentation files", + "type": "object", + "required": ["analysis", "files"], + "properties": { + "analysis": { + "type": "object", + "description": "Analysis results from AI agents", + "required": ["topic", "targetProducts", "section", "isShared"], + "properties": { + "topic": { + "type": "string", + "description": "Brief topic description" + }, + "targetProducts": { + "type": "array", + "description": "Products this documentation applies to", + "items": { "type": "string" }, + "minItems": 1 + }, + "section": { + "type": "string", + "description": "Documentation section (admin, write-data, query-data, 
etc.)" + }, + "isShared": { + "type": "boolean", + "description": "Whether content should be shared across products" + }, + "reasoning": { + "type": "string", + "description": "Explanation for structure decisions" + }, + "styleReview": { + "type": "object", + "description": "Style compliance review from Style Agent", + "properties": { + "issues": { + "type": "array", + "items": { "type": "string" } + }, + "recommendations": { + "type": "array", + "items": { "type": "string" } + } + } + }, + "codeValidation": { + "type": "object", + "description": "Code sample validation from Coding Agent", + "properties": { + "tested": { + "type": "boolean", + "description": "Whether code samples were tested" + }, + "tools": { + "type": "array", + "description": "Tools used in code samples", + "items": { "type": "string" } + } + } + } + } + }, + "files": { + "type": "array", + "description": "Files to create", + "minItems": 1, + "items": { + "type": "object", + "required": ["path", "type"], + "properties": { + "path": { + "type": "string", + "description": "File path relative to repository root" + }, + "type": { + "type": "string", + "enum": ["shared-content", "frontmatter-only"], + "description": "File type: shared-content (with body) or frontmatter-only (just frontmatter + source)" + }, + "content": { + "type": "string", + "description": "Markdown content (for shared-content files)" + }, + "frontmatter": { + "type": "object", + "description": "Frontmatter object (for frontmatter-only files)", + "required": ["title", "description", "menu", "weight"], + "properties": { + "title": { + "type": "string", + "description": "Page title" + }, + "description": { + "type": "string", + "description": "SEO description" + }, + "menu": { + "type": "object", + "description": "Menu configuration", + "patternProperties": { + ".*": { + "type": "object", + "required": ["name"], + "properties": { + "name": { "type": "string" }, + "parent": { "type": "string" } + } + } + } + }, + "weight": { + "type": "number", + "description": "Sort weight" + }, + "source": { + "type": "string", + "description": "Path to shared content file" + }, + "related": { + "type": "array", + "description": "Related article URLs", + "items": { "type": "string" } + }, + "alt_links": { + "type": "object", + "description": "Cross-product navigation links", + "patternProperties": { + ".*": { "type": "string" } + } + } + } + } + } + } + }, + "nextSteps": { + "type": "array", + "description": "Recommended next steps after file creation", + "items": { "type": "string" } + } + } +} diff --git a/scripts/templates/chatgpt-prompt.md b/scripts/templates/chatgpt-prompt.md new file mode 100644 index 000000000..3f3de19d5 --- /dev/null +++ b/scripts/templates/chatgpt-prompt.md @@ -0,0 +1,136 @@ +# Content Scaffolding Analysis Prompt (ChatGPT) + +## Context + +You are analyzing a documentation draft to generate an intelligent file structure proposal for the InfluxData documentation repository. + +**Context file**: `.tmp/scaffold-context.json` + +Read and analyze the context file, which contains: +- **draft**: The markdown content and any existing frontmatter +- **products**: Available InfluxDB products (Core, Enterprise, Cloud, etc.) +- **productHints**: Products mentioned or suggested based on content analysis +- **versionInfo**: Detected InfluxDB version (3.x, 2.x, 1.x) and tools +- **structure**: Repository structure, existing paths, and sibling weights +- **conventions**: Documentation conventions for naming, weights, and testing + +## Your Tasks + +### 1. 
Content Analysis + +Analyze the draft content to determine: + +- **Topic**: What is this documentation about? +- **Target audience**: Developers, administrators, beginners, or advanced users? +- **Documentation type**: Conceptual overview, how-to guide, reference, or tutorial? +- **Target products**: Which InfluxDB products does this apply to? + - Use `productHints.mentioned` and `productHints.suggested` from context + - Consider `versionInfo.version` (3.x, 2.x, or 1.x) +- **Section**: Which documentation section? (admin, write-data, query-data, reference, get-started, plugins) + +### 2. Structure Decisions + +Decide on the optimal file structure: + +- **Shared vs Product-Specific**: + - Use shared content (`content/shared/`) when content applies broadly with minor variations + - Use product-specific when content differs significantly between products +- **Parent menu item**: What should be the navigation parent? +- **Weight**: Calculate appropriate weight based on `structure.siblingWeights` + - Weights are in ranges: 1-99 (top level), 101-199 (level 2), 201-299 (level 3) + +### 3. Frontmatter Generation + +For each file, create complete frontmatter with: + +- **title**: Clear, SEO-friendly title +- **description**: Concise 1-2 sentence description for SEO +- **menu**: Proper menu structure with product key (pattern: `{namespace}_{product}`) +- **weight**: Sequential weight based on siblings +- **source**: (for frontmatter-only files) Path to shared content +- **related**: 3-5 relevant related articles from `structure.existingPaths` +- **alt_links**: Map equivalent pages across products for cross-product navigation + +### 4. Code Sample Considerations + +Based on `versionInfo`: +- Use version-specific CLI commands (influxdb3, influx, influxctl) +- Reference appropriate API endpoints (/api/v3, /api/v2) +- Note testing requirements from `conventions.testing` + +### 5. Style Compliance + +Follow conventions from `conventions.namingRules`: +- Files: Use lowercase with hyphens (e.g., `manage-databases.md`) +- Directories: Use lowercase with hyphens +- Shared content: Place in appropriate `/content/shared/` subdirectory + +## Output Format + +Generate a JSON proposal matching the schema in `scripts/schemas/scaffold-proposal.schema.json`. + +**Required structure**: + +```json +{ + "analysis": { + "topic": "Brief topic description", + "targetProducts": ["core", "enterprise", "cloud-dedicated"], + "section": "admin", + "isShared": true, + "reasoning": "Why this structure makes sense", + "styleReview": { + "issues": [], + "recommendations": [] + }, + "codeValidation": { + "tested": false, + "tools": ["influxdb3 CLI", "influxctl"] + } + }, + "files": [ + { + "path": "content/shared/influxdb3-admin/topic-name.md", + "type": "shared-content", + "content": "{{ACTUAL_DRAFT_CONTENT}}" + }, + { + "path": "content/influxdb3/core/admin/topic-name.md", + "type": "frontmatter-only", + "frontmatter": { + "title": "Page Title", + "description": "Page description", + "menu": { + "influxdb3_core": { + "name": "Nav Label", + "parent": "Parent Item" + } + }, + "weight": 205, + "source": "/shared/influxdb3-admin/topic-name.md", + "related": [ + "/influxdb3/core/path/to/related/" + ], + "alt_links": { + "enterprise": "/influxdb3/enterprise/admin/topic-name/" + } + } + } + ], + "nextSteps": [ + "Review generated frontmatter", + "Test with: npx hugo server", + "Add product-specific variations if needed" + ] +} +``` + +## Instructions + +1. Read and parse `.tmp/scaffold-context.json` +2. 
Analyze the draft content thoroughly
+3. Make structure decisions based on the analysis
+4. Generate complete frontmatter for all files
+5. Save the proposal to `.tmp/scaffold-proposal.json`
+
+The proposal will be validated and used by `yarn docs:create --proposal .tmp/scaffold-proposal.json` to create the files.
diff --git a/scripts/templates/copilot-prompt.md b/scripts/templates/copilot-prompt.md
new file mode 100644
index 000000000..44d221d16
--- /dev/null
+++ b/scripts/templates/copilot-prompt.md
@@ -0,0 +1,111 @@
+# Content Scaffolding Analysis (GitHub Copilot)
+
+Generate a documentation scaffolding proposal from the context file.
+
+## Input
+
+Read `.tmp/scaffold-context.json` which contains:
+- `draft`: Documentation draft content and frontmatter
+- `products`: Available InfluxDB products
+- `productHints`: Suggested products based on content analysis
+- `versionInfo`: Detected version (3.x/2.x/1.x) and tools
+- `structure`: Repository structure and sibling weights
+- `conventions`: Documentation standards
+
+## Analysis
+
+Determine:
+1. **Topic** and **audience** from draft content
+2. **Target products** from `productHints` and `versionInfo`
+3. **Documentation section** (admin/write-data/query-data/reference/get-started/plugins)
+4. **Shared vs product-specific** structure
+5. **Weight** from `structure.siblingWeights` for the section
+
+## File Structure
+
+Generate files following these patterns:
+
+### Shared Content Pattern
+```
+content/shared/{namespace}-{section}/{topic-name}.md
+ ├─ content/{namespace}/{product}/{section}/{topic-name}.md (frontmatter only)
+ ├─ content/{namespace}/{product}/{section}/{topic-name}.md (frontmatter only)
+ └─ ...
+```
+
+### Product-Specific Pattern
+```
+content/{namespace}/{product}/{section}/{topic-name}.md (full content)
+```
+
+## Frontmatter Template
+
+For frontmatter-only files:
+```yaml
+---
+title: Clear SEO title
+description: 1-2 sentence description
+menu:
+  {namespace}_{product}:
+    name: Nav label
+    parent: Parent item
+weight: {calculated from siblings}
+source: /shared/{namespace}-{section}/{topic-name}.md
+related:
+  - /path/to/related1/
+  - /path/to/related2/
+alt_links:
+  {product}: /path/to/equivalent/
+---
+```
+
+## Code Samples
+
+Based on `versionInfo`:
+- **v3.x**: Use `influxdb3` CLI, `influxctl`, `/api/v3`
+- **v2.x**: Use `influx` CLI, `/api/v2`
+- **v1.x**: Use `influx` CLI (v1), `influxd`, InfluxQL
+
+## Output
+
+Generate JSON matching `scripts/schemas/scaffold-proposal.schema.json`:
+
+```json
+{
+  "analysis": {
+    "topic": "...",
+    "targetProducts": ["..."],
+    "section": "...",
+    "isShared": true/false,
+    "reasoning": "...",
+    "styleReview": {
+      "issues": [],
+      "recommendations": []
+    },
+    "codeValidation": {
+      "tested": false,
+      "tools": []
+    }
+  },
+  "files": [
+    {
+      "path": "content/...",
+      "type": "shared-content" | "frontmatter-only",
+      "content": "..." OR "frontmatter": {...}
+    }
+  ],
+  "nextSteps": ["..."]
+}
+```
+
+Save to: `.tmp/scaffold-proposal.json`
+
+## Conventions
+
+- **Files**: lowercase-with-hyphens.md
+- **Menu keys**: `{namespace}_{product}` (e.g., `influxdb3_core`)
+- **Weights**: 1-99 (top), 101-199 (level 2), 201-299 (level 3)
+- **Shared content**: `content/shared/` subdirectories
+- **Related links**: 3-5 contextually relevant articles
+
+Begin analysis of `.tmp/scaffold-context.json`.
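+
+## Optional: validate the proposal
+
+Before handing the proposal to `yarn docs:create`, you can check it against the
+schema. A minimal sketch, assuming Node.js and the `ajv-cli` package (not a
+stated dependency of this repository):
+
+```bash
+# Validate .tmp/scaffold-proposal.json against the proposal schema
+npx ajv-cli validate \
+  -s scripts/schemas/scaffold-proposal.schema.json \
+  -d .tmp/scaffold-proposal.json
+```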
From b3d921941277bb57d460b8386cc8ebb1286ff2d0 Mon Sep 17 00:00:00 2001 From: Dustin Eaton Date: Wed, 29 Oct 2025 08:28:39 -0500 Subject: [PATCH 2/6] chore: update to working kubit and kubectl versions (#6496) --- .../install/set-up-cluster/configure-cluster/use-helm.md | 7 +++---- .../influxdb3/clustered/install/set-up-cluster/deploy.md | 4 ++-- .../clustered/install/set-up-cluster/prerequisites.md | 4 ++-- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/content/influxdb3/clustered/install/set-up-cluster/configure-cluster/use-helm.md b/content/influxdb3/clustered/install/set-up-cluster/configure-cluster/use-helm.md index 2179f8922..c32cd8b0b 100644 --- a/content/influxdb3/clustered/install/set-up-cluster/configure-cluster/use-helm.md +++ b/content/influxdb3/clustered/install/set-up-cluster/configure-cluster/use-helm.md @@ -284,9 +284,8 @@ In addition to the InfluxDB images, copy the kubit operator images: ```bash # Create a list of kubit-related images cat > /tmp/kubit-images.txt << EOF -ghcr.io/kubecfg/kubit:v0.0.20 +ghcr.io/kubecfg/kubit:v0.0.22 ghcr.io/kubecfg/kubecfg/kubecfg:latest -bitnami/kubectl:1.27.5 registry.k8s.io/kubectl:v1.28.0 EOF @@ -307,8 +306,8 @@ images: # Configure kubit operator images kubit: controller: - image: REGISTRY_HOSTNAME/ghcr.io/kubecfg/kubit:v0.0.20 - apply_step_image: REGISTRY_HOSTNAME/bitnami/kubectl:1.27.5 + image: REGISTRY_HOSTNAME/ghcr.io/kubecfg/kubit:v0.0.22 + apply_step_image: REGISTRY_HOSTNAME/registry.k8s.io/kubectl:v1.28.0 render_step_image: REGISTRY_HOSTNAME/registry.k8s.io/kubectl:v1.28.0 kubecfg_image: REGISTRY_HOSTNAME/ghcr.io/kubecfg/kubecfg/kubecfg:latest diff --git a/content/influxdb3/clustered/install/set-up-cluster/deploy.md b/content/influxdb3/clustered/install/set-up-cluster/deploy.md index 983db90c1..5e131797f 100644 --- a/content/influxdb3/clustered/install/set-up-cluster/deploy.md +++ b/content/influxdb3/clustered/install/set-up-cluster/deploy.md @@ -76,11 +76,11 @@ making it ideal for air-gapped clusters._ 1. On a machine with internet access, download the [`kubit` CLI](https://github.com/kubecfg/kubit#cli-tool)--for example: ```bash - curl -L -o kubit https://github.com/kubecfg/kubit/archive/refs/tags/v0.0.20.tar.gz + curl -L -o kubit https://github.com/kubecfg/kubit/archive/refs/tags/v0.0.22.tar.gz chmod +x kubit ``` - Replace {{% code-placeholder-key %}}`v0.0.20`{{% /code-placeholder-key%}} with the [latest release version](https://github.com/kubecfg/kubit/releases/latest). + Replace {{% code-placeholder-key %}}`v0.0.22`{{% /code-placeholder-key%}} with the [latest release version](https://github.com/kubecfg/kubit/releases/latest). 2. If deploying InfluxDB in an air-gapped environment (without internet access), transfer the binary to your air-gapped environment. diff --git a/content/influxdb3/clustered/install/set-up-cluster/prerequisites.md b/content/influxdb3/clustered/install/set-up-cluster/prerequisites.md index 1c392f88c..4a79e5bce 100644 --- a/content/influxdb3/clustered/install/set-up-cluster/prerequisites.md +++ b/content/influxdb3/clustered/install/set-up-cluster/prerequisites.md @@ -189,12 +189,12 @@ update an InfluxDB cluster. > separately. The Helm chart installs the kubit operator. Use `kubectl` to install the [kubecfg kubit](https://github.com/kubecfg/kubit) -operator **v0.0.18 or later**. +operator **v0.0.22 or later**. 
 ```bash
-kubectl apply -k 'https://github.com/kubecfg/kubit//kustomize/global?ref=v0.0.19'
+kubectl apply -k 'https://github.com/kubecfg/kubit//kustomize/global?ref=v0.0.22'
 ```
 
 ### Set up a Kubernetes ingress controller

From cc36a19a1791802569f7f4ef598cbfe26c83aea9 Mon Sep 17 00:00:00 2001
From: peterbarnett03
Date: Thu, 30 Oct 2025 10:31:05 -0400
Subject: [PATCH 3/6] feat: 3.6 release notes (#6499)

* feat: 3.6 release notes

* Update content/shared/v3-core-enterprise-release-notes/_index.md

* Update content/shared/v3-core-enterprise-release-notes/_index.md

* chore(influxdb3): Core/Ent 3.6, Explorer 1.4 release

---------

Co-authored-by: Jason Stirnaman
---
 .../_index.md     | 31 +++++++++++++++++++
 data/products.yml |  6 ++--
 2 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/content/shared/v3-core-enterprise-release-notes/_index.md b/content/shared/v3-core-enterprise-release-notes/_index.md
index 56260b275..6d469ccf1 100644
--- a/content/shared/v3-core-enterprise-release-notes/_index.md
+++ b/content/shared/v3-core-enterprise-release-notes/_index.md
@@ -5,6 +5,37 @@
 > All updates to Core are automatically included in Enterprise.
 > The Enterprise sections below only list updates exclusive to Enterprise.
 
+## v3.6.0 {date="2025-10-30"}
+
+### Core
+
+#### Features
+
+- **Quick-start developer experience**:
+  - `influxdb3` now supports running without arguments for instant database startup, automatically generating ID and storage flag values based on your system's setup.
+- **Processing Engine**:
+  - Plugins now support multiple files instead of single-file limitations.
+  - When creating a trigger, you can upload a plugin directly from your local machine using the `--upload` flag.
+  - Existing plugin files can now be updated at runtime without recreating triggers.
+  - The new `system.plugin_files` table and `show plugins` CLI command provide visibility into all loaded plugin files.
+  - Custom plugin repositories are now supported via the `--plugin-repo` CLI flag.
+  - Python package installation can now be disabled with `--package-manager disabled` for locked-down environments.
+  - Plugin file path validation now prevents directory traversal attacks by blocking relative and absolute path patterns.
+
+#### Bug fixes
+
+- **Token management**: Token display now works correctly for hard-deleted databases.
+
+### Enterprise
+
+All Core updates are included in Enterprise. Additional Enterprise-specific features and fixes:
+
+#### Operational improvements
+
+- **Storage engine**: Improvements to the Docker-based license service development environment.
+- **Catalog consistency**: Node management fixes for catalog edge cases.
+- Other enhancements and performance improvements.
+
 ## v3.5.0 {date="2025-09-30"}
 
 ### Core
diff --git a/data/products.yml b/data/products.yml
index 9889169fa..2425d06d5 100644
--- a/data/products.yml
+++ b/data/products.yml
@@ -6,7 +6,7 @@ influxdb3_core:
   versions: [core]
   list_order: 2
   latest: core
-  latest_patch: 3.5.0
+  latest_patch: 3.6.0
   placeholder_host: localhost:8181
   detector_config:
     query_languages:
@@ -35,7 +35,7 @@ influxdb3_enterprise:
   versions: [enterprise]
   list_order: 2
   latest: enterprise
-  latest_patch: 3.5.0
+  latest_patch: 3.6.0
   placeholder_host: localhost:8181
   detector_config:
     query_languages:
@@ -63,7 +63,7 @@ influxdb3_explorer:
   menu_category: tools
   list_order: 1
   latest: explorer
-  latest_patch: 1.3.0
+  latest_patch: 1.4.0
   placeholder_host: localhost:8888
   ai_sample_questions:
     - How do I query data using InfluxDB 3 Explorer?
From 1b57292191f8d0e7173b0738354183310567a304 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Thu, 30 Oct 2025 10:52:25 -0400 Subject: [PATCH 4/6] feat: functional changes (#6500) * feat: functional changes * fix: remove serve inaccuracies * fix: remove opining --------- Co-authored-by: Peter Barnett --- .../core/reference/cli/influxdb3/_index.md | 58 +++- .../core/reference/cli/influxdb3/serve.md | 50 +++- .../reference/cli/influxdb3/show/plugins.md | 16 ++ .../reference/cli/influxdb3/update/trigger.md | 15 + .../reference/cli/influxdb3/_index.md | 66 ++++- .../reference/cli/influxdb3/serve.md | 56 +++- .../reference/cli/influxdb3/show/plugins.md | 16 ++ .../reference/cli/influxdb3/update/trigger.md | 15 + .../query-system-data/_index.md | 56 ++++ .../shared/influxdb3-cli/config-options.md | 11 +- .../shared/influxdb3-cli/create/trigger.md | 65 ++++- content/shared/influxdb3-cli/show/_index.md | 1 + content/shared/influxdb3-cli/show/plugins.md | 88 ++++++ content/shared/influxdb3-cli/update/_index.md | 4 +- .../shared/influxdb3-cli/update/trigger.md | 174 +++++++++++ content/shared/influxdb3-get-started/setup.md | 51 ++++ content/shared/influxdb3-plugins/_index.md | 270 ++++++++++++++++++ 17 files changed, 968 insertions(+), 44 deletions(-) create mode 100644 content/influxdb3/core/reference/cli/influxdb3/show/plugins.md create mode 100644 content/influxdb3/core/reference/cli/influxdb3/update/trigger.md create mode 100644 content/influxdb3/enterprise/reference/cli/influxdb3/show/plugins.md create mode 100644 content/influxdb3/enterprise/reference/cli/influxdb3/update/trigger.md create mode 100644 content/shared/influxdb3-cli/show/plugins.md create mode 100644 content/shared/influxdb3-cli/update/trigger.md diff --git a/content/influxdb3/core/reference/cli/influxdb3/_index.md b/content/influxdb3/core/reference/cli/influxdb3/_index.md index 495a03f3d..478848717 100644 --- a/content/influxdb3/core/reference/cli/influxdb3/_index.md +++ b/content/influxdb3/core/reference/cli/influxdb3/_index.md @@ -22,8 +22,8 @@ influxdb3 [GLOBAL-OPTIONS] [COMMAND] ## Commands -| Command | Description | -| :--------------------------------------------------------------| :---------------------------------- | +| Command | Description | +| :---------------------------------------------------------- | :---------------------------------- | | [create](/influxdb3/core/reference/cli/influxdb3/create/) | Create resources | | [delete](/influxdb3/core/reference/cli/influxdb3/delete/) | Delete resources | | [disable](/influxdb3/core/reference/cli/influxdb3/disable/) | Disable resources | @@ -37,14 +37,39 @@ influxdb3 [GLOBAL-OPTIONS] [COMMAND] ## Global options -| Option | | Description | -| :----- | :---------------- | :-------------------------------------------------------------------- | -| `-h` | `--help` | Print help information | -| | `--help-all` | Print detailed help information including runtime configuration options | -| `-V` | `--version` | Print version | +| Option | | Description | +| :----- | :----------- | :---------------------------------------------------------------------- | +| `-h` | `--help` | Print help information | +| | `--help-all` | Print detailed help information including runtime configuration options | +| `-V` | `--version` | Print version | For advanced global configuration options (including `--num-io-threads` and other runtime settings), see [Configuration options](/influxdb3/core/reference/config-options/#global-configuration-options). 
+## Quick-Start Mode
+
+For development, testing, and home use, you can start {{< product-name >}} by running `influxdb3` without the `serve` subcommand or any configuration parameters. The system automatically generates required values:
+
+- **`node-id`**: `{hostname}-node` (fallback: `primary-node`)
+- **`object-store`**: `file`
+- **`data-dir`**: `~/.influxdb`
+
+The system displays warning messages showing the auto-generated identifiers:
+
+```
+Using auto-generated node id: mylaptop-node. For production deployments, explicitly set --node-id
+```
+
+> [!Important]
+>
+> #### Production deployments
+>
+> Quick-start mode is designed for development and testing environments.
+> For production deployments, use explicit configuration with the `serve` subcommand
+> and specify all required parameters as shown in the [Examples](#examples) below.
+
+**Configuration precedence**: CLI flags > environment variables > auto-generated defaults
+
+For more information about quick-start mode, see [Get started](/influxdb3/core/get-started/setup/#quick-start-mode-development).
 
 ## Examples
 
@@ -54,6 +79,21 @@ with a unique identifier for your {{< product-name >}} server.
 
 {{% code-placeholders "my-host-01" %}}
 
+
+### Quick-start InfluxDB 3 server
+
+```bash
+# Zero-config startup
+influxdb3
+
+# Override specific defaults
+influxdb3 --object-store memory
+
+# Use environment variables to override defaults
+INFLUXDB3_NODE_IDENTIFIER_PREFIX=my-node influxdb3
+```
+
 ### Run the InfluxDB 3 server
 
@@ -104,7 +144,7 @@ influxdb3 serve \
   --verbose
 ```
 
-### Run {{% product-name %}} with debug logging using LOG_FILTER
+### Run {{% product-name %}} with debug logging using LOG\_FILTER
 
@@ -115,4 +155,4 @@ LOG_FILTER=debug influxdb3 serve \
   --node-id my-host-01
 ```
 
-{{% /code-placeholders %}}
\ No newline at end of file
+{{% /code-placeholders %}}
diff --git a/content/influxdb3/core/reference/cli/influxdb3/serve.md b/content/influxdb3/core/reference/cli/influxdb3/serve.md
index ef5b9e019..e3a6403b6 100644
--- a/content/influxdb3/core/reference/cli/influxdb3/serve.md
+++ b/content/influxdb3/core/reference/cli/influxdb3/serve.md
@@ -18,10 +18,10 @@ The `influxdb3 serve` command starts the {{< product-name >}} server.
 
 ```bash
-influxdb3 serve [OPTIONS] --node-id <NODE_ID>
+influxdb3 serve [OPTIONS]
 ```
 
-## Required parameters
+## Required Parameters
 
 - **node-id**: A unique identifier for your server instance. Must be unique for any hosts sharing the same object store.
 - **object-store**: Determines where time series data is stored.
@@ -45,7 +45,7 @@ influxdb3 serve [OPTIONS] --node-id | Option | | Description | | :--------------- | :--------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------ | | {{< req "\*" >}} | `--node-id` | _See [configuration options](/influxdb3/core/reference/config-options/#node-id)_ | -| | `--object-store` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store)_ | +| {{< req "\*" >}} | `--object-store` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store)_ | | | `--admin-token-recovery-http-bind` | _See [configuration options](/influxdb3/core/reference/config-options/#admin-token-recovery-http-bind)_ | | | `--admin-token-recovery-tcp-listener-file-path` | _See [configuration options](/influxdb3/core/reference/config-options/#admin-token-recovery-tcp-listener-file-path)_ | | | `--admin-token-file` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#admin-token-file)_ | @@ -134,16 +134,52 @@ influxdb3 serve [OPTIONS] --node-id | | `--wal-snapshot-size` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-snapshot-size)_ | | | `--without-auth` | _See [configuration options](/influxdb3/core/reference/config-options/#without-auth)_ | -{{< caption >}} -{{< req text="\* Required options" >}} -{{< /caption >}} - ### Option environment variables You can use environment variables to define most `influxdb3 serve` options. For more information, see [Configuration options](/influxdb3/core/reference/config-options/). +## Quick-Start Mode + +For development, testing, and home use, you can start {{< product-name >}} by running `influxdb3` without the `serve` subcommand or any configuration parameters. The system automatically generates required values: + +- **`node-id`**: `{hostname}-node` (fallback: `primary-node`) +- **`object-store`**: `file` +- **`data-dir`**: `~/.influxdb` + +The system displays warning messages showing the auto-generated identifiers: + +``` +Using auto-generated node id: mylaptop-node. For production deployments, explicitly set --node-id +``` + +### Quick-start examples + + + +```bash +# Zero-config startup +influxdb3 + +# Override specific defaults +influxdb3 --object-store memory + +# Use environment variables to override defaults +INFLUXDB3_NODE_IDENTIFIER_PREFIX=my-node influxdb3 +``` + +> [!Important] +> #### Production deployments +> +> Quick-start mode is designed for development and testing environments. +> For production deployments, use explicit configuration with the `serve` subcommand +> and specify all required parameters as shown in the [Examples](#examples) below. + +**Configuration precedence**: CLI flags > environment variables > auto-generated defaults + +For more information about quick-start mode, see [Get started](/influxdb3/core/get-started/setup/#quick-start-mode-development). + ## Examples - [Run the InfluxDB 3 server](#run-the-influxdb-3-server) diff --git a/content/influxdb3/core/reference/cli/influxdb3/show/plugins.md b/content/influxdb3/core/reference/cli/influxdb3/show/plugins.md new file mode 100644 index 000000000..fadd9f729 --- /dev/null +++ b/content/influxdb3/core/reference/cli/influxdb3/show/plugins.md @@ -0,0 +1,16 @@ +--- +title: influxdb3 show plugins +description: > + The `influxdb3 show plugins` command lists loaded Processing Engine plugins in your + InfluxDB 3 Core server. 
+menu:
+  influxdb3_core:
+    parent: influxdb3 show
+    name: influxdb3 show plugins
+weight: 350
+source: /shared/influxdb3-cli/show/plugins.md
+---
+
+
diff --git a/content/influxdb3/core/reference/cli/influxdb3/update/trigger.md b/content/influxdb3/core/reference/cli/influxdb3/update/trigger.md
new file mode 100644
index 000000000..c0b02f7e8
--- /dev/null
+++ b/content/influxdb3/core/reference/cli/influxdb3/update/trigger.md
@@ -0,0 +1,15 @@
+---
+title: influxdb3 update trigger
+description: >
+  The `influxdb3 update trigger` command updates an existing trigger.
+menu:
+  influxdb3_core:
+    parent: influxdb3 update
+    name: influxdb3 update trigger
+weight: 401
+source: /shared/influxdb3-cli/update/trigger.md
+---
+
+
diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/_index.md b/content/influxdb3/enterprise/reference/cli/influxdb3/_index.md
index 1a52c7f40..3a5558a03 100644
--- a/content/influxdb3/enterprise/reference/cli/influxdb3/_index.md
+++ b/content/influxdb3/enterprise/reference/cli/influxdb3/_index.md
@@ -22,8 +22,8 @@ influxdb3 [GLOBAL-OPTIONS] [COMMAND]
 
 ## Commands
 
-| Command | Description |
-| :--------------------------------------------------------------| :---------------------------------- |
+| Command                                                            | Description                          |
+| :----------------------------------------------------------------- | :---------------------------------- |
 | [create](/influxdb3/enterprise/reference/cli/influxdb3/create/) | Create resources |
 | [delete](/influxdb3/enterprise/reference/cli/influxdb3/delete/) | Delete resources |
 | [disable](/influxdb3/enterprise/reference/cli/influxdb3/disable/) | Disable resources |
@@ -37,27 +37,69 @@ influxdb3 [GLOBAL-OPTIONS] [COMMAND]
 
 ## Global options
 
-| Option | | Description |
-| :----- | :---------------- | :-------------------------------------------------------------------- |
-| `-h` | `--help` | Print help information |
-| | `--help-all` | Print detailed help information including runtime configuration options |
-| `-V` | `--version` | Print version |
+| Option |              | Description                                                              |
+| :----- | :----------- | :----------------------------------------------------------------------- |
+| `-h`   | `--help`     | Print help information                                                   |
+|        | `--help-all` | Print detailed help information including runtime configuration options  |
+| `-V`   | `--version`  | Print version                                                            |
 
 For advanced global configuration options (including `--num-io-threads` and other runtime settings), see [Configuration options](/influxdb3/enterprise/reference/config-options/#global-configuration-options).
 
+## Quick-Start Mode
+
+For development, testing, and home use, you can start {{< product-name >}} by running `influxdb3` without the `serve` subcommand or any configuration parameters. The system automatically generates required values:
+
+- **`node-id`**: `{hostname}-node` (fallback: `primary-node`)
+- **`cluster-id`**: `{hostname}-cluster` (fallback: `primary-cluster`)
+- **`object-store`**: `file`
+- **`data-dir`**: `~/.influxdb`
+
+The system displays warning messages showing the auto-generated identifiers:
+
+```
+Using auto-generated node id: mylaptop-node. For production deployments, explicitly set --node-id
+Using auto-generated cluster id: mylaptop-cluster. For production deployments, explicitly set --cluster-id
+```
+
+> [!Important]
+>
+> #### Production deployments
+>
+> Quick-start mode is designed for development and testing environments.
+> For production deployments, use explicit configuration with the `serve` subcommand
+> and specify all required parameters as shown in the [Examples](#examples) below.
+
+**Configuration precedence**: CLI flags > environment variables > auto-generated defaults
+
+For more information about quick-start mode, see [Get started](/influxdb3/enterprise/get-started/setup/#quick-start-mode-development).
 
 ## Examples
 
 In the examples below, replace the following:
 
 - {{% code-placeholder-key %}}`my-host-01`{{% /code-placeholder-key %}}:
-a unique identifier for your {{< product-name >}} server.
+  a unique identifier for your {{< product-name >}} server.
 - {{% code-placeholder-key %}}`my-cluster-01`{{% /code-placeholder-key %}}:
-a unique identifier for your {{< product-name >}} cluster.
-The value you use must be different from `--node-id` values in the cluster.
+  a unique identifier for your {{< product-name >}} cluster.
+  The value you use must be different from `--node-id` values in the cluster.
 
 {{% code-placeholders "my-host-01|my-cluster-01" %}}
 
+### Quick-start InfluxDB 3 server
+
+
+
+```bash
+# Zero-config startup
+influxdb3
+
+# Override specific defaults
+influxdb3 --object-store memory
+
+# Use environment variables to override defaults
+INFLUXDB3_NODE_IDENTIFIER_PREFIX=my-node influxdb3
+```
+
 ### Run the InfluxDB 3 server
 
@@ -111,7 +153,7 @@ influxdb3 serve \
   --verbose
 ```
 
-### Run {{% product-name %}} with debug logging using LOG_FILTER
+### Run {{% product-name %}} with debug logging using LOG\_FILTER
 
@@ -123,4 +165,4 @@ LOG_FILTER=debug influxdb3 serve \
   --cluster-id my-cluster-01
 ```
 
-{{% /code-placeholders %}}
\ No newline at end of file
+{{% /code-placeholders %}}
diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md b/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md
index bded5f63b..f56a52695 100644
--- a/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md
+++ b/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md
@@ -18,12 +18,10 @@ The `influxdb3 serve` command starts the {{< product-name >}} server.
 
 ```bash
-influxdb3 serve [OPTIONS] \
-  --node-id <NODE_ID> \
-  --cluster-id <CLUSTER_ID>
+influxdb3 serve [OPTIONS]
 ```
 
-## Required parameters
+## Required Parameters
 
 - **node-id**: A unique identifier for your server instance. Must be unique for any hosts sharing the same object store.
 - **cluster-id**: A unique identifier for your cluster. Must be different from any node-id in your cluster.
@@ -64,7 +62,7 @@ influxdb3 serve [OPTIONS] \ | | `--azure-storage-account` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-storage-account)_ | | | `--bucket` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#bucket)_ | | | `--catalog-sync-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#catalog-sync-interval)_ | -| {{< req "\*" >}} | `--cluster-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#cluster-id)_ | +| | `--cluster-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#cluster-id)_ | | | `--compaction-check-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-check-interval)_ | | | `--compaction-cleanup-wait` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-cleanup-wait)_ | | | `--compaction-gen2-duration` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-gen2-duration)_ | @@ -107,7 +105,7 @@ influxdb3 serve [OPTIONS] \ | | `--log-format` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#log-format)_ | | | `--max-http-request-size` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#max-http-request-size)_ | | | `--mode` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#mode)_ | -| {{< req "\*" >}} | `--node-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#node-id)_ | +| | `--node-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#node-id)_ | | | `--node-id-from-env` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#node-id-from-env)_ | | | `--num-cores` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-cores)_ | | | `--num-datafusion-threads` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-datafusion-threads)_ | @@ -160,16 +158,54 @@ influxdb3 serve [OPTIONS] \ | | `--wal-snapshot-size` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wal-snapshot-size)_ | | | `--without-auth` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#without-auth)_ | -{{< caption >}} -{{< req text="\* Required options" >}} -{{< /caption >}} - ### Option environment variables You can use environment variables to define most `influxdb3 serve` options. For more information, see [Configuration options](/influxdb3/enterprise/reference/config-options/). +## Quick-Start Mode + +For development, testing, and home use, you can start {{< product-name >}} by running `influxdb3` without the `serve` subcommand or any configuration parameters. The system automatically generates required values: + +- **`node-id`**: `{hostname}-node` (fallback: `primary-node`) +- **`cluster-id`**: `{hostname}-cluster` (fallback: `primary-cluster`) +- **`object-store`**: `file` +- **`data-dir`**: `~/.influxdb` + +The system displays warning messages showing the auto-generated identifiers: + +``` +Using auto-generated node id: mylaptop-node. For production deployments, explicitly set --node-id +Using auto-generated cluster id: mylaptop-cluster. 
For production deployments, explicitly set --cluster-id +``` + +### Quick-start examples + + + +```bash +# Zero-config startup +influxdb3 + +# Override specific defaults +influxdb3 --object-store memory + +# Use environment variables to override defaults +INFLUXDB3_NODE_IDENTIFIER_PREFIX=my-node influxdb3 +``` + +> [!Important] +> #### Production deployments +> +> Quick-start mode is designed for development and testing environments. +> For production deployments, use explicit configuration with the `serve` subcommand +> and specify all required parameters as shown in the [Examples](#examples) below. + +**Configuration precedence**: CLI flags > environment variables > auto-generated defaults + +For more information about quick-start mode, see [Get started](/influxdb3/enterprise/get-started/setup/#quick-start-mode-development). + ## Examples - [Run the InfluxDB 3 server](#run-the-influxdb-3-server) diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/show/plugins.md b/content/influxdb3/enterprise/reference/cli/influxdb3/show/plugins.md new file mode 100644 index 000000000..fce3bc5e5 --- /dev/null +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/show/plugins.md @@ -0,0 +1,16 @@ +--- +title: influxdb3 show plugins +description: > + The `influxdb3 show plugins` command lists loaded Processing Engine plugins in your + InfluxDB 3 Enterprise server. +menu: + influxdb3_enterprise: + parent: influxdb3 show + name: influxdb3 show plugins +weight: 350 +source: /shared/influxdb3-cli/show/plugins.md +--- + + diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/update/trigger.md b/content/influxdb3/enterprise/reference/cli/influxdb3/update/trigger.md new file mode 100644 index 000000000..1c532ce41 --- /dev/null +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/update/trigger.md @@ -0,0 +1,15 @@ +--- +title: influxdb3 update trigger +description: > + The `influxdb3 update trigger` command updates an existing trigger. 
+menu: + influxdb3_enterprise: + parent: influxdb3 update + name: influxdb3 update trigger +weight: 401 +source: /shared/influxdb3-cli/update/trigger.md +--- + + diff --git a/content/shared/influxdb3-admin/query-system-data/_index.md b/content/shared/influxdb3-admin/query-system-data/_index.md index c3ef83994..9ea808446 100644 --- a/content/shared/influxdb3-admin/query-system-data/_index.md +++ b/content/shared/influxdb3-admin/query-system-data/_index.md @@ -8,6 +8,8 @@ You can query the system tables for information about your running server, datab - [Examples](#examples) - [Show tables](#show-tables) - [View column information for a table](#view-column-information-for-a-table) + - [Recently executed queries](#recently-executed-queries) + - [Query plugin files](#query-plugin-files) ### Use the HTTP query API @@ -134,3 +136,57 @@ The output is similar to the following: {"id":"cdd63409-1822-4e65-8e3a-d274d553dbb3","phase":"success","issue_time":"2025-01-20T17:01:40.690067","query_type":"sql","query_text":"show tables","partitions":0,"parquet_files":0,"plan_duration":"PT0.032689S","permit_duration":"PT0.000202S","execute_duration":"PT0.000223S","end2end_duration":"PT0.033115S","compute_duration":"P0D","max_memory":0,"success":true,"running":false,"cancelled":false} {"id":"47f8d312-5e75-4db2-837a-6fcf94c09927","phase":"success","issue_time":"2025-01-20T17:02:32.627782","query_type":"sql","query_text":"show tables","partitions":0,"parquet_files":0,"plan_duration":"PT0.000583S","permit_duration":"PT0.000015S","execute_duration":"PT0.000063S","end2end_duration":"PT0.000662S","compute_duration":"P0D","max_memory":0,"success":true,"running":false,"cancelled":false} ``` + +#### Query plugin files + +To view loaded Processing Engine plugins, query the `plugin_files` system table in the `_internal` database. 
+ +The `system.plugin_files` table provides information about plugin files loaded by the Processing Engine: + +**Columns:** +- `plugin_name` (String): Name of a trigger using this plugin +- `file_name` (String): Plugin filename +- `file_path` (String): Full server path to the plugin file +- `size_bytes` (Int64): File size in bytes +- `last_modified` (Int64): Last modification timestamp (milliseconds since epoch) + +```bash +curl "http://localhost:8181/api/v3/query_sql" \ + --header "Authorization: Bearer AUTH_TOKEN" \ + --json '{ + "db": "_internal", + "q": "SELECT * FROM system.plugin_files", + "format": "jsonl" + }' +``` + +The output is similar to the following: + +```jsonl +{"plugin_name":"my_trigger","file_name":"my_plugin.py","file_path":"/path/to/plugins/my_plugin.py","size_bytes":2048,"last_modified":1704067200000} +{"plugin_name":"scheduled_trigger","file_name":"scheduler.py","file_path":"/path/to/plugins/scheduler.py","size_bytes":4096,"last_modified":1704153600000} +``` + +**Filter plugins by trigger name:** + +```bash +curl "http://localhost:8181/api/v3/query_sql" \ + --header "Authorization: Bearer AUTH_TOKEN" \ + --json '{ + "db": "_internal", + "q": "SELECT * FROM system.plugin_files WHERE plugin_name = '"'my_trigger'"'", + "format": "jsonl" + }' +``` + +**Find plugins by file pattern:** + +```bash +curl "http://localhost:8181/api/v3/query_sql" \ + --header "Authorization: Bearer AUTH_TOKEN" \ + --json '{ + "db": "_internal", + "q": "SELECT * FROM system.plugin_files WHERE file_name LIKE '"'%scheduler%'"'", + "format": "jsonl" + }' +``` diff --git a/content/shared/influxdb3-cli/config-options.md b/content/shared/influxdb3-cli/config-options.md index 47e75c8f4..ff3d2d06d 100644 --- a/content/shared/influxdb3-cli/config-options.md +++ b/content/shared/influxdb3-cli/config-options.md @@ -1587,16 +1587,19 @@ engine uses. #### package-manager -Specifies the Python package manager that the processing engine uses. +Specifies the Python package manager that the Processing Engine uses to install plugin dependencies. This option supports the following values: -- `discover` _(default)_: Automatically discover available package manager -- `pip`: Use pip package manager -- `uv`: Use uv package manager +- `discover` _(default)_: Automatically detect and use available package manager (`uv` or `pip`) +- `pip`: Use pip package manager exclusively +- `uv`: Use uv package manager exclusively +- `disabled`: Disable automatic package installation (all dependencies must be pre-installed) **Default:** `discover` +For more information about plugins and package management, see [Processing Engine plugins](/influxdb3/version/plugins/). 
+
 | influxdb3 serve option | Environment variable |
 | :--------------------- | :------------------- |
 | `--package-manager` | `INFLUXDB3_PACKAGE_MANAGER` |
diff --git a/content/shared/influxdb3-cli/create/trigger.md b/content/shared/influxdb3-cli/create/trigger.md
index 13b5aaa55..1f1f8980e 100644
--- a/content/shared/influxdb3-cli/create/trigger.md
+++ b/content/shared/influxdb3-cli/create/trigger.md
@@ -26,7 +26,9 @@ influxdb3 create trigger [OPTIONS] \
 | `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
 | `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
 | | `--token` | _({{< req >}})_ Authentication token |
-| | `--plugin-filename` | _({{< req >}})_ Name of the file, stored in the server's `plugin-dir`, that contains the Python plugin code to run |
+| `-p` | `--path` | Path to plugin file or directory (single `.py` file or directory containing `__init__.py` for multifile plugins). Can be a local path (with `--upload`) or a server path. Replaces `--plugin-filename`. |
+| | `--upload` | Upload local plugin files to the server. Requires admin token. Use with `--path` to specify local files. |
+| | `--plugin-filename` | _(Deprecated: use `--path` instead)_ Name of the file, stored in the server's `plugin-dir`, that contains the Python plugin code to run |
 | | `--trigger-spec` | Trigger specification: `table:<TABLE_NAME>`, `all_tables`, `every:<DURATION>`, `cron:<CRON_EXPRESSION>`, or `request:<REQUEST_PATH>` |
 | | `--trigger-arguments` | Additional arguments for the trigger, in the format `key=value`, separated by commas (for example, `arg1=val1,arg2=val2`) |
 | | `--disabled` | Create the trigger in disabled state |
@@ -59,6 +61,8 @@ The following examples show how to use the `influxdb3 create trigger` command to
 - [Create a trigger for all tables](#create-a-trigger-for-all-tables)
 - [Create a trigger with a schedule](#create-a-trigger-with-a-schedule)
 - [Create a trigger for HTTP requests](#create-a-trigger-for-http-requests)
+- [Create a trigger with a multifile plugin](#create-a-trigger-with-a-multifile-plugin)
+- [Upload and create a trigger with a local plugin](#upload-and-create-a-trigger-with-a-local-plugin)
 - [Create a trigger with additional arguments](#create-a-trigger-with-additional-arguments)
 - [Create a disabled trigger](#create-a-disabled-trigger)
 - [Create a trigger with error handling](#create-a-trigger-with-error-handling)
@@ -168,6 +172,65 @@ influxdb3 create trigger \
 
 `PLUGIN_FILENAME` must implement the [HTTP request plugin](/influxdb3/version/plugins/#create-an-http-request-plugin) interface.
 
+### Create a trigger with a multifile plugin
+
+Create a trigger using a plugin organized in multiple files. The plugin directory must contain an `__init__.py` file.
+
+
+
+```bash
+influxdb3 create trigger \
+  --database DATABASE_NAME \
+  --token AUTH_TOKEN \
+  --path "my_complex_plugin" \
+  --trigger-spec "every:5m" \
+  TRIGGER_NAME
+```
+
+The `--path` value points to a directory in the server's `plugin-dir` with the following structure:
+
+```
+my_complex_plugin/
+├── __init__.py      # Required entry point
+├── processors.py    # Supporting modules
+└── utils.py
+```
+
+For more information about multifile plugins, see [Create your plugin file](/influxdb3/version/plugins/#create-your-plugin-file).
+
+### Upload and create a trigger with a local plugin
+
+Upload plugin files from your local machine and create a trigger in a single command. Requires admin token.
+ + + +```bash +# Upload single-file plugin +influxdb3 create trigger \ + --database DATABASE_NAME \ + --token AUTH_TOKEN \ + --path "/local/path/to/plugin.py" \ + --upload \ + --trigger-spec "every:1m" \ + TRIGGER_NAME + +# Upload multifile plugin directory +influxdb3 create trigger \ + --database DATABASE_NAME \ + --token AUTH_TOKEN \ + --path "/local/path/to/plugin-dir" \ + --upload \ + --trigger-spec "table:TABLE_NAME" \ + TRIGGER_NAME +``` + +The `--upload` flag transfers local files to the server's plugin directory. This is useful for: +- Local plugin development and testing +- Deploying plugins without SSH access +- Automating plugin deployment + +For more information, see [Upload plugins from local machine](/influxdb3/version/plugins/#upload-plugins-from-local-machine). + ### Create a trigger with additional arguments ```bash diff --git a/content/shared/influxdb3-cli/show/_index.md b/content/shared/influxdb3-cli/show/_index.md index eea02dcf0..b8c93fd7e 100644 --- a/content/shared/influxdb3-cli/show/_index.md +++ b/content/shared/influxdb3-cli/show/_index.md @@ -15,6 +15,7 @@ influxdb3 show | :---------------------------------------------------------------------- | :--------------------------------------------- | | [databases](/influxdb3/version/reference/cli/influxdb3/show/databases/) | List database | {{% show-in "enterprise" %}}| [license](/influxdb3/version/reference/cli/influxdb3/show/license/) | Display license information |{{% /show-in %}} +| [plugins](/influxdb3/version/reference/cli/influxdb3/show/plugins/) | List loaded plugins | | [system](/influxdb3/version/reference/cli/influxdb3/show/system/) | Display system table data | | [tokens](/influxdb3/version/reference/cli/influxdb3/show/tokens/) | List authentication tokens | | help | Print command help or the help of a subcommand | diff --git a/content/shared/influxdb3-cli/show/plugins.md b/content/shared/influxdb3-cli/show/plugins.md new file mode 100644 index 000000000..557f14abf --- /dev/null +++ b/content/shared/influxdb3-cli/show/plugins.md @@ -0,0 +1,88 @@ +The `influxdb3 show plugins` command lists loaded Processing Engine plugins in your +{{< product-name >}} server. 
+ +## Usage + + + +```bash +influxdb3 show plugins [OPTIONS] +``` + +## Options + +| Option | | Description | +| :----- | :--------------- | :--------------------------------------------------------------------------------------- | +| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) | +| | `--token` | _({{< req >}})_ Authentication token | +| | `--format` | Output format (`pretty` _(default)_, `json`, `jsonl`, `csv`, or `parquet`) | +| | `--output` | Path where to save output when using the `parquet` format | +| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) | +| `-h` | `--help` | Print help information | +| | `--help-all` | Print detailed help information | + +### Option environment variables + +You can use the following environment variables to set command options: + +| Environment Variable | Option | +| :-------------------- | :-------- | +| `INFLUXDB3_HOST_URL` | `--host` | +| `INFLUXDB3_AUTH_TOKEN`| `--token` | + +## Output + +The command returns information about loaded plugin files: + +- **plugin_name**: Name of a trigger using this plugin +- **file_name**: Plugin filename +- **file_path**: Full server path to the plugin file +- **size_bytes**: File size in bytes +- **last_modified**: Last modification timestamp (milliseconds since epoch) + +> [!Note] +> This command queries the `system.plugin_files` table in the `_internal` database. +> For more advanced queries and filtering, see [Query system data](/influxdb3/version/admin/query-system-data/). + +## Examples + +- [List all plugins](#list-all-plugins) +- [List plugins in different output formats](#list-plugins-in-different-output-formats) +- [Output plugins to a Parquet file](#output-plugins-to-a-parquet-file) + +### List all plugins + + + +```bash +influxdb3 show plugins +``` + +### List plugins in different output formats + +You can specify the output format using the `--format` option: + + + +```bash +# JSON format +influxdb3 show plugins --format json + +# JSON Lines format +influxdb3 show plugins --format jsonl + +# CSV format +influxdb3 show plugins --format csv +``` + +### Output plugins to a Parquet file + +[Parquet](https://parquet.apache.org/) is a binary format. +Use the `--output` option to specify the file where you want to save the Parquet data. + + +```bash +influxdb3 show plugins \ + --format parquet \ + --output /Users/me/plugins.parquet +``` diff --git a/content/shared/influxdb3-cli/update/_index.md b/content/shared/influxdb3-cli/update/_index.md index afe2a22db..a5b61c1c6 100644 --- a/content/shared/influxdb3-cli/update/_index.md +++ b/content/shared/influxdb3-cli/update/_index.md @@ -1,4 +1,4 @@ -The `influxdb3 update` command updates resources such as databases and tables. +The `influxdb3 update` command updates resources in your {{< product-name >}} instance. 
## Usage

@@ -15,6 +15,7 @@ influxdb3 update
| :----------------------------------------------------------------- | :--------------------- |
| [database](/influxdb3/version/reference/cli/influxdb3/update/database/) | Update a database |
| [table](/influxdb3/version/reference/cli/influxdb3/update/table/) | Update a table |
+| [trigger](/influxdb3/version/reference/cli/influxdb3/update/trigger/) | Update a trigger |
| help | Print command help or the help of a subcommand |

{{% /show-in %}}

@@ -22,6 +23,7 @@ influxdb3 update
| Subcommand | Description |
| :----------------------------------------------------------------- | :--------------------- |
| [database](/influxdb3/version/reference/cli/influxdb3/update/database/) | Update a database |
+| [trigger](/influxdb3/version/reference/cli/influxdb3/update/trigger/) | Update a trigger |
| help | Print command help or the help of a subcommand |

{{% /show-in %}}

diff --git a/content/shared/influxdb3-cli/update/trigger.md b/content/shared/influxdb3-cli/update/trigger.md
new file mode 100644
index 000000000..3420d2127
--- /dev/null
+++ b/content/shared/influxdb3-cli/update/trigger.md
@@ -0,0 +1,174 @@
+The `influxdb3 update trigger` command updates an existing trigger in your {{< product-name >}} instance.
+
+Use this command to update trigger plugin code, configuration, or behavior without recreating the trigger. This preserves trigger history and configuration while allowing you to iterate on plugin development.
+
+## Usage
+
+
+```bash
+influxdb3 update trigger [OPTIONS] \
+  --database <DATABASE_NAME> \
+  --trigger-name <TRIGGER_NAME>
+```
+
+## Arguments
+
+- **`DATABASE_NAME`**: (Required) The name of the database containing the trigger.
+- **`TRIGGER_NAME`**: (Required) The name of the trigger to update.
+
+## Options
+
+| Option | | Description |
| :----- | :------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------- |
+| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
+| `-d` | `--database` | _({{< req >}})_ Name of the database containing the trigger |
+| | `--trigger-name` | _({{< req >}})_ Name of the trigger to update |
+| `-p` | `--path` | Path to plugin file or directory (single `.py` file or directory containing `__init__.py` for multifile plugins). Can be a local path (with `--upload`) or a server path. |
+| | `--upload` | Upload local plugin files to the server. Requires an admin token. Use with `--path` to specify local files. |
| +| | `--trigger-arguments`| Additional arguments for the trigger, in the format `key=value`, separated by commas (for example, `arg1=val1,arg2=val2`) | +| | `--disabled` | Set the trigger state to disabled | +| | `--enabled` | Set the trigger state to enabled | +| | `--error-behavior` | Error handling behavior: `log`, `retry`, or `disable` | +| | `--token` | Authentication token | +| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) | +| `-h` | `--help` | Print help information | +| | `--help-all` | Print detailed help information | + +### Option environment variables + +You can use the following environment variables instead of providing CLI options directly: + +| Environment Variable | Option | +| :------------------------ | :----------- | +| `INFLUXDB3_HOST_URL` | `--host` | +| `INFLUXDB3_DATABASE_NAME` | `--database` | +| `INFLUXDB3_AUTH_TOKEN` | `--token` | +| `INFLUXDB3_TLS_CA` | `--tls-ca` | + +## Examples + +The following examples show how to update triggers in different scenarios. + +- [Update trigger plugin code](#update-trigger-plugin-code) +- [Upload and update with a local plugin](#upload-and-update-with-a-local-plugin) +- [Update trigger arguments](#update-trigger-arguments) +- [Enable or disable a trigger](#enable-or-disable-a-trigger) +- [Update error handling behavior](#update-error-handling-behavior) + +--- + +Replace the following placeholders with your values: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Database name +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: Authentication token +- {{% code-placeholder-key %}}`TRIGGER_NAME`{{% /code-placeholder-key %}}: Name of the trigger to update + +{{% code-placeholders "(DATABASE|TRIGGER)_NAME|AUTH_TOKEN" %}} + +### Update trigger plugin code + +Update a trigger to use modified plugin code from the server's plugin directory. + + + +```bash +influxdb3 update trigger \ + --database DATABASE_NAME \ + --trigger-name TRIGGER_NAME \ + --path "my_plugin.py" \ + --token AUTH_TOKEN +``` + +### Upload and update with a local plugin + +Upload new plugin code from your local machine and update the trigger in a single operation. Requires admin token. + + + +```bash +# Upload single-file plugin +influxdb3 update trigger \ + --database DATABASE_NAME \ + --trigger-name TRIGGER_NAME \ + --path "/local/path/to/updated_plugin.py" \ + --upload \ + --token AUTH_TOKEN + +# Upload multifile plugin directory +influxdb3 update trigger \ + --database DATABASE_NAME \ + --trigger-name TRIGGER_NAME \ + --path "/local/path/to/plugin_directory" \ + --upload \ + --token AUTH_TOKEN +``` + +The `--upload` flag transfers local files to the server's plugin directory, making it easy to iterate on plugin development without manual file copying. + +### Update trigger arguments + +Modify the arguments passed to a trigger's plugin code. + + + +```bash +influxdb3 update trigger \ + --database DATABASE_NAME \ + --trigger-name TRIGGER_NAME \ + --trigger-arguments threshold=100,window=5m \ + --token AUTH_TOKEN +``` + +### Enable or disable a trigger + +Change the trigger's enabled state without modifying other configuration. 
+ + + +```bash +# Disable a trigger +influxdb3 update trigger \ + --database DATABASE_NAME \ + --trigger-name TRIGGER_NAME \ + --disabled \ + --token AUTH_TOKEN + +# Enable a trigger +influxdb3 update trigger \ + --database DATABASE_NAME \ + --trigger-name TRIGGER_NAME \ + --enabled \ + --token AUTH_TOKEN +``` + +### Update error handling behavior + +Change how the trigger responds to errors. + + + +```bash +# Log errors without retrying +influxdb3 update trigger \ + --database DATABASE_NAME \ + --trigger-name TRIGGER_NAME \ + --error-behavior log \ + --token AUTH_TOKEN + +# Retry on errors +influxdb3 update trigger \ + --database DATABASE_NAME \ + --trigger-name TRIGGER_NAME \ + --error-behavior retry \ + --token AUTH_TOKEN + +# Disable trigger on error +influxdb3 update trigger \ + --database DATABASE_NAME \ + --trigger-name TRIGGER_NAME \ + --error-behavior disable \ + --token AUTH_TOKEN +``` + +{{% /code-placeholders %}} diff --git a/content/shared/influxdb3-get-started/setup.md b/content/shared/influxdb3-get-started/setup.md index 3024c6f74..f3fb52fb4 100644 --- a/content/shared/influxdb3-get-started/setup.md +++ b/content/shared/influxdb3-get-started/setup.md @@ -1,5 +1,6 @@ - [Prerequisites](#prerequisites) +- [Quick-Start Mode (Development)](#quick-start-mode-development) - [Start InfluxDB](#start-influxdb) - [Object store examples](#object-store-examples) {{% show-in "enterprise" %}} @@ -21,6 +22,56 @@ To get started, you'll need: - A directory on your local disk where you can persist data (used by examples in this guide) - S3-compatible object store and credentials +## Quick-Start Mode (Development) + +For development, testing, and home use, you can start {{% product-name %}} without +any arguments. The system automatically generates required configuration values +based on your system's hostname: + +```bash +influxdb3 +``` + +When you run `influxdb3` without arguments, the following values are auto-generated: + +{{% show-in "enterprise" %}} +- **`node-id`**: `{hostname}-node` (or `primary-node` if hostname is unavailable) +- **`cluster-id`**: `{hostname}-cluster` (or `primary-cluster` if hostname is unavailable) +{{% /show-in %}} +{{% show-in "core" %}} +- **`node-id`**: `{hostname}-node` (or `primary-node` if hostname is unavailable) +{{% /show-in %}} +- **`object-store`**: `file` +- **`data-dir`**: `~/.influxdb` + +The system displays warning messages showing the auto-generated identifiers: + +{{% show-in "enterprise" %}} +``` +Using auto-generated node id: mylaptop-node. For production deployments, explicitly set --node-id +Using auto-generated cluster id: mylaptop-cluster. For production deployments, explicitly set --cluster-id +``` +{{% /show-in %}} +{{% show-in "core" %}} +``` +Using auto-generated node id: mylaptop-node. For production deployments, explicitly set --node-id +``` +{{% /show-in %}} + +> [!Important] +> #### When to use quick-start mode +> +> Quick-start mode is designed for development, testing, and home lab environments +> where simplicity is prioritized over explicit configuration. +> +> **For production deployments**, use explicit configuration values with the +> [`influxdb3 serve` command](/influxdb3/version/reference/cli/influxdb3/serve/) +> as shown in the [Start InfluxDB](#start-influxdb) section below. + +**Configuration precedence**: Environment variables override auto-generated defaults. +For example, if you set `INFLUXDB3_NODE_IDENTIFIER_PREFIX=my-node`, the system +uses `my-node` instead of generating `{hostname}-node`. 
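+
+For example (a minimal sketch, assuming a POSIX shell), set the variable
+inline to override the auto-generated node identifier for a single run:
+
+```bash
+# Environment variables take precedence over auto-generated defaults
+INFLUXDB3_NODE_IDENTIFIER_PREFIX=my-node influxdb3
+```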
+ ## Start InfluxDB Use the [`influxdb3 serve` command](/influxdb3/version/reference/cli/influxdb3/serve/) diff --git a/content/shared/influxdb3-plugins/_index.md b/content/shared/influxdb3-plugins/_index.md index 754115f31..3d48fc5c1 100644 --- a/content/shared/influxdb3-plugins/_index.md +++ b/content/shared/influxdb3-plugins/_index.md @@ -24,8 +24,12 @@ Once you have all the prerequisites in place, follow these steps to implement th - [Set up the Processing Engine](#set-up-the-processing-engine) - [Add a Processing Engine plugin](#add-a-processing-engine-plugin) + - [Upload plugins from local machine](#upload-plugins-from-local-machine) + - [Update existing plugins](#update-existing-plugins) + - [View loaded plugins](#view-loaded-plugins) - [Set up a trigger](#set-up-a-trigger) - [Manage plugin dependencies](#manage-plugin-dependencies) +- [Plugin security](#plugin-security) {{% show-in "enterprise" %}} - [Distributed cluster considerations](#distributed-cluster-considerations) {{% /show-in %}} @@ -227,10 +231,51 @@ Choose a plugin type based on your automation goals: #### Create your plugin file +Plugins now support both single-file and multifile architectures: + +**Single-file plugins:** - Create a `.py` file in your plugins directory - Add the appropriate function signature based on your chosen plugin type - Write your processing logic inside the function +**Multifile plugins:** +- Create a directory in your plugins directory +- Add an `__init__.py` file as the entry point (required) +- Organize supporting modules in additional `.py` files +- Import and use modules within your plugin code + +##### Example multifile plugin structure + +``` +my_plugin/ +β”œβ”€β”€ __init__.py # Required - entry point with trigger function +β”œβ”€β”€ utils.py # Supporting module +β”œβ”€β”€ processors.py # Data processing functions +└── config.py # Configuration helpers +``` + +The `__init__.py` file must contain your trigger function: + +```python +# my_plugin/__init__.py +from .processors import process_data +from .config import get_settings + +def process_writes(influxdb3_local, table_batches, args=None): + settings = get_settings() + for table_batch in table_batches: + process_data(influxdb3_local, table_batch, settings) +``` + +Supporting modules can contain helper functions: + +```python +# my_plugin/processors.py +def process_data(influxdb3_local, table_batch, settings): + # Processing logic here + pass +``` + After writing your plugin, [create a trigger](#use-the-create-trigger-command) to connect it to a database event and define when it runs. #### Create a data write plugin @@ -313,6 +358,122 @@ After writing your plugin: - [Install any Python dependencies](#manage-plugin-dependencies) your plugin requires - Learn how to [extend plugins with the API](/influxdb3/version/extend-plugin/) +### Upload plugins from local machine + +For local development and testing, you can upload plugin files directly from your machine when creating triggers. This eliminates the need to manually copy files to the server's plugin directory. 
+ +Use the `--upload` flag with `--path` to transfer local files or directories: + +```bash +# Upload single-file plugin +influxdb3 create trigger \ + --trigger-spec "every:10s" \ + --path "/local/path/to/plugin.py" \ + --upload \ + --database metrics \ + my_trigger + +# Upload multifile plugin directory +influxdb3 create trigger \ + --trigger-spec "every:30s" \ + --path "/local/path/to/plugin-dir" \ + --upload \ + --database metrics \ + complex_trigger +``` + +> [!Important] +> #### Admin privileges required +> +> Plugin uploads require an admin token. This security measure prevents unauthorized code execution on the server. + +**When to use plugin upload:** +- Local plugin development and testing +- Deploying plugins without SSH access to the server +- Rapid iteration on plugin code +- Automating plugin deployment in CI/CD pipelines + +For more information, see the [`influxdb3 create trigger` CLI reference](/influxdb3/version/reference/cli/influxdb3/create/trigger/). + +### Update existing plugins + +Modify plugin code for running triggers without recreating them. This allows you to iterate on plugin development while preserving trigger configuration and history. + +Use the `influxdb3 update trigger` command: + +```bash +# Update single-file plugin +influxdb3 update trigger \ + --database metrics \ + --trigger-name my_trigger \ + --path "/path/to/updated/plugin.py" + +# Update multifile plugin +influxdb3 update trigger \ + --database metrics \ + --trigger-name complex_trigger \ + --path "/path/to/updated/plugin-dir" +``` + +The update operation: +- Replaces plugin files immediately +- Preserves trigger configuration (spec, schedule, arguments) +- Requires admin token for security +- Works with both local paths and uploaded files + +For complete reference, see [`influxdb3 update trigger`](/influxdb3/version/reference/cli/influxdb3/update/trigger/). + +### View loaded plugins + +Monitor which plugins are loaded in your system for operational visibility and troubleshooting. + +**Option 1: Use the CLI command** + +```bash +# List all plugins +influxdb3 show plugins --token $ADMIN_TOKEN + +# JSON format for programmatic access +influxdb3 show plugins --format json --token $ADMIN_TOKEN +``` + +**Option 2: Query the system table** + +The `system.plugin_files` table in the `_internal` database provides detailed plugin file information: + +```bash +influxdb3 query \ + -d _internal \ + "SELECT * FROM system.plugin_files ORDER BY plugin_name" \ + --token $ADMIN_TOKEN +``` + +**Available columns:** +- `plugin_name` (String): Trigger name +- `file_name` (String): Plugin file name +- `file_path` (String): Full server path +- `size_bytes` (Int64): File size +- `last_modified` (Int64): Modification timestamp (milliseconds) + +**Example queries:** + +```sql +-- Find plugins by name +SELECT * FROM system.plugin_files WHERE plugin_name = 'my_trigger'; + +-- Find large plugins +SELECT plugin_name, size_bytes +FROM system.plugin_files +WHERE size_bytes > 10000; + +-- Check modification times +SELECT plugin_name, file_name, last_modified +FROM system.plugin_files +ORDER BY last_modified DESC; +``` + +For more information, see the [`influxdb3 show plugins` reference](/influxdb3/version/reference/cli/influxdb3/show/plugins/) and [Query system data](/influxdb3/version/admin/query-system-data/#query-plugin-files). 
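+
+For scripted checks, you can pipe the JSON output to a tool such as `jq`.
+The following is a sketch, not part of the CLI itself: it assumes `jq` is
+installed and that `--format json` returns an array of rows with the
+documented column names.
+
+```bash
+# Print each plugin's trigger name and file size, one per line
+influxdb3 show plugins --format json --token $ADMIN_TOKEN \
+  | jq -r '.[] | "\(.plugin_name)\t\(.size_bytes)"'
+```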
+ ## Set up a trigger ### Understand trigger types @@ -597,6 +758,115 @@ These examples install the specified Python package (for example, pandas) into t InfluxDB creates a Python virtual environment in your plugins directory with the specified packages installed. +### Disable package installation for secure environments + +For air-gapped deployments or environments with strict security requirements, you can disable Python package installation while maintaining Processing Engine functionality. + +Start the server with `--package-manager disabled`: + +```bash +influxdb3 serve \ + --node-id node0 \ + --object-store file \ + --data-dir ~/.influxdb3 \ + --plugin-dir ~/.plugins \ + --package-manager disabled +``` + +When package installation is disabled: +- The Processing Engine continues to function normally for triggers +- Plugin code executes without restrictions +- Package installation commands are blocked +- Pre-installed dependencies in the virtual environment remain available + +**Pre-install required dependencies:** + +Before disabling the package manager, install all required Python packages: + +```bash +# Install packages first +influxdb3 install package pandas requests numpy + +# Then start with disabled package manager +influxdb3 serve \ + --plugin-dir ~/.plugins \ + --package-manager disabled +``` + +**Use cases for disabled package management:** +- Air-gapped environments without internet access +- Compliance requirements prohibiting runtime package installation +- Centrally managed dependency environments +- Security policies requiring pre-approved packages only + +For more configuration options, see [--package-manager](/influxdb3/version/reference/config-options/#package-manager). + +## Plugin security + +The Processing Engine includes security features to protect your {{% product-name %}} instance from unauthorized code execution and file system attacks. + +### Plugin path validation + +All plugin file paths are validated to prevent directory traversal attacks. The system blocks: + +- **Relative paths with parent directory references** (`../`, `../../`) +- **Absolute paths** (`/etc/passwd`, `/usr/bin/script.py`) +- **Symlinks that escape the plugin directory** + +When creating or updating triggers, plugin paths must resolve within the configured `--plugin-dir`. + +**Example of blocked paths:** + +```bash +# These will be rejected +influxdb3 create trigger \ + --path "../../../etc/passwd" \ # Blocked: parent directory traversal + ... + +influxdb3 create trigger \ + --path "/tmp/malicious.py" \ # Blocked: absolute path + ... +``` + +**Valid plugin paths:** + +```bash +# These are allowed +influxdb3 create trigger \ + --path "myapp/plugin.py" \ # Relative to plugin-dir + ... + +influxdb3 create trigger \ + --path "transforms/data.py" \ # Subdirectory in plugin-dir + ... +``` + +### Upload and update permissions + +Plugin upload and update operations require admin tokens to prevent unauthorized code deployment: + +- `--upload` flag requires admin privileges +- `update trigger` command requires admin token +- Standard resource tokens cannot upload or modify plugin code + +This security model ensures only administrators can introduce or modify executable code in your database. 
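+
+For example, a deployment script might supply the admin token through the
+documented `INFLUXDB3_AUTH_TOKEN` environment variable instead of repeating
+`--token` on each call (the token value below is hypothetical):
+
+```bash
+# Hypothetical admin token; --upload is rejected for non-admin tokens
+export INFLUXDB3_AUTH_TOKEN="apiv3_example_admin_token"
+
+influxdb3 update trigger \
+  --database metrics \
+  --trigger-name my_trigger \
+  --path "/local/path/to/plugin.py" \
+  --upload
+```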
+ +### Best practices + +**For development:** +- Use the `--upload` flag to deploy plugins during development +- Test plugins in non-production environments first +- Review plugin code before deployment + +**For production:** +- Pre-deploy plugins to the server's plugin directory via secure file transfer +- Use custom plugin repositories for vetted, approved plugins +- Disable package installation (`--package-manager disabled`) in locked-down environments +- Audit plugin files using the [`system.plugin_files` table](#view-loaded-plugins) +- Implement change control processes for plugin updates + +For more security configuration options, see [Configuration options](/influxdb3/version/reference/config-options/). + {{% show-in "enterprise" %}} ## Distributed cluster considerations From ffbe4e5e188af63cbfa9c341e6699157e3ef8ce0 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Thu, 30 Oct 2025 11:04:04 -0400 Subject: [PATCH 5/6] chore: update notifications for InfluxDB 3.6 release (#6498) * chore: update notifications for InfluxDB 3.6 release Updated the notifications for InfluxDB version 3.6, including changes to the title, slug, and message content. * Update data/notifications.yaml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- data/notifications.yaml | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/data/notifications.yaml b/data/notifications.yaml index da1eb80ec..4701acddc 100644 --- a/data/notifications.yaml +++ b/data/notifications.yaml @@ -40,29 +40,23 @@ # - [The plan for InfluxDB 3.0 Open Source](https://influxdata.com/blog/the-plan-for-influxdb-3-0-open-source) # - [InfluxDB 3.0 benchmarks](https://influxdata.com/blog/influxdb-3-0-is-2.5x-45x-faster-compared-to-influxdb-open-source/) -- id: influxdb3.5-explorer-1.3 +- id: influxdb3.6-explorer-1.4 level: note scope: - / - title: New in InfluxDB 3.5 + title: New in InfluxDB 3.6 slug: | - Key enhancements in InfluxDB 3.5 and the InfluxDB 3 Explorer 1.3. + Key enhancements in InfluxDB 3.6 and the InfluxDB 3 Explorer 1.4. - See the Blog Post + See the Blog Post message: | - InfluxDB 3.5 is now available for both Core and Enterprise, introducing - custom plugin repository support, - enhanced operational visibility with queryable CLI parameters and manual node - management, stronger security controls, and general performance improvements. - - InfluxDB 3 Explorer 1.3 brings powerful new capabilities including Dashboards - (beta) for saving and organizing your favorite queries, and cache querying for - instant access to Last Value and Distinct Value cachesβ€”making Explorer a more - comprehensive workspace for time series monitoring and analysis. + InfluxDB 3.6 is now available for both Core and Enterprise. This release introduces + the 1.4 update to InfluxDB 3 Explorer, featuring the beta launch of Ask AI, along + with new capabilities for simple startup and expanded functionality in the Processing Engine. 
For more information, check out: - - [See the announcement blog post](https://www.influxdata.com/blog/influxdb-3-5/) + - [See the announcement blog post](https://www.influxdata.com/blog/influxdb-3-6/) - [InfluxDB 3 Core release notes](/influxdb3/core/release-notes/) - [InfluxDB 3 Enterprise release notes](/influxdb3/enterprise/release-notes/) - [Get Started with InfluxDB 3 Explorer](/influxdb3/explorer/get-started/) From efd288fdb8d5cf71d261dd5519a88eb7400cad88 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Fri, 31 Oct 2025 15:07:01 -0400 Subject: [PATCH 6/6] fix(lint): disable remark formatting for content files to preserve GitHub Alerts (#6502) Remark-stringify escapes square brackets in GitHub Alerts syntax (> [!Note] becomes > \[!Note]), breaking alert rendering. Changes: - Remove lint-markdown-content pre-commit hook from lefthook.yml - Configure remark-lint-no-undefined-references to allow GitHub Alerts - Add remark-lint-no-undefined-references dependency - Unescaped GitHub Alerts in content/create.md Content files now preserve GitHub Alerts syntax while instruction files continue to use remark auto-formatting (they don't contain alerts). Vale provides adequate style linting for content files. Closes #6501 See: https://github.com/remarkjs/remark-gfm/issues/53 --- .ci/remark-lint/.remark-lint.js | 29 +++++++++++++++++++---------- .ci/remark-lint/package.json | 3 ++- lefthook.yml | 20 -------------------- 3 files changed, 21 insertions(+), 31 deletions(-) diff --git a/.ci/remark-lint/.remark-lint.js b/.ci/remark-lint/.remark-lint.js index a53718b14..596e84a9b 100644 --- a/.ci/remark-lint/.remark-lint.js +++ b/.ci/remark-lint/.remark-lint.js @@ -4,22 +4,31 @@ import remarkPresetLintMarkdownStyleGuide from 'remark-preset-lint-markdown-styl import remarkFrontmatter from 'remark-frontmatter'; import remarkFrontmatterSchema from 'remark-lint-frontmatter-schema'; import remarkNoShellDollars from 'remark-lint-no-shell-dollars'; +import remarkLintNoUndefinedReferences from 'remark-lint-no-undefined-references'; import remarkToc from 'remark-toc'; const remarkConfig = { settings: { bullet: '-', - plugins: [ - remarkPresetLintConsistent, - remarkPresetLintRecommended, - remarkPresetLintMarkdownStyleGuide, - remarkFrontmatter, - remarkFrontmatterSchema, - remarkNoShellDollars, - // Generate a table of contents in `## Contents` - [remarkToc, { heading: '' }], - ], }, + plugins: [ + remarkPresetLintConsistent, + remarkPresetLintRecommended, + remarkPresetLintMarkdownStyleGuide, + remarkFrontmatter, + remarkFrontmatterSchema, + remarkNoShellDollars, + // Override no-undefined-references to allow GitHub Alerts syntax + // This prevents lint warnings for [!Note], [!Tip], etc. 
in blockquotes + [ + remarkLintNoUndefinedReferences, + { + allow: ['!Note', '!Tip', '!Important', '!Warning', '!Caution'], + }, + ], + // Generate a table of contents in `## Contents` + [remarkToc, { heading: '' }], + ], }; export default remarkConfig; diff --git a/.ci/remark-lint/package.json b/.ci/remark-lint/package.json index 3ba9a210a..d62218a23 100644 --- a/.ci/remark-lint/package.json +++ b/.ci/remark-lint/package.json @@ -9,6 +9,7 @@ "remark-preset-lint-recommended": "7.0.0", "remark-frontmatter": "5.0.0", "remark-lint-frontmatter-schema": "3.15.4", - "remark-lint-no-shell-dollars": "4.0.0" + "remark-lint-no-shell-dollars": "4.0.0", + "remark-lint-no-undefined-references": "5.0.2" } } diff --git a/lefthook.yml b/lefthook.yml index 175fbfd48..e910ae114 100644 --- a/lefthook.yml +++ b/lefthook.yml @@ -22,26 +22,6 @@ pre-commit: docker compose run --rm --name remark-lint remark-lint $files --output --quiet || \ { echo "⚠️ Remark found formatting issues in instruction files. Automatic formatting applied."; } stage_fixed: true - # Report markdown formatting issues in content/api-docs without auto-fixing - lint-markdown-content: - tags: lint - glob: "{api-docs/**/*.md,content/**/*.md}" - run: | - # Prepend /workdir/ to staged files since repository is mounted at /workdir in container - files=$(echo '{staged_files}' | sed 's|^|/workdir/|g; s| | /workdir/|g') - # Run remark to check for formatting differences (without --output, shows diff in stdout) - # If output differs from input, fail the commit - for file in $files; do - original=$(cat "${file#/workdir/}") - formatted=$(docker compose run --rm --name remark-lint-content remark-lint "$file" 2>/dev/null | tail -n +2) - if [ "$original" != "$formatted" ]; then - echo "❌ Markdown formatting issues in ${file#/workdir/}" - echo " Run: docker compose run --rm remark-lint $file --output" - echo " Or manually fix the formatting to match remark style" - exit 1 - fi - done - echo "βœ… All content files are properly formatted" # Lint instruction and repository documentation files with generic Vale config lint-instructions: tags: lint