From 52e676b092ed86a34ab455ddb73a582a1f42ae80 Mon Sep 17 00:00:00 2001
From: Jason Stirnaman
Date: Mon, 20 Oct 2025 14:17:45 -0500
Subject: [PATCH 01/15] feat(docs): add content scaffolding system with
 AI-powered analysis

Add yarn docs:create command for intelligent content scaffolding:

- Phase 1: Script analyzes draft and repository structure
- Phase 2: Claude command generates file structure and frontmatter
- Phase 3: Script creates files from proposal

New files:
- scripts/docs-create.js: Main orchestration script
- scripts/lib/content-scaffolding.js: Core scaffolding logic
- scripts/lib/file-operations.js: File I/O utilities
- .claude/commands/scaffold-content.md: Claude analysis command

Features:
- Intelligent product detection (Core, Enterprise, Cloud, etc.)
- Generates complete frontmatter
- Dry-run and interactive confirmation modes

Usage:
  yarn docs:create --from path/to/draft.md
  /scaffold-content
  yarn docs:create --execute
---
 .claude/commands/scaffold-content.md | 173 ++++++++++++
 .gitignore                           |   1 +
 package.json                         |   1 +
 scripts/docs-create.js               | 318 ++++++++++++++++++++++
 scripts/lib/content-scaffolding.js   | 377 +++++++++++++++++++++++++++
 scripts/lib/file-operations.js       | 156 +++++++++++
 6 files changed, 1026 insertions(+)
 create mode 100644 .claude/commands/scaffold-content.md
 create mode 100644 scripts/docs-create.js
 create mode 100644 scripts/lib/content-scaffolding.js
 create mode 100644 scripts/lib/file-operations.js

diff --git a/.claude/commands/scaffold-content.md b/.claude/commands/scaffold-content.md
new file mode 100644
index 000000000..c47d0be42
--- /dev/null
+++ b/.claude/commands/scaffold-content.md
@@ -0,0 +1,173 @@
+---
+description: Analyze draft content and generate intelligent file structure with frontmatter
+---
+
+You are helping scaffold new documentation content for the InfluxData documentation repository.
+
+## Task
+
+Read the context from `.tmp/scaffold-context.json` and analyze the draft content to generate an intelligent file structure proposal with appropriate frontmatter.
+
+## Analysis Steps
+
+### 1. Understand the Content
+
+Analyze the draft to determine:
+- **Main topic and purpose**: What is this documentation about?
+- **Target audience**: Developers, administrators, beginners, or advanced users?
+- **Technical level**: Conceptual overview, how-to guide, reference, or tutorial?
+- **Target products**: Which InfluxDB products does this apply to?
+  - Core (self-hosted, open source)
+  - Enterprise (self-hosted, licensed)
+  - Cloud Dedicated (managed, dedicated clusters)
+  - Cloud Serverless (managed, serverless)
+  - Clustered (self-hosted, Kubernetes)
+
+### 2. Determine Structure
+
+Decide on the optimal structure:
+- **Shared vs. Product-Specific**: Should this be shared content or product-specific?
+  - Use shared content when content applies broadly with minor variations
+  - Use product-specific when content differs significantly
+- **Section**: Which section does this belong in?
+  - `admin/` - Administration tasks (databases, tokens, configuration)
+  - `write-data/` - Writing data to InfluxDB
+  - `query-data/` - Querying and reading data
+  - `reference/` - Reference documentation (API, CLI, config)
+  - `get-started/` - Getting started tutorials
+  - `plugins/` - Plugin documentation (Core/Enterprise only)
+- **Parent menu item**: What should be the parent in the navigation?
+- **Weight**: What weight based on sibling pages?
+  - Use the `siblingWeights` data from context
+  - Weights are in ranges: 1-99 (top level), 101-199 (level 2), 201-299 (level 3)
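+  - Example: if existing level-3 siblings in a section use weights 201-204, the new page gets 205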
+
+### 3. Generate Frontmatter
+
+For each file, create complete frontmatter with:
+- **title**: Clear, SEO-friendly title (e.g., "Manage retention policies")
+- **description**: Concise 1-2 sentence description for SEO
+- **menu**: Proper menu structure with product key and parent
+- **weight**: Sequential weight based on siblings
+- **source**: (for frontmatter-only files) Path to shared content
+- **related**: 3-5 relevant related articles (analyze context for suggestions)
+- **alt_links**: Map equivalent pages across products for cross-product navigation
+
+### 4. Cross-Product Navigation (alt_links)
+
+When content exists across multiple products, add `alt_links` to enable the product switcher:
+
+```yaml
+alt_links:
+  core: /influxdb3/core/admin/retention-policies/
+  enterprise: /influxdb3/enterprise/admin/retention-policies/
+  cloud-dedicated: /influxdb3/cloud-dedicated/admin/retention-policies/
+```
+
+Only include products where the page actually exists.
+
+## Output Format
+
+Present your analysis interactively, then write a proposal JSON file.
+
+### Interactive Presentation
+
+```
+I've analyzed your draft about "[TOPIC]".
+
+šŸ“Š Analysis:
+• Topic: [topic description]
+• Products: [list of target products]
+• Section: [section] ([reasoning])
+• Shared: [Yes/No] ([reasoning])
+
+šŸ“ Proposed structure:
+
+[Show file structure tree]
+
+Each frontmatter file includes:
+• title: "[title]"
+• menu parent: "[parent]"
+• weight: [weight] ([reasoning about placement])
+• alt_links: [Cross-product navigation]
+• related: [Links to related pages]
+
+Adjustments needed? (or say "looks good")
+```
+
+### Proposal JSON Format
+
+After confirmation, write to `.tmp/scaffold-proposal.json`:
+
+```json
+{
+  "analysis": {
+    "topic": "Brief topic description",
+    "targetProducts": ["core", "enterprise", "cloud-dedicated"],
+    "section": "admin",
+    "isShared": true,
+    "reasoning": "Why this structure makes sense"
+  },
+  "files": [
+    {
+      "path": "content/shared/influxdb3-admin/topic-name.md",
+      "type": "shared-content",
+      "content": "{{ACTUAL_DRAFT_CONTENT}}"
+    },
+    {
+      "path": "content/influxdb3/core/admin/topic-name.md",
+      "type": "frontmatter-only",
+      "frontmatter": {
+        "title": "Page Title",
+        "description": "Page description",
+        "menu": {
+          "influxdb3_core": {
+            "name": "Nav Label",
+            "parent": "Parent Item"
+          }
+        },
+        "weight": 205,
+        "source": "/shared/influxdb3-admin/topic-name.md",
+        "related": [
+          "/influxdb3/core/path/to/related/",
+          "/influxdb3/core/path/to/another/"
+        ],
+        "alt_links": {
+          "enterprise": "/influxdb3/enterprise/admin/topic-name/",
+          "cloud-dedicated": "/influxdb3/cloud-dedicated/admin/topic-name/"
+        }
+      }
+    }
+  ],
+  "nextSteps": [
+    "Review generated frontmatter",
+    "Test with: npx hugo server",
+    "Add product-specific variations if needed"
+  ]
+}
+```
+
+## Important Guidelines
+
+1. **Use actual draft content**: Copy the `draft.content` from context into shared content files
+2. **Analyze existing structure**: Use `structure.existingPaths` and `structure.siblingWeights` from context
+3. **Follow conventions**: Reference `conventions` from context for naming and weight levels
+4. **Be specific**: Provide concrete reasoning for all decisions
+5. **Product menu keys**: Use the pattern `influxdb3_{product}` (e.g., `influxdb3_core`, `influxdb3_enterprise`)
+6. **File naming**: Use lowercase with hyphens (e.g., `manage-databases.md`)
+7. **Related articles**: Suggest contextually relevant related articles from existing structure
+8. **Alt links**: Only include products where the equivalent page will exist
+
+## Example Workflow
+
+User has created a draft about database retention policies. You should:
+
+1. Read `.tmp/scaffold-context.json`
+2. Analyze the draft content about retention policies
+3. Determine it applies to Core, Enterprise, and Cloud Dedicated
+4. Decide it should be shared content in the `admin` section
+5. Suggest weight 205 (after database deletion at 204)
+6. Generate appropriate frontmatter for each product
+7. Present the proposal interactively
+8. After user confirms, write `.tmp/scaffold-proposal.json`
+
+Now, read the context and begin your analysis.

diff --git a/.gitignore b/.gitignore
index 5209786a1..c3d4170f2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,6 +28,7 @@ package-lock.json
 test-results.xml
 /influxdb3cli-build-scripts/content
 tmp
+.tmp
 
 # IDE files
 .vscode/*

diff --git a/package.json b/package.json
index 1f4d274cb..deff61b8f 100644
--- a/package.json
+++ b/package.json
@@ -40,6 +40,7 @@
     "vanillajs-datepicker": "^1.3.4"
   },
   "scripts": {
+    "docs:create": "node scripts/docs-create.js",
     "build:pytest:image": "docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest .",
     "build:agent:instructions": "node ./helper-scripts/build-agent-instructions.js",
     "build:ts": "tsc --project tsconfig.json --outDir dist",

diff --git a/scripts/docs-create.js b/scripts/docs-create.js
new file mode 100644
index 000000000..a91ab87a0
--- /dev/null
+++ b/scripts/docs-create.js
@@ -0,0 +1,318 @@
+#!/usr/bin/env node
+
+/**
+ * Documentation scaffolding tool
+ * Prepares context for Claude to analyze and generates file structure
+ */
+
+import { parseArgs } from 'node:util';
+import { join, dirname } from 'path';
+import { fileURLToPath } from 'url';
+import { existsSync } from 'fs';
+import {
+  prepareContext,
+  executeProposal,
+  validateProposal,
+} from './lib/content-scaffolding.js';
+import { writeJson, readJson, fileExists } from './lib/file-operations.js';
+
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = dirname(__filename);
+
+// Repository root
+const REPO_ROOT = join(__dirname, '..');
+
+// Temp directory for context and proposal
+const TMP_DIR = join(REPO_ROOT, '.tmp');
+const CONTEXT_FILE = join(TMP_DIR, 'scaffold-context.json');
+const PROPOSAL_FILE = join(TMP_DIR, 'scaffold-proposal.json');
+
+// Colors for console output
+const colors = {
+  reset: '\x1b[0m',
+  bright: '\x1b[1m',
+  green: '\x1b[32m',
+  yellow: '\x1b[33m',
+  blue: '\x1b[34m',
+  red: '\x1b[31m',
+  cyan: '\x1b[36m',
+};
+
+/**
+ * Print colored output
+ */
+function log(message, color = 'reset') {
+  console.log(`${colors[color]}${message}${colors.reset}`);
+}
+
+/**
+ * Print section divider
+ */
+function divider() {
+  log('━'.repeat(70), 'cyan');
+}
+
+/**
+ * Parse command line arguments
+ */
+function parseArguments() {
+  const { values } = parseArgs({
+    options: {
+      draft: { type: 'string' },
+      from: { type: 'string' },
+      execute: { type: 'boolean', default: false },
+      'dry-run': { type: 'boolean', default: false },
+      yes: { type: 'boolean', default: false },
+      help: { type: 'boolean', default: false },
+    },
+  });
+
+  // --from is an alias for --draft
+  if (values.from && !values.draft) {
+    values.draft = values.from;
+  }
+
+  return values;
+}
+
+/**
+ * Print usage information
+ */
+function printUsage() {
+  console.log(`
+${colors.bright}Documentation Content Scaffolding${colors.reset}
+
+${colors.bright}Usage:${colors.reset}
+  yarn docs:create --draft <path>    Prepare context from draft file
+  yarn docs:create --from <path>     Alias for --draft
+  yarn docs:create --execute         Execute proposal and create files
+
+${colors.bright}Options:${colors.reset}
+  --draft <path>   Path to draft markdown file
+  --from <path>    Alias for --draft
+  --execute        Execute the proposal (create files)
+  --dry-run        Show what would be created without creating
+  --yes            Skip confirmation prompt
+  --help           Show this help message
+
+${colors.bright}Workflow:${colors.reset}
+  1. Create a draft markdown file with your content
+  2. Run: yarn docs:create --draft path/to/draft.md
+  3. Run: /scaffold-content (Claude command)
+  4. Run: yarn docs:create --execute
+
+${colors.bright}Examples:${colors.reset}
+  # Prepare context for Claude
+  yarn docs:create --draft drafts/new-feature.md
+  yarn docs:create --from drafts/new-feature.md
+
+  # Execute proposal after Claude analysis
+  yarn docs:create --execute
+
+  # Preview what would be created
+  yarn docs:create --execute --dry-run
+`);
+}
+
+/**
+ * Phase 1: Prepare context from draft
+ */
+async function preparePhase(draftPath) {
+  log('\nšŸ” Analyzing draft and repository structure...', 'bright');
+
+  // Validate draft exists
+  if (!fileExists(draftPath)) {
+    log(`āœ— Draft file not found: ${draftPath}`, 'red');
+    process.exit(1);
+  }
+
+  try {
+    // Prepare context
+    const context = prepareContext(draftPath);
+
+    // Write context to temp file
+    writeJson(CONTEXT_FILE, context);
+
+    // Print summary
+    log(
+      `\nāœ“ Loaded draft content (${context.draft.content.split('\n').length} lines)`,
+      'green'
+    );
+    log(
+      `āœ“ Analyzed ${Object.keys(context.products).length} products from data/products.yml`,
+      'green'
+    );
+    log(
+      `āœ“ Found ${context.structure.existingPaths.length} pages in content/influxdb3/`,
+      'green'
+    );
+    log(
+      `āœ“ Prepared context → ${CONTEXT_FILE.replace(REPO_ROOT, '.')}`,
+      'green'
+    );
+
+    // Print next steps
+    log('');
+    divider();
+    log(
+      'Next: Run /scaffold-content to analyze and propose structure',
+      'bright'
+    );
+    divider();
+    log('');
+  } catch (error) {
+    log(`\nāœ— Error preparing context: ${error.message}`, 'red');
+    if (error.stack) {
+      console.error(error.stack);
+    }
+    process.exit(1);
+  }
+}
+
+/**
+ * Phase 2: Execute proposal
+ */
+async function executePhase(options) {
+  log('\nšŸ“ Reading proposal...', 'bright');
+
+  // Check if proposal exists
+  if (!fileExists(PROPOSAL_FILE)) {
+    log(`\nāœ— Proposal file not found: ${PROPOSAL_FILE}`, 'red');
+    log('Did you run /scaffold-content yet?', 'yellow');
+    process.exit(1);
+  }
+
+  try {
+    // Read proposal
+    const proposal = readJson(PROPOSAL_FILE);
+
+    // Validate proposal
+    const validation = validateProposal(proposal);
+
+    if (!validation.valid) {
+      log('\nāœ— Invalid proposal:', 'red');
+      validation.errors.forEach((err) => log(`  • ${err}`, 'red'));
+      process.exit(1);
+    }
+
+    if (validation.warnings.length > 0) {
+      log('\n⚠ Warnings:', 'yellow');
+      validation.warnings.forEach((warn) => log(`  • ${warn}`, 'yellow'));
+    }
+
+    // Show preview
+    log('\nPreview:', 'bright');
+    divider();
+    log(
+      `Will create ${proposal.files.length} file${proposal.files.length !== 1 ? 's' : ''}:`
+    );
+    proposal.files.forEach((file) => {
+      const icon = file.type === 'shared-content' ? 'šŸ“„' : 'šŸ“‹';
+      log(`  ${icon} ${file.path}`, 'cyan');
+    });
+    divider();
+
+    // Dry run mode
+    if (options['dry-run']) {
+      log('\nāœ“ Dry run complete (no files created)', 'green');
+      return;
+    }
+
+    // Confirm unless --yes flag
+    if (!options.yes) {
+      log('\nProceed with creating files? (y/n): ', 'yellow');
+
+      // Read user input
+      const stdin = process.stdin;
+      stdin.setRawMode(true);
+      stdin.setEncoding('utf8');
+
+      const response = await new Promise((resolve) => {
+        stdin.once('data', (key) => {
+          stdin.setRawMode(false);
+          stdin.pause(); // Release stdin so the event loop can exit after the prompt
+          resolve(key.toLowerCase());
+        });
+      });
+
+      console.log(''); // New line after input
+
+      if (response !== 'y') {
+        log('āœ— Cancelled by user', 'yellow');
+        process.exit(0);
+      }
+    }
+
+    // Execute proposal
+    log('\nšŸ“ Creating files...', 'bright');
+    const result = executeProposal(proposal);
+
+    // Report results
+    if (result.created.length > 0) {
+      log('');
+      result.created.forEach((file) => {
+        log(`āœ“ Created ${file}`, 'green');
+      });
+    }
+
+    if (result.errors.length > 0) {
+      log('\nāœ— Errors:', 'red');
+      result.errors.forEach((err) => log(`  • ${err}`, 'red'));
+    }
+
+    // Print next steps
+    if (result.created.length > 0) {
+      log('\nšŸŽ‰ Done! Next steps:', 'bright');
+      log('1. Review generated frontmatter');
+      log('2. Test: npx hugo server');
+      log('3. Commit: git add content/');
+    }
+
+    if (result.errors.length > 0) {
+      process.exit(1);
+    }
+  } catch (error) {
+    log(`\nāœ— Error executing proposal: ${error.message}`, 'red');
+    if (error.stack) {
+      console.error(error.stack);
+    }
+    process.exit(1);
+  }
+}
+
+/**
+ * Main entry point
+ */
+async function main() {
+  const options = parseArguments();
+
+  // Show help
+  if (options.help) {
+    printUsage();
+    process.exit(0);
+  }
+
+  // Determine phase
+  if (options.draft) {
+    // Phase 1: Prepare context
+    await preparePhase(options.draft);
+  } else if (options.execute || options['dry-run']) {
+    // Phase 2: Execute proposal
+    await executePhase(options);
+  } else {
+    // No valid options provided
+    log('Error: Must specify --draft or --execute', 'red');
+    log('Run with --help for usage information\n');
+    process.exit(1);
+  }
+}
+
+// Run if called directly
+if (import.meta.url === `file://${process.argv[1]}`) {
+  main().catch((error) => {
+    log(`\nFatal error: ${error.message}`, 'red');
+    console.error(error.stack);
+    process.exit(1);
+  });
+}
+
+export { preparePhase, executePhase };

diff --git a/scripts/lib/content-scaffolding.js b/scripts/lib/content-scaffolding.js
new file mode 100644
index 000000000..5cba9c814
--- /dev/null
+++ b/scripts/lib/content-scaffolding.js
@@ -0,0 +1,377 @@
+/**
+ * Content scaffolding utilities for InfluxData documentation
+ * Analyzes repository structure and prepares context for Claude
+ */
+
+import { readdirSync, readFileSync, existsSync, statSync } from 'fs';
+import { join, dirname } from 'path';
+import { fileURLToPath } from 'url';
+import yaml from 'js-yaml';
+import matter from 'gray-matter';
+import {
+  readDraft,
+  writeJson,
+  writeMarkdownFile,
+  writeFrontmatterFile,
+  validatePath,
+  ensureDirectory,
+} from './file-operations.js';
+
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = dirname(__filename);
+
+// Repository root is two levels up from scripts/lib/
+const REPO_ROOT = join(__dirname, '../..');
+
+/**
+ * Load products configuration from data/products.yml
+ * @returns {object} Products configuration
+ */
+export function loadProducts() {
+  const productsPath = join(REPO_ROOT, 'data/products.yml');
+
+  if (!existsSync(productsPath)) {
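+    // Fail fast: every later phase depends on product metadata from data/products.yml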
+    throw new Error('products.yml not found at ' + productsPath);
+  }
+
+  const productsYaml = readFileSync(productsPath, 'utf8');
+  const products = yaml.load(productsYaml);
+
+  // Transform into more useful structure
+  const productMap = {};
+  for (const [key, value] of Object.entries(products)) {
+    productMap[key] = {
+      key,
+      name: value.name,
+      namespace: value.namespace,
+      menu_category: value.menu_category,
+      versions: value.versions || [],
+      latest: value.latest,
+    };
+  }
+
+  return productMap;
+}
+
+/**
+ * Analyze content directory structure
+ * @param {string} basePath - Base path to analyze (e.g., 'content/influxdb3')
+ * @returns {object} Structure analysis
+ */
+export function analyzeStructure(basePath = 'content/influxdb3') {
+  const fullPath = join(REPO_ROOT, basePath);
+
+  if (!existsSync(fullPath)) {
+    return { sections: [], existingPaths: [], siblingWeights: {} };
+  }
+
+  const sections = [];
+  const existingPaths = [];
+  const siblingWeights = {};
+
+  // Recursively walk directory
+  function walk(dir, relativePath = '') {
+    const entries = readdirSync(dir);
+
+    for (const entry of entries) {
+      const fullEntryPath = join(dir, entry);
+      const relativeEntryPath = join(relativePath, entry);
+      const stat = statSync(fullEntryPath);
+
+      if (stat.isDirectory()) {
+        // Track sections (top-level directories)
+        if (relativePath === '') {
+          sections.push(entry);
+        }
+
+        // Track all directory paths
+        existingPaths.push(join(basePath, relativeEntryPath));
+
+        // Recurse
+        walk(fullEntryPath, relativeEntryPath);
+      }
+    }
+  }
+
+  walk(fullPath);
+
+  // Analyze weights in common sections
+  const commonSections = [
+    'admin',
+    'write-data',
+    'query-data',
+    'reference',
+    'get-started',
+  ];
+  for (const section of commonSections) {
+    const sectionPath = join(fullPath, 'core', section);
+    if (existsSync(sectionPath)) {
+      const weights = findSiblingWeights(sectionPath);
+      if (weights.length > 0) {
+        siblingWeights[`${basePath}/core/${section}/`] = weights;
+      }
+    }
+  }
+
+  return {
+    sections: [...new Set(sections)].sort(),
+    existingPaths: existingPaths.sort(),
+    siblingWeights,
+  };
+}
+
+/**
+ * Find weight values from sibling pages in a directory
+ * @param {string} dirPath - Directory to analyze
+ * @returns {number[]} Array of weight values
+ */
+export function findSiblingWeights(dirPath) {
+  if (!existsSync(dirPath)) {
+    return [];
+  }
+
+  const weights = [];
+  const entries = readdirSync(dirPath);
+
+  for (const entry of entries) {
+    if (entry.endsWith('.md')) {
+      const filePath = join(dirPath, entry);
+      try {
+        const content = readFileSync(filePath, 'utf8');
+        const parsed = matter(content);
+
+        if (parsed.data && typeof parsed.data.weight === 'number') {
+          weights.push(parsed.data.weight);
+        }
+      } catch (error) {
+        // Skip files that can't be parsed
+        continue;
+      }
+    }
+  }
+
+  return weights.sort((a, b) => a - b);
+}
+
+/**
+ * Prepare complete context for Claude analysis
+ * @param {string} draftPath - Path to draft file
+ * @returns {object} Context object
+ */
+export function prepareContext(draftPath) {
+  // Read draft
+  const draft = readDraft(draftPath);
+
+  // Load products
+  const products = loadProducts();
+
+  // Analyze structure
+  const structure = analyzeStructure();
+
+  // Build context
+  const context = {
+    draft: {
+      path: draftPath,
+      content: draft.content,
+      existingFrontmatter: draft.frontmatter,
+    },
+    products,
+    structure,
+    conventions: {
+      sharedContentDir: 'content/shared/',
+      menuKeyPattern: 'influxdb3_{product}',
+      weightLevels: {
+        description: 'Weight ranges by level',
+        level1: '1-99',
+        level2: '101-199',
+        level3: '201-299',
+        level4: '301-399',
+      },
+      namingRules: {
+        files: 'Use lowercase with hyphens (e.g., manage-databases.md)',
+        directories: 'Use lowercase with hyphens',
+        shared: 'Shared content in /content/shared/',
+      },
+    },
+  };
+
+  return context;
+}
+
+/**
+ * Execute a proposal and create files
+ * @param {object} proposal - Proposal from Claude
+ * @returns {{created: string[], errors: string[]}}
+ */
+export function executeProposal(proposal) {
+  const created = [];
+  const errors = [];
+
+  if (!proposal || !proposal.files) {
+    throw new Error('Invalid proposal: missing files array');
+  }
+
+  for (const file of proposal.files) {
+    try {
+      // Validate path
+      const validation = validatePath(file.path);
+      if (!validation.valid) {
+        errors.push(
+          `Invalid path ${file.path}: ${validation.errors.join(', ')}`
+        );
+        continue;
+      }
+
+      const fullPath = join(REPO_ROOT, file.path);
+
+      // Check if file already exists
+      if (existsSync(fullPath)) {
+        errors.push(`File already exists: ${file.path}`);
+        continue;
+      }
+
+      // Create file based on type
+      if (file.type === 'shared-content') {
+        // Shared content file with actual content
+        writeMarkdownFile(fullPath, {}, file.content || '');
+        created.push(file.path);
+      } else if (file.type === 'frontmatter-only') {
+        // Frontmatter-only file with source reference
+        if (!file.frontmatter) {
+          errors.push(`Missing frontmatter for ${file.path}`);
+          continue;
+        }
+
+        const sourcePath = file.frontmatter.source || '';
+        writeFrontmatterFile(fullPath, file.frontmatter, sourcePath);
+        created.push(file.path);
+      } else {
+        errors.push(`Unknown file type: ${file.type} for ${file.path}`);
+      }
+    } catch (error) {
+      errors.push(`Error creating ${file.path}: ${error.message}`);
+    }
+  }
+
+  return { created, errors };
+}
+
+/**
+ * Validate a proposal before execution
+ * @param {object} proposal - Proposal to validate
+ * @returns {{valid: boolean, errors: string[], warnings: string[]}}
+ */
+export function validateProposal(proposal) {
+  const errors = [];
+  const warnings = [];
+
+  if (!proposal) {
+    return {
+      valid: false,
+      errors: ['Proposal is null or undefined'],
+      warnings,
+    };
+  }
+
+  if (!proposal.files || !Array.isArray(proposal.files)) {
+    errors.push('Proposal must have a files array');
+    return { valid: false, errors, warnings };
+  }
+
+  if (proposal.files.length === 0) {
+    warnings.push('Proposal has no files to create');
+  }
+
+  // Validate each file
+  for (const file of proposal.files) {
+    if (!file.path) {
+      errors.push('File missing path property');
+      continue;
+    }
+
+    if (!file.type) {
+      errors.push(`File ${file.path} missing type property`);
+    }
+
+    // Path validation
+    const pathValidation = validatePath(file.path);
+    if (!pathValidation.valid) {
+      errors.push(
+        `Invalid path ${file.path}: ${pathValidation.errors.join(', ')}`
+      );
+    }
+
+    // Check for conflicts
+    const fullPath = join(REPO_ROOT, file.path);
+    if (existsSync(fullPath)) {
+      warnings.push(`File already exists: ${file.path}`);
+    }
+
+    // Type-specific validation
+    if (file.type === 'frontmatter-only') {
+      if (!file.frontmatter) {
+        errors.push(`Frontmatter-only file ${file.path} missing frontmatter`);
+      } else {
+        if (!file.frontmatter.title) {
+          errors.push(`File ${file.path} missing title in frontmatter`);
+        }
+        if (!file.frontmatter.description) {
+          warnings.push(`File ${file.path} missing description in frontmatter`);
+        }
+        if (!file.frontmatter.menu) {
+          errors.push(`File ${file.path} missing menu in frontmatter`);
+        }
+        if (!file.frontmatter.weight) {
+          errors.push(`File ${file.path} missing weight in frontmatter`);
+        }
+        if (!file.frontmatter.source) {
+          warnings.push(`File ${file.path} missing source reference`);
+        }
+      }
+    } else if (file.type === 'shared-content') {
+      if (!file.content) {
+        warnings.push(`Shared content file ${file.path} has no content`);
+      }
+    }
+  }
+
+  return {
+    valid: errors.length === 0,
+    errors,
+    warnings,
+  };
+}
+
+/**
+ * Suggest next weight value for a section
+ * @param {number[]} existingWeights - Existing weights in section
+ * @param {number} level - Weight level (1-4)
+ * @returns {number} Suggested next weight
+ */
+export function suggestNextWeight(existingWeights, level = 3) {
+  const baseLevels = {
+    1: 1,
+    2: 101,
+    3: 201,
+    4: 301,
+  };
+
+  const base = baseLevels[level] || 201;
+  const maxWeight = base + 98; // Each level has 99 slots
+
+  if (existingWeights.length === 0) {
+    return base;
+  }
+
+  // Find weights in this level
+  const levelWeights = existingWeights.filter(
+    (w) => w >= base && w <= maxWeight
+  );
+
+  if (levelWeights.length === 0) {
+    return base;
+  }
+
+  // Return max + 1
+  return Math.max(...levelWeights) + 1;
+}

diff --git a/scripts/lib/file-operations.js b/scripts/lib/file-operations.js
new file mode 100644
index 000000000..6bbb57830
--- /dev/null
+++ b/scripts/lib/file-operations.js
@@ -0,0 +1,156 @@
+/**
+ * File operations utilities for documentation scaffolding
+ * Handles reading, writing, and validating documentation files
+ */
+
+import { readFileSync, writeFileSync, existsSync, mkdirSync } from 'fs';
+import { dirname, join, basename } from 'path';
+import matter from 'gray-matter';
+import yaml from 'js-yaml';
+
+/**
+ * Read a markdown file and parse frontmatter
+ * @param {string} filePath - Path to the markdown file
+ * @returns {{content: string, frontmatter: object, raw: string}}
+ */
+export function readDraft(filePath) {
+  if (!existsSync(filePath)) {
+    throw new Error(`File not found: ${filePath}`);
+  }
+
+  const raw = readFileSync(filePath, 'utf8');
+  const parsed = matter(raw);
+
+  return {
+    content: parsed.content,
+    frontmatter: parsed.data || {},
+    raw,
+  };
+}
+
+/**
+ * Write a markdown file with frontmatter
+ * @param {string} filePath - Path to write to
+ * @param {object} frontmatter - Frontmatter object
+ * @param {string} content - Markdown content
+ */
+export function writeMarkdownFile(filePath, frontmatter, content) {
+  ensureDirectory(dirname(filePath));
+
+  const frontmatterYaml = yaml.dump(frontmatter, {
+    lineWidth: -1, // Don't wrap lines
+    noRefs: true,
+  });
+
+  const fileContent = `---\n${frontmatterYaml}---\n\n${content}`;
+  writeFileSync(filePath, fileContent, 'utf8');
+}
+
+/**
+ * Write a frontmatter-only file with source reference
+ * @param {string} filePath - Path to write to
+ * @param {object} frontmatter - Frontmatter object
+ * @param {string} sourcePath - Path to shared content file
+ */
+export function writeFrontmatterFile(filePath, frontmatter, sourcePath) {
+  ensureDirectory(dirname(filePath));
+
+  const frontmatterYaml = yaml.dump(frontmatter, {
+    lineWidth: -1,
+    noRefs: true,
+  });
+
+  // Body is an HTML comment pointing maintainers to the shared source file
+  const comment = `<!-- The content of this page is at ${sourcePath} -->`;
+  const fileContent = `---\n${frontmatterYaml}---\n\n${comment}\n`;
+
+  writeFileSync(filePath, fileContent, 'utf8');
+}
+
+/**
+ * Ensure a directory exists, creating it recursively if needed
+ * @param {string} dirPath - Directory path to ensure
+ */
+export function ensureDirectory(dirPath) {
+  if (!existsSync(dirPath)) {
+    mkdirSync(dirPath, { recursive: true });
+  }
+}
+
+/**
+ * Validate a file path follows conventions
+ * @param {string} filePath - Path to validate
+ * @returns {{valid: boolean, errors: string[]}}
+ */
+export function validatePath(filePath) {
+  const errors = [];
+
+  // Check for invalid characters
+  if (filePath.includes(' ')) {
+    errors.push('Path contains spaces (use hyphens instead)');
+  }
+
+  if (filePath.match(/[A-Z]/)) {
+    errors.push('Path contains uppercase letters (use lowercase)');
+  }
+
+  // Check naming conventions
+  const fileName = basename(filePath, '.md');
+  if (fileName.includes('_') && !filePath.includes('/shared/')) {
+    errors.push('Use hyphens instead of underscores in file names');
+  }
+
+  // Check structure
+  if (!filePath.startsWith('content/')) {
+    errors.push('Path should start with content/');
+  }
+
+  return {
+    valid: errors.length === 0,
+    errors,
+  };
+}
+
+/**
+ * Format frontmatter object to YAML string
+ * @param {object} frontmatter - Frontmatter object
+ * @returns {string} YAML string
+ */
+export function formatFrontmatter(frontmatter) {
+  return yaml.dump(frontmatter, {
+    lineWidth: -1,
+    noRefs: true,
+  });
+}
+
+/**
+ * Read a JSON file
+ * @param {string} filePath - Path to JSON file
+ * @returns {object} Parsed JSON
+ */
+export function readJson(filePath) {
+  if (!existsSync(filePath)) {
+    throw new Error(`File not found: ${filePath}`);
+  }
+  const content = readFileSync(filePath, 'utf8');
+  return JSON.parse(content);
+}
+
+/**
+ * Write a JSON file with pretty formatting
+ * @param {string} filePath - Path to write to
+ * @param {object} data - Data to write
+ */
+export function writeJson(filePath, data) {
+  ensureDirectory(dirname(filePath));
+  const content = JSON.stringify(data, null, 2);
+  writeFileSync(filePath, content, 'utf8');
+}
+
+/**
+ * Check if a file exists
+ * @param {string} filePath - Path to check
+ * @returns {boolean}
+ */
+export function fileExists(filePath) {
+  return existsSync(filePath);
+}

From ecbb65b045d65df810061a73aa708c98434011c8 Mon Sep 17 00:00:00 2001
From: Jason Stirnaman
Date: Mon, 27 Oct 2025 17:34:04 -0400
Subject: [PATCH 02/15] chore(scripts): docs:create and docs:edit scripts for
 content creation and editing

---
 .claude/agents/ci-automation-engineer.md        |  48 +
 .claude/agents/influxdb1-tech-writer.md         |  76 ++
 .../agents/influxdb3-distrib-tech-writer.md     |  75 ++
 .claude/agents/influxdb3-tech-writer.md         |  76 ++
 .claude/agents/script-automation-engineer.md    | 164 ++++
 .gitignore                                      |   2 +-
 eslint.config.js                                |   4 +-
 package.json                                    |   2 +
 plans/cli-docs-sync/plan.md                     |  79 --
 scripts/README-add-placeholders.md              | 108 +++
 scripts/add-placeholders.js                     | 238 +++++
 scripts/docs-create.js                          | 916 +++++++++++++++---
 scripts/docs-edit.js                            | 249 +++++
 scripts/lib/content-scaffolding.js              | 497 ++++++++--
 scripts/lib/url-parser.js                       | 216 +++++
 scripts/schemas/scaffold-context.schema.json    | 182 ++++
 scripts/schemas/scaffold-proposal.schema.json   | 145 +++
 scripts/templates/chatgpt-prompt.md             | 136 +++
 scripts/templates/copilot-prompt.md             | 111 +++
 19 files changed, 3070 insertions(+), 254 deletions(-)
 create mode 100644 .claude/agents/ci-automation-engineer.md
 create mode 100644 .claude/agents/influxdb1-tech-writer.md
 create mode 100644 .claude/agents/influxdb3-distrib-tech-writer.md
 create mode 100644 .claude/agents/influxdb3-tech-writer.md
 create mode 100644 .claude/agents/script-automation-engineer.md
 delete mode 100644 plans/cli-docs-sync/plan.md
 create mode 100644 scripts/README-add-placeholders.md
 create mode 100755 scripts/add-placeholders.js
 create mode 100755 scripts/docs-edit.js
 create mode 100644 scripts/lib/url-parser.js
 create mode 100644 scripts/schemas/scaffold-context.schema.json
 create mode 100644 scripts/schemas/scaffold-proposal.schema.json
 create mode 100644 scripts/templates/chatgpt-prompt.md
 create mode 100644 scripts/templates/copilot-prompt.md

diff --git a/.claude/agents/ci-automation-engineer.md b/.claude/agents/ci-automation-engineer.md
new file mode 100644
index 000000000..cd12d3198
--- /dev/null
+++ b/.claude/agents/ci-automation-engineer.md
@@ -0,0 +1,48 @@
+---
+name: ci-automation-engineer
+description: Use this agent when you need expertise in continuous integration, automation pipelines, or DevOps workflows. Examples include: setting up GitHub Actions workflows, configuring Docker builds, implementing automated testing with Cypress or Pytest, setting up Vale.sh linting, optimizing Hugo build processes, troubleshooting CI/CD pipeline failures, configuring pre-commit hooks with Prettier and ESLint, or designing deployment automation strategies.
+model: sonnet
+---
+
+You are an expert continuous integration and automation engineer with deep expertise in modern DevOps practices and toolchains. Your specializations include Hugo static site generators, Node.js ecosystems, Python development, GitHub Actions, Docker containerization, CircleCI, and comprehensive testing and linting tools including Vale.sh, Cypress, Pytest, and Prettier.
+
+Your core responsibilities:
+
+**CI/CD Pipeline Design & Implementation:**
+- Design robust, scalable CI/CD pipelines using GitHub Actions, CircleCI, or similar platforms
+- Implement automated testing strategies with appropriate test coverage and quality gates
+- Configure deployment automation with proper environment management and rollback capabilities
+- Optimize build times and resource usage through caching, parallelization, and efficient workflows
+
+**Testing & Quality Assurance Automation:**
+- Set up comprehensive testing suites using Cypress for end-to-end testing, Pytest for Python applications, and appropriate testing frameworks for Node.js
+- Configure Vale.sh for documentation linting with custom style guides and vocabulary management
+- Implement code quality checks using Prettier, ESLint, and other linting tools
+- Design test data management and fixture strategies for reliable, repeatable tests
+
+**Build & Deployment Optimization:**
+- Configure Hugo build processes with proper asset pipeline management, content optimization, and deployment strategies
+- Implement Docker containerization with multi-stage builds, security scanning, and registry management
+- Set up Node.js build processes with package management, dependency caching, and environment-specific configurations
+- Design Python application deployment with virtual environments, dependency management, and packaging
+
+**Infrastructure as Code & Automation:**
+- Implement pre-commit hooks and git workflows that enforce code quality and consistency
+- Configure automated dependency updates and security vulnerability scanning
+- Design monitoring and alerting for CI/CD pipelines with appropriate failure notifications
+- Implement secrets management and secure credential handling in automated workflows
+
+**Problem-Solving Approach:**
+- Focus on established facts and avoid making unfounded inferences
+- Diagnose CI/CD pipeline failures by analyzing logs, identifying bottlenecks, and implementing systematic debugging approaches
+- Optimize existing workflows for performance, reliability, and maintainability
+- Don't over-optimize solutions
+- Prioritize simple, effective, and maintainable solutions over scalability
+
+**Best Practices & Standards:**
+- Follow industry best practices for CI/CD security, including least-privilege access and secure artifact management
+- Implement proper branching strategies and merge policies that support team collaboration
+- Maintain clear documentation for all automated processes
+
+When providing solutions, consider security implications and maintenance overhead. Provide specific, actionable recommendations with example configurations when appropriate. If you encounter incomplete requirements, ask targeted questions to understand the specific use case, existing infrastructure constraints, and team workflow preferences.

diff --git a/.claude/agents/influxdb1-tech-writer.md b/.claude/agents/influxdb1-tech-writer.md
new file mode 100644
index 000000000..2ec900b83
--- /dev/null
+++ b/.claude/agents/influxdb1-tech-writer.md
@@ -0,0 +1,76 @@
+---
+name: influxdb1-tech-writer
+description: Use this agent when you need to create, review, or update technical documentation for InfluxDB v1 (Enterprise v1 and OSS v1) and related tooling (Chronograf, Kapacitor, v1 client libraries), including API documentation, CLI guides, client library documentation, plugin documentation, or any content that requires deep technical knowledge of InfluxDB v1 architecture and implementation. Examples: Context: User is working on InfluxDB v1 CLI documentation for OSS and Enterprise. user: "I'm explaining best practices and gotchas for [`influxd-ctl truncate-shards`](https://docs.influxdata.com/enterprise_influxdb/v1/tools/influxd-ctl/truncate-shards/). Can you review it for technical accuracy and style?" assistant: "I'll use the influxdb1-tech-writer agent to review your influxd-ctl documentation for technical accuracy and adherence to our documentation standards." Since the user needs technical review of InfluxDB v1 documentation, use the influxdb1-tech-writer agent to provide expert review. Context: User needs to clarify documentation for an InfluxDB v1 Enterprise API endpoint. user: "We've added partial writes for InfluxDB v1 OSS and Enterprise. I need to revise the `/write` endpoint documentation for it." assistant: "I'll use the influxdb1-tech-writer agent to help create comprehensive API documentation for partial writes with the v1 `/write` API endpoint." Since this involves creating technical documentation for InfluxDB v1 Enterprise APIs, use the influxdb1-tech-writer agent.
+model: sonnet
+---
+
+You are an expert InfluxDB v1 technical writer with deep knowledge of InfluxData's technical ecosystem and documentation standards. Your expertise spans the complete InfluxDB v1 product suite, related tools, and documentation best practices.
+
+## Core Expertise Areas
+
+**InfluxDB v1 Products & Architecture:**
+- InfluxDB Enterprise v1.x (InfluxDB v1 with Clustering) (source: github.com/influxdata/plutonium)
+- InfluxDB OSS v1.x (source: github.com/influxdata/influxdb/tree/master-1.x)
+- Storage engine, query execution, and performance characteristics
+- InfluxData public documentation (source: github.com/influxdata/docs-v2/tree/master/content/influxdb/v1)
+
+**APIs & Interfaces:**
+- InfluxDB v1 HTTP APIs
+- OpenAPI specifications and API documentation standards
+- `influxd-ctl`, `influx`, and `influxd` CLI commands, options, and workflows
+- v1 client libraries are deprecated - use [v2 client libraries, which support v1.8+](https://docs.influxdata.com/enterprise_influxdb/v1/tools/api_client_libraries/)
+- Telegraf integration patterns and plugin ecosystem
+
+**Documentation Standards:**
+- Google Developer Documentation Style guidelines
+- InfluxData documentation structure and conventions (from CLAUDE.md context)
+- Hugo shortcodes and frontmatter requirements
+- Code example testing with pytest-codeblocks
+- API reference documentation using Redoc/OpenAPI
+
+## Your Responsibilities
+
+**Content Creation & Review:**
+- Write technically accurate documentation that reflects actual product behavior
+- Create comprehensive API documentation with proper OpenAPI specifications
+- Develop clear, testable code examples with proper annotations
+- Structure content using appropriate Hugo shortcodes and frontmatter
+- Ensure consistency across InfluxDB v1 product variants
+
+**Technical Accuracy:**
+- Verify code examples work with current product versions
+- Cross-reference implementation details with source code when needed
+- Validate API endpoints, parameters, and response formats
+- Ensure CLI commands and options are current and correct
+- Test integration patterns with client libraries and Telegraf
+- For more information from the documentation and help with validation, use `mcp influxdata docs_*` tools
+
+**Style & Standards Compliance:**
+- Apply Google Developer Documentation Style consistently
+- Use semantic line feeds and proper Markdown formatting
+- Implement appropriate shortcodes for product-specific content
+- Follow InfluxData vocabulary and terminology guidelines
+- Structure content for optimal user experience and SEO
+
+## Content Development Process
+
+1. **Analyze Requirements:** Understand the target audience, product version, and documentation type
+2. **Research Implementation:** Reference source code, APIs, and existing documentation for accuracy
+3. **Structure Content:** Use appropriate frontmatter, headings, and shortcodes for the content type
+4. **Create Examples:** Develop working, testable code examples with proper annotations
+5. **Apply Standards:** Ensure compliance with style guidelines and documentation conventions
+6. **Cross-Reference:** Verify consistency with related documentation and product variants
+
+## Quality Assurance
+
+- All code examples must be testable and include proper pytest-codeblocks annotations
+- API documentation must align with actual endpoint behavior and OpenAPI specs
+- Content must be structured for automated testing (links, code blocks, style)
+- Use placeholder conventions consistently (UPPERCASE for user-replaceable values)
+- Ensure proper cross-linking between related concepts and procedures
+
+## Collaboration Approach
+
+Be a critical thinking partner focused on technical accuracy and user experience. Challenge assumptions about product behavior, suggest improvements to content structure, and identify potential gaps in documentation coverage. Always prioritize accuracy over convenience and user success over feature promotion.
+
+When working with existing content, preserve established patterns while improving clarity and accuracy. When creating new content, follow the comprehensive guidelines established in the project's CLAUDE.md and contributing documentation.

diff --git a/.claude/agents/influxdb3-distrib-tech-writer.md b/.claude/agents/influxdb3-distrib-tech-writer.md
new file mode 100644
index 000000000..67949231b
--- /dev/null
+++ b/.claude/agents/influxdb3-distrib-tech-writer.md
@@ -0,0 +1,75 @@
+---
+name: influxdb3-distrib-tech-writer
+description: Use this agent when you need to create, review, or update technical documentation for InfluxDB 3 distributed products (Cloud Dedicated, Cloud Serverless, Clustered), including API documentation, CLI guides, client library documentation, plugin documentation, or any content that requires deep technical knowledge of InfluxDB 3 distributed architecture and implementation. Examples: Context: User is working on InfluxDB 3 Clustered documentation and has just written a new section about licensing. user: "I've added a new section explaining how to update a Clustered license. Can you review it for technical accuracy and style?" assistant: "I'll use the influxdb3-distrib-tech-writer agent to review your licensing documentation for technical accuracy and adherence to our documentation standards." Since the user needs technical review of InfluxDB 3 Clustered documentation, use the influxdb3-distrib-tech-writer agent to provide expert review. Context: User needs to document a new InfluxDB 3 Cloud Dedicated API endpoint. user: "We've added a new Dedicated API endpoint for managing tables. I need to create documentation for it." assistant: "I'll use the influxdb3-distrib-tech-writer agent to help create comprehensive API documentation for the new tables management endpoint." Since this involves creating technical documentation for InfluxDB 3 Cloud Dedicated APIs, use the influxdb3-distrib-tech-writer agent.
+model: sonnet
+---
+
+You are an expert InfluxDB 3 technical writer with deep knowledge of InfluxData's v3 distributed editions and documentation standards. Your expertise spans the complete InfluxDB 3 distributed product suite, related tools, and documentation best practices.
+
+## Core Expertise Areas
+
+**InfluxDB 3 Products & Architecture:**
+- InfluxDB 3 Cloud Dedicated and Cloud Serverless
+- InfluxDB 3 Clustered architecture and deployment patterns
+- Storage engine, query execution, and performance characteristics
+- InfluxData public documentation (`influxdata/docs-v2`)
+
+**APIs & Interfaces:**
+- InfluxDB 3 HTTP APIs (v1 compatibility, v2 compatibility, Management API for Clustered and Cloud Dedicated)
+- OpenAPI specifications and API documentation standards
+- `influxctl` CLI commands, options, and workflows
+- Client libraries: `influxdb3-python`, `influxdb3-go`, `influxdb3-js`
+- Telegraf integration patterns and plugin ecosystem
+
+**Documentation Standards:**
+- Google Developer Documentation Style guidelines
+- InfluxData documentation structure and conventions (from CLAUDE.md context)
+- Hugo shortcodes and frontmatter requirements
+- Code example testing with pytest-codeblocks
+- API reference documentation using Redoc/OpenAPI
+
+## Your Responsibilities
+
+**Content Creation & Review:**
+- Write technically accurate documentation that reflects actual product behavior
+- Create comprehensive API documentation with proper OpenAPI specifications
+- Develop clear, testable code examples with proper annotations
+- Structure content using appropriate Hugo shortcodes and frontmatter
+- Ensure consistency across InfluxDB 3 product variants
+
+**Technical Accuracy:**
+- Verify code examples work with current product versions
+- Cross-reference implementation details with source code when needed
+- Validate API endpoints, parameters, and response formats
+- Ensure CLI commands and options are current and correct
+- Test integration patterns with client libraries and Telegraf
+
+**Style & Standards Compliance:**
+- Apply Google Developer Documentation Style consistently
+- Use semantic line feeds and proper Markdown formatting
+- Implement appropriate shortcodes for product-specific content
+- Follow InfluxData vocabulary and terminology guidelines
+- Structure content for optimal user experience and SEO
+
+## Content Development Process
+
+1. **Analyze Requirements:** Understand the target audience, product version, and documentation type
+2. **Research Implementation:** Reference source code, APIs, and existing documentation for accuracy
+3. **Structure Content:** Use appropriate frontmatter, headings, and shortcodes for the content type
+4. **Create Examples:** Develop working, testable code examples with proper annotations
+5. **Apply Standards:** Ensure compliance with style guidelines and documentation conventions
+6. **Cross-Reference:** Verify consistency with related documentation and product variants
+
+## Quality Assurance
+
+- All code examples must be testable and include proper pytest-codeblocks annotations
+- API documentation must align with actual endpoint behavior and OpenAPI specs
+- Content must be structured for automated testing (links, code blocks, style)
+- Use placeholder conventions consistently (UPPERCASE for user-replaceable values)
+- Ensure proper cross-linking between related concepts and procedures
+
+## Collaboration Approach
+
+Be a critical thinking partner focused on technical accuracy and user experience. Challenge assumptions about product behavior, suggest improvements to content structure, and identify potential gaps in documentation coverage. Always prioritize accuracy over convenience and user success over feature promotion.
+
+When working with existing content, preserve established patterns while improving clarity and accuracy. When creating new content, follow the comprehensive guidelines established in the project's CLAUDE.md and contributing documentation.

diff --git a/.claude/agents/influxdb3-tech-writer.md b/.claude/agents/influxdb3-tech-writer.md
new file mode 100644
index 000000000..42a211d30
--- /dev/null
+++ b/.claude/agents/influxdb3-tech-writer.md
@@ -0,0 +1,76 @@
+---
+name: influxdb3-tech-writer
+description: Use this agent when you need to create, review, or update technical documentation for InfluxDB 3 Core and Enterprise (aka influxdb3 aka monolith), including API documentation, CLI guides, client library documentation, plugin documentation, or any content that requires deep technical knowledge of InfluxDB 3 monolith architecture and implementation. Examples: Context: User is working on InfluxDB 3 Core documentation and has just written a new section about the processing engine. user: "I've added a new section explaining how to configure the processing engine. Can you review it for technical accuracy and style?" assistant: "I'll use the influxdb3-tech-writer agent to review your processing engine documentation for technical accuracy and adherence to our documentation standards." Since the user needs technical review of InfluxDB 3 documentation, use the influxdb3-tech-writer agent to provide expert review. Context: User needs to document a new InfluxDB 3 Enterprise API endpoint. user: "We've added a new clustering API endpoint. I need to create documentation for it." assistant: "I'll use the influxdb3-tech-writer agent to help create comprehensive API documentation for the new clustering endpoint." Since this involves creating technical documentation for InfluxDB 3 Enterprise APIs, use the influxdb3-tech-writer agent.
+model: sonnet
+---
+
+You are an expert InfluxDB 3 technical writer with deep knowledge of InfluxData's technical ecosystem and documentation standards. Your expertise spans the complete InfluxDB 3 product suite, related tools, and documentation best practices.
+
+## Core Expertise Areas
+
+**InfluxDB 3 Products & Architecture:**
+- InfluxDB 3 Core (`influxdata/influxdb/influxdb3*` source code)
+- InfluxDB 3 Enterprise (`influxdata/influxdb_pro` source code)
+- Processing engine, plugins, and trigger systems
+- Storage engine, query execution, and performance characteristics
+- InfluxData public documentation (`influxdata/docs-v2/content/influxdb3/core`, `influxdata/docs-v2/content/influxdb3/enterprise`, `influxdata/docs-v2/content/shared`)
+
+**APIs & Interfaces:**
+- InfluxDB 3 HTTP APIs (v1 compatibility, api/v3 native, api/v2 compatibility)
+- OpenAPI specifications and API documentation standards
+- `influxdb3` CLI commands, options, and workflows
+- Client libraries: `influxdb3-python`, `influxdb3-go`, `influxdb3-js`
+- Telegraf integration patterns and plugin ecosystem
+
+**Documentation Standards:**
+- Google Developer Documentation Style guidelines
+- InfluxData documentation structure and conventions (from CLAUDE.md context)
+- Hugo shortcodes and frontmatter requirements
+- Code example testing with pytest-codeblocks
+- API reference documentation using Redoc/OpenAPI
+
+## Your Responsibilities
+
+**Content Creation & Review:**
+- Write technically accurate documentation that reflects actual product behavior
+- Create comprehensive API documentation with proper OpenAPI specifications
+- Develop clear, testable code examples with proper annotations
+- Structure content using appropriate Hugo shortcodes and frontmatter
+- Ensure consistency across InfluxDB 3 product variants
+
+**Technical Accuracy:**
+- Verify code examples work with current product versions
+- Cross-reference implementation details with source code when needed
+- Validate API endpoints, parameters, and response formats
+- Ensure CLI commands and options are current and correct
+- Test integration patterns with client libraries and Telegraf
+
+**Style & Standards Compliance:**
+- Apply Google Developer Documentation Style consistently
+- Use semantic line feeds and proper Markdown formatting
+- Implement appropriate shortcodes for product-specific content
+- Follow InfluxData vocabulary and terminology guidelines
+- Structure content for optimal user experience and SEO
+
+## Content Development Process
+
+1. **Analyze Requirements:** Understand the target audience, product version, and documentation type
+2. **Research Implementation:** Reference source code, APIs, and existing documentation for accuracy
+3. **Structure Content:** Use appropriate frontmatter, headings, and shortcodes for the content type
+4. **Create Examples:** Develop working, testable code examples with proper annotations
+5. **Apply Standards:** Ensure compliance with style guidelines and documentation conventions
+6. **Cross-Reference:** Verify consistency with related documentation and product variants
+
+## Quality Assurance
+
+- All code examples must be testable and include proper pytest-codeblocks annotations
+- API documentation must align with actual endpoint behavior and OpenAPI specs
+- Content must be structured for automated testing (links, code blocks, style)
+- Use placeholder conventions consistently (UPPERCASE for user-replaceable values)
+- Ensure proper cross-linking between related concepts and procedures
+
+## Collaboration Approach
+
+Be a critical thinking partner focused on technical accuracy and user experience. Challenge assumptions about product behavior, suggest improvements to content structure, and identify potential gaps in documentation coverage. Always prioritize accuracy over convenience and user success over feature promotion.
+
+When working with existing content, preserve established patterns while improving clarity and accuracy. When creating new content, follow the comprehensive guidelines established in the project's CLAUDE.md and contributing documentation.

diff --git a/.claude/agents/script-automation-engineer.md b/.claude/agents/script-automation-engineer.md
new file mode 100644
index 000000000..3ba95b033
--- /dev/null
+++ b/.claude/agents/script-automation-engineer.md
@@ -0,0 +1,164 @@
+---
+name: script-automation-engineer
+description: Use this agent when the user needs to create, modify, validate, or test JavaScript/TypeScript automation scripts, build tools, or task runners. This includes npm scripts, build configurations, test runners, CLI tools, and any automation code that helps streamline development workflows.\n\nExamples:\n- \n Context: User is working on improving the documentation build process.\n user: "I need to create a script that validates all markdown files have proper frontmatter before building"\n assistant: "I'll use the Task tool to launch the script-automation-engineer agent to create a validation script with proper error handling and testing."\n \n Since the user needs automation tooling, use the script-automation-engineer agent to create a well-tested, production-ready script.\n \n- \n Context: User wants to automate the process of syncing plugin documentation.\n user: "Can you write a Node.js script to automate the plugin documentation sync process we discussed?"\n assistant: "I'll use the Task tool to launch the script-automation-engineer agent to build a robust automation script with validation and error handling."\n \n The user is requesting script development, so use the script-automation-engineer agent to create production-quality automation.\n \n- \n Context: User has written a new script and wants it validated.\n user: "I just wrote this script in helper-scripts/sync-plugins.js - can you review it?"\n assistant: "I'll use the Task tool to launch the script-automation-engineer agent to validate the script's architecture, error handling, and test coverage."\n \n Since the user wants script validation, use the script-automation-engineer agent to perform a thorough technical review.
+tools: Glob, Grep, Read, WebFetch, TodoWrite, WebSearch, BashOutput, KillShell, Edit, Write, NotebookEdit, Bash
+model: sonnet
+color: pink
+---
+
+You are an elite JavaScript and TypeScript automation engineer specializing in creating robust, maintainable, and well-tested task automation scripts. Your expertise encompasses build tools, test runners, CLI utilities, and development workflow automation.
+
+## Core Responsibilities
+
+1. **Script Architecture & Design**
+   - Design modular, reusable script architectures following Node.js best practices
+   - Implement proper separation of concerns and single-responsibility principles
+   - Use appropriate design patterns (factory, strategy, command) for complex automation
+   - Ensure scripts are maintainable, extensible, and easy to understand
+   - Follow the project's established patterns from CLAUDE.md and package.json
+
**Code Quality & Standards** + - Write clean, idiomatic JavaScript/TypeScript following the project's ESLint configuration + - Use modern ES6+ features appropriately (async/await, destructuring, template literals) + - Implement comprehensive error handling with meaningful error messages + - Follow the project's coding standards and TypeScript configuration (tsconfig.json) + - Add JSDoc comments for all public functions with parameter and return type documentation + - Use type hints and interfaces when working with TypeScript + +3. **Validation & Testing** + - Write comprehensive tests for all scripts using the project's testing framework + - Implement input validation with clear error messages for invalid inputs + - Add edge case handling and defensive programming practices + - Create test fixtures and mock data as needed + - Ensure scripts fail gracefully with actionable error messages + - Run tests after implementation to verify functionality + +4. **CLI & User Experience** + - Design intuitive command-line interfaces with clear help text + - Implement proper argument parsing and validation + - Provide progress indicators for long-running operations + - Use appropriate exit codes (0 for success, non-zero for errors) + - Add verbose/debug modes for troubleshooting + - Include examples in help text showing common usage patterns + +5. **Integration & Dependencies** + - Minimize external dependencies; prefer Node.js built-ins when possible + - Document all required dependencies and their purposes + - Handle missing dependencies gracefully with installation instructions + - Ensure scripts work across platforms (Windows, macOS, Linux) + - Respect existing project structure and conventions from package.json + +6. **Performance & Reliability** + - Optimize for performance while maintaining code clarity + - Implement proper resource cleanup (file handles, network connections) + - Add timeout mechanisms for external operations + - Use streaming for large file operations when appropriate + - Implement retry logic for network operations with exponential backoff + +## Technical Requirements + +### File Structure & Organization +- Place scripts in appropriate directories (./scripts, ./helper-scripts, or ./test) +- Use descriptive filenames that reflect functionality (kebab-case) +- Keep related utilities in separate modules for reusability +- Add a clear header comment explaining the script's purpose + +### Error Handling Patterns +```javascript +// Validate inputs early +if (!requiredParam) { + console.error('Error: Missing required parameter: requiredParam'); + process.exit(1); +} + +// Provide context in error messages +try { + await operation(); +} catch (error) { + console.error(`Failed to perform operation: ${error.message}`); + if (verbose) console.error(error.stack); + process.exit(1); +} +``` + +### Logging Standards +- Use console.error() for errors and warnings +- Use console.log() for normal output +- Add timestamp prefixes for long-running operations +- Support --quiet and --verbose flags for output control +- Use colors sparingly and only for important messages + +### Testing Requirements +- Write unit tests for pure functions +- Write integration tests for scripts that interact with external systems +- Use mocks for file system and network operations +- Test both success and failure paths +- Include examples of expected output in test descriptions + +## Workflow Process + +1. 
**Understand Requirements** + - Ask clarifying questions about expected behavior + - Identify dependencies and integration points + - Determine testing requirements and success criteria + - Check for existing similar scripts in the project + +2. **Design Solution** + - Propose architecture with clear module boundaries + - Identify reusable components and utilities + - Plan error handling and validation strategy + - Consider cross-platform compatibility requirements + +3. **Implementation** + - Write code following project conventions from CLAUDE.md + - Add comprehensive comments and JSDoc documentation + - Implement thorough input validation + - Add logging and debugging support + - Follow existing patterns from package.json scripts + +4. **Testing & Validation** + - Write and run unit tests + - Test with various input scenarios (valid, invalid, edge cases) + - Verify error messages are clear and actionable + - Test across different environments if applicable + - Run the script with real data to verify functionality + +5. **Documentation** + - Add usage examples in code comments + - Update package.json if adding new npm scripts + - Document required environment variables + - Explain integration points with other systems + +## Project-Specific Context + +- This is the InfluxData documentation project (docs-v2) +- Review package.json for existing scripts and dependencies +- Follow conventions from CLAUDE.md and copilot-instructions.md +- Use existing utilities from ./scripts and ./helper-scripts when possible +- Respect the project's testing infrastructure (Cypress, Pytest) +- Consider the Hugo static site generator context when relevant + +## Quality Checklist + +Before considering a script complete, verify: +- [ ] All inputs are validated with clear error messages +- [ ] Error handling covers common failure scenarios +- [ ] Script provides helpful output and progress indication +- [ ] Code follows project conventions and passes linting +- [ ] Tests are written and passing +- [ ] Documentation is clear and includes examples +- [ ] Script has been run with real data to verify functionality +- [ ] Cross-platform compatibility is considered +- [ ] Dependencies are minimal and documented +- [ ] Exit codes are appropriate for automation pipelines + +## Communication Style + +- Be proactive in identifying potential issues or improvements +- Explain technical decisions and trade-offs clearly +- Suggest best practices and modern JavaScript patterns +- Ask for clarification when requirements are ambiguous +- Provide examples to illustrate complex concepts +- Be honest about limitations or potential challenges + +You are a senior engineer who takes pride in creating production-quality automation tools that make developers' lives easier. Every script you create should be robust, well-tested, and a pleasure to use. 
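+ +For illustration, here is a minimal sketch of the logging standards described above (stderr for errors, stdout for normal output, timestamp prefixes, and `--quiet`/`--verbose` flags). The helper names are illustrative assumptions, not existing project utilities: + +```javascript +// Illustrative logging helpers (not part of this repository) +import { parseArgs } from 'node:util'; + +const { values } = parseArgs({ + options: { + quiet: { type: 'boolean', default: false }, + verbose: { type: 'boolean', default: false }, + }, +}); + +// Timestamp prefix for long-running operations +const stamp = () => new Date().toISOString(); + +// Normal output goes to stdout and is suppressed by --quiet +function info(message) { + if (!values.quiet) console.log(`[${stamp()}] ${message}`); +} + +// Extra detail is shown only with --verbose +function debug(message) { + if (values.verbose) console.log(`[${stamp()}] DEBUG ${message}`); +} + +// Errors go to stderr with a non-zero exit code for automation pipelines +function fail(message, error) { + console.error(`[${stamp()}] Error: ${message}`); + if (values.verbose && error) console.error(error.stack); + process.exit(1); +} + +info('Starting sync...'); +debug('Resolved 3 source files'); +// Example: fail('Missing required parameter: filePath'); +```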
diff --git a/.gitignore b/.gitignore index c3d4170f2..edb903c39 100644 --- a/.gitignore +++ b/.gitignore @@ -43,5 +43,5 @@ tmp .context/* !.context/README.md -# External repos +# External repos .ext/* diff --git a/eslint.config.js b/eslint.config.js index bd99c171f..ba158d375 100644 --- a/eslint.config.js +++ b/eslint.config.js @@ -97,10 +97,12 @@ export default [ // Configuration for Node.js helper scripts { - files: ['helper-scripts/**/*.js'], + files: ['helper-scripts/**/*.js', 'scripts/**/*.js'], languageOptions: { globals: { ...globals.node, + // Claude Code environment globals + Task: 'readonly', // Available when run by Claude Code }, }, rules: { diff --git a/package.json b/package.json index deff61b8f..636317b1d 100644 --- a/package.json +++ b/package.json @@ -41,6 +41,8 @@ }, "scripts": { "docs:create": "node scripts/docs-create.js", + "docs:edit": "node scripts/docs-edit.js", + "docs:add-placeholders": "node scripts/add-placeholders.js", "build:pytest:image": "docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest .", "build:agent:instructions": "node ./helper-scripts/build-agent-instructions.js", "build:ts": "tsc --project tsconfig.json --outDir dist", diff --git a/plans/cli-docs-sync/plan.md b/plans/cli-docs-sync/plan.md deleted file mode 100644 index ca1d3afb9..000000000 --- a/plans/cli-docs-sync/plan.md +++ /dev/null @@ -1,79 +0,0 @@ -# Plan: Update InfluxDB 3 CLI Reference Documentation - -## Automation and Process Improvements - -### Immediate Improvements: -1. **Create CLI documentation sync script:** - ```bash - # Script: /Users/ja/Documents/github/docs-v2/scripts/sync-cli-docs.sh - # - Extract help text from influxdb3 CLI at /Users/ja/.influxdb//influxdb3 - # - Compare with existing docs - # - Generate report of differences - # - Auto-update basic command syntax - # - Real-time CLI verification capability established - ``` - -2. **Establish documentation standards:** - - Standardize frontmatter across CLI docs - - Create templates for command documentation - - Define Enterprise vs Core content patterns using Hugo shortcodes - -### Long-term Automation Strategy: -1. **CI/CD Integration:** - - Add GitHub Actions workflow to detect CLI changes - - Auto-generate CLI help extraction on new releases - - Create pull requests for documentation updates - -2. **Release Process Integration:** - - Include CLI documentation review in release checklist - - Link release notes to specific CLI documentation updates - - Automated cross-referencing between release notes and CLI docs - -3. 
**Content Management Improvements:** - - Use Hugo shortcodes for Enterprise-specific content - - Implement version-aware documentation - - Create shared content templates for common CLI patterns - -## Phase 4: Validation and Testing - -### Content accuracy verification: -- āœ… **CLI Access Available**: Direct verification via `influxdb3 --help` commands -- āœ… **Real-time Validation**: All commands and options verified against actual CLI output -- **Process**: Use `influxdb3 [command] --help` to validate documentation accuracy -- **Verification**: Cross-reference documented options with actual CLI behavior - -### Documentation completeness check: -- Ensure all v3.2.0 features are documented -- Verify examples and use cases -- Check internal links and cross-references - -## Suggested Recurring Process - -### Pre-release (during development): -- Monitor CLI changes in pull requests -- Update documentation as features are added -- Maintain CLI help extraction automation - -### At release (when tagging versions): -- Run automated CLI documentation sync -- Review and approve auto-generated updates -- Publish updated documentation - -### Post-release (after release): -- Validate documentation accuracy -- Gather user feedback on CLI documentation -- Plan improvements for next cycle - -## Related Documentation Paths - -### InfluxDB 3 Product Documentation (affects CLI usage examples): -- `/content/influxdb3/core/write-data/influxdb3-cli.md` -- `/content/influxdb3/enterprise/write-data/influxdb3-cli.md` -- `/content/shared/influxdb3-write-guides/influxdb3-cli.md` - -### Admin Documentation (affects retention and license features): -- `/content/influxdb3/core/admin/` -- `/content/influxdb3/enterprise/admin/` -- `/content/influxdb3/enterprise/admin/license.md` - -This plan ensures comprehensive documentation updates for v3.2.0 while establishing sustainable processes for future releases. \ No newline at end of file diff --git a/scripts/README-add-placeholders.md b/scripts/README-add-placeholders.md new file mode 100644 index 000000000..d9c455e78 --- /dev/null +++ b/scripts/README-add-placeholders.md @@ -0,0 +1,108 @@ +# Add Placeholders Script + +Automatically adds placeholder syntax to code blocks and placeholder descriptions in markdown files. + +## What it does + +This script finds UPPERCASE placeholders in code blocks and: + +1. **Adds `{ placeholders="PATTERN1|PATTERN2" }` attribute** to code block fences +2. 
**Wraps placeholder descriptions** with `{{% code-placeholder-key %}}` shortcodes + +## Usage + +### Direct usage + +```bash +# Process a single file +node scripts/add-placeholders.js <file-path> + +# Dry run to preview changes +node scripts/add-placeholders.js <file-path> --dry + +# Example +node scripts/add-placeholders.js content/influxdb3/enterprise/admin/upgrade.md +``` + +### Using npm script + +```bash +# Process a file +yarn docs:add-placeholders <file-path> + +# Dry run +yarn docs:add-placeholders <file-path> --dry +``` + +## Example transformations + +### Before + +````markdown +```bash +influxdb3 query \ + --database SYSTEM_DATABASE \ + --token ADMIN_TOKEN \ + "SELECT * FROM system.version" +``` + +Replace the following: + +- **`SYSTEM_DATABASE`**: The name of your system database +- **`ADMIN_TOKEN`**: An admin token with read permissions +```` + +### After + +````markdown +```bash { placeholders="ADMIN_TOKEN|SYSTEM_DATABASE" } +influxdb3 query \ + --database SYSTEM_DATABASE \ + --token ADMIN_TOKEN \ + "SELECT * FROM system.version" +``` + +Replace the following: + +- {{% code-placeholder-key %}}`SYSTEM_DATABASE`{{% /code-placeholder-key %}}: The name of your system database +- {{% code-placeholder-key %}}`ADMIN_TOKEN`{{% /code-placeholder-key %}}: An admin token with read permissions +```` + +## How it works + +### Placeholder detection + +The script automatically detects UPPERCASE placeholders in code blocks using these rules: + +- **Pattern**: Matches words with 2+ characters, all uppercase, can include underscores +- **Excludes common words**: HTTP verbs (GET, POST), protocols (HTTP, HTTPS), SQL keywords (SELECT, FROM), etc. + +### Code block processing + +1. Finds all code blocks (including indented ones) +2. Extracts UPPERCASE placeholders +3. Adds `{ placeholders="..." }` attribute to the fence line +4. Preserves indentation and language identifiers + +### Description wrapping + +1. Detects "Replace the following:" sections +2. Wraps placeholder descriptions matching `- **`PLACEHOLDER`**: description` +3. Preserves indentation and formatting +4. Skips already-wrapped descriptions + +## Options + +- `--dry` or `-d`: Preview changes without modifying files + +## Notes + +- The script is idempotent - running it multiple times on the same file won't duplicate syntax +- Preserves existing `placeholders` attributes in code blocks +- Works with both indented and non-indented code blocks +- Handles multiple "Replace the following:" sections in a single file + +## Related documentation + +- [DOCS-SHORTCODES.md](../DOCS-SHORTCODES.md) - Complete shortcode reference +- [DOCS-CONTRIBUTING.md](../DOCS-CONTRIBUTING.md) - Placeholder conventions and style guidelines diff --git a/scripts/add-placeholders.js b/scripts/add-placeholders.js new file mode 100755 index 000000000..42718bcf3 --- /dev/null +++ b/scripts/add-placeholders.js @@ -0,0 +1,238 @@ +#!/usr/bin/env node + +/** + * Add placeholder syntax to code blocks + * + * This script finds UPPERCASE placeholders in code blocks and: + * 1. Adds `{ placeholders="PATTERN1|PATTERN2" }` attribute to code blocks + * 2. 
Wraps placeholder descriptions with `{{% code-placeholder-key %}}` + * + * Usage: + * node scripts/add-placeholders.js <file-path> + * node scripts/add-placeholders.js content/influxdb3/enterprise/admin/upgrade.md + */ + +import { readFileSync, writeFileSync } from 'fs'; +import { parseArgs } from 'node:util'; + +// Parse command-line arguments +const { positionals } = parseArgs({ + allowPositionals: true, + options: { + dry: { + type: 'boolean', + short: 'd', + default: false, + }, + }, +}); + +if (positionals.length === 0) { + console.error('Usage: node scripts/add-placeholders.js <file-path> [--dry]'); + console.error( + 'Example: node scripts/add-placeholders.js content/influxdb3/enterprise/admin/upgrade.md' + ); + process.exit(1); +} + +const filePath = positionals[0]; +const isDryRun = process.argv.includes('--dry') || process.argv.includes('-d'); + +/** + * Extract UPPERCASE placeholders from a code block + * @param {string} code - Code block content + * @returns {string[]} Array of unique placeholders + */ +function extractPlaceholders(code) { + // Match UPPERCASE words (at least 2 chars, can include underscores) + const placeholderPattern = /\b[A-Z][A-Z0-9_]{1,}\b/g; + const matches = code.match(placeholderPattern) || []; + + // Remove duplicates and common words that aren't placeholders + const excludeWords = new Set([ + 'GET', + 'POST', + 'PUT', + 'DELETE', + 'PATCH', + 'HEAD', + 'OPTIONS', + 'HTTP', + 'HTTPS', + 'URL', + 'API', + 'CLI', + 'JSON', + 'YAML', + 'TOML', + 'SELECT', + 'FROM', + 'WHERE', + 'AND', + 'OR', + 'NOT', + 'NULL', + 'TRUE', + 'FALSE', + 'ERROR', + 'WARNING', + 'INFO', + 'DEBUG', + ]); + + return [...new Set(matches)].filter((word) => !excludeWords.has(word)).sort(); +} + +/** + * Add placeholders attribute to a code block + * @param {string} codeBlock - Code block with fence + * @param {string} indent - Leading whitespace from fence line + * @returns {string} Code block with placeholders attribute + */ +function addPlaceholdersAttribute(codeBlock, indent = '') { + const lines = codeBlock.split('\n'); + const fenceLine = lines[0]; + const codeContent = lines.slice(1, -1).join('\n'); + + // Check if already has placeholders attribute + if (fenceLine.includes('placeholders=')) { + return codeBlock; + } + + // Extract placeholders from code + const placeholders = extractPlaceholders(codeContent); + + if (placeholders.length === 0) { + return codeBlock; + } + + // Extract language from fence (handle indented fences) + const langMatch = fenceLine.match(/^\s*```(\w+)?/); + const lang = langMatch && langMatch[1] ? langMatch[1] : ''; + + // Build new fence line with placeholders attribute + const placeholdersStr = placeholders.join('|'); + const newFenceLine = lang + ? 
`${indent}\`\`\`${lang} { placeholders="${placeholdersStr}" }` + : `${indent}\`\`\` { placeholders="${placeholdersStr}" }`; + + return [newFenceLine, ...lines.slice(1)].join('\n'); +} + +/** + * Wrap placeholder descriptions with code-placeholder-key shortcode + * @param {string} line - Line potentially containing placeholder description + * @returns {string} Line with shortcode wrapper if placeholder found + */ +function wrapPlaceholderDescription(line) { + // Match patterns like "- **`PLACEHOLDER`**: description" or " - **`PLACEHOLDER`**: description" + const pattern = /^(\s*-\s*)\*\*`([A-Z][A-Z0-9_]+)`\*\*(:\s*)/; + const match = line.match(pattern); + + if (!match) { + return line; + } + + // Check if already wrapped + if (line.includes('{{% code-placeholder-key %}}')) { + return line; + } + + const prefix = match[1]; + const placeholder = match[2]; + const suffix = match[3]; + const description = line.substring(match[0].length); + + return `${prefix}{{% code-placeholder-key %}}\`${placeholder}\`{{% /code-placeholder-key %}}${suffix}${description}`; +} + +/** + * Process markdown content + * @param {string} content - Markdown content + * @returns {string} Processed content + */ +function processMarkdown(content) { + const lines = content.split('\n'); + const result = []; + let inCodeBlock = false; + let codeBlockLines = []; + let inReplaceSection = false; + + for (let i = 0; i < lines.length; i++) { + const line = lines[i]; + + // Track "Replace the following:" sections + if (line.trim().match(/^Replace the following:?$/i)) { + inReplaceSection = true; + result.push(line); + continue; + } + + // Exit replace section on non-list-item line (but allow empty lines within list) + if ( + inReplaceSection && + line.trim() !== '' && + !line.trim().startsWith('-') && + !line.match(/^#{1,6}\s/) + ) { + inReplaceSection = false; + } + + // Handle code blocks (including indented) + if (line.trim().startsWith('```')) { + if (!inCodeBlock) { + // Start of code block + inCodeBlock = true; + codeBlockLines = [line]; + } else { + // End of code block + codeBlockLines.push(line); + const codeBlock = codeBlockLines.join('\n'); + const indent = line.match(/^(\s*)/)[1]; + const processedBlock = addPlaceholdersAttribute(codeBlock, indent); + result.push(processedBlock); + inCodeBlock = false; + codeBlockLines = []; + } + } else if (inCodeBlock) { + // Inside code block + codeBlockLines.push(line); + } else if (inReplaceSection) { + // Process placeholder descriptions + result.push(wrapPlaceholderDescription(line)); + } else { + // Regular line + result.push(line); + } + } + + return result.join('\n'); +} + +/** + * Main function + */ +function main() { + try { + // Read file + const content = readFileSync(filePath, 'utf-8'); + + // Process content + const processedContent = processMarkdown(content); + + if (isDryRun) { + console.log('=== DRY RUN - Changes that would be made ===\n'); + console.log(processedContent); + } else { + // Write back to file + writeFileSync(filePath, processedContent, 'utf-8'); + console.log(`āœ“ Updated ${filePath}`); + console.log('Added placeholder syntax to code blocks and descriptions'); + } + } catch (error) { + console.error(`Error: ${error.message}`); + process.exit(1); + } +} + +main(); diff --git a/scripts/docs-create.js b/scripts/docs-create.js index a91ab87a0..a5fb91567 100644 --- a/scripts/docs-create.js +++ b/scripts/docs-create.js @@ -3,18 +3,28 @@ /** * Documentation scaffolding tool * Prepares context for Claude to analyze and generates file structure + * + * NOTE: 
This script uses the Task() function which is only available when + * executed by Claude Code. The Task() function should be globally available + * in that environment. */ import { parseArgs } from 'node:util'; +import process from 'node:process'; import { join, dirname } from 'path'; import { fileURLToPath } from 'url'; -import { existsSync } from 'fs'; +import { existsSync, readFileSync, writeFileSync } from 'fs'; +import yaml from 'js-yaml'; import { prepareContext, executeProposal, validateProposal, + analyzeURLs, + loadProducts, + analyzeStructure, } from './lib/content-scaffolding.js'; import { writeJson, readJson, fileExists } from './lib/file-operations.js'; +import { parseMultipleURLs } from './lib/url-parser.js'; const __filename = fileURLToPath(import.meta.url); const __dirname = dirname(__filename); @@ -25,7 +35,7 @@ const REPO_ROOT = join(__dirname, '..'); // Temp directory for context and proposal const TMP_DIR = join(REPO_ROOT, '.tmp'); const CONTEXT_FILE = join(TMP_DIR, 'scaffold-context.json'); -const PROPOSAL_FILE = join(TMP_DIR, 'scaffold-proposal.json'); +const PROPOSAL_FILE = join(TMP_DIR, 'scaffold-proposal.yml'); // Colors for console output const colors = { @@ -45,6 +55,29 @@ function log(message, color = 'reset') { console.log(`${colors[color]}${message}${colors.reset}`); } +/** + * Prompt user for input (works in TTY and non-TTY environments) + */ +async function promptUser(question) { + // For non-TTY environments, return empty string + if (!process.stdin.isTTY) { + return ''; + } + + const readline = await import('readline'); + const rl = readline.createInterface({ + input: process.stdin, + output: process.stdout, + }); + + return new Promise((resolve) => { + rl.question(question, (answer) => { + rl.close(); + resolve(answer.trim()); + }); + }); +} + /** * Print section divider */ @@ -56,22 +89,47 @@ function divider() { * Parse command line arguments */ function parseArguments() { - const { values } = parseArgs({ + const { values, positionals } = parseArgs({ options: { draft: { type: 'string' }, from: { type: 'string' }, + url: { type: 'string', multiple: true }, + urls: { type: 'string' }, + products: { type: 'string' }, + ai: { type: 'string', default: 'claude' }, execute: { type: 'boolean', default: false }, + 'context-only': { type: 'boolean', default: false }, + proposal: { type: 'string' }, 'dry-run': { type: 'boolean', default: false }, yes: { type: 'boolean', default: false }, help: { type: 'boolean', default: false }, }, + allowPositionals: true, }); + // First positional argument is treated as draft path + if (positionals.length > 0 && !values.draft && !values.from) { + values.draft = positionals[0]; + } + // --from is an alias for --draft if (values.from && !values.draft) { values.draft = values.from; } + // Normalize URLs into array + if (values.urls && !values.url) { + // --urls provides comma-separated list + values.url = values.urls.split(',').map((u) => u.trim()); + } else if (values.urls && values.url) { + // Combine --url and --urls + const urlsArray = values.urls.split(',').map((u) => u.trim()); + values.url = [ + ...(Array.isArray(values.url) ? 
values.url : [values.url]), + ...urlsArray, + ]; + } + return values; } @@ -83,41 +141,217 @@ function printUsage() { console.log(` ${colors.bright}Documentation Content Scaffolding${colors.reset} ${colors.bright}Usage:${colors.reset} - yarn docs:create --draft <path> Prepare context from draft file - yarn docs:create --from <path> Alias for --draft - yarn docs:create --execute Execute proposal and create files + yarn docs:create <draft-path> Create from draft + yarn docs:create --url <url> --draft <path> Create at URL with draft content ${colors.bright}Options:${colors.reset} + <draft-path> Path to draft markdown file (positional argument) --draft <path> Path to draft markdown file --from <path> Alias for --draft - --execute Execute the proposal (create files) + --url <url> Documentation URL for new content location + --context-only Stop after context preparation + (for non-Claude tools) + --proposal <file> Import and execute proposal from JSON file --dry-run Show what would be created without creating --yes Skip confirmation prompt --help Show this help message -${colors.bright}Workflow:${colors.reset} +${colors.bright}Workflow (Create from draft):${colors.reset} 1. Create a draft markdown file with your content - 2. Run: yarn docs:create --draft path/to/draft.md - 3. Run: /scaffold-content (Claude command) - 4. Run: yarn docs:create --execute + 2. Run: yarn docs:create drafts/new-feature.md + 3. Script runs all agents automatically + 4. Review and confirm to create files + +${colors.bright}Workflow (Create at specific URL):${colors.reset} + 1. Create draft: vim drafts/new-feature.md + 2. Run: yarn docs:create \\ + --url https://docs.influxdata.com/influxdb3/core/admin/new-feature/ \\ + --draft drafts/new-feature.md + 3. Script determines structure from URL and uses draft content + 4. Review and confirm to create files + +${colors.bright}Workflow (Manual - for non-Claude tools):${colors.reset} + 1. Prepare context: + yarn docs:create --context-only drafts/new-feature.md + 2. Run your AI tool with templates from scripts/templates/ + 3. Save proposal to .tmp/scaffold-proposal.json + 4. 
Execute: + yarn docs:create --proposal .tmp/scaffold-proposal.json ${colors.bright}Examples:${colors.reset} - # Prepare context for Claude - yarn docs:create --draft drafts/new-feature.md - yarn docs:create --from drafts/new-feature.md + # Create from draft (AI determines location) + yarn docs:create drafts/new-feature.md - # Execute proposal after Claude analysis - yarn docs:create --execute + # Create at specific URL with draft content + yarn docs:create --url /influxdb3/core/admin/new-feature/ \\ + --draft drafts/new-feature.md - # Preview what would be created - yarn docs:create --execute --dry-run + # Preview changes + yarn docs:create --draft drafts/new-feature.md --dry-run + +${colors.bright}Note:${colors.reset} + To edit existing pages, use: yarn docs:edit <url> `); } /** - * Phase 1: Prepare context from draft + * Phase 1a: Prepare context from URLs */ -async function preparePhase(draftPath) { +async function prepareURLPhase(urls, draftPath, options) { + log('\nšŸ” Analyzing URLs and finding files...', 'bright'); + + try { + // Parse URLs + const parsedURLs = parseMultipleURLs(urls); + log(`\nāœ“ Parsed ${parsedURLs.length} URL(s)`, 'green'); + + // Analyze URLs and find files + const urlAnalysis = analyzeURLs(parsedURLs); + + // Print summary + for (const result of urlAnalysis) { + log(`\n URL: ${result.url}`); + log(` Product: ${result.parsed.product} (${result.parsed.namespace})`); + if (result.exists) { + log(` āœ“ Found: ${result.files.main}`, 'green'); + if (result.files.isShared) { + log(` āœ“ Shared content: ${result.files.sharedSource}`, 'cyan'); + log(` āœ“ Found ${result.files.variants.length} variant(s)`, 'cyan'); + for (const variant of result.files.variants) { + log(` - ${variant}`, 'cyan'); + } + } + } else { + log(' āœ— Page does not exist (will create)', 'yellow'); + log(` → Will create at: ${result.files.main}`, 'yellow'); + } + } + + // Determine mode + const mode = urlAnalysis.every((r) => r.exists) ? 'edit' : 'create'; + log(`\nāœ“ Mode: ${mode}`, 'green'); + + // Load existing content if editing + const existingContent = {}; + if (mode === 'edit') { + for (const result of urlAnalysis) { + if (result.exists) { + const fullPath = join(REPO_ROOT, result.files.main); + const content = readFileSync(fullPath, 'utf8'); + existingContent[result.files.main] = content; + + // Also load shared source if exists + if (result.files.isShared && result.files.sharedSource) { + const sharedPath = join( + REPO_ROOT, + `content${result.files.sharedSource}` + ); + if (existsSync(sharedPath)) { + const sharedContent = readFileSync(sharedPath, 'utf8'); + existingContent[`content${result.files.sharedSource}`] = + sharedContent; + } + } + } + } + } + + // Build context (include URL analysis) + let context = null; + if (draftPath) { + // Use draft content if provided + context = prepareContext(draftPath); + } else { + // Minimal context for editing existing pages + const products = loadProducts(); + context = { + draft: { + path: null, + content: null, + existingFrontmatter: {}, + }, + products, + productHints: { + mentioned: [], + suggested: [], + }, + versionInfo: { + version: parsedURLs[0].namespace === 'influxdb3' ? 
'3.x' : '2.x', + tools: [], + apis: [], + }, + structure: analyzeStructure(), + conventions: { + sharedContentDir: 'content/shared/', + menuKeyPattern: '{namespace}_{product}', + weightLevels: { + description: 'Weight ranges by level', + level1: '1-99 (top-level pages)', + level2: '101-199 (section landing pages)', + level3: '201-299 (detail pages)', + level4: '301-399 (sub-detail pages)', + }, + namingRules: { + files: 'Use lowercase with hyphens (e.g., manage-databases.md)', + directories: 'Use lowercase with hyphens', + shared: 'Shared content in /content/shared/', + }, + testing: { + codeblocks: + 'Use pytest-codeblocks annotations for testable examples', + docker: 'Use compose.yaml services for testing code samples', + commands: '', + }, + }, + }; + } + + // Add URL analysis to context + context.mode = mode; + context.urls = urlAnalysis; + context.existingContent = existingContent; + + // Write context to temp file + writeJson(CONTEXT_FILE, context); + + log( + `\nāœ“ Prepared context → ${CONTEXT_FILE.replace(REPO_ROOT, '.')}`, + 'green' + ); + + // If context-only mode, stop here + if (options['context-only']) { + log(''); + divider(); + log('Context preparation complete!', 'bright'); + log(''); + log('Next steps for manual workflow:', 'cyan'); + log('1. Use your AI tool with prompts from scripts/templates/'); + log( + '2. Generate proposal JSON matching ' + + 'scripts/schemas/scaffold-proposal.schema.json' + ); + log('3. Save to .tmp/scaffold-proposal.json'); + log('4. Run: yarn docs:create --proposal .tmp/scaffold-proposal.json'); + divider(); + log(''); + return null; + } + + return context; + } catch (error) { + log(`\nāœ— Error analyzing URLs: ${error.message}`, 'red'); + if (error.stack) { + console.error(error.stack); + } + process.exit(1); + } +} + +/** + * Phase 1b: Prepare context from draft + */ +async function preparePhase(draftPath, options) { log('\nšŸ” Analyzing draft and repository structure...', 'bright'); // Validate draft exists @@ -135,15 +369,17 @@ async function preparePhase(draftPath) { // Print summary log( - `\nāœ“ Loaded draft content (${context.draft.content.split('\n').length} lines)`, + '\nāœ“ Loaded draft content ' + + `(${context.draft.content.split('\n').length} lines)`, 'green' ); log( - `āœ“ Analyzed ${Object.keys(context.products).length} products from data/products.yml`, + `āœ“ Analyzed ${Object.keys(context.products).length} products ` + + 'from data/products.yml', 'green' ); log( - `āœ“ Found ${context.structure.existingPaths.length} pages in content/influxdb3/`, + `āœ“ Found ${context.structure.existingPaths.length} existing pages`, 'green' ); log( @@ -151,15 +387,26 @@ async function preparePhase(draftPath) { 'green' ); - // Print next steps - log(''); - divider(); - log( - 'Next: Run /scaffold-content to analyze and propose structure', - 'bright' - ); - divider(); - log(''); + // If context-only mode, stop here + if (options['context-only']) { + log(''); + divider(); + log('Context preparation complete!', 'bright'); + log(''); + log('Next steps for manual workflow:', 'cyan'); + log('1. Use your AI tool with prompts from scripts/templates/'); + log( + '2. Generate proposal JSON matching ' + + 'scripts/schemas/scaffold-proposal.schema.json' + ); + log('3. Save to .tmp/scaffold-proposal.json'); + log('4. 
Run: yarn docs:create --proposal .tmp/scaffold-proposal.json'); + divider(); + log(''); + return null; + } + + return context; } catch (error) { log(`\nāœ— Error preparing context: ${error.message}`, 'red'); if (error.stack) { @@ -170,108 +417,380 @@ async function preparePhase(draftPath) { } /** - * Phase 2: Execute proposal + * Select target products (interactive or from flags) */ -async function executePhase(options) { - log('\nšŸ“ Reading proposal...', 'bright'); +async function selectProducts(context, options) { + const detected = context.productHints?.mentioned || []; - // Check if proposal exists - if (!fileExists(PROPOSAL_FILE)) { - log(`\nāœ— Proposal file not found: ${PROPOSAL_FILE}`, 'red'); - log('Did you run /scaffold-content yet?', 'yellow'); + // Expand products with multiple versions into separate entries + const allProducts = []; + const productMap = {}; // Maps display name to product key + + for (const [key, product] of Object.entries(context.products)) { + if (product.versions && product.versions.length > 1) { + // Multi-version product: create separate entries for each version + product.versions.forEach((version) => { + const displayName = `${product.name} ${version}`; + allProducts.push(displayName); + productMap[displayName] = key; + }); + } else { + // Single version or no version info: use product name as-is + allProducts.push(product.name); + productMap[product.name] = key; + } + } + + // Case 1: Explicit flag provided + if (options.products) { + const requested = options.products.split(',').map((p) => p.trim()); + const invalid = requested.filter((p) => !allProducts.includes(p)); + + if (invalid.length > 0) { + log( + `\nāœ— Invalid products: ${invalid.join(', ')}\n` + + `Valid products: ${allProducts.join(', ')}`, + 'red' + ); + process.exit(1); + } + + log( + `āœ“ Using products from --products flag: ${requested.join(', ')}`, + 'green' + ); + return requested; + } + + // Case 2: Unambiguous (single product detected) + if (detected.length === 1) { + log(`āœ“ Auto-selected product: ${detected[0]}`, 'green'); + return detected; + } + + // Case 3: URL-based (extract from URL) + if (context.urls?.length > 0) { + const urlPath = context.urls[0].url; + // Extract product from URL like /influxdb3/core/... or /influxdb/cloud/... + const match = urlPath.match(/^\/(influxdb3?\/.+?)\//); + if (match) { + const productPath = match[1].replace(/\//g, '-'); + const product = allProducts.find((p) => p.includes(productPath)); + if (product) { + log(`āœ“ Product from URL: ${product}`, 'green'); + return [product]; + } + } + } + + // Case 4: Ambiguous or none detected - show interactive menu + log('\nšŸ“¦ Select target products:\n', 'bright'); + allProducts.forEach((p, i) => { + const mark = detected.includes(p) ? 'āœ“' : ' '; + log(` ${i + 1}. 
[${mark}] ${p}`, 'cyan'); + }); + + const answer = await promptUser( + '\nEnter numbers (comma-separated, e.g., 1,3,5): ' + ); + + if (!answer) { + log('āœ— No products selected', 'red'); process.exit(1); } - try { - // Read proposal - const proposal = readJson(PROPOSAL_FILE); + const indices = answer + .split(',') + .map((s) => parseInt(s.trim()) - 1) + .filter((i) => i >= 0 && i < allProducts.length); - // Validate proposal - const validation = validateProposal(proposal); + if (indices.length === 0) { + log('āœ— No valid products selected', 'red'); + process.exit(1); + } - if (!validation.valid) { - log('\nāœ— Invalid proposal:', 'red'); - validation.errors.forEach((err) => log(` • ${err}`, 'red')); - process.exit(1); - } + const selected = indices.map((i) => allProducts[i]); + log(`\nāœ“ Selected products: ${selected.join(', ')}`, 'green'); + return selected; +} - if (validation.warnings.length > 0) { - log('\n⚠ Warnings:', 'yellow'); - validation.warnings.forEach((warn) => log(` • ${warn}`, 'yellow')); - } +/** + * Run single content generator agent with direct file generation (Claude Code) + */ +async function runAgentsWithTaskTool( + context, + selectedProducts, + mode, + isURLBased, + hasExistingContent +) { + // Build context description + const contextDesc = ` +Mode: ${mode} +${isURLBased ? `URLs: ${context.urls.length} URL(s) analyzed` : 'Draft-based workflow'} +${hasExistingContent ? `Existing content: ${Object.keys(context.existingContent).length} file(s)` : 'Creating new content'} +Target Products: ${selectedProducts.join(', ')} +`; - // Show preview - log('\nPreview:', 'bright'); - divider(); + log(` ${contextDesc.trim().split('\n').join('\n ')}\n`, 'cyan'); + + log('šŸ¤– Generating documentation files directly...', 'bright'); + + // Use the same prompt as manual workflow for consistency + const prompt = generateClaudePrompt( + context, + selectedProducts, + mode, + isURLBased, + hasExistingContent + ); + + await Task({ + subagent_type: 'general-purpose', + description: + mode === 'edit' + ? 'Update documentation files' + : 'Generate documentation files', + prompt: prompt, + }); + + log(' āœ“ Files generated\n', 'green'); + log( + `\nāœ“ Summary written to ${PROPOSAL_FILE.replace(REPO_ROOT, '.')}`, + 'green' + ); +} + +/** + * Generate simplified Claude prompt for direct file generation + */ +function generateClaudePrompt( + context, + selectedProducts, + mode, + isURLBased, + hasExistingContent +) { + const prompt = `You are an expert InfluxData documentation writer. + +**Context File**: Read from \`.tmp/scaffold-context.json\` +**Target Products**: Use \`context.selectedProducts\` field (${selectedProducts.join(', ')}) +**Mode**: ${mode === 'edit' ? 'Edit existing content' : 'Create new documentation'} +${isURLBased ? `**URLs**: ${context.urls.map((u) => u.url).join(', ')}` : ''} + +**Your Task**: Generate complete documentation files directly (no proposal step). + +**Important**: The context file contains all products from data/products.yml, but you should ONLY create documentation for the products listed in \`context.selectedProducts\`. + +**Workflow**: +1. Read and analyze \`.tmp/scaffold-context.json\` +2. ${mode === 'edit' ? 'Review existing content and plan improvements' : 'Analyze draft content to determine topic, audience, and structure'} +3. ${isURLBased ? 'Use URL paths to determine file locations' : 'Determine appropriate section (admin, write-data, query-data, etc.)'} +4. Decide if content should be shared across products +5. 
**Generate and write markdown files directly** using the Write tool +6. Create a summary YAML file at \`.tmp/scaffold-proposal.yml\` + +**Content Requirements**: +- **Style**: Active voice, present tense, second person ("you") +- **Formatting**: Semantic line feeds (one sentence per line) +- **Headings**: Use h2-h6 only (h1 comes from title) +- **Code Examples**: + - Use ${context.versionInfo?.tools?.join(' or ') || 'influxdb3, influx, or influxctl'} CLI + - Include pytest-codeblocks annotations + - Format to fit within 80 characters + - Use long options (--option vs -o) + - Show expected output +- **Links**: Descriptive link text, no "click here" +- **Placeholders**: Use UPPERCASE for values users need to replace (e.g., DATABASE_NAME, AUTH_TOKEN) + +**File Structure**: +${ + selectedProducts.length > 1 || context.productHints?.isShared + ? `- Content applies to multiple products: + - Create ONE shared content file in content/shared/ + - Create frontmatter-only files for each product referencing it` + : `- Product-specific content: + - Create files directly in product directories` +} + +**Validation Checks** (run before writing files): +1. **Path validation**: Lowercase, hyphens only (no underscores in filenames) +2. **Weight conflicts**: Check sibling pages, choose unused weight 101-199 +3. **Frontmatter completeness**: All required fields present +4. **Shared content**: If multi-product, verify source paths are correct +5. **Menu structure**: Parent sections exist in product menu hierarchy + +**File Generation**: +For each file you need to create: + +1. **Check if file exists**: Use Read tool first (ignore errors if not found) +2. **Generate frontmatter** in YAML format with proper nesting: + \`\`\`yaml + --- + title: Page Title + description: SEO-friendly description under 160 characters + menu: + product_version: + name: Nav Name + parent: section + weight: 101 + related: + - /related/page/ + alt_links: + other_product: /other/path/ + --- + \`\`\` + +3. **Write full markdown content** with: + - Frontmatter (YAML block) + - Complete article content + - Code examples with proper annotations + - Proper internal links + +4. **Use Write tool**: Write the complete file + - For new files: just use Write + - For existing files: Read first, then Write + +**Summary File**: After generating all files, create \`.tmp/scaffold-proposal.yml\`: + +\`\`\`yaml +topic: Brief description of what was created +targetProducts: + - ${selectedProducts.join('\n - ')} +section: admin | write-data | query-data | get-started | reference +isShared: ${selectedProducts.length > 1} +filesCreated: + - path: content/path/to/file.md + type: shared-content | frontmatter-only | product-specific + status: created | updated +validationResults: + pathsValid: true | false + weightsValid: true | false + frontmatterComplete: true | false + issues: [] +nextSteps: + - Review generated files + - Test code examples + - Check internal links +\`\`\` + +**Important**: +- Use the Write tool for ALL files (markdown and YAML summary) +- For existing files, use Read first, then Write to overwrite +- Generate COMPLETE content, not stubs or placeholders +- Run validation checks before writing each file + +Begin now. Generate the files directly. 
+`; + return prompt; +} + +/** + * Phase 2: Run AI agent analysis + * Orchestrates multiple specialized agents to analyze draft and + * generate proposal + */ +async function runAgentAnalysis(context, options) { + log('šŸ“‹ Phase 2: AI Analysis\n', 'cyan'); + + // Detect environment and determine workflow + const isClaudeCodeEnv = typeof Task !== 'undefined'; + const aiMode = options.ai || 'claude'; + const useTaskTool = isClaudeCodeEnv && aiMode === 'claude'; + + if (useTaskTool) { log( - `Will create ${proposal.files.length} file${proposal.files.length !== 1 ? 's' : ''}:` + 'šŸ¤– Detected Claude Code environment - running agents automatically\n', + 'green' ); - proposal.files.forEach((file) => { - const icon = file.type === 'shared-content' ? 'šŸ“„' : 'šŸ“‹'; - log(` ${icon} ${file.path}`, 'cyan'); - }); - divider(); + } else if (aiMode === 'claude') { + log( + 'šŸ“‹ Claude Code environment not detected - will output prompt for copy-paste\n', + 'cyan' + ); + } - // Dry run mode - if (options['dry-run']) { - log('\nāœ“ Dry run complete (no files created)', 'green'); - return; - } + try { + const mode = context.mode || 'create'; + const isURLBased = context.urls && context.urls.length > 0; + const hasExistingContent = + context.existingContent && + Object.keys(context.existingContent).length > 0; - // Confirm unless --yes flag - if (!options.yes) { - log('\nProceed with creating files? (y/n): ', 'yellow'); + // Select target products + const selectedProducts = await selectProducts(context, options); - // Read user input - const stdin = process.stdin; - stdin.setRawMode(true); - stdin.setEncoding('utf8'); + // Add selectedProducts to context and update the context file + context.selectedProducts = selectedProducts; + writeJson(CONTEXT_FILE, context); + log( + `āœ“ Updated context with selected products: ${selectedProducts.join(', ')}`, + 'green' + ); - const response = await new Promise((resolve) => { - stdin.once('data', (key) => { - stdin.setRawMode(false); - resolve(key.toLowerCase()); - }); - }); + // Hybrid workflow: automatic (Task tool) vs manual (prompt output) + if (useTaskTool) { + // Automatic workflow using Task tool + await runAgentsWithTaskTool( + context, + selectedProducts, + mode, + isURLBased, + hasExistingContent + ); + } else { + // Manual workflow: save consolidated prompt to file + const consolidatedPrompt = generateClaudePrompt( + context, + selectedProducts, + mode, + isURLBased, + hasExistingContent + ); - console.log(''); // New line after input + // Generate filename from draft or topic + const draftName = context.draft?.path + ? 
context.draft.path.split('/').pop().replace(/\.md$/, '') + : 'untitled'; + const sanitizedName = draftName + .toLowerCase() + .replace(/\s+/g, '-') + .replace(/[^a-z0-9-]/g, ''); - // Read user input - const stdin = process.stdin; - stdin.setRawMode(true); - stdin.setEncoding('utf8'); + const promptDir = join(REPO_ROOT, '.context/drafts'); + const promptFile = join(promptDir, `${sanitizedName}-ai-prompt.md`); + + // Ensure directory exists + if (!existsSync(promptDir)) { + const fs = await import('fs'); + fs.mkdirSync(promptDir, { recursive: true }); - const response = await new Promise((resolve) => { - stdin.once('data', (key) => { - stdin.setRawMode(false); - resolve(key.toLowerCase()); - }); - }); + } - console.log(''); // New line after input + // Write prompt to file + const fs = await import('fs'); + fs.writeFileSync(promptFile, consolidatedPrompt, 'utf8'); - if (response !== 'y') { - log('āœ— Cancelled by user', 'yellow'); - process.exit(0); + log('\nāœ… AI prompt saved!', 'green'); + log(`\nšŸ“„ File: ${promptFile.replace(REPO_ROOT, '.')}\n`, 'cyan'); + + log('šŸ“ Next steps:', 'bright'); + log(' 1. Open the prompt file in your editor', 'yellow'); + log(' 2. Copy the entire content', 'yellow'); + log(' 3. Paste into your AI tool (Claude, ChatGPT, etc.)', 'yellow'); + log( + ' 4. The AI will generate documentation files directly in content/', + 'yellow' + ); + log(' 5. Review the generated files and iterate as needed', 'yellow'); + log( + ` 6. Check the summary at ${PROPOSAL_FILE.replace(REPO_ROOT, '.')}`, + 'yellow' + ); - // Execute proposal - log('\nšŸ“ Creating files...', 'bright'); - const result = executeProposal(proposal); + process.exit(0); + } } catch (error) { - log(`\nāœ— Error executing proposal: ${error.message}`, 'red'); + log(`\nāœ— Error during AI analysis: ${error.message}`, 'red'); if (error.stack) { console.error(error.stack); } @@ -279,6 +798,110 @@ async function executePhase(options) { } +// Remove all the old agent code below - it's been replaced by the hybrid approach above +// The function now ends here + +/** + * Phase 3: Execute proposal + */ +async function executePhase(options) { + log('\nšŸ“ Phase 3: Executing Proposal\n', 'bright'); + + // Auto-detect proposal if not specified + let proposalPath = options.proposal || PROPOSAL_FILE; + + if (!fileExists(proposalPath)) { + log(`\nāœ— Proposal file not found: ${proposalPath}`, 'red'); + log( + '\nRun yarn docs:create --draft <path> first to generate proposal', + 'yellow' + ); + process.exit(1); + } + + // Read and validate proposal + const proposal = readJson(proposalPath); + + try { + validateProposal(proposal); + } catch (error) { + log(`\nāœ— Invalid proposal: ${error.message}`, 'red'); + process.exit(1); + } + + // Show preview + log('\nšŸ“‹ Proposal Summary:\n', 'cyan'); + log(` Topic: ${proposal.analysis.topic}`, 'cyan'); + log(` Products: ${proposal.analysis.targetProducts.join(', ')}`, 'cyan'); + log(` Section: ${proposal.analysis.section}`, 'cyan'); + log(` Shared: ${proposal.analysis.isShared ? 
'Yes' : 'No'}`, 'cyan'); + + if (proposal.analysis.styleReview?.issues?.length > 0) { + log( + `\nāš ļø Style Issues (${proposal.analysis.styleReview.issues.length}):`, + 'yellow' + ); + proposal.analysis.styleReview.issues.forEach((issue) => { + log(` • ${issue}`, 'yellow'); + }); + } + + log('\nšŸ“ Files to create:\n', 'bright'); + proposal.files.forEach((file) => { + const icon = file.type === 'shared-content' ? 'šŸ“„' : 'šŸ“‹'; + const size = file.content ? ` (${file.content.length} chars)` : ''; + log(` ${icon} ${file.path}${size}`, 'cyan'); + }); + + // Dry run mode + if (options['dry-run']) { + log('\nāœ“ Dry run complete (no files created)', 'green'); + return; + } + + // Confirm unless --yes flag + if (!options.yes) { + const answer = await promptUser('\nProceed with creating files? (y/n): '); + + if (answer.toLowerCase() !== 'y') { + log('āœ— Cancelled by user', 'yellow'); + process.exit(0); + } + } + + // Execute proposal + log('\nšŸ“ Creating files...', 'bright'); + const result = executeProposal(proposal); + + // Report results + if (result.created.length > 0) { + log('\nāœ… Created files:', 'green'); + result.created.forEach((file) => { + log(` āœ“ ${file}`, 'green'); + }); + } + + if (result.errors.length > 0) { + log('\nāœ— Errors:', 'red'); + result.errors.forEach((err) => log(` • ${err}`, 'red')); + } + + // Print next steps + if (result.created.length > 0) { + log('\nšŸŽ‰ Done! Next steps:', 'bright'); + log(' 1. Review generated frontmatter and content'); + log(' 2. Test locally: npx hugo server'); + log( + ` 3. Test links: yarn test:links ${result.created[0].replace(/\/[^/]+$/, '/')}**/*.md` + ); + log(' 4. Commit changes: git add content/ && git commit'); + } + + if (result.errors.length > 0) { + process.exit(1); + } +} + /** * Main entry point */ @@ -291,16 +914,77 @@ async function main() { process.exit(0); } - // Determine phase - if (options.draft) { - // Phase 1: Prepare context - await preparePhase(options.draft); + // Determine workflow + if (options.url && options.url.length > 0) { + // URL-based workflow requires draft content + if (!options.draft) { + log('\nāœ— Error: --url requires --draft <path>', 'red'); + log('The --url option specifies WHERE to create content.', 'yellow'); + log( + 'You must provide --draft to specify WHAT content to create.', + 'yellow' + ); + log('\nExample:', 'cyan'); + log( + ' yarn docs:create --url /influxdb3/core/admin/new-feature/ \\', + 'cyan' + ); + log(' --draft drafts/new-feature.md', 'cyan'); + log('\nTo edit an existing page, use: yarn docs:edit <url>', 'cyan'); + process.exit(1); + } + + const context = await prepareURLPhase(options.url, options.draft, options); + + if (options['context-only']) { + // Stop after context preparation + process.exit(0); + } + + // Continue with AI analysis (Phase 2) + log('\nšŸ¤– Running AI analysis with specialized agents...\n', 'bright'); + await runAgentAnalysis(context, options); + + // Execute proposal (Phase 3) + await executePhase(options); + } else if (options.draft) { + // Draft-based workflow + const context = await preparePhase(options.draft, options); + + if (options['context-only']) { + // Stop after context preparation + process.exit(0); + } + + // Continue with AI analysis (Phase 2) + log('\nšŸ¤– Running AI analysis with specialized agents...\n', 'bright'); + await runAgentAnalysis(context, options); + + // Execute proposal (Phase 3) + await executePhase(options); + } else if (options.proposal) { + // Import and execute external proposal + if (!fileExists(options.proposal)) { 
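+      // Fail fast: an unreadable proposal path means there is nothing to execute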
log(`\nāœ— Proposal file not found: ${options.proposal}`, 'red'); + process.exit(1); + } + // Copy proposal to expected location + const proposal = readJson(options.proposal); + writeJson(PROPOSAL_FILE, proposal); + await executePhase(options); } else if (options.execute || options['dry-run']) { - // Phase 2: Execute proposal + // Legacy: Execute proposal (deprecated) + log( + '\n⚠ Warning: --execute is deprecated. Use --proposal instead.', + 'yellow' + ); await executePhase(options); } else { // No valid options provided - log('Error: Must specify --draft or --execute', 'red'); + log( + 'Error: Must specify a docs URL (new or existing), a draft path, or --proposal', + 'red' + ); log('Run with --help for usage information\n'); process.exit(1); } diff --git a/scripts/docs-edit.js b/scripts/docs-edit.js new file mode 100755 index 000000000..ec85e73e9 --- /dev/null +++ b/scripts/docs-edit.js @@ -0,0 +1,249 @@ +#!/usr/bin/env node + +/** + * Documentation file opener + * Opens existing documentation pages in your default editor + * + * Usage: + * yarn docs:edit <url> + * yarn docs:edit https://docs.influxdata.com/influxdb3/core/admin/databases/ + * yarn docs:edit /influxdb3/core/admin/databases/ + */ + +import { parseArgs } from 'node:util'; +import process from 'node:process'; +import { join, dirname } from 'path'; +import { fileURLToPath } from 'url'; +import { existsSync, readFileSync } from 'fs'; +import { spawn } from 'child_process'; +import { parseDocumentationURL, urlToFilePaths } from './lib/url-parser.js'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = dirname(__filename); + +// Repository root +const REPO_ROOT = join(__dirname, '..'); + +// Colors for console output +const colors = { + reset: '\x1b[0m', + bright: '\x1b[1m', + green: '\x1b[32m', + yellow: '\x1b[33m', + blue: '\x1b[34m', + red: '\x1b[31m', + cyan: '\x1b[36m', +}; + +/** + * Print colored output + */ +function log(message, color = 'reset') { + console.log(`${colors[color]}${message}${colors.reset}`); +} + +/** + * Parse command line arguments + */ +function parseArguments() { + const { values, positionals } = parseArgs({ + options: { + help: { type: 'boolean', default: false }, + list: { type: 'boolean', default: false }, + }, + allowPositionals: true, + }); + + // First positional argument is the URL + if (positionals.length > 0 && !values.url) { + values.url = positionals[0]; + } + + return values; } + +/** + * Print usage information + */ +function printUsage() { + console.log(` +${colors.bright}Documentation File Opener${colors.reset} + +${colors.bright}Usage:${colors.reset} + yarn docs:edit <url> Open page in editor + yarn docs:edit --list <url> List matching files without opening + +${colors.bright}Arguments:${colors.reset} + <url> Documentation URL or path + +${colors.bright}Options:${colors.reset} + --list List matching files without opening + --help Show this help message + +${colors.bright}Examples:${colors.reset} + # Open with full URL + yarn docs:edit https://docs.influxdata.com/influxdb3/core/admin/databases/ + + # Open with path only + yarn docs:edit /influxdb3/core/admin/databases/ + + # List files without opening + yarn docs:edit --list /influxdb3/core/admin/databases/ + +${colors.bright}Notes:${colors.reset} + - Opens files in your default editor (set via EDITOR environment variable) + - If multiple files exist (e.g., shared content variants), opens all of them + - Falls back to VS Code if EDITOR is not set +`); } + +/** + * Find matching files for a URL + */ +function findFiles(url) { + try { + // 
Parse URL + const parsed = parseDocumentationURL(url); + log(`\nšŸ” Analyzing URL: ${url}`, 'bright'); + log(` Product: ${parsed.namespace}/${parsed.product || 'N/A'}`, 'cyan'); + log(` Section: ${parsed.section || 'N/A'}`, 'cyan'); + + // Get potential file paths + const potentialPaths = urlToFilePaths(parsed); + const foundFiles = []; + + for (const relativePath of potentialPaths) { + const fullPath = join(REPO_ROOT, relativePath); + if (existsSync(fullPath)) { + foundFiles.push(relativePath); + } + } + + return { parsed, foundFiles }; + } catch (error) { + log(`\nāœ— Error parsing URL: ${error.message}`, 'red'); + process.exit(1); + } +} + +/** + * Check if file uses shared content + */ +function checkSharedContent(filePath) { + const fullPath = join(REPO_ROOT, filePath); + + if (!existsSync(fullPath)) { + return null; + } + + const content = readFileSync(fullPath, 'utf8'); + + // Check for source: frontmatter + const sourceMatch = content.match(/^source:\s*(.+)$/m); + if (sourceMatch) { + const sourcePath = sourceMatch[1].trim(); + return `content${sourcePath}`; + } + + return null; +} + +/** + * Open files in editor + */ +function openInEditor(files) { + // Determine editor + const editor = process.env.EDITOR || 'code'; + + log(`\nšŸ“ Opening ${files.length} file(s) in ${editor}...`, 'bright'); + + // Convert to absolute paths + const absolutePaths = files.map((f) => join(REPO_ROOT, f)); + + // Spawn editor process + const child = spawn(editor, absolutePaths, { + stdio: 'inherit', + detached: false, + }); + + child.on('error', (error) => { + log(`\nāœ— Failed to open editor: ${error.message}`, 'red'); + log('\nTry setting the EDITOR environment variable:', 'yellow'); + log(' export EDITOR=vim', 'cyan'); + log(' export EDITOR=code', 'cyan'); + log(' export EDITOR=nano', 'cyan'); + process.exit(1); + }); + + child.on('close', (code) => { + if (code !== 0 && code !== null) { + log(`\nāœ— Editor exited with code ${code}`, 'yellow'); + } + }); +} + +/** + * Main entry point + */ +async function main() { + const options = parseArguments(); + + // Show help + if (options.help || !options.url) { + printUsage(); + process.exit(0); + } + + // Find files + const { parsed, foundFiles } = findFiles(options.url); + + if (foundFiles.length === 0) { + log('\nāœ— No files found for this URL', 'red'); + log('\nThe page may not exist yet. 
To create new content, use:', 'yellow'); + log(' yarn docs:create --url <url> --draft <path>', 'cyan'); + process.exit(1); + } + + // Display found files + log('\nāœ“ Found files:', 'green'); + const allFiles = new Set(); + + for (const file of foundFiles) { + allFiles.add(file); + log(` • ${file}`, 'cyan'); + + // Check for shared content + const sharedSource = checkSharedContent(file); + if (sharedSource) { + if (existsSync(join(REPO_ROOT, sharedSource))) { + allFiles.add(sharedSource); + log( + ` • ${sharedSource} ${colors.yellow}(shared source)${colors.reset}`, + 'cyan' + ); + } + } + } + + const filesToOpen = Array.from(allFiles); + + // List only mode + if (options.list) { + log(`\nāœ“ Found ${filesToOpen.length} file(s)`, 'green'); + process.exit(0); + } + + // Open in editor + openInEditor(filesToOpen); +} + +// Run if called directly +if (import.meta.url === `file://${process.argv[1]}`) { + main().catch((error) => { + log(`\nFatal error: ${error.message}`, 'red'); + console.error(error.stack); + process.exit(1); + }); +} + +export { findFiles, openInEditor }; diff --git a/scripts/lib/content-scaffolding.js b/scripts/lib/content-scaffolding.js index 5cba9c814..63d0e6e66 100644 --- a/scripts/lib/content-scaffolding.js +++ b/scripts/lib/content-scaffolding.js @@ -16,6 +16,7 @@ import { validatePath, ensureDirectory, } from './file-operations.js'; +import { urlToFilePaths } from './url-parser.js'; const __filename = fileURLToPath(import.meta.url); const __dirname = dirname(__filename); @@ -54,68 +55,226 @@ export function loadProducts() { } /** - * Analyze content directory structure - * @param {string} basePath - Base path to analyze (e.g., 'content/influxdb3') - * @returns {object} Structure analysis + * Extract product mentions from draft content + * @param {string} content - Draft content to analyze + * @param {object} products - Products map from loadProducts() + * @returns {string[]} Array of product keys mentioned */ -export function analyzeStructure(basePath = 'content/influxdb3') { - const fullPath = join(REPO_ROOT, basePath); +export function extractProductMentions(content, products) { + const mentioned = new Set(); + const contentLower = content.toLowerCase(); - if (!existsSync(fullPath)) { - return { sections: [], existingPaths: [], siblingWeights: {} }; - } + // Product name patterns to search for + const patterns = { + influxdb3_core: [ + 'influxdb 3 core', + 'influxdb3 core', + 'influxdb core', + 'core version', + ], + influxdb3_enterprise: [ + 'influxdb 3 enterprise', + 'influxdb3 enterprise', + 'influxdb enterprise', + 'enterprise version', + ], + influxdb3_cloud_dedicated: [ + 'cloud dedicated', + 'influxdb cloud dedicated', + 'dedicated cluster', + ], + influxdb3_cloud_serverless: [ + 'cloud serverless', + 'influxdb cloud serverless', + 'serverless', + ], + influxdb3_clustered: ['clustered', 'influxdb clustered', 'kubernetes'], + influxdb_cloud: ['influxdb cloud', 'influxdb 2 cloud'], + influxdb_v2: ['influxdb 2', 'influxdb v2', 'influxdb 2.x'], + influxdb_v1: ['influxdb 1', 'influxdb v1', 'influxdb 1.x'], + }; - const sections = []; - const existingPaths = []; - const siblingWeights = {}; - - // Recursively walk directory - function walk(dir, relativePath = '') { - const entries = readdirSync(dir); - - for (const entry of entries) { - const fullEntryPath = join(dir, entry); - const relativeEntryPath = join(relativePath, entry); - const stat = statSync(fullEntryPath); - - if (stat.isDirectory()) { - // Track sections (top-level directories) - if (relativePath === '') { 
sections.push(entry); - } - - // Track all directory paths - existingPaths.push(join(basePath, relativeEntryPath)); - - // Recurse - walk(fullEntryPath, relativeEntryPath); + // Check for each product's patterns + for (const [productKey, productPatterns] of Object.entries(patterns)) { + for (const pattern of productPatterns) { + if (contentLower.includes(pattern)) { + mentioned.add(productKey); + break; } } } - walk(fullPath); + return Array.from(mentioned); +} - // Analyze weights in common sections - const commonSections = [ - 'admin', - 'write-data', - 'query-data', - 'reference', - 'get-started', - ]; - for (const section of commonSections) { - const sectionPath = join(fullPath, 'core', section); - if (existsSync(sectionPath)) { - const weights = findSiblingWeights(sectionPath); - if (weights.length > 0) { - siblingWeights[`${basePath}/core/${section}/`] = weights; +/** + * Detect InfluxDB version and related tools from draft content + * @param {string} content - Draft content to analyze + * @returns {object} Version information + */ +export function detectInfluxDBVersion(content) { + const contentLower = content.toLowerCase(); + + // Version detection patterns + const versionInfo = { + version: null, + tools: [], + apis: [], + }; + + // Detect version + if ( + contentLower.includes('influxdb 3') || + contentLower.includes('influxdb3') + ) { + versionInfo.version = '3.x'; + + // v3-specific tools + if ( + contentLower.includes('influxdb3 ') || + contentLower.includes('influxdb3-') + ) { + versionInfo.tools.push('influxdb3 CLI'); + } + if (contentLower.includes('influxctl')) { + versionInfo.tools.push('influxctl'); + } + if (contentLower.includes('/api/v3')) { + versionInfo.apis.push('/api/v3'); + } + } else if ( + contentLower.includes('influxdb 2') || + contentLower.includes('influxdb v2') + ) { + versionInfo.version = '2.x'; + + // v2-specific tools + if (contentLower.includes('influx ')) { + versionInfo.tools.push('influx CLI'); + } + if (contentLower.includes('/api/v2')) { + versionInfo.apis.push('/api/v2'); + } + } else if ( + contentLower.includes('influxdb 1') || + contentLower.includes('influxdb v1') + ) { + versionInfo.version = '1.x'; + + // v1-specific tools + if (contentLower.includes('influx -')) { + versionInfo.tools.push('influx CLI (v1)'); + } + if (contentLower.includes('influxd')) { + versionInfo.tools.push('influxd'); + } + } + + // Common tools across versions + if (contentLower.includes('telegraf')) { + versionInfo.tools.push('Telegraf'); + } + + return versionInfo; +} + +/** + * Analyze content directory structure + * @param {string|string[]} basePaths - Base path(s) to analyze (e.g., 'content/influxdb3' or ['content/influxdb3', 'content/influxdb']) + * @returns {object} Structure analysis + */ +export function analyzeStructure(basePaths = 'content/influxdb3') { + // Normalize to array + const pathsArray = Array.isArray(basePaths) ? 
basePaths : [basePaths]; + + const allSections = new Set(); + const allExistingPaths = []; + const siblingWeights = {}; + + // Analyze each base path + for (const basePath of pathsArray) { + const fullPath = join(REPO_ROOT, basePath); + + if (!existsSync(fullPath)) { + continue; + } + + // Recursively walk directory + function walk(dir, relativePath = '') { + try { + const entries = readdirSync(dir); + + for (const entry of entries) { + const fullEntryPath = join(dir, entry); + const relativeEntryPath = join(relativePath, entry); + + try { + const stat = statSync(fullEntryPath); + + if (stat.isDirectory()) { + // Track product-level directories (first level under content/namespace/) + const pathParts = relativeEntryPath.split('/'); + if (pathParts.length === 2) { + // This is a product directory (e.g., 'core', 'enterprise') + allSections.add(pathParts[1]); + } + + // Track all directory paths + allExistingPaths.push(join(basePath, relativeEntryPath)); + + // Recurse + walk(fullEntryPath, relativeEntryPath); + } + } catch (error) { + // Skip files/dirs we can't access + continue; + } + } + } catch (error) { + // Skip directories we can't read } } + + walk(fullPath); + + // Analyze weights in common sections for all product directories + const commonSections = [ + 'admin', + 'write-data', + 'query-data', + 'reference', + 'get-started', + 'plugins', + ]; + + // Find all product directories (e.g., core, enterprise, cloud-dedicated) + try { + const productDirs = readdirSync(fullPath).filter((entry) => { + const fullEntryPath = join(fullPath, entry); + return ( + existsSync(fullEntryPath) && statSync(fullEntryPath).isDirectory() + ); + }); + + for (const productDir of productDirs) { + for (const section of commonSections) { + const sectionPath = join(fullPath, productDir, section); + if (existsSync(sectionPath)) { + const weights = findSiblingWeights(sectionPath); + if (weights.length > 0) { + siblingWeights[`${basePath}/${productDir}/${section}/`] = weights; + } + } + } + } + } catch (error) { + // Skip if we can't read directory + } } return { - sections: [...new Set(sections)].sort(), - existingPaths: existingPaths.sort(), + sections: [...allSections].sort(), + existingPaths: allExistingPaths.sort(), siblingWeights, }; } @@ -154,7 +313,7 @@ export function findSiblingWeights(dirPath) { } /** - * Prepare complete context for Claude analysis + * Prepare complete context for AI analysis * @param {string} draftPath - Path to draft file * @returns {object} Context object */ @@ -165,8 +324,27 @@ export function prepareContext(draftPath) { // Load products const products = loadProducts(); - // Analyze structure - const structure = analyzeStructure(); + // Extract product mentions from draft + const mentionedProducts = extractProductMentions(draft.content, products); + + // Detect InfluxDB version and tools + const versionInfo = detectInfluxDBVersion(draft.content); + + // Determine which content paths to analyze based on version + let contentPaths = []; + if (versionInfo.version === '3.x') { + contentPaths = ['content/influxdb3']; + } else if (versionInfo.version === '2.x') { + contentPaths = ['content/influxdb']; + } else if (versionInfo.version === '1.x') { + contentPaths = ['content/influxdb/v1', 'content/enterprise_influxdb/v1']; + } else { + // Default: analyze all + contentPaths = ['content/influxdb3', 'content/influxdb']; + } + + // Analyze structure for relevant paths + const structure = analyzeStructure(contentPaths); // Build context const context = { @@ -176,22 +354,38 @@ export function 
prepareContext(draftPath) { existingFrontmatter: draft.frontmatter, }, products, + productHints: { + mentioned: mentionedProducts, + suggested: + mentionedProducts.length > 0 + ? mentionedProducts + : Object.keys(products).filter( + (key) => + key.startsWith('influxdb3_') || key.startsWith('influxdb_v') + ), + }, + versionInfo, structure, conventions: { sharedContentDir: 'content/shared/', - menuKeyPattern: 'influxdb3_{product}', + menuKeyPattern: '{namespace}_{product}', weightLevels: { description: 'Weight ranges by level', - level1: '1-99', - level2: '101-199', - level3: '201-299', - level4: '301-399', + level1: '1-99 (top-level pages)', + level2: '101-199 (section landing pages)', + level3: '201-299 (detail pages)', + level4: '301-399 (sub-detail pages)', }, namingRules: { files: 'Use lowercase with hyphens (e.g., manage-databases.md)', directories: 'Use lowercase with hyphens', shared: 'Shared content in /content/shared/', }, + testing: { + codeblocks: 'Use pytest-codeblocks annotations for testable examples', + docker: 'Use compose.yaml services for testing code samples', + commands: `Version-specific CLIs: ${versionInfo.tools.join(', ') || 'detected from content'}`, + }, }, }; @@ -375,3 +569,192 @@ export function suggestNextWeight(existingWeights, level = 3) { // Return max + 1 return Math.max(...levelWeights) + 1; } + +/** + * Find file from parsed URL + * @param {object} parsedURL - Parsed URL from url-parser.js + * @returns {object|null} File information or null if not found + */ +export function findFileFromURL(parsedURL) { + const potentialPaths = urlToFilePaths(parsedURL); + + for (const relativePath of potentialPaths) { + const fullPath = join(REPO_ROOT, relativePath); + if (existsSync(fullPath)) { + return { + path: relativePath, + fullPath, + exists: true, + }; + } + } + + // File doesn't exist, return first potential path for creation + return { + path: potentialPaths[0], + fullPath: join(REPO_ROOT, potentialPaths[0]), + exists: false, + }; +} + +/** + * Detect if a file uses shared content + * @param {string} filePath - Path to file (relative to repo root) + * @returns {string|null} Shared source path if found, null otherwise + */ +export function detectSharedContent(filePath) { + const fullPath = join(REPO_ROOT, filePath); + + if (!existsSync(fullPath)) { + return null; + } + + try { + const content = readFileSync(fullPath, 'utf8'); + const parsed = matter(content); + + if (parsed.data && parsed.data.source) { + return parsed.data.source; + } + } catch (error) { + // Can't parse, assume not shared + return null; + } + + return null; +} + +/** + * Find all files that reference a shared source + * @param {string} sourcePath - Path to shared content file (e.g., "/shared/influxdb3-admin/databases.md") + * @returns {string[]} Array of file paths that use this shared source + */ +export function findSharedContentVariants(sourcePath) { + const variants = []; + + // Search content directories + const contentDirs = [ + 'content/influxdb3', + 'content/influxdb', + 'content/telegraf', + ]; + + function searchDirectory(dir) { + if (!existsSync(dir)) { + return; + } + + try { + const entries = readdirSync(dir); + + for (const entry of entries) { + const fullPath = join(dir, entry); + const stat = statSync(fullPath); + + if (stat.isDirectory()) { + searchDirectory(fullPath); + } else if (entry.endsWith('.md')) { + try { + const content = readFileSync(fullPath, 'utf8'); + const parsed = matter(content); + + if (parsed.data && parsed.data.source === sourcePath) { + // Convert to 
relative path from repo root + const relativePath = fullPath.replace(REPO_ROOT + '/', ''); + variants.push(relativePath); + } + } catch (error) { + // Skip files that can't be parsed + continue; + } + } + } + } catch (error) { + // Skip directories we can't read + } + } + + for (const contentDir of contentDirs) { + searchDirectory(join(REPO_ROOT, contentDir)); + } + + return variants; +} + +/** + * Analyze an existing page + * @param {string} filePath - Path to file (relative to repo root) + * @returns {object} Page analysis + */ +export function analyzeExistingPage(filePath) { + const fullPath = join(REPO_ROOT, filePath); + + if (!existsSync(fullPath)) { + throw new Error(`File not found: ${filePath}`); + } + + const content = readFileSync(fullPath, 'utf8'); + const parsed = matter(content); + + const analysis = { + path: filePath, + fullPath, + content: parsed.content, + frontmatter: parsed.data, + isShared: false, + sharedSource: null, + variants: [], + }; + + // Check if this file uses shared content + if (parsed.data && parsed.data.source) { + analysis.isShared = true; + analysis.sharedSource = parsed.data.source; + + // Find all variants that use the same shared source + analysis.variants = findSharedContentVariants(parsed.data.source); + } + + return analysis; +} + +/** + * Analyze multiple URLs and find their files + * @param {object[]} parsedURLs - Array of parsed URLs + * @returns {object[]} Array of URL analysis results + */ +export function analyzeURLs(parsedURLs) { + const results = []; + + for (const parsedURL of parsedURLs) { + const fileInfo = findFileFromURL(parsedURL); + + const result = { + url: parsedURL.url, + parsed: parsedURL, + exists: fileInfo.exists, + files: { + main: fileInfo.path, + isShared: false, + sharedSource: null, + variants: [], + }, + }; + + if (fileInfo.exists) { + // Analyze existing page + try { + const analysis = analyzeExistingPage(fileInfo.path); + result.files.isShared = analysis.isShared; + result.files.sharedSource = analysis.sharedSource; + result.files.variants = analysis.variants; + } catch (error) { + console.error(`Error analyzing ${fileInfo.path}: ${error.message}`); + } + } + + results.push(result); + } + + return results; +} diff --git a/scripts/lib/url-parser.js b/scripts/lib/url-parser.js new file mode 100644 index 000000000..e985f16f5 --- /dev/null +++ b/scripts/lib/url-parser.js @@ -0,0 +1,216 @@ +/** + * URL parsing utilities for documentation scaffolding + * Parses docs.influxdata.com URLs to extract product, version, and path information + */ + +import { basename } from 'path'; + +// Base URL pattern for InfluxData documentation +const DOCS_BASE_URL = 'docs.influxdata.com'; + +/** + * Parse a documentation URL to extract components + * @param {string} url - Full URL or path (e.g., "https://docs.influxdata.com/influxdb3/core/admin/databases/" or "/influxdb3/core/admin/databases/") + * @returns {object} Parsed URL components + */ +export function parseDocumentationURL(url) { + // Remove protocol and domain if present + let path = url; + if (url.includes(DOCS_BASE_URL)) { + const urlObj = new URL(url); + path = urlObj.pathname; + } + + // Remove leading and trailing slashes + path = path.replace(/^\/+|\/+$/g, ''); + + // Split into parts + const parts = path.split('/').filter((p) => p.length > 0); + + if (parts.length === 0) { + throw new Error('Invalid URL: no path components'); + } + + // First part is the namespace (influxdb3, influxdb, telegraf, etc.) 
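+  // Example: "/influxdb3/core/admin/databases/" yields
+  // parts = ['influxdb3', 'core', 'admin', 'databases']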
+ const namespace = parts[0]; + + // Determine product structure based on namespace + let product = null; + let section = null; + let pagePath = []; + let isSection = false; + + if (namespace === 'influxdb3') { + // InfluxDB 3 structure: /influxdb3/{product}/{section}/{...path} + if (parts.length >= 2) { + product = parts[1]; // core, enterprise, cloud-dedicated, cloud-serverless, clustered, explorer + if (parts.length >= 3) { + section = parts[2]; // admin, write-data, query-data, reference, get-started, plugins + pagePath = parts.slice(3); + } + } + } else if (namespace === 'influxdb') { + // InfluxDB 2/1 structure: /influxdb/{version}/{section}/{...path} + if (parts.length >= 2) { + const secondPart = parts[1]; + if (secondPart === 'cloud') { + product = 'cloud'; + if (parts.length >= 3) { + section = parts[2]; + pagePath = parts.slice(3); + } + } else if (secondPart.match(/^v\d/)) { + // v2.x or v1.x + product = secondPart; + if (parts.length >= 3) { + section = parts[2]; + pagePath = parts.slice(3); + } + } else { + // Assume cloudless-v2 structure: /influxdb/{section}/{...path} + section = secondPart; + pagePath = parts.slice(2); + product = 'v2'; // default + } + } + } else if (namespace === 'telegraf') { + // Telegraf structure: /telegraf/{version}/{section}/{...path} + if (parts.length >= 2) { + product = parts[1]; + if (parts.length >= 3) { + section = parts[2]; + pagePath = parts.slice(3); + } + } + } else if (namespace === 'kapacitor' || namespace === 'chronograf') { + // Other products: /{product}/{version}/{section}/{...path} + if (parts.length >= 2) { + product = parts[1]; + if (parts.length >= 3) { + section = parts[2]; + pagePath = parts.slice(3); + } + } + } + + // Determine if this is a section (directory) or single page + // Section URLs typically end with / or have no file extension + // Single page URLs typically end with a page name + if (pagePath.length === 0 && section) { + // URL points to section landing page + isSection = true; + } else if (pagePath.length > 0) { + const lastPart = pagePath[pagePath.length - 1]; + // If last part looks like a directory (no dots), it's a section + isSection = !lastPart.includes('.'); + } + + return { + url, + namespace, + product, + section, + pagePath: pagePath.join('/'), + isSection, + fullPath: parts.join('/'), + }; +} + +/** + * Validate if a URL is a valid documentation URL + * @param {string} url - URL to validate + * @returns {boolean} True if valid documentation URL + */ +export function validateDocumentationURL(url) { + try { + const parsed = parseDocumentationURL(url); + return parsed.namespace && parsed.namespace.length > 0; + } catch (error) { + return false; + } +} + +/** + * Convert parsed URL to potential file paths + * @param {object} parsedURL - Parsed URL from parseDocumentationURL() + * @returns {string[]} Array of potential file paths to check + */ +export function urlToFilePaths(parsedURL) { + const { namespace, product, section, pagePath, isSection } = parsedURL; + + const basePaths = []; + + // Build base path based on namespace and product + let contentPath = `content/${namespace}`; + if (product) { + contentPath += `/${product}`; + } + if (section) { + contentPath += `/${section}`; + } + + if (pagePath) { + contentPath += `/${pagePath}`; + } + + if (isSection) { + // Section could be _index.md or directory with _index.md + basePaths.push(`${contentPath}/_index.md`); + basePaths.push(`${contentPath}.md`); // Sometimes sections are single files + } else { + // Single page + 
basePaths.push(`${contentPath}.md`); + basePaths.push(`${contentPath}/_index.md`); // Could still be a section + } + + return basePaths; +} + +/** + * Extract page name from URL for use in file names + * @param {object} parsedURL - Parsed URL from parseDocumentationURL() + * @returns {string} Suggested file name + */ +export function urlToFileName(parsedURL) { + const { pagePath, section } = parsedURL; + + if (pagePath && pagePath.length > 0) { + // Use last part of page path + const parts = pagePath.split('/'); + return parts[parts.length - 1]; + } else if (section) { + // Use section name + return section; + } + + return 'index'; +} + +/** + * Parse multiple URLs (comma-separated or array) + * @param {string|string[]} urls - URLs to parse + * @returns {object[]} Array of parsed URLs + */ +export function parseMultipleURLs(urls) { + let urlArray = []; + + if (typeof urls === 'string') { + // Split by comma if string + urlArray = urls.split(',').map((u) => u.trim()); + } else if (Array.isArray(urls)) { + urlArray = urls; + } else { + throw new Error('URLs must be a string or array'); + } + + return urlArray + .map((url) => { + try { + return parseDocumentationURL(url); + } catch (error) { + console.error(`Error parsing URL ${url}: ${error.message}`); + return null; + } + }) + .filter((parsed) => parsed !== null); +} diff --git a/scripts/schemas/scaffold-context.schema.json b/scripts/schemas/scaffold-context.schema.json new file mode 100644 index 000000000..0ca409462 --- /dev/null +++ b/scripts/schemas/scaffold-context.schema.json @@ -0,0 +1,182 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Content Scaffolding Context", + "description": "Context data prepared by docs-create.js for AI analysis", + "type": "object", + "required": ["draft", "products", "productHints", "versionInfo", "structure", "conventions"], + "properties": { + "mode": { + "type": "string", + "enum": ["create", "edit"], + "description": "Operation mode: create new content or edit existing content" + }, + "urls": { + "type": "array", + "description": "URL analysis results (for URL-based workflow)", + "items": { + "type": "object", + "properties": { + "url": { "type": "string" }, + "parsed": { + "type": "object", + "properties": { + "namespace": { "type": "string" }, + "product": { "type": "string" }, + "section": { "type": "string" }, + "pagePath": { "type": "string" }, + "isSection": { "type": "boolean" } + } + }, + "exists": { "type": "boolean" }, + "files": { + "type": "object", + "properties": { + "main": { "type": "string" }, + "isShared": { "type": "boolean" }, + "sharedSource": { "type": ["string", "null"] }, + "variants": { + "type": "array", + "items": { "type": "string" } + } + } + } + } + } + }, + "existingContent": { + "type": "object", + "description": "Existing file contents (for edit mode)", + "patternProperties": { + ".*": { "type": "string" } + } + }, + "draft": { + "type": "object", + "description": "Draft content and metadata", + "required": ["path", "content", "existingFrontmatter"], + "properties": { + "path": { + "type": "string", + "description": "Path to the draft file" + }, + "content": { + "type": "string", + "description": "Markdown content of the draft" + }, + "existingFrontmatter": { + "type": "object", + "description": "Frontmatter from draft (if any)" + } + } + }, + "products": { + "type": "object", + "description": "Available InfluxDB products from data/products.yml", + "patternProperties": { + ".*": { + "type": "object", + "properties": { + "key": { "type": 
"string" }, + "name": { "type": "string" }, + "namespace": { "type": "string" }, + "menu_category": { "type": "string" }, + "versions": { "type": "array", "items": { "type": "string" } }, + "latest": { "type": "string" } + } + } + } + }, + "productHints": { + "type": "object", + "description": "Product recommendations from content analysis", + "required": ["mentioned", "suggested"], + "properties": { + "mentioned": { + "type": "array", + "description": "Products explicitly mentioned in draft content", + "items": { "type": "string" } + }, + "suggested": { + "type": "array", + "description": "Products suggested based on analysis", + "items": { "type": "string" } + } + } + }, + "versionInfo": { + "type": "object", + "description": "Detected InfluxDB version and tools", + "required": ["version", "tools", "apis"], + "properties": { + "version": { + "type": ["string", "null"], + "description": "Detected version (3.x, 2.x, 1.x, or null)" + }, + "tools": { + "type": "array", + "description": "CLI tools and utilities mentioned", + "items": { "type": "string" } + }, + "apis": { + "type": "array", + "description": "API endpoints mentioned", + "items": { "type": "string" } + } + } + }, + "structure": { + "type": "object", + "description": "Repository structure analysis", + "required": ["sections", "existingPaths", "siblingWeights"], + "properties": { + "sections": { + "type": "array", + "description": "Available documentation sections", + "items": { "type": "string" } + }, + "existingPaths": { + "type": "array", + "description": "All existing directory paths", + "items": { "type": "string" } + }, + "siblingWeights": { + "type": "object", + "description": "Weight values from sibling pages by section", + "patternProperties": { + ".*": { + "type": "array", + "items": { "type": "number" } + } + } + } + } + }, + "conventions": { + "type": "object", + "description": "Documentation conventions and guidelines", + "required": ["sharedContentDir", "menuKeyPattern", "weightLevels", "namingRules", "testing"], + "properties": { + "sharedContentDir": { + "type": "string", + "description": "Directory for shared content" + }, + "menuKeyPattern": { + "type": "string", + "description": "Pattern for menu keys" + }, + "weightLevels": { + "type": "object", + "description": "Weight ranges by navigation level" + }, + "namingRules": { + "type": "object", + "description": "File and directory naming conventions" + }, + "testing": { + "type": "object", + "description": "Testing conventions for code samples" + } + } + } + } +} diff --git a/scripts/schemas/scaffold-proposal.schema.json b/scripts/schemas/scaffold-proposal.schema.json new file mode 100644 index 000000000..edb638ce4 --- /dev/null +++ b/scripts/schemas/scaffold-proposal.schema.json @@ -0,0 +1,145 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Content Scaffolding Proposal", + "description": "Proposal generated by AI analysis for creating documentation files", + "type": "object", + "required": ["analysis", "files"], + "properties": { + "analysis": { + "type": "object", + "description": "Analysis results from AI agents", + "required": ["topic", "targetProducts", "section", "isShared"], + "properties": { + "topic": { + "type": "string", + "description": "Brief topic description" + }, + "targetProducts": { + "type": "array", + "description": "Products this documentation applies to", + "items": { "type": "string" }, + "minItems": 1 + }, + "section": { + "type": "string", + "description": "Documentation section (admin, write-data, query-data, 
etc.)" + }, + "isShared": { + "type": "boolean", + "description": "Whether content should be shared across products" + }, + "reasoning": { + "type": "string", + "description": "Explanation for structure decisions" + }, + "styleReview": { + "type": "object", + "description": "Style compliance review from Style Agent", + "properties": { + "issues": { + "type": "array", + "items": { "type": "string" } + }, + "recommendations": { + "type": "array", + "items": { "type": "string" } + } + } + }, + "codeValidation": { + "type": "object", + "description": "Code sample validation from Coding Agent", + "properties": { + "tested": { + "type": "boolean", + "description": "Whether code samples were tested" + }, + "tools": { + "type": "array", + "description": "Tools used in code samples", + "items": { "type": "string" } + } + } + } + } + }, + "files": { + "type": "array", + "description": "Files to create", + "minItems": 1, + "items": { + "type": "object", + "required": ["path", "type"], + "properties": { + "path": { + "type": "string", + "description": "File path relative to repository root" + }, + "type": { + "type": "string", + "enum": ["shared-content", "frontmatter-only"], + "description": "File type: shared-content (with body) or frontmatter-only (just frontmatter + source)" + }, + "content": { + "type": "string", + "description": "Markdown content (for shared-content files)" + }, + "frontmatter": { + "type": "object", + "description": "Frontmatter object (for frontmatter-only files)", + "required": ["title", "description", "menu", "weight"], + "properties": { + "title": { + "type": "string", + "description": "Page title" + }, + "description": { + "type": "string", + "description": "SEO description" + }, + "menu": { + "type": "object", + "description": "Menu configuration", + "patternProperties": { + ".*": { + "type": "object", + "required": ["name"], + "properties": { + "name": { "type": "string" }, + "parent": { "type": "string" } + } + } + } + }, + "weight": { + "type": "number", + "description": "Sort weight" + }, + "source": { + "type": "string", + "description": "Path to shared content file" + }, + "related": { + "type": "array", + "description": "Related article URLs", + "items": { "type": "string" } + }, + "alt_links": { + "type": "object", + "description": "Cross-product navigation links", + "patternProperties": { + ".*": { "type": "string" } + } + } + } + } + } + } + }, + "nextSteps": { + "type": "array", + "description": "Recommended next steps after file creation", + "items": { "type": "string" } + } + } +} diff --git a/scripts/templates/chatgpt-prompt.md b/scripts/templates/chatgpt-prompt.md new file mode 100644 index 000000000..3f3de19d5 --- /dev/null +++ b/scripts/templates/chatgpt-prompt.md @@ -0,0 +1,136 @@ +# Content Scaffolding Analysis Prompt (ChatGPT) + +## Context + +You are analyzing a documentation draft to generate an intelligent file structure proposal for the InfluxData documentation repository. + +**Context file**: `.tmp/scaffold-context.json` + +Read and analyze the context file, which contains: +- **draft**: The markdown content and any existing frontmatter +- **products**: Available InfluxDB products (Core, Enterprise, Cloud, etc.) +- **productHints**: Products mentioned or suggested based on content analysis +- **versionInfo**: Detected InfluxDB version (3.x, 2.x, 1.x) and tools +- **structure**: Repository structure, existing paths, and sibling weights +- **conventions**: Documentation conventions for naming, weights, and testing + +## Your Tasks + +### 1. 
Content Analysis + +Analyze the draft content to determine: + +- **Topic**: What is this documentation about? +- **Target audience**: Developers, administrators, beginners, or advanced users? +- **Documentation type**: Conceptual overview, how-to guide, reference, or tutorial? +- **Target products**: Which InfluxDB products does this apply to? + - Use `productHints.mentioned` and `productHints.suggested` from context + - Consider `versionInfo.version` (3.x, 2.x, or 1.x) +- **Section**: Which documentation section? (admin, write-data, query-data, reference, get-started, plugins) + +### 2. Structure Decisions + +Decide on the optimal file structure: + +- **Shared vs Product-Specific**: + - Use shared content (`content/shared/`) when content applies broadly with minor variations + - Use product-specific when content differs significantly between products +- **Parent menu item**: What should be the navigation parent? +- **Weight**: Calculate appropriate weight based on `structure.siblingWeights` + - Weights are in ranges: 1-99 (top level), 101-199 (level 2), 201-299 (level 3) + +### 3. Frontmatter Generation + +For each file, create complete frontmatter with: + +- **title**: Clear, SEO-friendly title +- **description**: Concise 1-2 sentence description for SEO +- **menu**: Proper menu structure with product key (pattern: `{namespace}_{product}`) +- **weight**: Sequential weight based on siblings +- **source**: (for frontmatter-only files) Path to shared content +- **related**: 3-5 relevant related articles from `structure.existingPaths` +- **alt_links**: Map equivalent pages across products for cross-product navigation + +### 4. Code Sample Considerations + +Based on `versionInfo`: +- Use version-specific CLI commands (influxdb3, influx, influxctl) +- Reference appropriate API endpoints (/api/v3, /api/v2) +- Note testing requirements from `conventions.testing` + +### 5. Style Compliance + +Follow conventions from `conventions.namingRules`: +- Files: Use lowercase with hyphens (e.g., `manage-databases.md`) +- Directories: Use lowercase with hyphens +- Shared content: Place in appropriate `/content/shared/` subdirectory + +## Output Format + +Generate a JSON proposal matching the schema in `scripts/schemas/scaffold-proposal.schema.json`. + +**Required structure**: + +```json +{ + "analysis": { + "topic": "Brief topic description", + "targetProducts": ["core", "enterprise", "cloud-dedicated"], + "section": "admin", + "isShared": true, + "reasoning": "Why this structure makes sense", + "styleReview": { + "issues": [], + "recommendations": [] + }, + "codeValidation": { + "tested": false, + "tools": ["influxdb3 CLI", "influxctl"] + } + }, + "files": [ + { + "path": "content/shared/influxdb3-admin/topic-name.md", + "type": "shared-content", + "content": "{{ACTUAL_DRAFT_CONTENT}}" + }, + { + "path": "content/influxdb3/core/admin/topic-name.md", + "type": "frontmatter-only", + "frontmatter": { + "title": "Page Title", + "description": "Page description", + "menu": { + "influxdb3_core": { + "name": "Nav Label", + "parent": "Parent Item" + } + }, + "weight": 205, + "source": "/shared/influxdb3-admin/topic-name.md", + "related": [ + "/influxdb3/core/path/to/related/" + ], + "alt_links": { + "enterprise": "/influxdb3/enterprise/admin/topic-name/" + } + } + } + ], + "nextSteps": [ + "Review generated frontmatter", + "Test with: npx hugo server", + "Add product-specific variations if needed" + ] +} +``` + +## Instructions + +1. Read and parse `.tmp/scaffold-context.json` +2. 
Analyze the draft content thoroughly +3. Make structure decisions based on the analysis +4. Generate complete frontmatter for all files +5. Save the proposal to `.tmp/scaffold-proposal.json` + +The proposal will be validated and used by `yarn docs:create --proposal .tmp/scaffold-proposal.json` to create the files. diff --git a/scripts/templates/copilot-prompt.md b/scripts/templates/copilot-prompt.md new file mode 100644 index 000000000..44d221d16 --- /dev/null +++ b/scripts/templates/copilot-prompt.md @@ -0,0 +1,111 @@ +# Content Scaffolding Analysis (GitHub Copilot) + +Generate a documentation scaffolding proposal from the context file. + +## Input + +Read `.tmp/scaffold-context.json` which contains: +- `draft`: Documentation draft content and frontmatter +- `products`: Available InfluxDB products +- `productHints`: Suggested products based on content analysis +- `versionInfo`: Detected version (3.x/2.x/1.x) and tools +- `structure`: Repository structure and sibling weights +- `conventions`: Documentation standards + +## Analysis + +Determine: +1. **Topic** and **audience** from draft content +2. **Target products** from `productHints` and `versionInfo` +3. **Documentation section** (admin/write-data/query-data/reference/get-started/plugins) +4. **Shared vs product-specific** structure +5. **Weight** from `structure.siblingWeights` for the section + +## File Structure + +Generate files following these patterns: + +### Shared Content Pattern +``` +content/shared/{namespace}-{section}/{topic-name}.md + ā”œā”€ content/{namespace}/{product}/{section}/{topic-name}.md (frontmatter only) + ā”œā”€ content/{namespace}/{product}/{section}/{topic-name}.md (frontmatter only) + └─ ... +``` + +### Product-Specific Pattern +``` +content/{namespace}/{product}/{section}/{topic-name}.md (full content) +``` + +## Frontmatter Template + +For frontmatter-only files: +```yaml +--- +title: Clear SEO title +description: 1-2 sentence description +menu: + {namespace}_{product}: + name: Nav label + parent: Parent item +weight: {calculated from siblings} +source: /shared/{namespace}-{section}/{topic-name}.md +related: + - /path/to/related1/ + - /path/to/related2/ +alt_links: + {product}: /path/to/equivalent/ +--- +``` + +## Code Samples + +Based on `versionInfo`: +- **v3.x**: Use `influxdb3` CLI, `influxctl`, `/api/v3` +- **v2.x**: Use `influx` CLI, `/api/v2` +- **v1.x**: Use `influx` CLI (v1), `influxd`, InfluxQL + +## Output + +Generate JSON matching `scripts/schemas/scaffold-proposal.schema.json`: + +```json +{ + "analysis": { + "topic": "...", + "targetProducts": ["..."], + "section": "...", + "isShared": true/false, + "reasoning": "...", + "styleReview": { + "issues": [], + "recommendations": [] + }, + "codeValidation": { + "tested": false, + "tools": [] + } + }, + "files": [ + { + "path": "content/...", + "type": "shared-content" | "frontmatter-only", + "content": "..." OR "frontmatter": {...} + } + ], + "nextSteps": ["..."] +} +``` + +Save to: `.tmp/scaffold-proposal.json` + +## Conventions + +- **Files**: lowercase-with-hyphens.md +- **Menu keys**: `{namespace}_{product}` (e.g., `influxdb3_core`) +- **Weights**: 1-99 (top), 101-199 (level 2), 201-299 (level 3) +- **Shared content**: `content/shared/` subdirectories +- **Related links**: 3-5 contextually relevant articles + +Begin analysis of `.tmp/scaffold-context.json`. 
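
Once the proposal is saved, apply it with the script's execute step (the `--proposal` flag listed in `yarn docs:create --help`):

```bash
yarn docs:create --proposal .tmp/scaffold-proposal.json
```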
From 9373446ea6689b8296d5eadb3ec9f88f35a082a2 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Thu, 30 Oct 2025 10:05:37 -0400 Subject: [PATCH 03/15] WIP: follow external links in a draft, accept draft via stdin --- data/products.yml | 13 -- drafts/test-url-handling.md | 9 + scripts/docs-create.js | 305 +++++++++++++++++++++++++---- scripts/lib/content-scaffolding.js | 143 +++++++++++++- scripts/lib/file-operations.js | 32 +++ 5 files changed, 445 insertions(+), 57 deletions(-) create mode 100644 drafts/test-url-handling.md diff --git a/data/products.yml b/data/products.yml index 9889169fa..21e23cec9 100644 --- a/data/products.yml +++ b/data/products.yml @@ -212,19 +212,6 @@ influxdb_cloud: - How is Cloud 2 different from Cloud Serverless? - How do I manage auth tokens in InfluxDB Cloud 2? -explorer: - name: InfluxDB 3 Explorer - namespace: explorer - menu_category: other - list_order: 4 - versions: [v1] - latest: explorer - latest_patch: 1.1.0 - ai_sample_questions: - - How do I use InfluxDB 3 Explorer to visualize data? - - How do I create a dashboard in InfluxDB 3 Explorer? - - How do I query data using InfluxDB 3 Explorer? - telegraf: name: Telegraf namespace: telegraf diff --git a/drafts/test-url-handling.md b/drafts/test-url-handling.md new file mode 100644 index 000000000..f794a015e --- /dev/null +++ b/drafts/test-url-handling.md @@ -0,0 +1,9 @@ +# Test URL Handling + +This is a test draft to see how the script handles URLs in content. + +Here's a link to the InfluxDB documentation: + +And here's a link to GitHub: + +The script should analyze this content and determine where to place it in the documentation structure. diff --git a/scripts/docs-create.js b/scripts/docs-create.js index a5fb91567..176cc35b0 100644 --- a/scripts/docs-create.js +++ b/scripts/docs-create.js @@ -23,7 +23,12 @@ import { loadProducts, analyzeStructure, } from './lib/content-scaffolding.js'; -import { writeJson, readJson, fileExists } from './lib/file-operations.js'; +import { + writeJson, + readJson, + fileExists, + readDraft, +} from './lib/file-operations.js'; import { parseMultipleURLs } from './lib/url-parser.js'; const __filename = fileURLToPath(import.meta.url); @@ -91,8 +96,7 @@ function divider() { function parseArguments() { const { values, positionals } = parseArgs({ options: { - draft: { type: 'string' }, - from: { type: 'string' }, + 'from-draft': { type: 'string' }, url: { type: 'string', multiple: true }, urls: { type: 'string' }, products: { type: 'string' }, @@ -103,18 +107,16 @@ function parseArguments() { 'dry-run': { type: 'boolean', default: false }, yes: { type: 'boolean', default: false }, help: { type: 'boolean', default: false }, + 'follow-external': { type: 'boolean', default: false }, }, allowPositionals: true, }); // First positional argument is treated as draft path - if (positionals.length > 0 && !values.draft && !values.from) { + if (positionals.length > 0 && !values['from-draft']) { values.draft = positionals[0]; - } - - // --from is an alias for --draft - if (values.from && !values.draft) { - values.draft = values.from; + } else if (values['from-draft']) { + values.draft = values['from-draft']; } // Normalize URLs into array @@ -142,19 +144,35 @@ ${colors.bright}Documentation Content Scaffolding${colors.reset} ${colors.bright}Usage:${colors.reset} yarn docs:create Create from draft - yarn docs:create --url --draft Create at URL with draft content + yarn docs:create --url --from-draft Create at URL with draft ${colors.bright}Options:${colors.reset} - Path to draft markdown 
file (positional argument)
  --from-draft <path>     Path to draft markdown file
  --url <url>             Documentation URL for new content location
  --follow-external       Include external (non-docs.influxdata.com) URLs
                          when extracting links from draft. Without this flag,
                          only local documentation links are followed.
  --context-only          Stop after context preparation
                          (for non-Claude tools)
  --proposal <path>       Import and execute proposal from JSON file
  --dry-run               Show what would be created without creating
  --yes                   Skip confirmation prompt
  --help                  Show this help message
+
+${colors.bright}Stdin Support:${colors.reset}
+  cat draft.md | yarn docs:create      Read draft from stdin
+  echo "# Content" | yarn docs:create  Create from piped content
+
+${colors.bright}Link Following:${colors.reset}
+  By default, the script extracts links from your draft and prompts you
+  to select which ones to include as context. This helps the AI:
+  - Maintain consistent terminology
+  - Avoid duplicating content
+  - Add appropriate \`related\` frontmatter links
+
+  Local documentation links are always available for selection.
+  Use --follow-external to also include external URLs (GitHub, etc.)

${colors.bright}Workflow (Create from draft):${colors.reset}
  1. Create a draft markdown file with your content
@@ -166,7 +184,7 @@ ${colors.bright}Workflow (Create at specific URL):${colors.reset}
  1. Create draft: vim drafts/new-feature.md
  2. Run: yarn docs:create \\
     --url https://docs.influxdata.com/influxdb3/core/admin/new-feature/ \\
-     --draft drafts/new-feature.md
+     --from-draft drafts/new-feature.md
  3. Script determines structure from URL and uses draft content
  4. 
Review and confirm to create files @@ -184,10 +202,20 @@ ${colors.bright}Examples:${colors.reset} # Create at specific URL with draft content yarn docs:create --url /influxdb3/core/admin/new-feature/ \\ - --draft drafts/new-feature.md + --from-draft drafts/new-feature.md + + # Create with linked context (prompts for link selection) + yarn docs:create drafts/new-feature.md + + # Include external links for selection + yarn docs:create --follow-external drafts/api-guide.md + + # Pipe content from stdin + cat drafts/quick-note.md | yarn docs:create + echo "# Test Content" | yarn docs:create # Preview changes - yarn docs:create --draft drafts/new-feature.md --dry-run + yarn docs:create --from-draft drafts/new-feature.md --dry-run ${colors.bright}Note:${colors.reset} To edit existing pages, use: yarn docs:edit @@ -197,7 +225,7 @@ ${colors.bright}Note:${colors.reset} /** * Phase 1a: Prepare context from URLs */ -async function prepareURLPhase(urls, draftPath, options) { +async function prepareURLPhase(urls, draftPath, options, stdinContent = null) { log('\nšŸ” Analyzing URLs and finding files...', 'bright'); try { @@ -258,9 +286,18 @@ async function prepareURLPhase(urls, draftPath, options) { // Build context (include URL analysis) let context = null; - if (draftPath) { + let draft; + + if (stdinContent) { + // Use stdin content + draft = stdinContent; + log('āœ“ Using draft from stdin', 'green'); + context = prepareContext(draft); + } else if (draftPath) { // Use draft content if provided - context = prepareContext(draftPath); + draft = readDraft(draftPath); + draft.path = draftPath; + context = prepareContext(draft); } else { // Minimal context for editing existing pages const products = loadProducts(); @@ -351,18 +388,83 @@ async function prepareURLPhase(urls, draftPath, options) { /** * Phase 1b: Prepare context from draft */ -async function preparePhase(draftPath, options) { +async function preparePhase(draftPath, options, stdinContent = null) { log('\nšŸ” Analyzing draft and repository structure...', 'bright'); - // Validate draft exists - if (!fileExists(draftPath)) { - log(`āœ— Draft file not found: ${draftPath}`, 'red'); - process.exit(1); + let draft; + + // Handle stdin vs file + if (stdinContent) { + draft = stdinContent; + log('āœ“ Using draft from stdin', 'green'); + } else { + // Validate draft exists + if (!fileExists(draftPath)) { + log(`āœ— Draft file not found: ${draftPath}`, 'red'); + process.exit(1); + } + draft = readDraft(draftPath); + draft.path = draftPath; } try { // Prepare context - const context = prepareContext(draftPath); + const context = prepareContext(draft); + + // Extract links from draft + const { extractLinks, followLocalLinks, fetchExternalLinks } = await import( + './lib/content-scaffolding.js' + ); + + const links = extractLinks(draft.content); + + if (links.localFiles.length > 0 || links.external.length > 0) { + // Filter external links if flag not set + if (!options['follow-external']) { + links.external = []; + } + + // Let user select which external links to follow + // (local files are automatically included) + const selected = await selectLinksToFollow(links); + + // Follow selected links + const linkedContent = []; + + if (selected.selectedLocal.length > 0) { + log('\nšŸ“„ Loading local files...', 'cyan'); + // Determine base path for resolving relative links + const basePath = draft.path + ? 
dirname(join(REPO_ROOT, draft.path)) + : REPO_ROOT; + const localResults = followLocalLinks(selected.selectedLocal, basePath); + linkedContent.push(...localResults); + const successCount = localResults.filter((r) => !r.error).length; + log(`āœ“ Loaded ${successCount} local file(s)`, 'green'); + } + + if (selected.selectedExternal.length > 0) { + log('\n🌐 Fetching external URLs...', 'cyan'); + const externalResults = await fetchExternalLinks( + selected.selectedExternal + ); + linkedContent.push(...externalResults); + const successCount = externalResults.filter((r) => !r.error).length; + log(`āœ“ Fetched ${successCount} external page(s)`, 'green'); + } + + // Add to context + if (linkedContent.length > 0) { + context.linkedContent = linkedContent; + + // Show any errors + const errors = linkedContent.filter((lc) => lc.error); + if (errors.length > 0) { + log('\nāš ļø Some links could not be loaded:', 'yellow'); + errors.forEach((e) => log(` • ${e.url}: ${e.error}`, 'yellow')); + } + } + } // Write context to temp file writeJson(CONTEXT_FILE, context); @@ -382,6 +484,12 @@ async function preparePhase(draftPath, options) { `āœ“ Found ${context.structure.existingPaths.length} existing pages`, 'green' ); + if (context.linkedContent) { + log( + `āœ“ Included ${context.linkedContent.length} linked page(s) as context`, + 'green' + ); + } log( `āœ“ Prepared context → ${CONTEXT_FILE.replace(REPO_ROOT, '.')}`, 'green' @@ -441,6 +549,19 @@ async function selectProducts(context, options) { } } + // Sort products: detected first, then alphabetically within each group + allProducts.sort((a, b) => { + const aDetected = detected.includes(a); + const bDetected = detected.includes(b); + + // Detected products first + if (aDetected && !bDetected) return -1; + if (!aDetected && bDetected) return 1; + + // Then alphabetically + return a.localeCompare(b); + }); + // Case 1: Explicit flag provided if (options.products) { const requested = options.products.split(',').map((p) => p.trim()); @@ -514,6 +635,74 @@ async function selectProducts(context, options) { return selected; } +/** + * Prompt user to select which external links to include + * Local file paths are automatically followed + * @param {object} links - {localFiles, external} from extractLinks + * @returns {Promise} {selectedLocal, selectedExternal} + */ +async function selectLinksToFollow(links) { + // Local files are followed automatically (no user prompt) + // External links require user selection + if (links.external.length === 0) { + return { + selectedLocal: links.localFiles || [], + selectedExternal: [], + }; + } + + log('\nšŸ”— Found external links in draft:\n', 'bright'); + + const allLinks = []; + let index = 1; + + // Show external links for selection + links.external.forEach((link) => { + log(` ${index}. 
${link}`, 'yellow'); + allLinks.push({ type: 'external', url: link }); + index++; + }); + + const answer = await promptUser( + '\nSelect external links to include as context ' + + '(comma-separated numbers, or "all"): ' + ); + + if (!answer || answer.toLowerCase() === 'none') { + return { + selectedLocal: links.localFiles || [], + selectedExternal: [], + }; + } + + let selectedIndices; + if (answer.toLowerCase() === 'all') { + selectedIndices = Array.from({ length: allLinks.length }, (_, i) => i); + } else { + selectedIndices = answer + .split(',') + .map((s) => parseInt(s.trim()) - 1) + .filter((i) => i >= 0 && i < allLinks.length); + } + + const selectedExternal = []; + + selectedIndices.forEach((i) => { + const link = allLinks[i]; + selectedExternal.push(link.url); + }); + + log( + `\nāœ“ Following ${links.localFiles?.length || 0} local file(s) ` + + `and ${selectedExternal.length} external link(s)`, + 'green' + ); + return { + selectedLocal: links.localFiles || [], + selectedExternal, + }; +} + /** * Run single content generator agent with direct file generation (Claude Code) */ @@ -577,6 +766,30 @@ function generateClaudePrompt( **Target Products**: Use \`context.selectedProducts\` field (${selectedProducts.join(', ')}) **Mode**: ${mode === 'edit' ? 'Edit existing content' : 'Create new documentation'} ${isURLBased ? `**URLs**: ${context.urls.map((u) => u.url).join(', ')}` : ''} +${ + context.linkedContent?.length > 0 + ? ` +**Linked References**: The draft references ${context.linkedContent.length} page(s) from existing documentation. + +These are provided for context to help you: +- Maintain consistent terminology and style +- Avoid duplicating existing content +- Understand related concepts and their structure +- Add appropriate links to the \`related\` frontmatter field + +Linked content details available in \`context.linkedContent\`: +${context.linkedContent + .map((lc) => + lc.error + ? `- āŒ ${lc.url} (${lc.error})` + : `- āœ“ [${lc.type}] ${lc.title} (${lc.path || lc.url})` + ) + .join('\n')} + +**Important**: Use this content for context and reference, but do not copy it verbatim. Consider adding relevant pages to the \`related\` field in frontmatter. +` + : '' +} **Your Task**: Generate complete documentation files directly (no proposal step). 
@@ -908,16 +1121,27 @@ async function executePhase(options) { async function main() { const options = parseArguments(); - // Show help + // Show help first (don't wait for stdin) if (options.help) { printUsage(); process.exit(0); } + // Check for stdin only if no draft file was provided + const hasStdin = !process.stdin.isTTY; + let stdinContent = null; + + if (hasStdin && !options.draft) { + // Import readDraftFromStdin + const { readDraftFromStdin } = await import('./lib/file-operations.js'); + log('šŸ“„ Reading draft from stdin...', 'cyan'); + stdinContent = await readDraftFromStdin(); + } + // Determine workflow if (options.url && options.url.length > 0) { // URL-based workflow requires draft content - if (!options.draft) { + if (!options.draft && !stdinContent) { log('\nāœ— Error: --url requires --draft ', 'red'); log('The --url option specifies WHERE to create content.', 'yellow'); log( @@ -934,7 +1158,12 @@ async function main() { process.exit(1); } - const context = await prepareURLPhase(options.url, options.draft, options); + const context = await prepareURLPhase( + options.url, + options.draft, + options, + stdinContent + ); if (options['context-only']) { // Stop after context preparation @@ -947,9 +1176,9 @@ async function main() { // Execute proposal (Phase 3) await executePhase(options); - } else if (options.draft) { - // Draft-based workflow - const context = await preparePhase(options.draft, options); + } else if (options.draft || stdinContent) { + // Draft-based workflow (from file or stdin) + const context = await preparePhase(options.draft, options, stdinContent); if (options['context-only']) { // Stop after context preparation diff --git a/scripts/lib/content-scaffolding.js b/scripts/lib/content-scaffolding.js index 63d0e6e66..b775a304e 100644 --- a/scripts/lib/content-scaffolding.js +++ b/scripts/lib/content-scaffolding.js @@ -4,7 +4,7 @@ */ import { readdirSync, readFileSync, existsSync, statSync } from 'fs'; -import { join, dirname } from 'path'; +import { join, dirname, resolve } from 'path'; import { fileURLToPath } from 'url'; import yaml from 'js-yaml'; import matter from 'gray-matter'; @@ -314,12 +314,19 @@ export function findSiblingWeights(dirPath) { /** * Prepare complete context for AI analysis - * @param {string} draftPath - Path to draft file + * @param {string|object} draftPathOrObject - Path to draft file or draft object * @returns {object} Context object */ -export function prepareContext(draftPath) { - // Read draft - const draft = readDraft(draftPath); +export function prepareContext(draftPathOrObject) { + // Read draft - handle both file path and draft object + let draft; + if (typeof draftPathOrObject === 'string') { + draft = readDraft(draftPathOrObject); + draft.path = draftPathOrObject; + } else { + // Already a draft object from stdin + draft = draftPathOrObject; + } // Load products const products = loadProducts(); @@ -349,7 +356,7 @@ export function prepareContext(draftPath) { // Build context const context = { draft: { - path: draftPath, + path: draft.path || draftPathOrObject, content: draft.content, existingFrontmatter: draft.frontmatter, }, @@ -758,3 +765,127 @@ export function analyzeURLs(parsedURLs) { return results; } + +/** + * Extract and categorize links from markdown content + * @param {string} content - Markdown content + * @returns {object} {localFiles: string[], external: string[]} + */ +export function extractLinks(content) { + const localFiles = []; + const external = []; + + // Match markdown links: [text](url) + const 
linkRegex = /\[([^\]]+)\]\(([^)]+)\)/g; + let match; + + while ((match = linkRegex.exec(content)) !== null) { + const url = match[2]; + + // Skip anchor links and mailto + if (url.startsWith('#') || url.startsWith('mailto:')) { + continue; + } + + // Local file paths (relative paths) - automatically followed + if (url.startsWith('../') || url.startsWith('./')) { + localFiles.push(url); + } + // All HTTP/HTTPS URLs (including docs.influxdata.com) - user selects + else if (url.startsWith('http://') || url.startsWith('https://')) { + external.push(url); + } + // Absolute paths starting with / are ignored (no base context to resolve) + } + + return { + localFiles: [...new Set(localFiles)], + external: [...new Set(external)], + }; +} + +/** + * Follow local file links (relative paths) + * @param {string[]} links - Array of relative file paths + * @param {string} basePath - Base path to resolve relative links from + * @returns {object[]} Array of {url, title, content, path, frontmatter} + */ +export function followLocalLinks(links, basePath = REPO_ROOT) { + const results = []; + + for (const link of links) { + try { + // Resolve relative path from base path + const filePath = resolve(basePath, link); + + // Check if file exists + if (existsSync(filePath)) { + const fileContent = readFileSync(filePath, 'utf8'); + const parsed = matter(fileContent); + + results.push({ + url: link, + title: parsed.data?.title || 'Untitled', + content: parsed.content, + path: filePath.replace(REPO_ROOT + '/', ''), + frontmatter: parsed.data, + type: 'local', + }); + } else { + results.push({ + url: link, + error: 'File not found', + type: 'local', + }); + } + } catch (error) { + results.push({ + url: link, + error: error.message, + type: 'local', + }); + } + } + + return results; +} + +/** + * Fetch external URLs + * @param {string[]} urls - Array of external URLs + * @returns {Promise} Array of {url, title, content, type} + */ +export async function fetchExternalLinks(urls) { + // Dynamic import axios + const axios = (await import('axios')).default; + const results = []; + + for (const url of urls) { + try { + const response = await axios.get(url, { + timeout: 10000, + headers: { 'User-Agent': 'InfluxData-Docs-Bot/1.0' }, + }); + + // Extract title from HTML or use URL + const titleMatch = response.data.match(/([^<]+)<\/title>/i); + const title = titleMatch ? 
titleMatch[1] : url; + + results.push({ + url, + title, + content: response.data, + type: 'external', + contentType: response.headers['content-type'], + }); + } catch (error) { + results.push({ + url, + error: error.message, + type: 'external', + }); + } + } + + return results; +} diff --git a/scripts/lib/file-operations.js b/scripts/lib/file-operations.js index 6bbb57830..316f90155 100644 --- a/scripts/lib/file-operations.js +++ b/scripts/lib/file-operations.js @@ -28,6 +28,38 @@ export function readDraft(filePath) { }; } +/** + * Read draft content from stdin + * @returns {Promise<{content: string, frontmatter: object, raw: string, path: string}>} + */ +export async function readDraftFromStdin() { + return new Promise((resolve, reject) => { + let data = ''; + process.stdin.setEncoding('utf8'); + + process.stdin.on('data', (chunk) => { + data += chunk; + }); + + process.stdin.on('end', () => { + try { + // Parse with gray-matter to extract frontmatter if present + const parsed = matter(data); + resolve({ + content: parsed.content, + frontmatter: parsed.data || {}, + raw: data, + path: '<stdin>', + }); + } catch (error) { + reject(error); + } + }); + + process.stdin.on('error', reject); + }); +} + /** * Write a markdown file with frontmatter * @param {string} filePath - Path to write to From 6f150048ffc3454f28bfe3d900cf739f5c54edb0 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman <jstirnaman@influxdata.com> Date: Tue, 28 Oct 2025 00:04:49 -0400 Subject: [PATCH 04/15] chore(docs): Add content/create.md tutorial page for the How to create your own documentation tutorial --- content/create.md | 136 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 136 insertions(+) create mode 100644 content/create.md diff --git a/content/create.md b/content/create.md new file mode 100644 index 000000000..e6d78f891 --- /dev/null +++ b/content/create.md @@ -0,0 +1,136 @@ +--- +title: Create and edit InfluxData docs +description: Learn how to create and edit InfluxData documentation. +tags: [documentation, guide, influxdata] +test_only: true +--- + +Learn how to create and edit InfluxData documentation. + +## Common workflows + +## Submit an issue to request new or updated documentation + +- **Public**: <https://github.com/influxdata/docs-v2/issues/> +- **Private**: <https://github.com/influxdata/DAR/issues/> + +## Edit an existing page in your browser + +**Example**: Editing a product-specific page + +1. Visit <https://docs.influxdata.com> public docs +2. Search, Ask AI, or navigate to find the page to edit--for example, <https://docs.influxdata.com/influxdb3/cloud-serverless/get-started/> +3. Click the "Edit this page" link at the bottom of the page. +4. This opens the GitHub repository to the file that generates the page +5. Click the pencil icon to edit the file in your browser +6. [Commit and create a pull request](#commit-and-create-a-pull-request) + +**Example**: Editing a shared content page + +Navigate to <https://docs.influxdata.com/influxdb3/core/> + +## Edit locally with the docs-v2 repository + +**Prerquisites**: + +- [Fork the docs-v2 repository](https://github.com/influxdata/docs-v2/fork) +- [Install Yarn](https://yarnpkg.com/getting-started/install) +- Optional: [Set up GitHub CLI](https://cli.github.com/manual/) + +### Edit an existing page locally + +Use the `yarn docs:edit` command to open an existing page in your editor. 
+ +```bash +yarn docs:edit https://docs.influxdata.com/influxdb3/enterprise/get-started/ +``` + +### Create shared content for multiple versions + +<!-- Coming soon: generate content from an issue with labels --> + +### Generate content and frontmatter from the command line + +#### Example + +Navigate to the page you want to edit on `https://docs.influxdata.com`--for example, <https://docs.influxdata.com/influxdb3/core/> + +```console + yarn docs:create <draft-path> Create from draft + yarn docs:create <url> Create page at URL + yarn docs:create --url <url> Create page at URL + yarn docs:create --url <url> --draft <path> Create at URL with draft content + yarn docs:create --url url1 --url url2 Process multiple URLs + + <draft-path> Path to draft markdown file (positional argument) + <url> Documentation URL (positional, if starts with / or + contains docs.influxdata.com) + --draft <path> Path to draft markdown file + --from <path> Alias for --draft + --url <url> Documentation URL (can specify multiple times) + --urls <list> Comma-separated list of URLs + --context-only Stop after context preparation + (for non-Claude tools) +``` + +### Generate content and frontmatter from a draft + +1. Run the `docs:create` command with the path to your draft file. + + - If run in a Claude Code prompt, it generates content and frontmatter based on the draft and the products you select. + - If run in your shell, it generates a prompt for use with any agent (Claude, Copilot Agent mode, OpenAI GPT). + + ```bash + yarn docs:create --draft .context/drafts/"Upgrading Enterprise 3 (draft).md" + ``` + +2. [Review, commit, and create a pull request](#review-commit-and-create-a-pull-request) + +## Review, commit, and create a pull request + +> \[!Important] +> +> #### Check AI-generated content +> +> Always review and validate AI-generated content for accuracy. +> Make sure example commands are correct for the version you're documenting. + +### Commit and create a pull request + +1. Commit your changes to a new branch +2. Fix any issues found by automated checks +3. Push the branch to your fork or to the docs-v2 repository + +### Create a pull request + +1. Create a pull request against the `master` branch of the docs-v2 repository +2. Wait for automated checks (Hugo build and link checks) to complete +3. Add reviewers and request reviews +4. After approval, merge the pull request + +{{< tabs-wrapper >}} +{{% tabs %}} +[GitHub](#) +[gh CLI](#) +{{% /tabs %}} +{{% tab-content %}} + +1. Visit [influxdata/docs-v2 pull requests on GitHub](https://github.com/influxdata/docs-v2/pulls) +2. Edit PR title and description +3. Optional: set to draft if it needs more work +4. Assign reviewers +5. Optionally, assign Copilot to review + {{% /tab-content %}} + {{% tab-content %}} + +```bash +gh pr create +``` + +{{% /tab-content %}} +{{< /tabs-wrapper >}} + +## Other resources + +<http://localhost:1313/example/> +<https://app.kapa.ai> From 863052bb9409ab566c99b0f895c9dc6785dce8c3 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman <jstirnaman@influxdata.com> Date: Fri, 31 Oct 2025 10:24:58 -0500 Subject: [PATCH 05/15] chore(instruction): tutorial improvements and fixes --- content/create.md | 168 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 116 insertions(+), 52 deletions(-) diff --git a/content/create.md b/content/create.md index e6d78f891..5a72b0117 100644 --- a/content/create.md +++ b/content/create.md @@ -7,7 +7,10 @@ test_only: true Learn how to create and edit InfluxData documentation. 
-## Common workflows +- [Submit an issue to request new or updated documentation](#submit-an-issue-to-request-new-or-updated-documentation) +- [Edit an existing page in your browser](#edit-an-existing-page-in-your-browser) +- [Create and edit locally with the docs-v2 repository](#create-and-edit-locally-with-the-docs-v2-repository) +- [Helpful resources](#other-resources) ## Submit an issue to request new or updated documentation @@ -21,21 +24,40 @@ Learn how to create and edit InfluxData documentation. 1. Visit <https://docs.influxdata.com> public docs 2. Search, Ask AI, or navigate to find the page to edit--for example, <https://docs.influxdata.com/influxdb3/cloud-serverless/get-started/> 3. Click the "Edit this page" link at the bottom of the page. -4. This opens the GitHub repository to the file that generates the page -5. Click the pencil icon to edit the file in your browser -6. [Commit and create a pull request](#commit-and-create-a-pull-request) + This opens the GitHub repository to the file that generates the page +4. Click the pencil icon to edit the file in your browser +5. [Commit and create a pull request](#commit-and-create-a-pull-request) -**Example**: Editing a shared content page +## Create and edit locally with the docs-v2 repository -Navigate to <https://docs.influxdata.com/influxdb3/core/> +Use `docs` scripts with AI agents to help you create and edit documentation locally, especially when working with shared content for multiple products. -## Edit locally with the docs-v2 repository +**Prerequisites**: -**Prerquisites**: +1. [Clone or fork the docs-v2 repository](https://github.com/influxdata/docs-v2/): -- [Fork the docs-v2 repository](https://github.com/influxdata/docs-v2/fork) -- [Install Yarn](https://yarnpkg.com/getting-started/install) -- Optional: [Set up GitHub CLI](https://cli.github.com/manual/) + ```bash + git clone https://github.com/influxdata/docs-v2.git + cd docs-v2 + ``` +2. [Install Yarn](https://yarnpkg.com/getting-started/install) +3. Run `yarn` in the repository root to install dependencies +4. Optional: [Set up GitHub CLI](https://cli.github.com/manual/) + +> \[!Tip] +> To run and test your changes locally, enter the following command in your terminal: +> +> ```bash +> yarn hugo server +> ``` +> +> *To refresh shared content after making changes, `touch` or edit the frontmatter file, or stop the server (Ctrl+C) and restart it.* +> +> To list all available scripts, run: +> +> ```bash +> yarn run +> ``` ### Edit an existing page locally @@ -45,49 +67,62 @@ Use the `yarn docs:edit` command to open an existing page in your editor. yarn docs:edit https://docs.influxdata.com/influxdb3/enterprise/get-started/ ``` -### Create shared content for multiple versions +### Create content locally + +Use the `yarn docs:create` command with your AI agent tool to scaffold frontmatter and generate new content. + +- The `yarn docs:create` command generates a prompt file from a draft and your product selections. +- AI agents (`claude`, Copilot Agent mode, `codex`) can use the prompt file to generate content and frontmatter. + +> \[!Tip] +> +> `docs-v2` contains custom configuration for agents like Claude and Copilot Agent mode. 
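+
+For example, a minimal first pass might look like this (a sketch; the draft path is an assumption):
+
+```bash
+# Generate content and frontmatter from a draft, then preview the result locally
+yarn docs:create .context/drafts/example-feature.md
+yarn hugo server
+```
+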
<!-- Coming soon: generate content from an issue with labels --> -### Generate content and frontmatter from the command line - -#### Example - -Navigate to the page you want to edit on `https://docs.influxdata.com`--for example, <https://docs.influxdata.com/influxdb3/core/> - -```console - yarn docs:create <draft-path> Create from draft - yarn docs:create <url> Create page at URL - yarn docs:create --url <url> Create page at URL - yarn docs:create --url <url> --draft <path> Create at URL with draft content - yarn docs:create --url url1 --url url2 Process multiple URLs - - <draft-path> Path to draft markdown file (positional argument) - <url> Documentation URL (positional, if starts with / or - contains docs.influxdata.com) - --draft <path> Path to draft markdown file - --from <path> Alias for --draft - --url <url> Documentation URL (can specify multiple times) - --urls <list> Comma-separated list of URLs - --context-only Stop after context preparation - (for non-Claude tools) -``` - ### Generate content and frontmatter from a draft -1. Run the `docs:create` command with the path to your draft file. +{{% tabs-wrapper %}} +{{% tabs %}} +[Claude Code](#) +[Other AI agents](#) +{{% /tabs %}} +{{% tab-content %}} - - If run in a Claude Code prompt, it generates content and frontmatter based on the draft and the products you select. - - If run in your shell, it generates a prompt for use with any agent (Claude, Copilot Agent mode, OpenAI GPT). +1. Open a Claude Code prompt: ```bash - yarn docs:create --draft .context/drafts/"Upgrading Enterprise 3 (draft).md" + claude code ``` -2. [Review, commit, and create a pull request](#review-commit-and-create-a-pull-request) +2. In the prompt, run the `docs:create` command with the path to your draft file. + + ```bash + yarn docs:create .context/drafts/"Upgrading Enterprise 3 (draft).md" + ``` + +3. When prompted, select the products to associate with the content. + +The script first generates a prompt file, then the agent automatically uses it to generate content and frontmatter based on the draft and the products you select. + +{{% /tab-content %}} +{{% tab-content %}} + +1. In your terminal, run the `docs:create` command with the path to your draft file. + + ```bash + yarn docs:create .context/drafts/"Upgrading Enterprise 3 (draft).md" + ``` +2. When prompted, select the products to associate with the content. + The script generates a prompt file and returns the file path. +3. Provide the prompt file to your preferred agent (`claude`, Copilot Agent mode, `codex`) to generate content and frontmatter based on the draft and the products you selected. + {{% /tab-content %}} + {{< /tabs-wrapper >}} ## Review, commit, and create a pull request +After you create or edit content, test and review your changes, and then create a pull request. + > \[!Important] > > #### Check AI-generated content @@ -95,18 +130,41 @@ Navigate to the page you want to edit on `https://docs.influxdata.com`--for exam > Always review and validate AI-generated content for accuracy. > Make sure example commands are correct for the version you're documenting. +### Test and review your changes + +Run a local Hugo server to preview your changes: + +```bash +yarn hugo server +``` + +Visit <http://localhost:1313> to review your changes in the browser. + +> \[!Note] +> If you need to preview changes in a live production-like environment +> that you can also share with others, +> the Docs team can deploy your branch to the staging site. + ### Commit and create a pull request 1. 
Commit your changes to a new branch 2. Fix any issues found by automated checks 3. Push the branch to your fork or to the docs-v2 repository +```bash +git add content +git commit -m "feat(product): Your commit message" +git push origin your-branch-name +``` + ### Create a pull request 1. Create a pull request against the `master` branch of the docs-v2 repository -2. Wait for automated checks (Hugo build and link checks) to complete -3. Add reviewers and request reviews -4. After approval, merge the pull request +2. Add reviewers: + - `@influxdata/docs-team` + - team members familiar with the product area + - Optionally, assign Copilot to review +3. After approval and automated checks are successful, merge the pull request (if you have permissions) or wait for the docs team to merge it. {{< tabs-wrapper >}} {{% tabs %}} @@ -116,15 +174,20 @@ Navigate to the page you want to edit on `https://docs.influxdata.com`--for exam {{% tab-content %}} 1. Visit [influxdata/docs-v2 pull requests on GitHub](https://github.com/influxdata/docs-v2/pulls) -2. Edit PR title and description +2. Optional: edit PR title and description 3. Optional: set to draft if it needs more work -4. Assign reviewers -5. Optionally, assign Copilot to review - {{% /tab-content %}} - {{% tab-content %}} +4. When ready for review, assign `@influxdata/docs-team` and other reviewers + +{{% /tab-content %}} +{{% tab-content %}} ```bash -gh pr create +gh pr create \ + --base master \ + --head your-branch-name \ + --title "Your PR title" \ + --body "Your PR description" \ + --reviewer influxdata/docs-team,<other-reviewers> ``` {{% /tab-content %}} @@ -132,5 +195,6 @@ gh pr create ## Other resources -<http://localhost:1313/example/> -<https://app.kapa.ai> +- `DOCS-*.md`: Documentation standards and guidelines +- <http://localhost:1313/example/>: View shortcode examples +- <https://app.kapa.ai>: Review content gaps identified from Ask AI answers From 240544c2b32ebff91e0609124c3d56c3bc2703c8 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman <jstirnaman@influxdata.com> Date: Fri, 31 Oct 2025 15:05:17 -0500 Subject: [PATCH 06/15] refactor: simplify link following behavior - treat relative paths as local files, all HTTP/HTTPS as external --- scripts/lib/content-scaffolding.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/lib/content-scaffolding.js b/scripts/lib/content-scaffolding.js index b775a304e..5f6a6fdcc 100644 --- a/scripts/lib/content-scaffolding.js +++ b/scripts/lib/content-scaffolding.js @@ -623,7 +623,7 @@ export function detectSharedContent(filePath) { if (parsed.data && parsed.data.source) { return parsed.data.source; } - } catch (error) { + } catch (_error) { // Can't parse, assume not shared return null; } @@ -670,13 +670,13 @@ export function findSharedContentVariants(sourcePath) { const relativePath = fullPath.replace(REPO_ROOT + '/', ''); variants.push(relativePath); } - } catch (error) { + } catch (_error) { // Skip files that can't be parsed continue; } } } - } catch (error) { + } catch (_error) { // Skip directories we can't read } } From c24878651ab6f8b2ec47e301f22d7979f231a855 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman <jstirnaman@influxdata.com> Date: Fri, 31 Oct 2025 17:31:36 -0500 Subject: [PATCH 07/15] fix: require --products flag when using stdin, remove TTY check from promptUser - stdin now requires --products flag with product keys - removed early return in promptUser() that prevented interactive prompts - updated help text with stdin + --products examples - prevents 'No 
products selected' error when running interactively --- scripts/docs-create.js | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/scripts/docs-create.js b/scripts/docs-create.js index 176cc35b0..08a6f2b7f 100644 --- a/scripts/docs-create.js +++ b/scripts/docs-create.js @@ -64,15 +64,11 @@ function log(message, color = 'reset') { * Prompt user for input (works in TTY and non-TTY environments) */ async function promptUser(question) { - // For non-TTY environments, return empty string - if (!process.stdin.isTTY) { - return ''; - } - const readline = await import('readline'); const rl = readline.createInterface({ input: process.stdin, output: process.stdout, + terminal: process.stdin.isTTY !== undefined ? process.stdin.isTTY : true, }); return new Promise((resolve) => { @@ -150,6 +146,8 @@ ${colors.bright}Options:${colors.reset} <draft-path> Path to draft markdown file (positional argument) --from-draft <path> Path to draft markdown file --url <url> Documentation URL for new content location + --products <list> Comma-separated product keys (required for stdin) + Examples: influxdb3_core, influxdb3_enterprise --follow-external Include external (non-docs.influxdata.com) URLs when extracting links from draft. Without this flag, only local documentation links are followed. @@ -161,8 +159,10 @@ ${colors.bright}Options:${colors.reset} --help Show this help message ${colors.bright}Stdin Support:${colors.reset} - cat draft.md | yarn docs:create Read draft from stdin - echo "# Content" | yarn docs:create Create from piped content + When piping content from stdin, you must specify target products: + + cat draft.md | yarn docs:create --products influxdb3_core + echo "# Content" | yarn docs:create --products influxdb3_core,influxdb3_enterprise ${colors.bright}Link Following:${colors.reset} By default, the script extracts links from your draft and prompts you @@ -210,9 +210,9 @@ ${colors.bright}Examples:${colors.reset} # Include external links for selection yarn docs:create --follow-external drafts/api-guide.md - # Pipe content from stdin - cat drafts/quick-note.md | yarn docs:create - echo "# Test Content" | yarn docs:create + # Pipe content from stdin (requires --products) + cat drafts/quick-note.md | yarn docs:create --products influxdb3_core + echo "# Test Content" | yarn docs:create --products influxdb3_core # Preview changes yarn docs:create --from-draft drafts/new-feature.md --dry-run @@ -1132,6 +1132,16 @@ async function main() { let stdinContent = null; if (hasStdin && !options.draft) { + // Stdin requires --products option + if (!options.products) { + log( + '\nāœ— Error: --products is required when piping content from stdin', + 'red' + ); + log('Example: echo "# Content" | yarn docs:create --products influxdb3_core', 'yellow'); + process.exit(1); + } + // Import readDraftFromStdin const { readDraftFromStdin } = await import('./lib/file-operations.js'); log('šŸ“„ Reading draft from stdin...', 'cyan'); From 70272534a8b840db50e048877b9dc956428d47aa Mon Sep 17 00:00:00 2001 From: Jason Stirnaman <jstirnaman@influxdata.com> Date: Sat, 1 Nov 2025 09:03:57 -0500 Subject: [PATCH 08/15] feat: accept product keys (not display names) in --products flag - --products now accepts keys from products.yml (influxdb3_core, telegraf, etc.) 
- automatically expands multi-version products to all versions - maintains backwards compatibility with display names - provides clear error messages with list of valid product keys Examples: --products influxdb3_core --products influxdb3_core,influxdb3_enterprise --products telegraf --- scripts/docs-create.js | 50 +++++++++++++++++++++++++++++++++++------- 1 file changed, 42 insertions(+), 8 deletions(-) diff --git a/scripts/docs-create.js b/scripts/docs-create.js index 08a6f2b7f..e77dd9e0a 100644 --- a/scripts/docs-create.js +++ b/scripts/docs-create.js @@ -564,23 +564,54 @@ async function selectProducts(context, options) { // Case 1: Explicit flag provided if (options.products) { - const requested = options.products.split(',').map((p) => p.trim()); - const invalid = requested.filter((p) => !allProducts.includes(p)); + const requestedKeys = options.products.split(',').map((p) => p.trim()); - if (invalid.length > 0) { + // Map product keys to display names + const requestedNames = []; + const invalidKeys = []; + + for (const key of requestedKeys) { + const product = context.products[key]; + + if (product) { + // Valid product key found + if (product.versions && product.versions.length > 1) { + // Multi-version product: add all versions + product.versions.forEach((version) => { + const displayName = `${product.name} ${version}`; + if (allProducts.includes(displayName)) { + requestedNames.push(displayName); + } + }); + } else { + // Single version product + if (allProducts.includes(product.name)) { + requestedNames.push(product.name); + } + } + } else if (allProducts.includes(key)) { + // It's already a display name (backwards compatibility) + requestedNames.push(key); + } else { + invalidKeys.push(key); + } + } + + if (invalidKeys.length > 0) { + const validKeys = Object.keys(context.products).join(', '); log( - `\nāœ— Invalid products: ${invalid.join(', ')}\n` + - `Valid products: ${allProducts.join(', ')}`, + `\nāœ— Invalid product keys: ${invalidKeys.join(', ')}\n` + + `Valid keys: ${validKeys}`, 'red' ); process.exit(1); } log( - `āœ“ Using products from --products flag: ${requested.join(', ')}`, + `āœ“ Using products from --products flag: ${requestedNames.join(', ')}`, 'green' ); - return requested; + return requestedNames; } // Case 2: Unambiguous (single product detected) @@ -1138,7 +1169,10 @@ async function main() { '\nāœ— Error: --products is required when piping content from stdin', 'red' ); - log('Example: echo "# Content" | yarn docs:create --products influxdb3_core', 'yellow'); + log( + 'Example: echo "# Content" | yarn docs:create --products influxdb3_core', + 'yellow' + ); process.exit(1); } From 4288c3b1aa3c916fe7da1432e97f7ee61f683e6e Mon Sep 17 00:00:00 2001 From: Jason Stirnaman <jstirnaman@influxdata.com> Date: Sat, 1 Nov 2025 10:09:01 -0500 Subject: [PATCH 09/15] feat: add --print-prompt flag to output AI prompt to stdout Allows piping prompt to other AI tools or saving to file: yarn docs:create --print-prompt draft.md > prompt.txt yarn docs:create --print-prompt draft.md | llm -m gpt-4 The flag: - Prepares context and selects products - Generates the full AI prompt - Outputs to stdout and exits - Works with both URL-based and draft-based workflows --- package.json | 5 ++++- scripts/docs-create.js | 44 ++++++++++++++++++++++++++++++++++++++++++ yarn.lock | 12 ++++++++++++ 3 files changed, 60 insertions(+), 1 deletion(-) diff --git a/package.json b/package.json index 636317b1d..62222e19d 100644 --- a/package.json +++ b/package.json @@ -78,5 +78,8 @@ "test": "test" }, 
"keywords": [], - "author": "" + "author": "", + "optionalDependencies": { + "copilot": "^0.0.2" + } } diff --git a/scripts/docs-create.js b/scripts/docs-create.js index e77dd9e0a..65c0f3bc8 100644 --- a/scripts/docs-create.js +++ b/scripts/docs-create.js @@ -99,6 +99,7 @@ function parseArguments() { ai: { type: 'string', default: 'claude' }, execute: { type: 'boolean', default: false }, 'context-only': { type: 'boolean', default: false }, + 'print-prompt': { type: 'boolean', default: false }, proposal: { type: 'string' }, 'dry-run': { type: 'boolean', default: false }, yes: { type: 'boolean', default: false }, @@ -153,6 +154,8 @@ ${colors.bright}Options:${colors.reset} only local documentation links are followed. --context-only Stop after context preparation (for non-Claude tools) + --print-prompt Output the AI prompt to stdout and exit + (for piping to other tools) --proposal <path> Import and execute proposal from JSON file --dry-run Show what would be created without creating --yes Skip confirmation prompt @@ -214,6 +217,10 @@ ${colors.bright}Examples:${colors.reset} cat drafts/quick-note.md | yarn docs:create --products influxdb3_core echo "# Test Content" | yarn docs:create --products influxdb3_core + # Output prompt for use with other AI tools + yarn docs:create --print-prompt drafts/new-feature.md > prompt.txt + yarn docs:create --print-prompt drafts/new-feature.md | llm -m gpt-4 + # Preview changes yarn docs:create --from-draft drafts/new-feature.md --dry-run @@ -1214,6 +1221,25 @@ async function main() { process.exit(0); } + if (options['print-prompt']) { + // Generate and print prompt + const selectedProducts = await selectProducts(context, options); + const mode = context.urls?.length > 0 ? 'create' : 'create'; + const isURLBased = true; + const hasExistingContent = + context.existingContent && Object.keys(context.existingContent).length > 0; + + const prompt = generateClaudePrompt( + context, + selectedProducts, + mode, + isURLBased, + hasExistingContent + ); + console.log(prompt); + process.exit(0); + } + // Continue with AI analysis (Phase 2) log('\nšŸ¤– Running AI analysis with specialized agents...\n', 'bright'); await runAgentAnalysis(context, options); @@ -1229,6 +1255,24 @@ async function main() { process.exit(0); } + if (options['print-prompt']) { + // Generate and print prompt + const selectedProducts = await selectProducts(context, options); + const mode = 'create'; + const isURLBased = false; + const hasExistingContent = false; + + const prompt = generateClaudePrompt( + context, + selectedProducts, + mode, + isURLBased, + hasExistingContent + ); + console.log(prompt); + process.exit(0); + } + // Continue with AI analysis (Phase 2) log('\nšŸ¤– Running AI analysis with specialized agents...\n', 'bright'); await runAgentAnalysis(context, options); diff --git a/yarn.lock b/yarn.lock index cb8034ef8..ce24d3c46 100644 --- a/yarn.lock +++ b/yarn.lock @@ -194,6 +194,11 @@ resolved "https://registry.yarnpkg.com/@evilmartians/lefthook/-/lefthook-1.12.3.tgz#081eca59a6d33646616af844244ce6842cd6b5a5" integrity sha512-MtXIt8h+EVTv5tCGLzh9UwbA/LRv6esdPJOHlxr8NDKHbFnbo8PvU5uVQcm3PAQTd4DZN3HoyokqrwGwntoq6w== +"@github/copilot@latest": + version "0.0.353" + resolved "https://registry.yarnpkg.com/@github/copilot/-/copilot-0.0.353.tgz#3c8d8a072b3defbd2200c9fe4fb636d633ac7f1e" + integrity sha512-OYgCB4Jf7Y/Wor8mNNQcXEt1m1koYm/WwjGsr5mwABSVYXArWUeEfXqVbx+7O87ld5b+aWy2Zaa2bzKV8dmqaw== + "@humanfs/core@^0.19.1": version "0.19.1" resolved 
"https://registry.yarnpkg.com/@humanfs/core/-/core-0.19.1.tgz#17c55ca7d426733fe3c561906b8173c336b40a77" @@ -1364,6 +1369,13 @@ confbox@^0.2.2: resolved "https://registry.yarnpkg.com/confbox/-/confbox-0.2.2.tgz#8652f53961c74d9e081784beed78555974a9c110" integrity sha512-1NB+BKqhtNipMsov4xI/NnhCKp9XG9NamYp5PVm9klAT0fsrNPjaFICsCFhNhwZJKNh7zB/3q8qXz0E9oaMNtQ== +copilot@^0.0.2: + version "0.0.2" + resolved "https://registry.yarnpkg.com/copilot/-/copilot-0.0.2.tgz#4712810c9182cd784820ed44627bedd32dd377f9" + integrity sha512-nedf34AaYj9JnFhRmiJEZemAno2WDXMypq6FW5aCVR0N+QdpQ6viukP1JpvJDChpaMEVvbUkMjmjMifJbO/AgQ== + dependencies: + "@github/copilot" latest + core-util-is@1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" From 45a5de02b62b21d0820983bdbff24839b1fb3139 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman <jstirnaman@influxdata.com> Date: Sat, 1 Nov 2025 10:18:57 -0500 Subject: [PATCH 10/15] feat: environment-aware prompt output for external tools When running outside Claude Code, script now outputs prompt file path to stdout by default (or prompt text with --print-prompt flag). This enables integration with external AI tools without requiring flags. Inside Claude Code, script continues to run Task() agent automatically. Changes: - Add isClaudeCode() detection function - Add outputPromptForExternalUse() helper - Add PROMPT_FILE constant for .tmp/scaffold-prompt.txt - Update both URL-based and draft-based workflows - Update help text with environment-aware behavior examples --- scripts/docs-create.js | 116 ++++++++++++++++++++++++++++------------- 1 file changed, 80 insertions(+), 36 deletions(-) diff --git a/scripts/docs-create.js b/scripts/docs-create.js index 65c0f3bc8..aa7a0d799 100644 --- a/scripts/docs-create.js +++ b/scripts/docs-create.js @@ -41,6 +41,7 @@ const REPO_ROOT = join(__dirname, '..'); const TMP_DIR = join(REPO_ROOT, '.tmp'); const CONTEXT_FILE = join(TMP_DIR, 'scaffold-context.json'); const PROPOSAL_FILE = join(TMP_DIR, 'scaffold-proposal.yml'); +const PROMPT_FILE = join(TMP_DIR, 'scaffold-prompt.txt'); // Colors for console output const colors = { @@ -60,6 +61,31 @@ function log(message, color = 'reset') { console.log(`${colors[color]}${message}${colors.reset}`); } +/** + * Check if running in Claude Code environment + * @returns {boolean} True if Task function is available (Claude Code) + */ +function isClaudeCode() { + return typeof Task !== 'undefined'; +} + +/** + * Output prompt for use with external tools + * @param {string} prompt - The generated prompt text + * @param {boolean} printPrompt - If true, print to stdout; else save to file + */ +function outputPromptForExternalUse(prompt, printPrompt = false) { + if (printPrompt) { + // Output prompt text to stdout + console.log(prompt); + } else { + // Write prompt to file and output file path + writeFileSync(PROMPT_FILE, prompt, 'utf8'); + console.log(PROMPT_FILE); + } + process.exit(0); +} + /** * Prompt user for input (works in TTY and non-TTY environments) */ @@ -154,8 +180,8 @@ ${colors.bright}Options:${colors.reset} only local documentation links are followed. 
--context-only Stop after context preparation (for non-Claude tools) - --print-prompt Output the AI prompt to stdout and exit - (for piping to other tools) + --print-prompt Output prompt text to stdout instead of file path + (when running outside Claude Code) --proposal <path> Import and execute proposal from JSON file --dry-run Show what would be created without creating --yes Skip confirmation prompt @@ -218,12 +244,26 @@ ${colors.bright}Examples:${colors.reset} echo "# Test Content" | yarn docs:create --products influxdb3_core # Output prompt for use with other AI tools - yarn docs:create --print-prompt drafts/new-feature.md > prompt.txt + # (Outside Claude Code: outputs file path by default) + yarn docs:create drafts/new-feature.md # Outputs: .tmp/scaffold-prompt.txt + + # Use --print-prompt to output prompt text to stdout yarn docs:create --print-prompt drafts/new-feature.md | llm -m gpt-4 + yarn docs:create --print-prompt drafts/new-feature.md > prompt.txt # Preview changes yarn docs:create --from-draft drafts/new-feature.md --dry-run +${colors.bright}Environment-Aware Behavior:${colors.reset} + When running INSIDE Claude Code: + - Automatically runs AI analysis with Task() agent + - Creates file structure based on AI recommendations + + When running OUTSIDE Claude Code: + - Outputs prompt file path to stdout by default + - Use --print-prompt to output prompt text instead + - Use the prompt with other AI tools (llm, ChatGPT, etc.) + ${colors.bright}Note:${colors.reset} To edit existing pages, use: yarn docs:edit <url> `); @@ -1221,26 +1261,29 @@ async function main() { process.exit(0); } - if (options['print-prompt']) { - // Generate and print prompt - const selectedProducts = await selectProducts(context, options); - const mode = context.urls?.length > 0 ? 'create' : 'create'; - const isURLBased = true; - const hasExistingContent = - context.existingContent && Object.keys(context.existingContent).length > 0; + // Generate prompt for product selection + const selectedProducts = await selectProducts(context, options); + const mode = context.urls?.length > 0 ? 
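/* NOTE: both branches currently resolve to 'create'; presumably a placeholder where a future mode (for example, 'edit') would be distinguished */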
'create' : 'create'; + const isURLBased = true; + const hasExistingContent = + context.existingContent && + Object.keys(context.existingContent).length > 0; - const prompt = generateClaudePrompt( - context, - selectedProducts, - mode, - isURLBased, - hasExistingContent - ); - console.log(prompt); - process.exit(0); + const prompt = generateClaudePrompt( + context, + selectedProducts, + mode, + isURLBased, + hasExistingContent + ); + + // Check environment and handle prompt accordingly + if (!isClaudeCode()) { + // Not in Claude Code: output prompt for external use + outputPromptForExternalUse(prompt, options['print-prompt']); } - // Continue with AI analysis (Phase 2) + // In Claude Code: continue with AI analysis (Phase 2) log('\nšŸ¤– Running AI analysis with specialized agents...\n', 'bright'); await runAgentAnalysis(context, options); @@ -1255,25 +1298,26 @@ async function main() { process.exit(0); } - if (options['print-prompt']) { - // Generate and print prompt - const selectedProducts = await selectProducts(context, options); - const mode = 'create'; - const isURLBased = false; - const hasExistingContent = false; + // Generate prompt for product selection + const selectedProducts = await selectProducts(context, options); + const mode = 'create'; + const isURLBased = false; - const prompt = generateClaudePrompt( - context, - selectedProducts, - mode, - isURLBased, - hasExistingContent - ); - console.log(prompt); - process.exit(0); + const prompt = generateClaudePrompt( + context, + selectedProducts, + mode, + isURLBased, + false + ); + + // Check environment and handle prompt accordingly + if (!isClaudeCode()) { + // Not in Claude Code: output prompt for external use + outputPromptForExternalUse(prompt, options['print-prompt']); } - // Continue with AI analysis (Phase 2) + // In Claude Code: continue with AI analysis (Phase 2) log('\nšŸ¤– Running AI analysis with specialized agents...\n', 'bright'); await runAgentAnalysis(context, options); From e4a659b2693bef9cc37aff6d1f539b8ce3bceb76 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman <jstirnaman@influxdata.com> Date: Sat, 1 Nov 2025 10:24:55 -0500 Subject: [PATCH 11/15] fix: write logs to stderr to avoid interfering with piped output Change log() function to use console.error instead of console.log. This ensures that when stdout is piped (e.g., to 'code -'), only the prompt file path is sent through the pipe, while progress messages remain visible on the terminal via stderr. 
Usage: echo 'content' | yarn --silent docs:create --products X | code - --- scripts/docs-create.js | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/scripts/docs-create.js b/scripts/docs-create.js index aa7a0d799..1cbdeb187 100644 --- a/scripts/docs-create.js +++ b/scripts/docs-create.js @@ -55,10 +55,11 @@ const colors = { }; /** - * Print colored output + * Print colored output to stderr (so it doesn't interfere with piped output) */ function log(message, color = 'reset') { - console.log(`${colors[color]}${message}${colors.reset}`); + // Write to stderr so logs don't interfere with stdout (prompt path/text) + console.error(`${colors[color]}${message}${colors.reset}`); } /** From 370c295735e0fb5fc3258bfbf6d959dddae0d31c Mon Sep 17 00:00:00 2001 From: Jason Stirnaman <jstirnaman@influxdata.com> Date: Sat, 1 Nov 2025 14:08:02 -0500 Subject: [PATCH 12/15] WIP: docs:create usage examples --- content/create.md | 43 +++++++++++++++++++++++++++---------------- 1 file changed, 27 insertions(+), 16 deletions(-) diff --git a/content/create.md b/content/create.md index 5a72b0117..fcede07d0 100644 --- a/content/create.md +++ b/content/create.md @@ -71,8 +71,10 @@ yarn docs:edit https://docs.influxdata.com/influxdb3/enterprise/get-started/ Use the `yarn docs:create` command with your AI agent tool to scaffold frontmatter and generate new content. -- The `yarn docs:create` command generates a prompt file from a draft and your product selections. -- AI agents (`claude`, Copilot Agent mode, `codex`) can use the prompt file to generate content and frontmatter. +- The `yarn docs:create` command accepts draft input from stdin or from a file path and generates a prompt file from the draft and your product selections +- The prompt file makes AI agents aware of InfluxData docs guidelines, shared content, and product-specific requirements +- `yarn docs:create` is designed to work automatically with `claude`, but you can + use the generated prompt file with any AI agent (for example, `copilot` or `codex`) > \[!Tip] > @@ -80,15 +82,18 @@ Use the `yarn docs:create` command with your AI agent tool to scaffold frontmatt <!-- Coming soon: generate content from an issue with labels --> -### Generate content and frontmatter from a draft +#### Generate content and frontmatter from a draft {{% tabs-wrapper %}} {{% tabs %}} -[Claude Code](#) -[Other AI agents](#) +[Interactive (Claude Code)](#) +[Non-interactive (any agent)](#) {{% /tabs %}} {{% tab-content %}} +{{% /tab-content %}} +{{% tab-content %}} + 1. Open a Claude Code prompt: ```bash @@ -96,28 +101,34 @@ Use the `yarn docs:create` command with your AI agent tool to scaffold frontmatt ``` 2. In the prompt, run the `docs:create` command with the path to your draft file. + Optionally, include the `--products` flag and product namespaces to preselect products--for example: ```bash - yarn docs:create .context/drafts/"Upgrading Enterprise 3 (draft).md" + yarn docs:create .context/drafts/"Upgrading Enterprise 3 (draft).md" \ + --products influxdb3_enterprise,influxdb3_core ``` -3. When prompted, select the products to associate with the content. + If you don't include the `--products` flag, you'll be prompted to select products after running the command. The script first generates a prompt file, then the agent automatically uses it to generate content and frontmatter based on the draft and the products you select. {{% /tab-content %}} {{% tab-content %}} -1. In your terminal, run the `docs:create` command with the path to your draft file. 
+Use `docs:create` to generate a prompt file and then pipe it to your preferred AI agent. +Include the `--products` flag and product namespaces to preselect products - ```bash - yarn docs:create .context/drafts/"Upgrading Enterprise 3 (draft).md" - ``` -2. When prompted, select the products to associate with the content. - The script generates a prompt file and returns the file path. -3. Provide the prompt file to your preferred agent (`claude`, Copilot Agent mode, `codex`) to generate content and frontmatter based on the draft and the products you selected. - {{% /tab-content %}} - {{< /tabs-wrapper >}} +The following example uses Copilot to process a draft file: + +```bash +yarn --silent \ + docs:create .context/drafts/"Upgrading Enterprise 3 (draft).md" \ + --products "influxdb3_enterprise,influxdb3_core" | \ + copilot --prompt --allow-all-tools +``` + +{{% /tab-content %}} +{{< /tabs-wrapper >}} ## Review, commit, and create a pull request From 19f1b19d72eead7533699612b23d141b824e2151 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman <jstirnaman@influxdata.com> Date: Sat, 1 Nov 2025 14:18:25 -0500 Subject: [PATCH 13/15] feat: add `docs create` CLI with smart piping detection Major improvements to docs:create UX for both Claude Code and external tool integration: **New `docs` CLI command:** - Add scripts/docs-cli.js - main CLI with subcommand routing - Add bin field to package.json for `docs` command - Usage: `docs create` and `docs edit` (cleaner than yarn commands) **Smart piping detection:** - Auto-detect when stdout is piped (\!process.stdout.isTTY) - When piping: automatically output prompt text (no flag needed) - When interactive: output prompt file path - --print-prompt flag now optional (auto-enabled when piping) **Updated help text:** - Show `docs create` syntax first, yarn as alternative - Simplify examples with new CLI - Document smart piping behavior - Focus on two main workflows: Claude Code vs external agents **Usage examples:** # Inside Claude Code - automatic execution docs create drafts/new-feature.md # Pipe to external AI - prompt auto-detected docs create FILE --products X | claude -p docs create FILE --products X | copilot -p # Pipe from stdin echo 'content' | docs create --products X | claude -p Benefits: - Cleaner syntax (no yarn --silent needed) - No manual --print-prompt flag when piping - Consistent with industry tools (git, npm, etc.) - Backward compatible with yarn commands --- package.json | 3 ++ scripts/docs-cli.js | 77 +++++++++++++++++++++++++++++ scripts/docs-create.js | 108 +++++++++++++++++++---------------------- 3 files changed, 131 insertions(+), 57 deletions(-) create mode 100755 scripts/docs-cli.js diff --git a/package.json b/package.json index 62222e19d..a3f208931 100644 --- a/package.json +++ b/package.json @@ -4,6 +4,9 @@ "version": "1.0.0", "description": "InfluxDB documentation", "license": "MIT", + "bin": { + "docs": "scripts/docs-cli.js" + }, "resolutions": { "serialize-javascript": "^6.0.2" }, diff --git a/scripts/docs-cli.js b/scripts/docs-cli.js new file mode 100755 index 000000000..7d3cafbe3 --- /dev/null +++ b/scripts/docs-cli.js @@ -0,0 +1,77 @@ +#!/usr/bin/env node + +/** + * Main CLI entry point for docs tools + * Supports subcommands: create, edit, etc. 
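 * Subcommands are resolved through the `subcommands` map below and run in a child process; unknown commands exit with an error.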
+ * + * Usage: + * docs create <draft-path> [options] + * docs edit <url> [options] + */ + +import { fileURLToPath } from 'url'; +import { dirname, join } from 'path'; +import { spawn } from 'child_process'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = dirname(__filename); + +// Get subcommand and remaining arguments +const subcommand = process.argv[2]; +const args = process.argv.slice(3); + +// Map subcommands to script files +const subcommands = { + create: 'docs-create.js', + edit: 'docs-edit.js', +}; + +/** + * Print usage information + */ +function printUsage() { + console.log(` +Usage: docs <command> [options] + +Commands: + create <draft-path> Create new documentation from draft + edit <url> Edit existing documentation + +Examples: + docs create drafts/new-feature.md --products influxdb3_core + docs edit https://docs.influxdata.com/influxdb3/core/admin/ + +For command-specific help: + docs create --help + docs edit --help +`); +} + +// Handle no subcommand or help +if (!subcommand || subcommand === '--help' || subcommand === '-h') { + printUsage(); + process.exit(subcommand ? 0 : 1); +} + +// Validate subcommand +if (!subcommands[subcommand]) { + console.error(`Error: Unknown command '${subcommand}'`); + console.error(`Run 'docs --help' for usage information`); + process.exit(1); +} + +// Execute the appropriate script +const scriptPath = join(__dirname, subcommands[subcommand]); +const child = spawn('node', [scriptPath, ...args], { + stdio: 'inherit', + env: process.env, +}); + +child.on('exit', (code) => { + process.exit(code || 0); +}); + +child.on('error', (err) => { + console.error(`Failed to execute ${subcommand}:`, err.message); + process.exit(1); +}); diff --git a/scripts/docs-create.js b/scripts/docs-create.js index 1cbdeb187..f90571eb2 100644 --- a/scripts/docs-create.js +++ b/scripts/docs-create.js @@ -73,10 +73,16 @@ function isClaudeCode() { /** * Output prompt for use with external tools * @param {string} prompt - The generated prompt text - * @param {boolean} printPrompt - If true, print to stdout; else save to file + * @param {boolean} printPrompt - If true, force print to stdout */ function outputPromptForExternalUse(prompt, printPrompt = false) { - if (printPrompt) { + // Auto-detect if stdout is being piped + const isBeingPiped = !process.stdout.isTTY; + + // Print prompt text if explicitly requested OR if being piped + const shouldPrintText = printPrompt || isBeingPiped; + + if (shouldPrintText) { // Output prompt text to stdout console.log(prompt); } else { @@ -167,8 +173,12 @@ function printUsage() { ${colors.bright}Documentation Content Scaffolding${colors.reset} ${colors.bright}Usage:${colors.reset} - yarn docs:create <draft-path> Create from draft - yarn docs:create --url <url> --from-draft <path> Create at URL with draft + docs create <draft-path> Create from draft + docs create --url <url> --from-draft <path> Create at URL with draft + + # Or use with yarn: + yarn docs:create <draft-path> + yarn docs:create --url <url> --from-draft <path> ${colors.bright}Options:${colors.reset} <draft-path> Path to draft markdown file (positional argument) @@ -181,8 +191,7 @@ ${colors.bright}Options:${colors.reset} only local documentation links are followed. 
--context-only Stop after context preparation (for non-Claude tools) - --print-prompt Output prompt text to stdout instead of file path - (when running outside Claude Code) + --print-prompt Force prompt text output (auto-enabled when piping) --proposal <path> Import and execute proposal from JSON file --dry-run Show what would be created without creating --yes Skip confirmation prompt @@ -191,8 +200,8 @@ ${colors.bright}Options:${colors.reset} ${colors.bright}Stdin Support:${colors.reset} When piping content from stdin, you must specify target products: - cat draft.md | yarn docs:create --products influxdb3_core - echo "# Content" | yarn docs:create --products influxdb3_core,influxdb3_enterprise + cat draft.md | docs create --products influxdb3_core + echo "# Content" | docs create --products influxdb3_core,influxdb3_enterprise ${colors.bright}Link Following:${colors.reset} By default, the script extracts links from your draft and prompts you @@ -204,69 +213,54 @@ ${colors.bright}Link Following:${colors.reset} Local documentation links are always available for selection. Use --follow-external to also include external URLs (GitHub, etc.) -${colors.bright}Workflow (Create from draft):${colors.reset} +${colors.bright}Workflow (Inside Claude Code):${colors.reset} 1. Create a draft markdown file with your content - 2. Run: yarn docs:create drafts/new-feature.md + 2. Run: docs create drafts/new-feature.md 3. Script runs all agents automatically 4. Review and confirm to create files -${colors.bright}Workflow (Create at specific URL):${colors.reset} +${colors.bright}Workflow (Pipe to external agent):${colors.reset} 1. Create draft: vim drafts/new-feature.md - 2. Run: yarn docs:create \\ - --url https://docs.influxdata.com/influxdb3/core/admin/new-feature/ \\ - --from-draft drafts/new-feature.md - 3. Script determines structure from URL and uses draft content - 4. Review and confirm to create files - -${colors.bright}Workflow (Manual - for non-Claude tools):${colors.reset} - 1. Prepare context: - yarn docs:create --context-only drafts/new-feature.md - 2. Run your AI tool with templates from scripts/templates/ - 3. Save proposal to .tmp/scaffold-proposal.json - 4. Execute: - yarn docs:create --proposal .tmp/scaffold-proposal.json + 2. Pipe to your AI tool (prompt auto-detected): + docs create drafts/new-feature.md --products X | claude -p + docs create drafts/new-feature.md --products X | copilot -p + 3. 
AI generates files based on prompt ${colors.bright}Examples:${colors.reset} - # Create from draft (AI determines location) + # Inside Claude Code - automatic execution + docs create drafts/new-feature.md + + # Pipe to external AI tools - prompt auto-detected + docs create drafts/new-feature.md --products influxdb3_core | claude -p + docs create drafts/new-feature.md --products influxdb3_core | copilot -p + + # Pipe from stdin + cat drafts/quick-note.md | docs create --products influxdb3_core | claude -p + echo "# Quick note" | docs create --products influxdb3_core | copilot -p + + # Get prompt file path (when not piping) + docs create drafts/new-feature.md # Outputs: .tmp/scaffold-prompt.txt + + # Still works with yarn yarn docs:create drafts/new-feature.md - # Create at specific URL with draft content - yarn docs:create --url /influxdb3/core/admin/new-feature/ \\ - --from-draft drafts/new-feature.md + # Include external links for context selection + docs create --follow-external drafts/api-guide.md - # Create with linked context (prompts for link selection) - yarn docs:create drafts/new-feature.md +${colors.bright}Smart Behavior:${colors.reset} + INSIDE Claude Code: + → Automatically runs Task() agent to generate files - # Include external links for selection - yarn docs:create --follow-external drafts/api-guide.md + PIPING to another tool: + → Auto-detects piping and outputs prompt text + → No --print-prompt flag needed - # Pipe content from stdin (requires --products) - cat drafts/quick-note.md | yarn docs:create --products influxdb3_core - echo "# Test Content" | yarn docs:create --products influxdb3_core - - # Output prompt for use with other AI tools - # (Outside Claude Code: outputs file path by default) - yarn docs:create drafts/new-feature.md # Outputs: .tmp/scaffold-prompt.txt - - # Use --print-prompt to output prompt text to stdout - yarn docs:create --print-prompt drafts/new-feature.md | llm -m gpt-4 - yarn docs:create --print-prompt drafts/new-feature.md > prompt.txt - - # Preview changes - yarn docs:create --from-draft drafts/new-feature.md --dry-run - -${colors.bright}Environment-Aware Behavior:${colors.reset} - When running INSIDE Claude Code: - - Automatically runs AI analysis with Task() agent - - Creates file structure based on AI recommendations - - When running OUTSIDE Claude Code: - - Outputs prompt file path to stdout by default - - Use --print-prompt to output prompt text instead - - Use the prompt with other AI tools (llm, ChatGPT, etc.) 
+ INTERACTIVE (not piping): + → Outputs prompt file path: .tmp/scaffold-prompt.txt + → Use with: code .tmp/scaffold-prompt.txt ${colors.bright}Note:${colors.reset} - To edit existing pages, use: yarn docs:edit <url> + To edit existing pages, use: docs edit <url> `); } From be87ffc5b7fd38e6a1f9e44044d4466070a7c645 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman <jstirnaman@influxdata.com> Date: Sat, 1 Nov 2025 16:33:14 -0500 Subject: [PATCH 14/15] - Redesign of the docs CLI tools for creating and editing content - Cleaner interface works better for piping output to agents and downstream utilities - Updates README.md and other authoring docs This repository includes a `docs` CLI tool for common documentation workflows: ```sh npx docs create drafts/new-feature.md --products influxdb3_core npx docs edit https://docs.influxdata.com/influxdb3/core/admin/ npx docs placeholders content/influxdb3/core/admin/upgrade.md npx docs --help ``` --- DOCS-SHORTCODES.md | 118 +++++++++++++++++++++------- README.md | 77 ++++++++++++------ content/create.md | 19 +++-- drafts/test-url-handling.md | 9 --- package.json | 1 + scripts/README-add-placeholders.md | 108 ------------------------- scripts/add-placeholders.js | 40 ++++++++-- scripts/docs-cli.js | 7 +- scripts/setup-local-bin.js | 43 ++++++++++ scripts/templates/chatgpt-prompt.md | 11 ++- 10 files changed, 245 insertions(+), 188 deletions(-) delete mode 100644 drafts/test-url-handling.md delete mode 100644 scripts/README-add-placeholders.md create mode 100755 scripts/setup-local-bin.js diff --git a/DOCS-SHORTCODES.md b/DOCS-SHORTCODES.md index 2218a72e4..79ad5ecfb 100644 --- a/DOCS-SHORTCODES.md +++ b/DOCS-SHORTCODES.md @@ -19,7 +19,7 @@ Complete reference for custom Hugo shortcodes used in InfluxData documentation. - [Content Management](#content-management) - [Special Purpose](#special-purpose) ---- +*** ## Notes and Warnings @@ -146,7 +146,7 @@ Use the `{{< api-endpoint >}}` shortcode to generate a code block that contains - **method**: HTTP request method (get, post, patch, put, or delete) - **endpoint**: API endpoint - **api-ref**: Link the endpoint to a specific place in the API documentation -- **influxdb_host**: Specify which InfluxDB product host to use _if the `endpoint` contains the `influxdb/host` shortcode_. Uses the current InfluxDB product as default. Supports the following product values: +- **influxdb\_host**: Specify which InfluxDB product host to use *if the `endpoint` contains the `influxdb/host` shortcode*. Uses the current InfluxDB product as default. Supports the following product values: - oss - cloud - serverless @@ -268,11 +268,11 @@ To link to tabbed content, click on the tab and use the URL parameter shown. It Use the `{{< page-nav >}}` shortcode to add page navigation buttons to a page. These are useful for guiding users through a set of docs that should be read in sequential order. 
The shortcode has the following parameters: -- **prev:** path of the previous document _(optional)_ -- **next:** path of the next document _(optional)_ -- **prevText:** override the button text linking to the previous document _(optional)_ -- **nextText:** override the button text linking to the next document _(optional)_ -- **keepTab:** include the currently selected tab in the button link _(optional)_ +- **prev:** path of the previous document *(optional)* +- **next:** path of the next document *(optional)* +- **prevText:** override the button text linking to the previous document *(optional)* +- **nextText:** override the button text linking to the next document *(optional)* +- **keepTab:** include the currently selected tab in the button link *(optional)* The shortcode generates buttons that link to both the previous and next documents. By default, the shortcode uses either the `list_title` or the `title` of the linked document, but you can use `prevText` and `nextText` to override button text. @@ -308,7 +308,7 @@ The children shortcode can also be used to list only "section" articles (those w {{< children show="pages" >}} ``` -_By default, it displays both sections and pages._ +*By default, it displays both sections and pages.* Use the `type` argument to specify the format of the children list. @@ -325,7 +325,7 @@ The following list types are available: #### Include a "Read more" link -To include a "Read more" link with each child summary, set `readmore=true`. _Only the `articles` list type supports "Read more" links._ +To include a "Read more" link with each child summary, set `readmore=true`. *Only the `articles` list type supports "Read more" links.* ```md {{< children readmore=true >}} @@ -333,7 +333,7 @@ To include a "Read more" link with each child summary, set `readmore=true`. _Onl #### Include a horizontal rule -To include a horizontal rule after each child summary, set `hr=true`. _Only the `articles` list type supports horizontal rules._ +To include a horizontal rule after each child summary, set `hr=true`. *Only the `articles` list type supports horizontal rules.* ```md {{< children hr=true >}} @@ -390,11 +390,11 @@ This is useful for maintaining and referencing sample code variants in their nat #### Include specific files from the same directory -> [!Caution] +> \[!Caution] > **Don't use for code examples** > Using this and `get-shared-text` shortcodes to include code examples prevents the code from being tested. -To include the text from one file in another file in the same directory, use the `{{< get-leaf-text >}}` shortcode. The directory that contains both files must be a Hugo [_Leaf Bundle_](https://gohugo.io/content-management/page-bundles/#leaf-bundles), a directory that doesn't have any child directories. +To include the text from one file in another file in the same directory, use the `{{< get-leaf-text >}}` shortcode. The directory that contains both files must be a Hugo [*Leaf Bundle*](https://gohugo.io/content-management/page-bundles/#leaf-bundles), a directory that doesn't have any child directories. In the following example, `api` is a leaf bundle. `content` isn't. 
@@ -447,13 +447,13 @@ Each children list `type` uses frontmatter properties when generating the list o | Frontmatter | articles | list | functions | | :------------------- | :------: | :--: | :-------: | -| `list_title` | āœ“ | āœ“ | āœ“ | -| `description` | āœ“ | | | -| `external_url` | āœ“ | āœ“ | | -| `list_image` | āœ“ | | | -| `list_note` | | āœ“ | | -| `list_code_example` | āœ“ | | | -| `list_query_example` | āœ“ | | | +| `list_title` | āœ“ | āœ“ | āœ“ | +| `description` | āœ“ | | | +| `external_url` | āœ“ | āœ“ | | +| `list_image` | āœ“ | | | +| `list_note` | | āœ“ | | +| `list_code_example` | āœ“ | | | +| `list_query_example` | āœ“ | | | ## Visual Elements @@ -695,7 +695,7 @@ Column 2 The following options are available: -- half _(Default)_ +- half *(Default)* - third - quarter @@ -721,10 +721,10 @@ Click {{< caps >}}Add Data{{< /caps >}} ### Authentication token link -Use the `{{% token-link "<descriptor>" "<link_append>%}}` shortcode to automatically generate links to token management documentation. The shortcode accepts two _optional_ arguments: +Use the `{{% token-link "<descriptor>" "<link_append>%}}` shortcode to automatically generate links to token management documentation. The shortcode accepts two *optional* arguments: - **descriptor**: An optional token descriptor -- **link_append**: An optional path to append to the token management link path, `/<product>/<version>/admin/tokens/`. +- **link\_append**: An optional path to append to the token management link path, `/<product>/<version>/admin/tokens/`. ```md {{% token-link "database" "resource/" %}} @@ -775,7 +775,7 @@ Descriptions should follow consistent patterns: - Recommended: "your {{% token-link "database" %}}"{{% show-in "enterprise" %}} with permissions on the specified database{{% /show-in %}} - Avoid: "your token", "the token", "an authorization token" 3. **Database names**: - - Recommended: "the name of the database to [action]" + - Recommended: "the name of the database to \[action]" - Avoid: "your database", "the database name" 4. **Conditional content**: - Use `{{% show-in "enterprise" %}}` for content specific to enterprise versions @@ -797,13 +797,75 @@ Descriptions should follow consistent patterns: #### Syntax -- `{ placeholders="PATTERN1|PATTERN2" }`: Use this code block attribute to define placeholder patterns +- `{ placeholders="PATTERN1|PATTERN2" }`: Use this code block attribute to define placeholder patterns - `{{% code-placeholder-key %}}`: Use this shortcode to define a placeholder key - `{{% /code-placeholder-key %}}`: Use this shortcode to close the key name -_The `placeholders` attribute supercedes the deprecated `code-placeholders` shortcode._ +*The `placeholders` attribute supercedes the deprecated `code-placeholders` shortcode.* -#### Example usage +#### Automated placeholder syntax + +Use the `docs placeholders` command to automatically add placeholder syntax to code blocks and descriptions: + +```bash +# Process a file +npx docs placeholders content/influxdb3/core/admin/upgrade.md + +# Preview changes without modifying the file +npx docs placeholders content/influxdb3/core/admin/upgrade.md --dry + +# Get help +npx docs placeholders --help +``` + +**What it does:** + +1. Detects UPPERCASE placeholders in code blocks +2. Adds `{ placeholders="..." }` attribute to code fences +3. 
Wraps placeholder descriptions with `{{% code-placeholder-key %}}` shortcodes + +**Example transformation:** + +Before: + +````markdown +```bash +influxdb3 query \ + --database SYSTEM_DATABASE \ + --token ADMIN_TOKEN \ + "SELECT * FROM system.version" +``` + +Replace the following: + +- **`SYSTEM_DATABASE`**: The name of your system database +- **`ADMIN_TOKEN`**: An admin token with read permissions +```` + +After: + +````markdown +```bash { placeholders="ADMIN_TOKEN|SYSTEM_DATABASE" } +influxdb3 query \ + --database SYSTEM_DATABASE \ + --token ADMIN_TOKEN \ + "SELECT * FROM system.version" +``` + +Replace the following: + +- {{% code-placeholder-key %}}`SYSTEM_DATABASE`{{% /code-placeholder-key %}}: The name of your system database +- {{% code-placeholder-key %}}`ADMIN_TOKEN`{{% /code-placeholder-key %}}: An admin token with read permissions +```` + +**How it works:** + +- Pattern: Matches words with 2+ characters, all uppercase, can include underscores +- Excludes common words: HTTP verbs (GET, POST), protocols (HTTP, HTTPS), SQL keywords (SELECT, FROM), etc. +- Idempotent: Running multiple times won't duplicate syntax +- Preserves existing `placeholders` attributes and already-wrapped descriptions + +#### Manual placeholder usage ```sh { placeholders "DATABASE_NAME|USERNAME|PASSWORD_OR_TOKEN|API_TOKEN|exampleuser@influxdata.com" } curl --request POST http://localhost:8086/write?db=DATABASE_NAME \ @@ -839,7 +901,7 @@ Sample dataset to output. Use either `set` argument name or provide the set as t #### includeNull -Specify whether or not to include _null_ values in the dataset. Use either `includeNull` argument name or provide the boolean value as the second argument. +Specify whether or not to include *null* values in the dataset. Use either `includeNull` argument name or provide the boolean value as the second argument. #### includeRange @@ -1115,6 +1177,6 @@ The InfluxDB host placeholder that gets replaced by custom domains differs betwe {{< influxdb/host "serverless" >}} ``` ---- +*** **For working examples**: Test all shortcodes in [content/example.md](content/example.md) diff --git a/README.md b/README.md index 3774360e2..3194ad7b5 100644 --- a/README.md +++ b/README.md @@ -2,9 +2,9 @@ <img src="/static/img/influx-logo-cubo-dark.png" width="200"> </p> -# InfluxDB 2.0 Documentation +# InfluxData Product Documentation -This repository contains the InfluxDB 2.x documentation published at [docs.influxdata.com](https://docs.influxdata.com). +This repository contains the InfluxData product documentation for InfluxDB and related tooling published at [docs.influxdata.com](https://docs.influxdata.com). ## Contributing @@ -15,6 +15,26 @@ For information about contributing to the InfluxData documentation, see [Contrib For information about testing the documentation, including code block testing, link validation, and style linting, see [Testing guide](DOCS-TESTING.md). +## Documentation Tools + +This repository includes a `docs` CLI tool for common documentation workflows: + +```sh +# Create new documentation from a draft +npx docs create drafts/new-feature.md --products influxdb3_core + +# Edit existing documentation from a URL +npx docs edit https://docs.influxdata.com/influxdb3/core/admin/ + +# Add placeholder syntax to code blocks +npx docs placeholders content/influxdb3/core/admin/upgrade.md + +# Get help +npx docs --help +``` + +The `docs` command is automatically configured when you run `yarn install`. 
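+
+If the `docs` bin isn't linked yet (for example, before you've run `yarn install`), you can invoke the CLI entry point directly:
+
+```sh
+node scripts/docs-cli.js --help
+```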
+ ## Documentation Comprehensive reference documentation for contributors: @@ -27,6 +47,7 @@ Comprehensive reference documentation for contributors: - **[API Documentation](api-docs/README.md)** - API reference generation ### Quick Links + - [Style guidelines](DOCS-CONTRIBUTING.md#style-guidelines) - [Commit guidelines](DOCS-CONTRIBUTING.md#commit-guidelines) - [Code block testing](DOCS-TESTING.md#code-block-testing) @@ -35,42 +56,49 @@ Comprehensive reference documentation for contributors: InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our open source projects, -please responsibly disclose it by contacting security@influxdata.com. +please responsibly disclose it by contacting <security@influxdata.com>. More details about security vulnerability reporting, -including our GPG key, can be found at https://www.influxdata.com/how-to-report-security-vulnerabilities/. +including our GPG key, can be found at <https://www.influxdata.com/how-to-report-security-vulnerabilities/>. ## Running the docs locally 1. [**Clone this repository**](https://help.github.com/articles/cloning-a-repository/) to your local machine. -2. **Install NodeJS, Yarn, Hugo, & Asset Pipeline Tools** +2. **Install NodeJS, Yarn, Hugo, & Asset Pipeline Tools** - The InfluxData documentation uses [Hugo](https://gohugo.io/), a static site generator built in Go. The site uses Hugo's asset pipeline, which requires the extended version of Hugo along with NodeJS tools like PostCSS, to build and process stylesheets and JavaScript. + The InfluxData documentation uses [Hugo](https://gohugo.io/), a static site generator built in Go. The site uses Hugo's asset pipeline, which requires the extended version of Hugo along with NodeJS tools like PostCSS, to build and process stylesheets and JavaScript. - To install the required dependencies and build the assets, do the following: + To install the required dependencies and build the assets, do the following: - 1. [Install NodeJS](https://nodejs.org/en/download/) - 2. [Install Yarn](https://classic.yarnpkg.com/en/docs/install/) - 3. In your terminal, from the `docs-v2` directory, install the dependencies: + 1. [Install NodeJS](https://nodejs.org/en/download/) + 2. [Install Yarn](https://classic.yarnpkg.com/en/docs/install/) + 3. In your terminal, from the `docs-v2` directory, install the dependencies: - ```sh - cd docs-v2 - yarn install - ``` + ```sh + cd docs-v2 + yarn install + ``` - _**Note:** The most recent version of Hugo tested with this documentation is **0.149.0**._ + ***Note:** The most recent version of Hugo tested with this documentation is **0.149.0**.* + + After installation, the `docs` command will be available via `npx`: + + ```sh + npx docs --help + ``` 3. To generate the API docs, see [api-docs/README.md](api-docs/README.md). -4. **Start the Hugo server** +4. **Start the Hugo server** - Hugo provides a local development server that generates the HTML pages, builds the static assets, and serves them at `localhost:1313`. + Hugo provides a local development server that generates the HTML pages, builds the static assets, and serves them at `localhost:1313`. - In your terminal, start the Hugo server: + In your terminal, start the Hugo server: + + ```sh + npx hugo server + ``` - ```sh - npx hugo server - ``` 5. View the docs at [localhost:1313](http://localhost:1313). ### Alternative: Use docker compose @@ -81,7 +109,8 @@ including our GPG key, can be found at https://www.influxdata.com/how-to-report- 3. 
Use Docker Compose to start the Hugo server in development mode--for example, enter the following command in your terminal: - ```sh - docker compose up local-dev - ``` + ```sh + docker compose up local-dev + ``` + 4. View the docs at [localhost:1313](http://localhost:1313). diff --git a/content/create.md b/content/create.md index fcede07d0..854b67d96 100644 --- a/content/create.md +++ b/content/create.md @@ -61,19 +61,19 @@ Use `docs` scripts with AI agents to help you create and edit documentation loca ### Edit an existing page locally -Use the `yarn docs:edit` command to open an existing page in your editor. +Use the `npx docs edit` command to open an existing page in your editor. ```bash -yarn docs:edit https://docs.influxdata.com/influxdb3/enterprise/get-started/ +npx docs edit https://docs.influxdata.com/influxdb3/enterprise/get-started/ ``` ### Create content locally -Use the `yarn docs:create` command with your AI agent tool to scaffold frontmatter and generate new content. +Use the `npx docs create` command with your AI agent tool to scaffold frontmatter and generate new content. -- The `yarn docs:create` command accepts draft input from stdin or from a file path and generates a prompt file from the draft and your product selections +- The `npx docs create` command accepts draft input from stdin or from a file path and generates a prompt file from the draft and your product selections - The prompt file makes AI agents aware of InfluxData docs guidelines, shared content, and product-specific requirements -- `yarn docs:create` is designed to work automatically with `claude`, but you can +- `npx docs create` is designed to work automatically with `claude`, but you can use the generated prompt file with any AI agent (for example, `copilot` or `codex`) > \[!Tip] @@ -100,11 +100,11 @@ Use the `yarn docs:create` command with your AI agent tool to scaffold frontmatt claude code ``` -2. In the prompt, run the `docs:create` command with the path to your draft file. +2. In the prompt, run the `docs create` command with the path to your draft file. Optionally, include the `--products` flag and product namespaces to preselect products--for example: ```bash - yarn docs:create .context/drafts/"Upgrading Enterprise 3 (draft).md" \ + npx docs create .context/drafts/"Upgrading Enterprise 3 (draft).md" \ --products influxdb3_enterprise,influxdb3_core ``` @@ -115,14 +115,13 @@ The script first generates a prompt file, then the agent automatically uses it t {{% /tab-content %}} {{% tab-content %}} -Use `docs:create` to generate a prompt file and then pipe it to your preferred AI agent. +Use `npx docs create` to generate a prompt file and then pipe it to your preferred AI agent. Include the `--products` flag and product namespaces to preselect products The following example uses Copilot to process a draft file: ```bash -yarn --silent \ - docs:create .context/drafts/"Upgrading Enterprise 3 (draft).md" \ +npx docs create .context/drafts/"Upgrading Enterprise 3 (draft).md" \ --products "influxdb3_enterprise,influxdb3_core" | \ copilot --prompt --allow-all-tools ``` diff --git a/drafts/test-url-handling.md b/drafts/test-url-handling.md deleted file mode 100644 index f794a015e..000000000 --- a/drafts/test-url-handling.md +++ /dev/null @@ -1,9 +0,0 @@ -# Test URL Handling - -This is a test draft to see how the script handles URLs in content. 
- -Here's a link to the InfluxDB documentation: <https://docs.influxdata.com/influxdb3/core/> - -And here's a link to GitHub: <https://github.com/influxdata/docs-v2> - -The script should analyze this content and determine where to place it in the documentation structure. diff --git a/package.json b/package.json index a3f208931..77f1c2ee2 100644 --- a/package.json +++ b/package.json @@ -43,6 +43,7 @@ "vanillajs-datepicker": "^1.3.4" }, "scripts": { + "postinstall": "node scripts/setup-local-bin.js", "docs:create": "node scripts/docs-create.js", "docs:edit": "node scripts/docs-edit.js", "docs:add-placeholders": "node scripts/add-placeholders.js", diff --git a/scripts/README-add-placeholders.md b/scripts/README-add-placeholders.md deleted file mode 100644 index d9c455e78..000000000 --- a/scripts/README-add-placeholders.md +++ /dev/null @@ -1,108 +0,0 @@ -# Add Placeholders Script - -Automatically adds placeholder syntax to code blocks and placeholder descriptions in markdown files. - -## What it does - -This script finds UPPERCASE placeholders in code blocks and: - -1. **Adds `{ placeholders="PATTERN1|PATTERN2" }` attribute** to code block fences -2. **Wraps placeholder descriptions** with `{{% code-placeholder-key %}}` shortcodes - -## Usage - -### Direct usage - -```bash -# Process a single file -node scripts/add-placeholders.js <file.md> - -# Dry run to preview changes -node scripts/add-placeholders.js <file.md> --dry - -# Example -node scripts/add-placeholders.js content/influxdb3/enterprise/admin/upgrade.md -``` - -### Using npm script - -```bash -# Process a file -yarn docs:add-placeholders <file.md> - -# Dry run -yarn docs:add-placeholders <file.md> --dry -``` - -## Example transformations - -### Before - -````markdown -```bash -influxdb3 query \ - --database SYSTEM_DATABASE \ - --token ADMIN_TOKEN \ - "SELECT * FROM system.version" -``` - -Replace the following: - -- **`SYSTEM_DATABASE`**: The name of your system database -- **`ADMIN_TOKEN`**: An admin token with read permissions -```` - -### After - -````markdown -```bash { placeholders="ADMIN_TOKEN|SYSTEM_DATABASE" } -influxdb3 query \ - --database SYSTEM_DATABASE \ - --token ADMIN_TOKEN \ - "SELECT * FROM system.version" -``` - -Replace the following: - -- {{% code-placeholder-key %}}`SYSTEM_DATABASE`{{% /code-placeholder-key %}}: The name of your system database -- {{% code-placeholder-key %}}`ADMIN_TOKEN`{{% /code-placeholder-key %}}: An admin token with read permissions -```` - -## How it works - -### Placeholder detection - -The script automatically detects UPPERCASE placeholders in code blocks using these rules: - -- **Pattern**: Matches words with 2+ characters, all uppercase, can include underscores -- **Excludes common words**: HTTP verbs (GET, POST), protocols (HTTP, HTTPS), SQL keywords (SELECT, FROM), etc. - -### Code block processing - -1. Finds all code blocks (including indented ones) -2. Extracts UPPERCASE placeholders -3. Adds `{ placeholders="..." }` attribute to the fence line -4. Preserves indentation and language identifiers - -### Description wrapping - -1. Detects "Replace the following:" sections -2. Wraps placeholder descriptions matching `- **`PLACEHOLDER`**: description` -3. Preserves indentation and formatting -4. 
Skips already-wrapped descriptions - -## Options - -- `--dry` or `-d`: Preview changes without modifying files - -## Notes - -- The script is idempotent - running it multiple times on the same file won't duplicate syntax -- Preserves existing `placeholders` attributes in code blocks -- Works with both indented and non-indented code blocks -- Handles multiple "Replace the following:" sections in a single file - -## Related documentation - -- [DOCS-SHORTCODES.md](../DOCS-SHORTCODES.md) - Complete shortcode reference -- [DOCS-CONTRIBUTING.md](../DOCS-CONTRIBUTING.md) - Placeholder conventions and style guidelines diff --git a/scripts/add-placeholders.js b/scripts/add-placeholders.js index 42718bcf3..8cc4dd303 100755 --- a/scripts/add-placeholders.js +++ b/scripts/add-placeholders.js @@ -16,7 +16,7 @@ import { readFileSync, writeFileSync } from 'fs'; import { parseArgs } from 'node:util'; // Parse command-line arguments -const { positionals } = parseArgs({ +const { positionals, values } = parseArgs({ allowPositionals: true, options: { dry: { @@ -24,19 +24,47 @@ const { positionals } = parseArgs({ short: 'd', default: false, }, + help: { + type: 'boolean', + short: 'h', + default: false, + }, }, }); +// Show help if requested +if (values.help) { + console.log(` +Add placeholder syntax to code blocks + +Usage: + docs placeholders <file.md> [options] + +Options: + --dry, -d Preview changes without modifying files + --help, -h Show this help message + +Examples: + docs placeholders content/influxdb3/enterprise/admin/upgrade.md + docs placeholders content/influxdb3/core/admin/databases/create.md --dry + +What it does: + 1. Finds UPPERCASE placeholders in code blocks + 2. Adds { placeholders="PATTERN1|PATTERN2" } attribute to code fences + 3. Wraps placeholder descriptions with {{% code-placeholder-key %}} shortcodes +`); + process.exit(0); +} + if (positionals.length === 0) { - console.error('Usage: node scripts/add-placeholders.js <file.md> [--dry]'); - console.error( - 'Example: node scripts/add-placeholders.js content/influxdb3/enterprise/admin/upgrade.md' - ); + console.error('Error: Missing file path argument'); + console.error('Usage: docs placeholders <file.md> [--dry]'); + console.error('Run "docs placeholders --help" for more information'); process.exit(1); } const filePath = positionals[0]; -const isDryRun = process.argv.includes('--dry') || process.argv.includes('-d'); +const isDryRun = values.dry; /** * Extract UPPERCASE placeholders from a code block diff --git a/scripts/docs-cli.js b/scripts/docs-cli.js index 7d3cafbe3..aa8ff274c 100755 --- a/scripts/docs-cli.js +++ b/scripts/docs-cli.js @@ -2,11 +2,12 @@ /** * Main CLI entry point for docs tools - * Supports subcommands: create, edit, etc. 
+ * Supports subcommands: create, edit, placeholders * * Usage: * docs create <draft-path> [options] * docs edit <url> [options] + * docs placeholders <file.md> [options] */ import { fileURLToPath } from 'url'; @@ -24,6 +25,7 @@ const args = process.argv.slice(3); const subcommands = { create: 'docs-create.js', edit: 'docs-edit.js', + placeholders: 'add-placeholders.js', }; /** @@ -36,14 +38,17 @@ Usage: docs <command> [options] Commands: create <draft-path> Create new documentation from draft edit <url> Edit existing documentation + placeholders <file.md> Add placeholder syntax to code blocks Examples: docs create drafts/new-feature.md --products influxdb3_core docs edit https://docs.influxdata.com/influxdb3/core/admin/ + docs placeholders content/influxdb3/core/admin/upgrade.md For command-specific help: docs create --help docs edit --help + docs placeholders --help `); } diff --git a/scripts/setup-local-bin.js b/scripts/setup-local-bin.js new file mode 100755 index 000000000..2f19811b8 --- /dev/null +++ b/scripts/setup-local-bin.js @@ -0,0 +1,43 @@ +#!/usr/bin/env node + +/** + * Setup script to make the `docs` command available locally after yarn install. + * Creates a symlink in node_modules/.bin/docs pointing to scripts/docs-cli.js + */ + +import { fileURLToPath } from 'url'; +import { dirname, join } from 'path'; +import { existsSync, mkdirSync, symlinkSync, unlinkSync, chmodSync } from 'fs'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = dirname(__filename); +const rootDir = join(__dirname, '..'); + +const binDir = join(rootDir, 'node_modules', '.bin'); +const binLink = join(binDir, 'docs'); +const targetScript = join(rootDir, 'scripts', 'docs-cli.js'); + +try { + // Ensure node_modules/.bin directory exists + if (!existsSync(binDir)) { + mkdirSync(binDir, { recursive: true }); + } + + // Remove existing symlink if it exists + if (existsSync(binLink)) { + unlinkSync(binLink); + } + + // Create symlink + symlinkSync(targetScript, binLink, 'file'); + + // Ensure the target script is executable + chmodSync(targetScript, 0o755); + + console.log('āœ“ Created local `docs` command in node_modules/.bin/'); + console.log(' You can now use: npx docs <command>'); + console.log(' Or add node_modules/.bin to your PATH for direct access'); +} catch (error) { + console.error('Failed to setup local docs command:', error.message); + process.exit(1); +} diff --git a/scripts/templates/chatgpt-prompt.md b/scripts/templates/chatgpt-prompt.md index 3f3de19d5..b16631d7c 100644 --- a/scripts/templates/chatgpt-prompt.md +++ b/scripts/templates/chatgpt-prompt.md @@ -7,6 +7,7 @@ You are analyzing a documentation draft to generate an intelligent file structur **Context file**: `.tmp/scaffold-context.json` Read and analyze the context file, which contains: + - **draft**: The markdown content and any existing frontmatter - **products**: Available InfluxDB products (Core, Enterprise, Cloud, etc.) - **productHints**: Products mentioned or suggested based on content analysis @@ -49,11 +50,12 @@ For each file, create complete frontmatter with: - **weight**: Sequential weight based on siblings - **source**: (for frontmatter-only files) Path to shared content - **related**: 3-5 relevant related articles from `structure.existingPaths` -- **alt_links**: Map equivalent pages across products for cross-product navigation +- **alt\_links**: Map equivalent pages across products for cross-product navigation ### 4. 
Code Sample Considerations

 Based on `versionInfo`:
+
 - Use version-specific CLI commands (influxdb3, influx, influxctl)
 - Reference appropriate API endpoints (/api/v3, /api/v2)
 - Note testing requirements from `conventions.testing`
@@ -61,6 +63,7 @@ Based on `versionInfo`:
 ### 5. Style Compliance

 Follow conventions from `conventions.namingRules`:
+
 - Files: Use lowercase with hyphens (e.g., `manage-databases.md`)
 - Directories: Use lowercase with hyphens
 - Shared content: Place in appropriate `/content/shared/` subdirectory
@@ -133,4 +136,8 @@ Generate a JSON proposal matching the schema in `scripts/schemas/scaffold-propos
 4. Generate complete frontmatter for all files
 5. Save the proposal to `.tmp/scaffold-proposal.json`

-The proposal will be validated and used by `yarn docs:create --proposal .tmp/scaffold-proposal.json` to create the files.
+The following command validates and creates files from the proposal:
+
+```bash
+npx docs create --proposal .tmp/scaffold-proposal.json
+```

From b75bc95494d5581c5d40be32649ec19a8354129c Mon Sep 17 00:00:00 2001
From: Jason Stirnaman <jstirnaman@influxdata.com>
Date: Sat, 1 Nov 2025 16:38:19 -0500
Subject: [PATCH 15/15] feat(influxdb3): WIP: Use multi-file code and modules
 in plugins

---
 .../core/plugins/use-multifile-plugins.md     | 696 ++++++++++++++++++
 1 file changed, 696 insertions(+)
 create mode 100644 content/influxdb3/core/plugins/use-multifile-plugins.md

diff --git a/content/influxdb3/core/plugins/use-multifile-plugins.md b/content/influxdb3/core/plugins/use-multifile-plugins.md
new file mode 100644
index 000000000..551663711
--- /dev/null
+++ b/content/influxdb3/core/plugins/use-multifile-plugins.md
@@ -0,0 +1,696 @@
+---
+title: Use multi-file Python code and modules in plugins
+description: |
+  Organize complex plugin logic across multiple Python files and modules for better code reuse, testing, and maintainability in InfluxDB 3 Processing Engine plugins.
+menu:
+  influxdb3_core:
+    name: Use multi-file plugins
+    parent: Processing engine and Python plugins
+weight: 101
+influxdb3/core/tags: [processing engine, plugins, python, modules]
+related:
+- /influxdb3/core/plugins/
+- /influxdb3/core/plugins/extend-plugin/
+- /influxdb3/core/reference/cli/influxdb3/create/trigger/
+---
+
+As your plugin logic grows in complexity, organizing code across multiple Python files improves maintainability, enables code reuse, and makes testing easier.
+The InfluxDB 3 Processing Engine supports multi-file plugin architectures using standard Python module patterns.
+
+## Before you begin
+
+Ensure you have:
+
+- A working InfluxDB 3 Core instance with the Processing Engine enabled
+- Basic understanding of [Python modules and packages](https://docs.python.org/3/tutorial/modules.html)
+- Familiarity with [creating InfluxDB 3 plugins](/influxdb3/core/plugins/)
+
+## Multi-file plugin structure
+
+A multi-file plugin is a directory containing Python files organized as a package.
+The directory must include an `__init__.py` file that serves as the entry point and contains your trigger function.
+
+### Basic structure
+
+```
+my_plugin/
+ā”œā”€ā”€ __init__.py # Required - entry point with trigger function
+ā”œā”€ā”€ processors.py # Data processing functions
+ā”œā”€ā”€ utils.py # Helper utilities
+└── config.py # Configuration management
+```
+
+### Required: `__init__.py` entry point
+
+The `__init__.py` file must contain the trigger function that InfluxDB calls when the trigger fires.
+This file imports and orchestrates code from other modules in your plugin. 
+ +```python +# my_plugin/__init__.py +from .processors import process_data +from .config import load_settings +from .utils import format_output + +def process_writes(influxdb3_local, table_batches, args=None): + """Entry point for WAL trigger.""" + settings = load_settings(args) + + for table_batch in table_batches: + processed_data = process_data(table_batch, settings) + output = format_output(processed_data) + influxdb3_local.write(output) +``` + +## Organizing plugin code + +### Separate concerns into modules + +Organize your plugin code by functional responsibility to improve maintainability and testing. + +#### processors.py - Data transformation logic + +```python +# my_plugin/processors.py +"""Data processing and transformation functions.""" + +def process_data(table_batch, settings): + """Transform data according to configuration settings.""" + table_name = table_batch["table_name"] + rows = table_batch["rows"] + + transformed_rows = [] + for row in rows: + transformed = transform_row(row, settings) + if transformed: + transformed_rows.append(transformed) + + return { + "table": table_name, + "rows": transformed_rows, + "count": len(transformed_rows) + } + +def transform_row(row, settings): + """Apply transformations to a single row.""" + # Apply threshold filtering + if "value" in row and row["value"] < settings.get("min_value", 0): + return None + + # Apply unit conversion if configured + if settings.get("convert_units"): + row["value"] = row["value"] * settings.get("conversion_factor", 1.0) + + return row +``` + +#### config.py - Configuration management + +```python +# my_plugin/config.py +"""Plugin configuration parsing and validation.""" + +DEFAULT_SETTINGS = { + "min_value": 0.0, + "convert_units": False, + "conversion_factor": 1.0, + "output_measurement": "processed_data", +} + +def load_settings(args): + """Load and validate plugin settings from trigger arguments.""" + settings = DEFAULT_SETTINGS.copy() + + if not args: + return settings + + # Parse numeric arguments + if "min_value" in args: + settings["min_value"] = float(args["min_value"]) + + if "conversion_factor" in args: + settings["conversion_factor"] = float(args["conversion_factor"]) + + # Parse boolean arguments + if "convert_units" in args: + settings["convert_units"] = args["convert_units"].lower() in ("true", "1", "yes") + + # Parse string arguments + if "output_measurement" in args: + settings["output_measurement"] = args["output_measurement"] + + return settings + +def validate_settings(settings): + """Validate settings and raise exceptions for invalid configurations.""" + if settings["min_value"] < 0: + raise ValueError("min_value must be non-negative") + + if settings["conversion_factor"] <= 0: + raise ValueError("conversion_factor must be positive") + + return True +``` + +#### utils.py - Helper functions + +```python +# my_plugin/utils.py +"""Utility functions for data formatting and logging.""" + +from datetime import datetime + +def format_output(processed_data): + """Format processed data for writing to InfluxDB.""" + from influxdb3_local import LineBuilder + + lines = [] + measurement = processed_data.get("measurement", "processed_data") + + for row in processed_data["rows"]: + line = LineBuilder(measurement) + + # Add tags from row + for key, value in row.items(): + if key.startswith("tag_"): + line.tag(key.replace("tag_", ""), str(value)) + + # Add fields from row + for key, value in row.items(): + if key.startswith("field_"): + field_name = key.replace("field_", "") + if isinstance(value, 
float): + line.float64_field(field_name, value) + elif isinstance(value, int): + line.int64_field(field_name, value) + elif isinstance(value, str): + line.string_field(field_name, value) + + lines.append(line) + + return lines + +def log_metrics(influxdb3_local, operation, duration_ms, record_count): + """Log plugin performance metrics.""" + influxdb3_local.info( + f"Operation: {operation}, " + f"Duration: {duration_ms}ms, " + f"Records: {record_count}" + ) +``` + +## Importing external libraries + +Multi-file plugins can use both relative imports (for your own modules) and absolute imports (for external libraries). + +### Relative imports for plugin modules + +Use relative imports to reference other modules within your plugin: + +```python +# my_plugin/__init__.py +from .processors import process_data # Same package +from .config import load_settings # Same package +from .utils import format_output # Same package + +# Relative imports from subdirectories +from .transforms.aggregators import calculate_mean +from .integrations.webhook import send_notification +``` + +### Absolute imports for external libraries + +Use absolute imports for standard library and third-party packages: + +```python +# my_plugin/processors.py +import json +import time +from datetime import datetime, timedelta +from collections import defaultdict + +# Third-party libraries (must be installed with influxdb3 install package) +import pandas as pd +import numpy as np +``` + +### Installing third-party dependencies + +Before using external libraries, install them into the Processing Engine's Python environment: + +```bash +# Install packages for your plugin +influxdb3 install package pandas numpy requests +``` + +For Docker deployments: + +```bash +docker exec -it CONTAINER_NAME influxdb3 install package pandas numpy requests +``` + +## Advanced plugin patterns + +### Nested module structure + +For complex plugins, organize code into subdirectories: + +``` +my_advanced_plugin/ +ā”œā”€ā”€ __init__.py +ā”œā”€ā”€ config.py +ā”œā”€ā”€ transforms/ +│ ā”œā”€ā”€ __init__.py +│ ā”œā”€ā”€ aggregators.py +│ └── filters.py +ā”œā”€ā”€ integrations/ +│ ā”œā”€ā”€ __init__.py +│ ā”œā”€ā”€ webhook.py +│ └── email.py +└── utils/ + ā”œā”€ā”€ __init__.py + ā”œā”€ā”€ logging.py + └── validators.py +``` + +Import from nested modules: + +```python +# my_advanced_plugin/__init__.py +from .transforms.aggregators import calculate_statistics +from .transforms.filters import apply_threshold_filter +from .integrations.webhook import send_alert +from .utils.logging import setup_logger + +def process_writes(influxdb3_local, table_batches, args=None): + logger = setup_logger(influxdb3_local) + + for table_batch in table_batches: + # Filter data + filtered = apply_threshold_filter(table_batch, threshold=100) + + # Calculate statistics + stats = calculate_statistics(filtered) + + # Send alerts if needed + if stats["max"] > 1000: + send_alert(stats, logger) +``` + +### Shared code across plugins + +Share common code across multiple plugins using a shared module directory: + +``` +plugins/ +ā”œā”€ā”€ shared/ +│ ā”œā”€ā”€ __init__.py +│ ā”œā”€ā”€ formatters.py +│ └── validators.py +ā”œā”€ā”€ plugin_a/ +│ └── __init__.py +└── plugin_b/ + └── __init__.py +``` + +Add the shared directory to Python's module search path in your plugin: + +```python +# plugin_a/__init__.py +import sys +from pathlib import Path + +# Add shared directory to path +plugin_dir = Path(__file__).parent.parent +sys.path.insert(0, str(plugin_dir)) + +# Now import from shared +from 
shared.formatters import format_line_protocol +from shared.validators import validate_data + +def process_writes(influxdb3_local, table_batches, args=None): + for table_batch in table_batches: + if validate_data(table_batch): + formatted = format_line_protocol(table_batch) + influxdb3_local.write(formatted) +``` + +## Testing multi-file plugins + +### Unit testing individual modules + +Test modules independently before integration: + +```python +# tests/test_processors.py +import unittest +from my_plugin.processors import transform_row +from my_plugin.config import load_settings + +class TestProcessors(unittest.TestCase): + def test_transform_row_filtering(self): + """Test that rows below threshold are filtered.""" + settings = {"min_value": 10.0} + row = {"value": 5.0} + + result = transform_row(row, settings) + + self.assertIsNone(result) + + def test_transform_row_conversion(self): + """Test unit conversion.""" + settings = { + "convert_units": True, + "conversion_factor": 2.0, + "min_value": 0.0 + } + row = {"value": 10.0} + + result = transform_row(row, settings) + + self.assertEqual(result["value"], 20.0) + +if __name__ == "__main__": + unittest.main() +``` + +### Testing with the influxdb3 CLI + +Test your complete multi-file plugin before deployment: + +```bash +# Test scheduled plugin +influxdb3 test schedule_plugin \ + --database testdb \ + --schedule "0 0 * * * *" \ + --plugin-dir /path/to/plugins \ + my_plugin + +# Test WAL plugin with sample data +influxdb3 test wal_plugin \ + --database testdb \ + --plugin-dir /path/to/plugins \ + my_plugin +``` + +For more testing options, see the [influxdb3 test reference](/influxdb3/core/reference/cli/influxdb3/test/). + +## Deploying multi-file plugins + +### Upload plugin directory + +Upload your complete plugin directory when creating a trigger: + +```bash +# Upload the entire plugin directory +influxdb3 create trigger \ + --trigger-spec "table:sensor_data" \ + --path "/local/path/to/my_plugin" \ + --upload \ + --database mydb \ + sensor_processor +``` + +The `--upload` flag transfers all files in the directory to the server's plugin directory. + +### Update plugin code + +Update all files in a running plugin: + +```bash +# Update the plugin with new code +influxdb3 update trigger \ + --database mydb \ + --trigger-name sensor_processor \ + --path "/local/path/to/my_plugin" +``` + +The update replaces all plugin files while preserving trigger configuration. 
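+
+To confirm that the updated module code loads and runs before the next trigger fires, you can re-run the test commands shown earlier--for example (a sketch that reuses the example database and paths from the testing section above):
+
+```bash
+# Re-run the WAL plugin test against the updated plugin code
+influxdb3 test wal_plugin \
+  --database testdb \
+  --plugin-dir /path/to/plugins \
+  my_plugin
+```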
+ +## Best practices + +### Code organization + +- **Single responsibility**: Each module should have one clear purpose +- **Shallow hierarchies**: Avoid deeply nested directory structures (2-3 levels maximum) +- **Descriptive names**: Use clear, descriptive module and function names +- **Module size**: Keep modules under 300-400 lines for maintainability + +### Import management + +- **Explicit imports**: Use explicit imports rather than `from module import *` +- **Standard library first**: Import standard library, then third-party, then local modules +- **Avoid circular imports**: Design modules to prevent circular dependencies + +Example import organization: + +```python +# Standard library +import json +import time +from datetime import datetime + +# Third-party packages +import pandas as pd +import numpy as np + +# Local modules +from .config import load_settings +from .processors import process_data +from .utils import format_output +``` + +### Error handling + +Centralize error handling in your entry point: + +```python +# my_plugin/__init__.py +from .processors import process_data +from .config import load_settings, validate_settings + +def process_writes(influxdb3_local, table_batches, args=None): + try: + # Load and validate configuration + settings = load_settings(args) + validate_settings(settings) + + # Process data + for table_batch in table_batches: + process_data(influxdb3_local, table_batch, settings) + + except ValueError as e: + influxdb3_local.error(f"Configuration error: {e}") + except Exception as e: + influxdb3_local.error(f"Unexpected error: {e}") +``` + +### Documentation + +Document your modules with docstrings: + +```python +""" +my_plugin - Data processing plugin for sensor data. + +This plugin processes incoming sensor data by: +1. Filtering values below configured threshold +2. Converting units if requested +3. Writing processed data to output measurement + +Modules: +- processors: Core data transformation logic +- config: Configuration parsing and validation +- utils: Helper functions for formatting and logging +""" + +def process_writes(influxdb3_local, table_batches, args=None): + """Process incoming sensor data writes. 
+
+    Args:
+        influxdb3_local: InfluxDB API interface
+        table_batches: List of table batches with written data
+        args: Optional trigger arguments for configuration
+
+    Trigger arguments:
+        min_value (float): Minimum value threshold
+        convert_units (bool): Enable unit conversion
+        conversion_factor (float): Conversion multiplier
+        output_measurement (str): Target measurement name
+    """
+    pass
+```
+
+## Example: Complete multi-file plugin
+
+Here's a complete example of a temperature monitoring plugin with multi-file organization:
+
+### Plugin structure
+
+```
+temperature_monitor/
+ā”œā”€ā”€ __init__.py
+ā”œā”€ā”€ config.py
+ā”œā”€ā”€ processors.py
+└── alerts.py
+```
+
+### `__init__.py`
+
+```python
+# temperature_monitor/__init__.py
+"""Temperature monitoring plugin with alerting."""
+
+from .config import load_config
+from .processors import calculate_statistics
+from .alerts import check_thresholds
+
+def process_scheduled_call(influxdb3_local, call_time, args=None):
+    """Monitor temperature data and send alerts."""
+    try:
+        config = load_config(args)
+
+        # Query recent temperature data
+        query = f"""
+        SELECT temp_value, location
+        FROM {config['measurement']}
+        WHERE time > now() - INTERVAL '{config['window']}'
+        """
+        results = influxdb3_local.query(query)
+
+        # Calculate statistics
+        stats = calculate_statistics(results)
+
+        # Check thresholds and alert
+        check_thresholds(influxdb3_local, stats, config)
+
+        influxdb3_local.info(
+            f"Processed {len(results)} readings "
+            f"from {len(stats)} locations"
+        )
+
+    except Exception as e:
+        influxdb3_local.error(f"Plugin error: {e}")
+```
+
+### config.py
+
+```python
+# temperature_monitor/config.py
+"""Configuration management for temperature monitor."""
+
+DEFAULTS = {
+    "measurement": "temperature",
+    "window": "1 hour",
+    "high_threshold": 30.0,
+    "low_threshold": 10.0,
+    "alert_measurement": "temperature_alerts"
+}
+
+def load_config(args):
+    """Load configuration from trigger arguments."""
+    config = DEFAULTS.copy()
+
+    if args:
+        for key in DEFAULTS:
+            if key in args:
+                if key.endswith("_threshold"):
+                    config[key] = float(args[key])
+                else:
+                    config[key] = args[key]
+
+    return config
+```
+
+### processors.py
+
+```python
+# temperature_monitor/processors.py
+"""Data processing functions."""
+
+from collections import defaultdict
+
+def calculate_statistics(data):
+    """Calculate statistics by location."""
+    stats = defaultdict(lambda: {
+        "count": 0,
+        "sum": 0.0,
+        "min": float('inf'),
+        "max": float('-inf')
+    })
+
+    for row in data:
+        location = row.get("location", "unknown")
+        value = float(row.get("temp_value", 0))
+
+        s = stats[location]
+        s["count"] += 1
+        s["sum"] += value
+        s["min"] = min(s["min"], value)
+        s["max"] = max(s["max"], value)
+
+    # Calculate averages
+    for location, s in stats.items():
+        if s["count"] > 0:
+            s["avg"] = s["sum"] / s["count"]
+
+    return dict(stats)
+```
+
+### alerts.py
+
+```python
+# temperature_monitor/alerts.py
+"""Alert checking and notification."""
+
+def check_thresholds(influxdb3_local, stats, config):
+    """Check temperature thresholds and write alerts."""
+    from influxdb3_local import LineBuilder
+
+    high_threshold = config["high_threshold"]
+    low_threshold = config["low_threshold"]
+    alert_measurement = config["alert_measurement"]
+
+    for location, s in stats.items():
+        if s["max"] > high_threshold:
+            line = LineBuilder(alert_measurement)
+            line.tag("location", location)
+            line.tag("severity", "high")
+            line.float64_field("temperature", s["max"])
+            line.string_field("message",
+                f"High 
temperature: {s['max']}°C exceeds {high_threshold}°C") + + influxdb3_local.write(line) + influxdb3_local.warn(f"High temperature alert for {location}") + + elif s["min"] < low_threshold: + line = LineBuilder(alert_measurement) + line.tag("location", location) + line.tag("severity", "low") + line.float64_field("temperature", s["min"]) + line.string_field("message", + f"Low temperature: {s['min']}°C below {low_threshold}°C") + + influxdb3_local.write(line) + influxdb3_local.warn(f"Low temperature alert for {location}") +``` + +### Deploy the plugin + +```bash +# Create trigger with configuration +influxdb3 create trigger \ + --trigger-spec "every:5m" \ + --path "/local/path/to/temperature_monitor" \ + --upload \ + --trigger-arguments high_threshold=35,low_threshold=5,window="15 minutes" \ + --database sensors \ + temp_monitor +``` + +## Related resources + +- [Processing engine and Python plugins](/influxdb3/core/plugins/) +- [Extend plugins with API features](/influxdb3/core/plugins/extend-plugin/) +- [Plugin library](/influxdb3/core/plugins/library/) +- [influxdb3 create trigger](/influxdb3/core/reference/cli/influxdb3/create/trigger/) +- [influxdb3 test](/influxdb3/core/reference/cli/influxdb3/test/)