Compare commits: `link-check...master` (250 commits)
@@ -0,0 +1,74 @@

```toml
# Lychee link checker configuration
# Generated by link-checker

[lychee]

# Performance settings

# Maximum number of retries for failed checks
max_retries = 3

# Timeout for each link check (in seconds)
timeout = 30

# Maximum number of concurrent checks
max_concurrency = 128

skip_code_blocks = false

# HTTP settings

# Identify the tool to external services
user_agent = "Mozilla/5.0 (compatible; link-checker)"

# Accept these HTTP status codes as valid
accept = [200, 201, 202, 203, 204, 206, 301, 302, 303, 304, 307, 308]

# Skip these URL schemes
scheme = ["file", "mailto", "tel"]

# Exclude patterns (regex supported)
exclude = [
  # Localhost URLs
  "^https?://localhost",
  "^https?://127\\.0\\.0\\.1",

  # Common CI/CD environments
  "^https?://.*\\.local",

  # Example domains used in documentation
  "^https?://example\\.(com|org|net)",

  # Placeholder URLs from code block filtering
  "https://example.com/REMOVED_FROM_CODE_BLOCK",
  "example.com/INLINE_CODE_URL",

  # URLs that require authentication
  "^https?://.*\\.slack\\.com",
  "^https?://.*\\.atlassian\\.net",

  # GitHub URLs (often fail due to rate limiting and bot detection)
  "^https?://github\\.com",

  # StackExchange network URLs (often block automated requests)
  "^https?://.*\\.stackexchange\\.com",
  "^https?://stackoverflow\\.com",
  "^https?://.*\\.stackoverflow\\.com",

  # Docker Hub URLs (rate limiting and bot detection)
  "^https?://hub\\.docker\\.com",

  # Common documentation placeholders
  "YOUR_.*",
  "REPLACE_.*",
  "<.*>",
]

# Request headers
[headers]
# Add custom headers here if needed
# "Authorization" = "Bearer $GITHUB_TOKEN"

# Cache settings
cache = true
max_cache_age = "1d"
```
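The `exclude` entries above are ordinary regular expressions matched against each candidate URL. As a standalone illustration of how such patterns behave (a sketch only, not part of the configuration; the checker's real matching code may differ):

```ts
// Mirrors two of the exclude patterns above; illustrative only.
const excludes: RegExp[] = [
  /^https?:\/\/localhost/,
  /^https?:\/\/example\.(com|org|net)/,
];

const isExcluded = (url: string): boolean =>
  excludes.some((re) => re.test(url));

console.log(isExcluded('http://localhost:1313/influxdb3/core/')); // true
console.log(isExcluded('https://example.org/path'));              // true
console.log(isExcluded('https://docs.influxdata.com/'));          // false (not matched by these two)
```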
@@ -0,0 +1,125 @@

```toml
# Production Link Checker Configuration for InfluxData docs-v2
# Optimized for performance, reliability, and reduced false positives

[lychee]

# Performance settings

# Maximum number of retries for failed checks
max_retries = 3

# Timeout for each link check (in seconds)
timeout = 30

# Maximum number of concurrent checks
max_concurrency = 128

skip_code_blocks = false

# HTTP settings

# Identify the tool to external services
"User-Agent" = "Mozilla/5.0 (compatible; influxdata-link-checker/1.0; +https://github.com/influxdata/docs-v2)"

accept = [200, 201, 202, 203, 204, 206, 301, 302, 303, 304, 307, 308]

# Skip these URL schemes
scheme = ["mailto", "tel"]

# Performance optimizations
cache = true
max_cache_age = "1h"

# Retry configuration for reliability
include_verbatim = false

# Exclusion patterns for docs-v2 (regex supported)
exclude = [
  # Localhost URLs
  "^https?://localhost",
  "^https?://127\\.0\\.0\\.1",

  # Common CI/CD environments
  "^https?://.*\\.local",

  # Example domains used in documentation
  "^https?://example\\.(com|org|net)",

  # Placeholder URLs from code block filtering
  "https://example.com/REMOVED_FROM_CODE_BLOCK",
  "example.com/INLINE_CODE_URL",

  # URLs that require authentication
  "^https?://.*\\.slack\\.com",
  "^https?://.*\\.atlassian\\.net",

  # GitHub URLs (often fail due to rate limiting and bot detection)
  "^https?://github\\.com",

  # Social media URLs (often block bots)
  "^https?://reddit\\.com",
  "^https?://.*\\.reddit\\.com",

  # StackExchange network URLs (often block automated requests)
  "^https?://.*\\.stackexchange\\.com",
  "^https?://stackoverflow\\.com",
  "^https?://.*\\.stackoverflow\\.com",

  # Docker Hub URLs (rate limiting and bot detection)
  "^https?://hub\\.docker\\.com",

  # InfluxData support URLs (certificate/SSL issues in CI)
  "^https?://support\\.influxdata\\.com",

  # AI platforms (often block automated requests)
  "^https?://claude\\.ai",
  "^https?://.*\\.claude\\.ai",

  # Production site URLs (when testing locally, these should be relative)
  # This excludes canonical URLs and other absolute production URLs
  # TODO: Remove after fixing canonical URL generation or link-checker domain replacement
  "^https://docs\\.influxdata\\.com/",

  # Common documentation placeholders
  "YOUR_.*",
  "REPLACE_.*",
  "<.*>",
]

# Request headers
[headers]
# Add custom headers here if needed
# "Authorization" = "Bearer $GITHUB_TOKEN"
"Accept" = "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
"Accept-Language" = "en-US,en;q=0.5"
"Accept-Encoding" = "gzip, deflate"
"DNT" = "1"
"Connection" = "keep-alive"
"Upgrade-Insecure-Requests" = "1"

[ci]
# CI-specific settings

[ci.github_actions]
output_format = "json"
create_annotations = true
fail_fast = false
max_annotations = 50  # Limit to avoid overwhelming PR comments

[ci.performance]
# Performance tuning for CI environment
parallel_requests = 32
connection_timeout = 10
read_timeout = 30

# Resource limits
max_memory_mb = 512
max_execution_time_minutes = 10

[reporting]
# Report configuration
include_fragments = false
verbose = false
no_progress = true  # Disable progress bar in CI

# Summary settings
show_success_count = true
show_skipped_count = true
```
```diff
@@ -2,7 +2,7 @@ version: 2.1
 jobs:
   build:
     docker:
-      - image: cimg/node:20.12.1
+      - image: cimg/node:24.5.0
     environment:
       S3DEPLOY_VERSION: "2.11.0"
       # From https://github.com/bep/s3deploy/releases
```
@@ -0,0 +1,253 @@

---
name: ui-dev
description: UI TypeScript, Hugo, and SASS (CSS) development specialist for the InfluxData docs-v2 repository
tools: ["*"]
author: InfluxData
version: "1.0"
---

# UI TypeScript & Hugo Development Agent

## Purpose

Specialized agent for TypeScript and Hugo development in the InfluxData docs-v2 repository. Assists with implementing TypeScript for new documentation site features while maintaining compatibility with the existing JavaScript ecosystem.

## Scope and Responsibilities

### Workflow

- Start by verifying a clear understanding of the requested feature or fix.
- Ask if there's an existing plan to follow.
- Verify any claimed changes by reading the actual files.

### Primary Capabilities

1. **TypeScript Implementation**
   - Convert existing JavaScript modules to TypeScript
   - Implement new features using TypeScript best practices
   - Maintain type safety while preserving Hugo integration
   - Configure TypeScript for Hugo's asset pipeline

2. **Component Development**
   - Create new component-based modules following the established registry pattern
   - Implement TypeScript interfaces for component options and state
   - Ensure proper integration with Hugo's data attributes system
   - Maintain backwards compatibility with existing JavaScript components

3. **Hugo Asset Pipeline Integration**
   - Configure TypeScript compilation for Hugo's build process
   - Manage module imports and exports for Hugo's ES6 module system
   - Optimize TypeScript output for production builds
   - Handle Hugo template data integration with TypeScript

4. **Testing and Quality Assurance**
   - Write and maintain Cypress e2e tests for TypeScript components
   - Configure ESLint rules for TypeScript code
   - Ensure proper type checking in CI/CD pipeline
   - Debug TypeScript compilation issues

### Technical Expertise

- **TypeScript Configuration**: Advanced `tsconfig.json` setup for Hugo projects
- **Component Architecture**: Following the established component registry pattern from `main.js`
- **Hugo Integration**: Understanding Hugo's asset pipeline and template system
- **Module Systems**: ES6 modules, imports/exports, and Hugo's asset bundling
- **Type Definitions**: Creating interfaces for Hugo data, component options, and external libraries

## Current Project Context

### Existing Infrastructure

- **Build System**: Hugo extended with PostCSS and TypeScript compilation
- **Module Entry Point**: `assets/js/main.js` with component registry pattern
- **TypeScript Config**: `tsconfig.json` configured for ES2020 with DOM types (see the sketch after this list)
- **Testing**: Cypress for e2e testing, ESLint for code quality
- **Component Pattern**: Data-attribute based component initialization
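A minimal `tsconfig.json` consistent with that description might look like this (an illustrative sketch; the repository's actual file may set additional options):

```json
{
  "compilerOptions": {
    "target": "ES2020",
    "module": "ES2020",
    "lib": ["ES2020", "DOM"],
    "outDir": "dist",
    "declaration": true
  },
  "include": ["assets/js/**/*.ts"]
}
```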
### Key Files and Patterns

- **Component Registry**: `main.js` exports `componentRegistry` mapping component names to constructors (see the sketch after this list)
- **Component Pattern**: Components accept `{ component: HTMLElement }` options
- **Data Attributes**: Components initialized via `data-component` attributes
- **Module Imports**: ES6 imports with `.js` extensions for Hugo compatibility
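A minimal TypeScript sketch of that registry pattern (component names are hypothetical; the real `main.js` is plain JavaScript and more complete):

```ts
// Map component names to initializer functions.
import initMyComponent from './my-component.js'; // .js extension, per the Hugo requirement

type ComponentInitializer = (options: { component: HTMLElement }) => unknown;

export const componentRegistry: Record<string, ComponentInitializer> = {
  'my-component': initMyComponent,
};

// Initialize every element that declares a registered component.
document.querySelectorAll<HTMLElement>('[data-component]').forEach((el) => {
  const init = componentRegistry[el.dataset.component ?? ''];
  if (init) init({ component: el });
});
```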
### Current TypeScript Usage

- **Single TypeScript File**: `assets/js/influxdb-version-detector.ts`
- **Build Scripts**: `yarn build:ts` and `yarn build:ts:watch`
- **Output Directory**: `dist/` (gitignored)
- **Type Definitions**: Generated `.d.ts` files for all modules

## Development Guidelines

### TypeScript Standards

1. **Type Safety**

   ```typescript
   // Always define interfaces for component options
   interface ComponentOptions {
     component: HTMLElement;
     // Add specific component options
   }

   // Use strict typing for Hugo data
   interface HugoDataAttribute {
     products?: string;
     influxdbUrls?: string;
   }
   ```

2. **Component Architecture**

   ```typescript
   // Follow the established component pattern
   class MyComponent {
     private container: HTMLElement;

     constructor(options: ComponentOptions) {
       this.container = options.component;
       this.init();
     }

     private init(): void {
       // Component initialization
     }
   }

   // Export as component initializer
   export default function initMyComponent(options: ComponentOptions): MyComponent {
     return new MyComponent(options);
   }
   ```

3. **Hugo Data Integration**

   ```typescript
   // Parse Hugo data attributes safely
   private parseComponentData(): ParsedData {
     const rawData = this.container.getAttribute('data-products');
     if (rawData && rawData !== '#ZgotmplZ') {
       try {
         return JSON.parse(rawData);
       } catch (error) {
         console.warn('Failed to parse data:', error);
         return {};
       }
     }
     return {};
   }
   ```

### File Organization

- **TypeScript Files**: Place in `assets/js/` alongside JavaScript files
- **Type Definitions**: Auto-generated in `dist/` directory
- **Naming Convention**: Use same naming as JavaScript files, with `.ts` extension
- **Imports**: Use `.js` extensions even for TypeScript files (Hugo requirement)

### Integration with Existing System

1. **Component Registry**: Add TypeScript components to the registry in `main.js`
2. **HTML Integration**: Use `data-component` attributes to initialize components
3. **Global Namespace**: Expose components via `window.influxdatadocs` if needed
4. **Backwards Compatibility**: Ensure TypeScript components work with existing patterns

### Testing Requirements

1. **Cypress Tests**: Create e2e tests for TypeScript components (see the sketch after this list)
2. **Type Checking**: Run `tsc --noEmit` in CI pipeline
3. **ESLint**: Configure TypeScript-specific linting rules
4. **Manual Testing**: Test components in Hugo development server
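A minimal Cypress spec for a data-attribute component might look like this (a sketch; the spec path, page URL, and component name are hypothetical):

```js
// cypress/e2e/my-component.cy.js (hypothetical)
describe('my-component', () => {
  it('initializes on a page that declares it', () => {
    cy.visit('/influxdb3/core/');
    cy.get('[data-component="my-component"]').should('exist');
  });
});
```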
## Build and Development Workflow

### Development Commands

```bash
# Start TypeScript compilation in watch mode
yarn build:ts:watch

# Start Hugo development server
npx hugo server

# Run e2e tests
yarn test:e2e

# Run linting
yarn lint
```

### Component Development Process

1. **Create TypeScript Component**
   - Define interfaces for options and data
   - Implement component class with proper typing
   - Export initializer function

2. **Register Component**
   - Add to `componentRegistry` in `main.js`
   - Import with `.js` extension (Hugo requirement)

3. **HTML Implementation**
   - Add `data-component` attribute to trigger elements
   - Include necessary Hugo data attributes

4. **Testing**
   - Write Cypress tests for component functionality
   - Test Hugo data integration
   - Verify TypeScript compilation

### Common Patterns and Solutions

1. **Hugo Template Data**

   ```typescript
   // Handle Hugo's security measures for JSON data
   if (dataAttribute && dataAttribute !== '#ZgotmplZ') {
     // Safe to parse
   }
   ```

2. **DOM Type Safety**

   ```typescript
   // Use type assertions for DOM queries
   const element = this.container.querySelector('#input') as HTMLInputElement;
   ```

3. **Event Handling**

   ```typescript
   // Properly type event targets
   private handleClick = (e: Event): void => {
     const target = e.target as HTMLElement;
     // Handle event
   };
   ```

## Error Handling and Debugging

### Common Issues

1. **Module Resolution**: Use `.js` extensions in imports even for TypeScript files
2. **Hugo Data Attributes**: Handle `#ZgotmplZ` security placeholders
3. **Type Definitions**: Ensure proper typing for external libraries used in Hugo context
4. **Compilation Errors**: Check `tsconfig.json` settings for Hugo compatibility

### Debugging Tools

- **VS Code TypeScript**: Use built-in TypeScript language server
- **Hugo DevTools**: Browser debugging with source maps
- **Component Registry**: Access `window.influxdatadocs.componentRegistry` for debugging (example below)
- **TypeScript Compiler**: Use `tsc --noEmit --pretty` for detailed error reporting
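For example, from the browser DevTools console (assuming the global namespace described above is populated on the page):

```js
// List the registered component names.
console.log(Object.keys(window.influxdatadocs.componentRegistry));
```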
## Future Considerations

### Migration Strategy

1. **Gradual Migration**: Convert JavaScript modules to TypeScript incrementally
2. **Type Definitions**: Add type definitions for existing JavaScript modules
3. **Shared Interfaces**: Create common interfaces for Hugo data and component patterns
4. **Documentation**: Update component documentation with TypeScript examples

### Enhancement Opportunities

1. **Strict Type Checking**: Enable stricter TypeScript compiler options
2. **Advanced Types**: Use utility types for Hugo-specific patterns
3. **Build Optimization**: Optimize TypeScript compilation for Hugo builds
4. **Developer Experience**: Improve tooling and IDE support for Hugo + TypeScript development
@@ -1,103 +0,0 @@

```yaml
name: 'Report Broken Links'
description: 'Downloads broken link reports, generates PR comment, and posts results'

inputs:
  github-token:
    description: 'GitHub token for posting comments'
    required: false
    default: ${{ github.token }}
  max-links-per-file:
    description: 'Maximum links to show per file in comment'
    required: false
    default: '20'
  include-success-message:
    description: 'Include success message when no broken links found'
    required: false
    default: 'true'

outputs:
  has-broken-links:
    description: 'Whether broken links were found (true/false)'
    value: ${{ steps.generate-comment.outputs.has-broken-links }}
  broken-link-count:
    description: 'Number of broken links found'
    value: ${{ steps.generate-comment.outputs.broken-link-count }}

runs:
  using: 'composite'
  steps:
    - name: Download broken link reports
      uses: actions/download-artifact@v4
      with:
        path: reports
      continue-on-error: true

    - name: Generate PR comment
      id: generate-comment
      run: |
        # Generate comment using our script
        node .github/scripts/comment-generator.js \
          --max-links ${{ inputs.max-links-per-file }} \
          ${{ inputs.include-success-message == 'false' && '--no-success' || '' }} \
          --output-file comment.md \
          reports/ || echo "No reports found or errors occurred"

        # Check if comment file was created and has content
        if [[ -f comment.md && -s comment.md ]]; then
          echo "comment-generated=true" >> $GITHUB_OUTPUT

          # Count broken links by parsing the comment
          broken_count=$(grep -o "Found [0-9]* broken link" comment.md | grep -o "[0-9]*" || echo "0")
          echo "broken-link-count=$broken_count" >> $GITHUB_OUTPUT

          # Check if there are actually broken links (not just a success comment)
          if [[ "$broken_count" -gt 0 ]]; then
            echo "has-broken-links=true" >> $GITHUB_OUTPUT
          else
            echo "has-broken-links=false" >> $GITHUB_OUTPUT
          fi
        else
          echo "has-broken-links=false" >> $GITHUB_OUTPUT
          echo "broken-link-count=0" >> $GITHUB_OUTPUT
          echo "comment-generated=false" >> $GITHUB_OUTPUT
        fi
      shell: bash

    - name: Post PR comment
      if: steps.generate-comment.outputs.comment-generated == 'true'
      uses: actions/github-script@v7
      with:
        github-token: ${{ inputs.github-token }}
        script: |
          const fs = require('fs');

          if (fs.existsSync('comment.md')) {
            const comment = fs.readFileSync('comment.md', 'utf8');

            if (comment.trim()) {
              await github.rest.issues.createComment({
                issue_number: context.issue.number,
                owner: context.repo.owner,
                repo: context.repo.repo,
                body: comment
              });
            }
          }

    - name: Report validation results
      run: |
        has_broken_links="${{ steps.generate-comment.outputs.has-broken-links }}"
        broken_count="${{ steps.generate-comment.outputs.broken-link-count }}"

        if [ "$has_broken_links" = "true" ]; then
          echo "::error::❌ Link validation failed: Found $broken_count broken link(s)"
          echo "Check the PR comment for detailed broken link information"
          exit 1
        else
          echo "::notice::✅ Link validation passed successfully"
          echo "All links in the changed files are valid"
          if [ "${{ steps.generate-comment.outputs.comment-generated }}" = "true" ]; then
            echo "PR comment posted with validation summary and cache statistics"
          fi
        fi
      shell: bash
```
@@ -1,106 +0,0 @@

```yaml
name: 'Validate Links'
description: 'Runs e2e browser-based link validation tests against Hugo site using Cypress'

inputs:
  files:
    description: 'Space-separated list of files to validate'
    required: true
  product-name:
    description: 'Product name for reporting (optional)'
    required: false
    default: ''
  cache-enabled:
    description: 'Enable link validation caching'
    required: false
    default: 'true'
  cache-key:
    description: 'Cache key prefix for this validation run'
    required: false
    default: 'link-validation'
  timeout:
    description: 'Test timeout in seconds'
    required: false
    default: '900'

outputs:
  failed:
    description: 'Whether validation failed (true/false)'
    value: ${{ steps.validate.outputs.failed }}

runs:
  using: 'composite'
  steps:
    - name: Restore link validation cache
      if: inputs.cache-enabled == 'true'
      uses: actions/cache@v4
      with:
        path: .cache/link-validation
        key: ${{ inputs.cache-key }}-${{ runner.os }}-${{ hashFiles('content/**/*.md', 'content/**/*.html') }}
        restore-keys: |
          ${{ inputs.cache-key }}-${{ runner.os }}-
          ${{ inputs.cache-key }}-

    - name: Run link validation
      shell: bash
      run: |
        # Set CI-specific environment variables
        export CI=true
        export GITHUB_ACTIONS=true
        export NODE_OPTIONS="--max-old-space-size=4096"

        # Set test runner timeout for Hugo shutdown
        export HUGO_SHUTDOWN_TIMEOUT=5000

        # Add timeout to prevent hanging (timeout command syntax: timeout DURATION COMMAND)
        timeout ${{ inputs.timeout }}s node cypress/support/run-e2e-specs.js ${{ inputs.files }} \
          --spec cypress/e2e/content/article-links.cy.js || {
          exit_code=$?

          # Handle timeout specifically
          if [ $exit_code -eq 124 ]; then
            echo "::error::Link validation timed out after ${{ inputs.timeout }} seconds"
            echo "::notice::This may indicate Hugo server startup issues or very slow link validation"
          else
            echo "::error::Link validation failed with exit code $exit_code"
          fi

          # Check for specific error patterns and logs (but don't dump full content)
          if [ -f /tmp/hugo_server.log ]; then
            echo "Hugo server log available for debugging"
          fi

          if [ -f hugo.log ]; then
            echo "Additional Hugo log available for debugging"
          fi

          if [ -f /tmp/broken_links_report.json ]; then
            # Only show summary, not full report (full report is uploaded as artifact)
            broken_count=$(grep -o '"url":' /tmp/broken_links_report.json | wc -l || echo "0")
            echo "Broken links report contains $broken_count entries"
          fi

          exit $exit_code
        }

        # Report success if we get here
        echo "::notice::✅ Link validation completed successfully"
        echo "No broken links detected in the tested files"

    - name: Upload logs on failure
      if: failure()
      uses: actions/upload-artifact@v4
      with:
        name: validation-logs-${{ inputs.product-name && inputs.product-name || 'default' }}
        path: |
          hugo.log
          /tmp/hugo_server.log
        if-no-files-found: ignore

    - name: Upload broken links report
      if: always()
      uses: actions/upload-artifact@v4
      with:
        name: broken-links-report${{ inputs.product-name && format('-{0}', inputs.product-name) || '' }}
        path: /tmp/broken_links_report.json
        if-no-files-found: ignore
```
````diff
@@ -1,140 +1,57 @@
 # InfluxData Documentation Repository (docs-v2)
 
-Always follow these instructions first and fallback to additional search and context gathering only when the information provided here is incomplete or found to be in error.
+This is the primary instruction file for working with the InfluxData documentation site.
+For detailed information on specific topics, refer to the specialized instruction files in `.github/instructions/`.
+
+## Quick Reference
+
+| Task | Command | Time | Details |
+|------|---------|------|---------|
+| Install | `CYPRESS_INSTALL_BINARY=0 yarn install` | ~4s | Skip Cypress for CI |
+| Build | `npx hugo --quiet` | ~75s | NEVER CANCEL |
+| Dev Server | `npx hugo server` | ~92s | Port 1313 |
+| Test All | `yarn test:codeblocks:all` | 15-45m | NEVER CANCEL |
+| Lint | `yarn lint` | ~1m | Pre-commit checks |
 
 ## Working Effectively
 
-### Bootstrap, Build, and Test the Repository
+### Collaboration approach
 
-Execute these commands in order to set up a complete working environment:
+Be a critical thinking partner, provide honest feedback, and identify potential issues.
 
-1. **Install Node.js dependencies** (takes ~4 seconds):
-
-   ```bash
-   # Skip Cypress binary download due to network restrictions in CI environments
-   CYPRESS_INSTALL_BINARY=0 yarn install
-   ```
-
-2. **Build the static site** (takes ~75 seconds, NEVER CANCEL - set timeout to 180+ seconds):
-
-   ```bash
-   npx hugo --quiet
-   ```
-
-3. **Start the development server** (builds in ~92 seconds, NEVER CANCEL - set timeout to 150+ seconds):
-
-   ```bash
-   npx hugo server --bind 0.0.0.0 --port 1313
-   ```
-
-   - Access at: http://localhost:1313/
-   - Serves 5,359+ pages and 441 static files
-   - Auto-rebuilds on file changes
-
-4. **Alternative Docker development setup** (use if local Hugo fails):
-
-   ```bash
-   docker compose up local-dev
-   ```
-
-   **Note**: May fail in restricted network environments due to Alpine package manager issues.
+### Setup Steps
+
+1. Install dependencies (see Quick Reference table above)
+2. Build the static site
+3. Start development server at http://localhost:1313/
+4. Alternative: Use `docker compose up local-dev` if local setup fails
 
-### Testing (CRITICAL: NEVER CANCEL long-running tests)
+### Testing
 
-#### Code Block Testing (takes 5-15 minutes per product, NEVER CANCEL - set timeout to 30+ minutes):
+For comprehensive testing procedures, see **[TESTING.md](../TESTING.md)**.
 
-```bash
-# Build test environment first (takes ~30 seconds, may fail due to network restrictions)
-docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest .
-
-# Test all products (takes 15-45 minutes total)
-yarn test:codeblocks:all
-
-# Test specific products
-yarn test:codeblocks:cloud
-yarn test:codeblocks:v2
-yarn test:codeblocks:telegraf
-```
-
-#### Link Validation (takes 10-30 minutes, NEVER CANCEL - set timeout to 45+ minutes):
-
-```bash
-# Test all links (very long-running)
-yarn test:links
-
-# Test specific files/products (faster)
-yarn test:links content/influxdb3/core/**/*.md
-yarn test:links:v3
-yarn test:links:v2
-```
-
-#### Style Linting (takes 30-60 seconds):
-
-```bash
-# Basic Vale linting
-docker compose run -T vale content/**/*.md
-
-# Product-specific linting with custom configurations
-docker compose run -T vale --config=content/influxdb3/cloud-dedicated/.vale.ini --minAlertLevel=error content/influxdb3/cloud-dedicated/**/*.md
-```
-
-#### JavaScript and CSS Linting (takes 5-10 seconds):
-
-```bash
-yarn eslint assets/js/**/*.js
-yarn prettier --check "**/*.{css,js,ts,jsx,tsx}"
-```
-
-### Pre-commit Hooks (automatically run, can be skipped if needed):
-
-```bash
-# Run all pre-commit checks manually
-yarn lint
-
-# Skip pre-commit hooks if necessary (not recommended)
-git commit -m "message" --no-verify
-```
-
-## Validation Scenarios
-
-Always test these scenarios after making changes to ensure full functionality:
-
-### 1. Documentation Rendering Test
+**Quick reference** (NEVER CANCEL long-running tests):
+
+- **Code blocks**: `yarn test:codeblocks:all` (15-45 minutes)
+- **Links**: `yarn test:links` (1-5 minutes, requires link-checker binary)
+- **Style**: `docker compose run -T vale content/**/*.md` (30-60 seconds)
+- **Pre-commit**: `yarn lint` (or skip with `--no-verify`)
+
+### Validation
+
+Test these after changes:
 
 ```bash
-# Start Hugo server
-npx hugo server --bind 0.0.0.0 --port 1313
-
-# Verify key pages load correctly (200 status)
+# 1. Server renders pages (check 200 status)
 curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/influxdb3/core/
 curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/influxdb/v2/
 curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/telegraf/v1/
 
-# Verify content contains expected elements
-curl -s http://localhost:1313/influxdb3/core/ | grep -i "influxdb"
-```
-
-### 2. Build Output Validation
-
-```bash
-# Verify build completes successfully
-npx hugo --quiet
-
-# Check build output exists and has reasonable size (~529MB)
-ls -la public/
-du -sh public/
+# 2. Build outputs exist (~529MB)
+npx hugo --quiet && du -sh public/
 
-# Verify key files exist
-file public/index.html
-file public/influxdb3/core/index.html
-```
-
-### 3. Shortcode and Formatting Test
-
-```bash
-# Test shortcode examples page
+# 3. Shortcodes work
 yarn test:links content/example.md
 ```
 
-## Repository Structure and Key Locations
+## Repository Structure
 
 ### Content Organization
@@ -145,141 +62,60 @@ yarn test:links content/example.md
 - **Shared content**: `/content/shared/`
 - **Examples**: `/content/example.md` (comprehensive shortcode reference)
 
-### Configuration Files
+### Key Files
 
-- **Hugo config**: `/config/_default/`
-- **Package management**: `package.json`, `yarn.lock`
-- **Docker**: `compose.yaml`, `Dockerfile.pytest`
-- **Git hooks**: `lefthook.yml`
-- **Testing**: `cypress.config.js`, `pytest.ini` (in test directories)
-- **Linting**: `.vale.ini`, `.prettierrc.yaml`, `eslint.config.js`
-
-### Build and Development
-
-- **Hugo binary**: Available via `npx hugo` (version 0.148.2+)
-- **Static assets**: `/assets/` (JavaScript, CSS, images)
-- **Build output**: `/public/` (generated, ~529MB)
-- **Layouts**: `/layouts/` (Hugo templates)
-- **Data files**: `/data/` (YAML/JSON data for templates)
+- **Config**: `/config/_default/`, `package.json`, `compose.yaml`, `lefthook.yml`
+- **Testing**: `cypress.config.js`, `pytest.ini`, `.vale.ini`
+- **Assets**: `/assets/` (JS, CSS), `/layouts/` (templates), `/data/` (YAML/JSON)
+- **Build output**: `/public/` (~529MB, gitignored)
 
 ## Technology Stack
 
-- **Static Site Generator**: Hugo (0.148.2+ extended)
-- **Package Manager**: Yarn (1.22.22+) with Node.js (20.19.4+)
-- **Testing Framework**:
-  - Pytest with pytest-codeblocks (for code examples)
-  - Cypress (for link validation and E2E tests)
-  - Vale (for style and writing guidelines)
-- **Containerization**: Docker with Docker Compose
-- **Linting**: ESLint, Prettier, Vale
-- **Git Hooks**: Lefthook
+- **Hugo** (0.148.2+ extended) - Static site generator
+- **Node.js/Yarn** (20.19.4+/1.22.22+) - Package management
+- **Testing**: Pytest, Cypress, link-checker, Vale
+- **Tools**: Docker, ESLint, Prettier, Lefthook
 
-## Common Tasks and Build Times
+## Common Issues
 
-### Time Expectations (CRITICAL - NEVER CANCEL)
+### Network Restrictions
 
-- **Dependency installation**: 4 seconds
-- **Hugo static build**: 75 seconds (NEVER CANCEL - timeout: 180+ seconds)
-- **Hugo server startup**: 92 seconds (NEVER CANCEL - timeout: 150+ seconds)
-- **Code block tests**: 5-15 minutes per product (NEVER CANCEL - timeout: 30+ minutes)
-- **Link validation**: 10-30 minutes (NEVER CANCEL - timeout: 45+ minutes)
-- **Style linting**: 30-60 seconds
-- **Docker image build**: 30+ seconds (may fail due to network restrictions)
-
-### Network Connectivity Issues
-
-In restricted environments, these commands may fail due to external dependency downloads:
-
-- `docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest .` (InfluxData repositories, HashiCorp repos)
-- `docker compose up local-dev` (Alpine package manager)
-- Cypress binary installation (use `CYPRESS_INSTALL_BINARY=0`)
-
-Document these limitations but proceed with available functionality.
+Commands that may fail in restricted environments:
+
+- Docker builds (external repos)
+- `docker compose up local-dev` (Alpine packages)
+- Cypress installation (use `CYPRESS_INSTALL_BINARY=0`)
 
-### Validation Commands for CI
+### Pre-commit Validation
 
-Always run these before committing changes:
-
 ```bash
-# Format and lint code
+# Quick validation before commits
 yarn prettier --write "**/*.{css,js,ts,jsx,tsx}"
 yarn eslint assets/js/**/*.js
 
 # Test Hugo build
 npx hugo --quiet
 
 # Test development server startup
 timeout 150 npx hugo server --bind 0.0.0.0 --port 1313 &
 sleep 120
 curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/
 pkill hugo
 ```
 
-## Key Projects in This Codebase
+## Documentation Coverage
 
-1. **InfluxDB 3 Documentation** (Core, Enterprise, Clustered, Cloud Dedicated, Cloud Serverless, and InfluxDB 3 plugins for Core and Enterprise)
-2. **InfluxDB 3 Explorer** (UI)
-3. **InfluxDB v2 Documentation** (OSS and Cloud)
-4. **InfluxDB v1 Documentation** (OSS and Enterprise)
-5. **Telegraf Documentation** (agent and plugins)
-6. **Supporting Tools Documentation** (Kapacitor, Chronograf, Flux)
-7. **API Reference Documentation** (`/api-docs/`)
-8. **Shared Documentation Components** (`/content/shared/`)
+- **InfluxDB 3**: Core, Enterprise, Cloud (Dedicated/Serverless), Clustered, Explorer, plugins
+- **InfluxDB v2/v1**: OSS, Cloud, Enterprise
+- **Tools**: Telegraf, Kapacitor, Chronograf, Flux
+- **API Reference**: All InfluxDB editions
 
-## Important Locations for Frequent Tasks
+## Content Guidelines
 
-- **Shortcode reference**: `/content/example.md`
-- **Contributing guide**: `CONTRIBUTING.md`
-- **Testing guide**: `TESTING.md`
-- **Product configurations**: `/data/products.yml`
-- **Vale style rules**: `/.ci/vale/styles/`
-- **GitHub workflows**: `/.github/workflows/`
-- **Test scripts**: `/test/scripts/`
-- **Hugo layouts and shortcodes**: `/layouts/`
-- **CSS/JS assets**: `/assets/`
+- **Product versions**: `/data/products.yml`
+- **Query languages**: SQL, InfluxQL, Flux (per product version)
+- **Site**: https://docs.influxdata.com
 
-## Content Guidelines and Style
+### Writing Documentation
 
-### Documentation Structure
+For detailed guidelines, see:
+
+- **Frontmatter**: `.github/instructions/content.instructions.md`
+- **Shortcodes**: `.github/instructions/shortcodes-reference.instructions.md`
+- **Contributing**: `.github/instructions/contributing.instructions.md`
 
-- **Product version data**: `/data/products.yml`
-- **Query Languages**: SQL, InfluxQL, Flux (use appropriate language per product version)
-- **Documentation Site**: https://docs.influxdata.com
-- **Framework**: Hugo static site generator
+### Code Examples
 
-### Style Guidelines
-
-- Follow Google Developer Documentation style guidelines
-- Use semantic line feeds (one sentence per line)
-- Format code examples to fit within 80 characters
-- Use long options in command line examples (`--option` instead of `-o`)
-- Use GitHub callout syntax for notes and warnings
-- Image naming: `project/version-context-description.png`
-
-### Markdown and Shortcodes
-
-Include proper frontmatter for all content pages:
-
-```yaml
-title: # Page title (h1)
-seotitle: # SEO title
-description: # SEO description
-menu:
-  product_version:
-weight: # Page order (1-99, 101-199, etc.)
-```
-
-Key shortcodes (see `/content/example.md` for full reference):
-
-- Notes/warnings (GitHub syntax): `> [!Note]`, `> [!Warning]`
-- Tabbed content: `{{< tabs-wrapper >}}`, `{{% tabs %}}`, `{{% tab-content %}}`
-- Code examples: `{{< code-tabs-wrapper >}}`, `{{% code-tabs %}}`, `{{% code-tab-content %}}`
-- Required elements: `{{< req >}}`
-- API endpoints: `{{< api-endpoint >}}`
-
-### Code Examples and Testing
-
-Provide complete, working examples with pytest annotations:
+Use pytest annotations for testable examples:
 
 ```python
 print("Hello, world!")
@@ -291,21 +127,32 @@ print("Hello, world!")
 Hello, world!
 ```
 
-## Troubleshooting Common Issues
+## Troubleshooting
 
-1. **"Pytest collected 0 items"**: Use `python` (not `py`) for code block language identifiers
-2. **Hugo build errors**: Check `/config/_default/` for configuration issues
-3. **Docker build failures**: Expected in restricted networks - document and continue with local Hugo
-4. **Cypress installation failures**: Use `CYPRESS_INSTALL_BINARY=0 yarn install`
-5. **Link validation slow**: Use file-specific testing: `yarn test:links content/specific-file.md`
-6. **Vale linting errors**: Check `.ci/vale/styles/config/vocabularies` for accepted/rejected terms
+| Issue | Solution |
+|-------|----------|
+| Pytest collected 0 items | Use `python` not `py` for language identifier |
+| Hugo build errors | Check `/config/_default/` |
+| Docker build fails | Expected in restricted networks - use local Hugo |
+| Cypress install fails | Use `CYPRESS_INSTALL_BINARY=0 yarn install` |
+| Link validation slow | Test specific files: `yarn test:links content/file.md` |
+| Vale errors | Check `.ci/vale/styles/config/vocabularies` |
 
-## Additional Instruction Files
+## Specialized Instructions
 
-For specific workflows and content types, also refer to:
+For detailed information on specific topics:
 
-- **InfluxDB 3 code placeholders**: `.github/instructions/influxdb3-code-placeholders.instructions.md`
-- **Contributing guidelines**: `.github/instructions/contributing.instructions.md`
-- **Content-specific instructions**: Check `.github/instructions/` directory
+| Topic | File | Description |
+|-------|------|-------------|
+| **Content** | [content.instructions.md](instructions/content.instructions.md) | Frontmatter, metadata, page structure |
+| **Shortcodes** | [shortcodes-reference.instructions.md](instructions/shortcodes-reference.instructions.md) | All available Hugo shortcodes |
+| **Contributing** | [contributing.instructions.md](instructions/contributing.instructions.md) | Style guide, workflow, CLA |
+| **API Docs** | [api-docs.instructions.md](instructions/api-docs.instructions.md) | OpenAPI spec management |
+| **Testing** | [TESTING.md](../TESTING.md) | Comprehensive testing procedures |
+| **Assets** | [assets.instructions.md](instructions/assets.instructions.md) | JavaScript and CSS development |
 
-Remember: This is a large documentation site with complex build processes. Patience with build times is essential, and NEVER CANCEL long-running operations.
+## Important Notes
+
+- This is a large site (5,359+ pages) with complex build processes
+- **NEVER CANCEL** long-running operations (Hugo builds, tests)
+- Set appropriate timeouts: Hugo build (180s+), tests (30+ minutes)
````
@@ -0,0 +1,28 @@

---
applyTo: "api-docs/**/*.md, layouts/**/*.html"
---

# InfluxDB API documentation

To edit the API reference documentation, edit the YAML files in `/api-docs`.

InfluxData uses [Redoc](https://github.com/Redocly/redoc) to generate the full
InfluxDB API documentation when documentation is deployed.
Redoc generates HTML documentation using the InfluxDB `swagger.yml`.
For more information about generating InfluxDB API documentation, see the
[API Documentation README](https://github.com/influxdata/docs-v2/tree/master/api-docs#readme).

## Generate API documentation locally

From the `api-docs` directory:

1. Install dependencies. To generate the API documentation locally, you need [Node.js](https://nodejs.org/en/) and [Yarn](https://yarnpkg.com/getting-started/install) installed.

   ```sh
   yarn install
   ```

2. Run the script to generate the API documentation.

   ```sh
   sh generate-api-docs.sh
   ```
@@ -0,0 +1,55 @@

---
applyTo: "assets/**/*.md, layouts/**/*.html"
---

## JavaScript in the documentation UI

The InfluxData documentation UI uses JavaScript with ES6+ syntax, with
`assets/js/main.js` as the entry point from which modules are imported.

1. In your HTML file, add a `data-component` attribute to the element that will
   encapsulate the UI feature and use the JavaScript module.

   ```html
   <div data-component="my-component"></div>
   ```

2. In `assets/js/main.js`, import your module and initialize it on the element.
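   For example (a sketch; the module path and function name are hypothetical):

   ```js
   // assets/js/main.js
   import { initMyComponent } from './my-component.js'; // keep the .js extension

   document
     .querySelectorAll('[data-component="my-component"]')
     .forEach((el) => initMyComponent({ component: el }));
   ```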
## Debugging helpers for JavaScript

In your JavaScript module, import the debug helpers from `assets/js/utils/debug-helpers.js`.

```js
import { debugLog, debugBreak, debugInspect } from './utils/debug-helpers.js';

const data = debugInspect(someData, 'Data');
debugLog('Processing data', 'myFunction');

function processData() {
  // Add a breakpoint that works with DevTools
  debugBreak();

  // Your existing code...
}
```

## Debugging with VS Code

1. Start Hugo in development mode. For example:

   ```bash
   yarn hugo server
   ```

2. In VS Code, go to Run > Start Debugging, and select the "Debug JS (debug-helpers)" configuration.

Your system uses the configuration in `launch.json` to launch the site in Chrome
and attach the debugger to the Developer Tools console.
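A `launch.json` entry consistent with that workflow might look like this (illustrative only; check the repository's actual configuration):

```json
{
  "type": "chrome",
  "request": "launch",
  "name": "Debug JS (debug-helpers)",
  "url": "http://localhost:1313",
  "webRoot": "${workspaceFolder}"
}
```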
Make sure to remove the debug statements before merging your changes.
The debug helpers are designed for development and should not be used in production.

_See full CONTRIBUTING.md for complete details._
````diff
@@ -2,11 +2,23 @@
 applyTo: "content/**/*.md, layouts/**/*.html"
 ---
 
-### Complete Frontmatter Reference
+## Frontmatter Requirements
 
-Every documentation page includes frontmatter which specifies information about the page.
+Documentation pages include frontmatter which specifies information about the page.
+Include proper frontmatter for pages in `/content/`, except `/content/shared/`.
+Frontmatter populates variables in page templates and the site's navigation menu.
+
+```yaml
+title: # Page title (h1)
+seotitle: # SEO title
+description: # SEO description
+menu:
+  product_version:
+weight: # Page order (1-99, 101-199, etc.)
+```
+
+### Complete Frontmatter Reference
 
 ```yaml
 title: # Title of the page used in the page's h1
 seotitle: # Page title used in the html <head> title and used in search engine results
````
````diff
@@ -196,3 +208,32 @@ When building shared content, use the `show-in` and `hide-in` shortcodes to show
 or hide blocks of content based on the current InfluxDB product/version.
 For more information, see [show-in](#show-in) and [hide-in](#hide-in).
 
+#### Links in shared content
+
+When creating links in shared content files, use `/influxdb3/version/` instead of the `{{% product-key %}}` shortcode.
+The keyword `version` gets replaced during the build process with the appropriate product version.
+
+**Use this in shared content:**
+
+```markdown
+[Configuration options](/influxdb3/version/reference/config-options/)
+[CLI serve command](/influxdb3/version/reference/cli/influxdb3/serve/)
+```
+
+**Not this:**
+
+```markdown
+[Configuration options](/influxdb3/{{% product-key %}}/reference/config-options/)
+[CLI serve command](/influxdb3/{{% product-key %}}/reference/cli/influxdb3/serve/)
+```
+
+#### Shortcodes in Markdown files
+
+For the complete shortcodes reference, see `/.github/instructions/shortcodes-reference.instructions.md`.
+
+### Style Guidelines
+
+- Follow Google Developer Documentation style guidelines
+- Use semantic line feeds (one sentence per line)
+- Format code examples to fit within 80 characters
+- Use long options in command line examples (`--option` instead of `-o`)
+- Use GitHub callout syntax for notes and warnings
+- Image naming: `project/version-context-description.png`
````
@ -55,7 +55,6 @@ For the linting and tests to run, you need to install:
|
|||
- **Docker**: For running Vale linter and code block tests
|
||||
- **VS Code extensions** (optional): For enhanced editing experience
|
||||
|
||||
|
||||
```sh
|
||||
git commit -m "<COMMIT_MESSAGE>" --no-verify
|
||||
```
|
||||
|
@ -82,7 +81,6 @@ _Some parts of the documentation, such as `./api-docs`, contain Markdown within
|
|||
|
||||
#### Semantic line feeds
|
||||
|
||||
|
||||
```diff
|
||||
-Data is taking off. This data is time series. You need a database that specializes in time series. You should check out InfluxDB.
|
||||
+Data is taking off. This data is time series. You need a database that specializes in time series. You need InfluxDB.
|
||||
|
@ -91,81 +89,20 @@ _Some parts of the documentation, such as `./api-docs`, contain Markdown within
|
|||
|
||||
### Essential Frontmatter Reference
|
||||
|
||||
|
||||
```yaml
|
||||
title: # Title of the page used in the page's h1
|
||||
description: # Page description displayed in search engine results
|
||||
# ... (see full CONTRIBUTING.md for complete example)
|
||||
```
|
||||
|
||||
|
||||
_See full CONTRIBUTING.md for complete details._
|
||||
|
||||
#### Notes and warnings
|
||||
|
||||
```md
|
||||
> [!Note]
|
||||
> Insert note markdown content here.
|
||||
|
||||
> [!Warning]
|
||||
> Insert warning markdown content here.
|
||||
|
||||
> [!Caution]
|
||||
> Insert caution markdown content here.
|
||||
|
||||
> [!Important]
|
||||
> Insert important markdown content here.
|
||||
|
||||
> [!Tip]
|
||||
> Insert tip markdown content here.
|
||||
```
|
||||
|
||||
#### Tabbed content
|
||||
|
||||
```md
|
||||
{{< tabs-wrapper >}}
|
||||
|
||||
{{% tabs %}}
|
||||
[Button text for tab 1](#)
|
||||
[Button text for tab 2](#)
|
||||
{{% /tabs %}}
|
||||
|
||||
{{% tab-content %}}
|
||||
Markdown content for tab 1.
|
||||
{{% /tab-content %}}
|
||||
|
||||
{{% tab-content %}}
|
||||
Markdown content for tab 2.
|
||||
{{% /tab-content %}}
|
||||
|
||||
{{< /tabs-wrapper >}}
|
||||
```
|
||||
|
||||
#### Required elements
|
||||
|
||||
```md
|
||||
{{< req >}}
|
||||
{{< req type="key" >}}
|
||||
|
||||
- {{< req "\*" >}} **This element is required**
|
||||
- {{< req "\*" >}} **This element is also required**
|
||||
- **This element is NOT required**
|
||||
```
|
||||
|
||||
For the complete shortcodes reference with all available shortcodes, see [Complete Shortcodes Reference](#complete-shortcodes-reference).
|
||||
|
||||
---
|
||||
See content.instructions.md for more details.
|
||||
|
||||
### InfluxDB API documentation
|
||||
|
||||
docs-v2 includes the InfluxDB API reference documentation in the `/api-docs` directory.
|
||||
To edit the API documentation, edit the YAML files in `/api-docs`.
|
||||
|
||||
InfluxData uses [Redoc](https://github.com/Redocly/redoc) to generate the full
|
||||
InfluxDB API documentation when documentation is deployed.
|
||||
Redoc generates HTML documentation using the InfluxDB `swagger.yml`.
|
||||
For more information about generating InfluxDB API documentation, see the
|
||||
[API Documentation README](https://github.com/influxdata/docs-v2/tree/master/api-docs#readme).
|
||||
See api-docs.instructions.md for more details.
|
||||
|
||||
---
|
||||
|
||||
|
@ -173,7 +110,7 @@ For more information about generating InfluxDB API documentation, see the
|
|||
|
||||
For comprehensive testing information, including code block testing, link validation, style linting, and advanced testing procedures, see **[TESTING.md](TESTING.md)**.
|
||||
|
||||
### Quick Testing Reference
|
||||
### Testing Code Blocks
|
||||
|
||||
```bash
|
||||
# Test code blocks
|
||||
|
@ -181,9 +118,6 @@ yarn test:codeblocks:all
|
|||
|
||||
# Test links
|
||||
yarn test:links content/influxdb3/core/**/*.md
|
||||
|
||||
# Run style linting
|
||||
docker compose run -T vale content/**/*.md
|
||||
```
|
||||
|
||||
Pre-commit hooks run automatically when you commit changes, testing your staged files with Vale, Prettier, Cypress, and Pytest. To skip hooks if needed:
|
||||
|
@ -215,16 +149,15 @@ chore(ci): update Vale configuration
|
|||
|
||||
## Reference Sections
|
||||
|
||||
|
||||
_See full CONTRIBUTING.md for complete details._
|
||||
|
||||
### Complete Frontmatter Reference
|
||||
|
||||
_For the complete Complete Frontmatter Reference reference, see frontmatter-reference.instructions.md._
|
||||
_For the complete Complete Frontmatter Reference reference, see content.instructions.md._
|
||||
|
||||
### Complete Shortcodes Reference
|
||||
|
||||
_For the complete Complete Shortcodes Reference reference, see shortcodes-reference.instructions.md._
|
||||
_For the complete Complete Shortcodes Reference reference, see content.instructions.md._
|
||||
|
||||
#### Vale style linting configuration
|
||||
|
||||
|
@ -236,52 +169,11 @@ docs-v2 includes Vale writing style linter configurations to enforce documentati
|
|||
docker compose run -T vale --config=content/influxdb/cloud-dedicated/.vale.ini --minAlertLevel=error content/influxdb/cloud-dedicated/write-data/**/*.md
|
||||
```
|
||||
|
||||
|
||||
- **Error**:
|
||||
- **Warning**: General style guide rules and best practices
|
||||
- **Suggestion**: Style preferences that may require refactoring or updates to an exceptions list
#### Configure style rules

_See full CONTRIBUTING.md for complete details._

#### JavaScript in the documentation UI

The InfluxData documentation UI uses JavaScript with ES6+ syntax and
`assets/js/main.js` as the entry point to import modules from

1. In your HTML file, add a `data-component` attribute to the element that

# ... (see full CONTRIBUTING.md for complete example)
```js
import { debugLog, debugBreak, debugInspect } from './utils/debug-helpers.js';

const data = debugInspect(someData, 'Data');
debugLog('Processing data', 'myFunction');

function processData() {
  // Add a breakpoint that works with DevTools
  debugBreak();

  // Your existing code...
}
```

3. Start Hugo in development mode--for example:

   ```bash
   yarn hugo server
   ```

4. In VS Code, go to Run > Start Debugging, and select the "Debug JS (debug-helpers)" configuration.

Your system uses the configuration in `launch.json` to launch the site in Chrome
and attach the debugger to the Developer Tools console.

Make sure to remove the debug statements before merging your changes.
The debug helpers are designed to be used in development and should not be used in production.
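One quick way to catch leftover debug calls before opening a PR (a sketch; adjust the path to match where your changes live):

```bash
# Find any remaining debug-helper calls in the JS assets
grep -rn "debugLog\|debugBreak\|debugInspect" assets/js --include="*.js"
```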
_See full CONTRIBUTING.md for complete details._
@ -1,89 +0,0 @@

---
mode: 'edit'
applyTo: "content/{influxdb3/core,influxdb3/enterprise,shared/influxdb3*}/**"
---

## Best Practices

- Use UPPERCASE for placeholders to make them easily identifiable
- Don't use pronouns in placeholders (e.g., "your", "this")
- List placeholders in the same order they appear in the code
- Provide clear descriptions including:
  - Expected data type or format
  - Purpose of the value
  - Any constraints or requirements
- Mark optional placeholders as "Optional:" in their descriptions
- Placeholder key descriptions should fit the context of the code snippet
- Include examples for complex formats

## Writing Placeholder Descriptions

Descriptions should follow consistent patterns:

1. **Admin Authentication tokens**:
   - Recommended: "a {{% token-link "admin" %}} for your {{< product-name >}} instance"
   - Avoid: "your token", "the token", "an authorization token"
2. **Database resource tokens**:
   - Recommended: "your {{% token-link "database" %}}"{{% show-in "enterprise" %}} with permissions on the specified database{{% /show-in %}}
   - Avoid: "your token", "the token", "an authorization token"
3. **Database names**:
   - Recommended: "the name of the database to [action]"
   - Avoid: "your database", "the database name"
4. **Conditional content**:
   - Use `{{% show-in "enterprise" %}}` for content specific to enterprise versions
   - Example: "your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}}"

## Common placeholders for InfluxDB 3

- `AUTH_TOKEN`: your {{% token-link %}}
- `DATABASE_NAME`: the database to use
- `TABLE_NAME`: Name of the table/measurement to query or write to
- `NODE_ID`: Node ID for a specific node in a cluster
- `CLUSTER_ID`: Cluster ID for a specific cluster
- `HOST`: InfluxDB server hostname or URL
- `PORT`: InfluxDB server port (typically 8181)
- `QUERY`: SQL or InfluxQL query string
- `LINE_PROTOCOL`: Line protocol data for writes
- `PLUGIN_FILENAME`: Name of plugin file to use
- `CACHE_NAME`: Name for a new or existing cache
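A minimal sketch of how several of these placeholders combine in one command (the flags are assumptions based on the CLI examples later in this file):

```bash
# Sketch: common placeholders in a single influxdb3 CLI call
influxdb3 query \
  --host http://HOST:PORT \
  --database DATABASE_NAME \
  --token AUTH_TOKEN \
  "QUERY"
```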
## Hugo shortcodes in Markdown

- `{{% code-placeholders "PLACEHOLDER1|PLACEHOLDER2" %}}`: Use this shortcode to define placeholders in code snippets.
- `{{% /code-placeholders %}}`: End the shortcode.
- `{{% code-placeholder-key %}}`: Use this shortcode to define a specific placeholder key.
- `{{% /code-placeholder-key %}}`: End the specific placeholder key shortcode.

## Language-Specific Placeholder Formatting

- **Bash/Shell**: Use uppercase variables with no quotes or prefix
  ```bash
  --database DATABASE_NAME
  ```
- **Python**: Use string literals with quotes
  ```python
  database_name='DATABASE_NAME'
  ```
- **JSON**: Use key-value pairs with quotes
  ```json
  {
    "database": "DATABASE_NAME"
  }
  ```

## Real-World Examples from Documentation

### InfluxDB CLI Commands
This pattern appears frequently in CLI documentation:

{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
```bash
influxdb3 write \
  --database DATABASE_NAME \
  --token AUTH_TOKEN \
  --precision ns
```
{{% /code-placeholders %}}

Replace the following placeholders with your values:

{{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to write to
{{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}{{% show-in "enterprise" %}} with write permissions on the specified database{{% /show-in %}}
@ -4,19 +4,8 @@ applyTo: "content/**/*.md, layouts/**/*.html"

### Complete Shortcodes Reference

#### Notes and warnings

Shortcodes are available for formatting notes and warnings in each article:

```md
{{% note %}}
Insert note markdown content here.
{{% /note %}}

{{% warn %}}
Insert warning markdown content here.
{{% /warn %}}
```
influxdata/docs-v2 uses a variety of custom Hugo shortcodes to add functionality.
For more usage examples, see the shortcode test page at `/content/example.md`.
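To view that test page rendered, a quick sketch (port 1313 is the Hugo default; the `/example/` URL path is an assumption based on the file location):

```bash
# Serve the docs locally, then browse to http://localhost:1313/example/
yarn hugo server
```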
#### Product data
@ -1161,22 +1150,65 @@ Supported argument values:

{{< influxdb/host "serverless" >}}
```

##### User-populated placeholders
#### Placeholders in code samples

Use the `code-placeholders` shortcode to format placeholders
as text fields that users can populate with their own values.
The shortcode takes a regular expression for matching placeholder names.
Use the `code-placeholder-key` shortcode to format the placeholder names in
text that describes the placeholder--for example:
##### Best Practices

```markdown
{{% code-placeholders "DATABASE_NAME|USERNAME|PASSWORD_OR_TOKEN|API_TOKEN|exampleuser@influxdata.com" %}}
```sh
- Use UPPERCASE for placeholders to make them easily identifiable
- Don't use pronouns in placeholders (e.g., "your", "this")
- List placeholders in the same order they appear in the code
- Provide clear descriptions including:
  - Expected data type or format
  - Purpose of the value
  - Any constraints or requirements
- Mark optional placeholders as "Optional:" in their descriptions
- Placeholder key descriptions should fit the context of the code snippet
- Include examples for complex formats

##### Writing Placeholder Descriptions

Descriptions should follow consistent patterns:

1. **Admin Authentication tokens**:
   - Recommended: "a {{% token-link "admin" %}} for your {{< product-name >}} instance"
   - Avoid: "your token", "the token", "an authorization token"
2. **Database resource tokens**:
   - Recommended: "your {{% token-link "database" %}}"{{% show-in "enterprise" %}} with permissions on the specified database{{% /show-in %}}
   - Avoid: "your token", "the token", "an authorization token"
3. **Database names**:
   - Recommended: "the name of the database to [action]"
   - Avoid: "your database", "the database name"
4. **Conditional content**:
   - Use `{{% show-in "enterprise" %}}` for content specific to enterprise versions
   - Example: "your {{% token-link "database" %}}{{% show-in "enterprise" %}} with permission to query the specified database{{% /show-in %}}"

##### Common placeholders for InfluxDB 3

- `AUTH_TOKEN`: your {{% token-link %}}
- `DATABASE_NAME`: the database to use
- `TABLE_NAME`: Name of the table/measurement to query or write to
- `NODE_ID`: Node ID for a specific node in a cluster
- `CLUSTER_ID`: Cluster ID for a specific cluster
- `HOST`: InfluxDB server hostname or URL
- `PORT`: InfluxDB server port (typically 8181)
- `QUERY`: SQL or InfluxQL query string
- `LINE_PROTOCOL`: Line protocol data for writes
- `PLUGIN_FILENAME`: Name of plugin file to use
- `CACHE_NAME`: Name for a new or existing cache

##### Syntax

- `{ placeholders="PATTERN1|PATTERN2" }`: Use this code block attribute to define placeholder patterns
- `{{% code-placeholder-key %}}`: Use this shortcode to define a placeholder key
- `{{% /code-placeholder-key %}}`: Use this shortcode to close the key name

##### Example usage

```sh { placeholders="DATABASE_NAME|USERNAME|PASSWORD_OR_TOKEN|API_TOKEN|exampleuser@influxdata.com" }
curl --request POST http://localhost:8086/write?db=DATABASE_NAME \
  --header "Authorization: Token API_TOKEN" \
  --data-binary @path/to/line-protocol.txt
```
{{% /code-placeholders %}}

Replace the following:
@ -1184,6 +1216,40 @@ Replace the following:

- {{% code-placeholder-key %}}`USERNAME`{{% /code-placeholder-key %}}: your [InfluxDB 1.x username](/influxdb/v2/reference/api/influxdb-1x/#manage-credentials)
- {{% code-placeholder-key %}}`PASSWORD_OR_TOKEN`{{% /code-placeholder-key %}}: your [InfluxDB 1.x password or InfluxDB API token](/influxdb/v2/reference/api/influxdb-1x/#manage-credentials)
- {{% code-placeholder-key %}}`API_TOKEN`{{% /code-placeholder-key %}}: your [InfluxDB API token](/influxdb/v2/admin/tokens/)

**Old (deprecated) syntax**:

Replace the following syntax with the new `placeholders` syntax shown above.

- `{{% code-placeholders "PLACEHOLDER1|PLACEHOLDER2" %}}`
- `{{% /code-placeholders %}}`

## Notes and warnings

```md
> [!Note]
> Insert note markdown content here.

> [!Warning]
> Insert warning markdown content here.

> [!Caution]
> Insert caution markdown content here.

> [!Important]
> Insert important markdown content here.

> [!Tip]
> Insert tip markdown content here.
```

## Required elements

```md
{{< req >}}
{{< req type="key" >}}

- {{< req "\*" >}} **This element is required**
- {{< req "\*" >}} **This element is also required**
- **This element is NOT required**
```
@ -0,0 +1,55 @@

## InfluxDB v1 Release Documentation

**Release Version:** v1.x.x
**Release Type:** [ ] OSS [ ] Enterprise [ ] Both

### Description
Brief description of the release and documentation changes.

### Release Documentation Checklist

#### Release Notes
- [ ] Generate release notes from changelog
  - [ ] OSS: Use commit messages from GitHub release tag `https://github.com/influxdata/influxdb/releases/tag/v1.x.x`
  - [ ] Enterprise: Use `https://dl.influxdata.com/enterprise/nightlies/master/CHANGELOG.md`
  - [ ] **Note**: For Enterprise releases, include important updates, features, and fixes from the corresponding OSS tag
- [ ] Update release notes in appropriate location
  - [ ] OSS: `/content/influxdb/v1/about_the_project/releasenotes-changelog.md`
  - [ ] Enterprise: `/content/enterprise_influxdb/v1/about-the-project/release-notes.md`
- [ ] Ensure release notes follow documentation formatting standards

#### Version Updates
- [ ] Update patch version in `/data/products.yml`
  - [ ] OSS: `influxdb > v1 > latest`
  - [ ] Enterprise: `enterprise_influxdb > v1 > latest`
- [ ] Update version references in documentation
  - [ ] Installation guides
  - [ ] Docker documentation
  - [ ] Download links
  - [ ] Code examples with version-specific commands

#### Content Verification
- [ ] Review breaking changes and update migration guides if needed
- [ ] Update compatibility matrices if applicable
- [ ] Verify all download links work correctly
- [ ] Check that version-specific features are documented

#### Testing
- [ ] Build documentation locally and verify changes render correctly
- [ ] Test all updated links
  - [ ] Run link validation: `yarn test:links content/influxdb/v1/**/*.md`
  - [ ] Run link validation: `yarn test:links content/enterprise_influxdb/v1/**/*.md`

### Related Resources
- DAR Issue: #
- OSS Release: https://github.com/influxdata/influxdb/releases/tag/v1.x.x
- Enterprise Changelog: https://dl.influxdata.com/enterprise/nightlies/master/CHANGELOG.md
- Slack Discussion: [Link to #releases thread]

### Post-Merge Actions
- [ ] Verify documentation is deployed to production
- [ ] Announce in #docs channel
- [ ] Close related DAR issue(s)

---
**Note:** For Enterprise releases, ensure you have access to the Enterprise changelog and coordinate with the release team for timing.
@ -0,0 +1,241 @@

name: Link Check PR Changes

on:
  pull_request:
    paths:
      - 'content/**/*.md'
      - 'data/**/*.yml'
      - 'layouts/**/*.html'
    types: [opened, synchronize, reopened]

jobs:
  link-check:
    name: Check links in affected files
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Detect content changes
        id: detect
        run: |
          echo "🔍 Detecting changes between ${{ github.base_ref }} and ${{ github.sha }}"

          # For PRs, use the GitHub Files API to get changed files
          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
            echo "Using GitHub API to detect PR changes..."
            curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
              "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.number }}/files" \
              | jq -r '.[].filename' > all_changed_files.txt
          else
            echo "Using git diff to detect changes..."
            git diff --name-only ${{ github.event.before }}..${{ github.sha }} > all_changed_files.txt
          fi

          # Filter for content markdown files
          CHANGED_FILES=$(grep '^content/.*\.md$' all_changed_files.txt || true)

          echo "📁 All changed files:"
          cat all_changed_files.txt
          echo ""
          echo "📝 Content markdown files:"
          echo "$CHANGED_FILES"

          if [[ -n "$CHANGED_FILES" ]]; then
            echo "✅ Found $(echo "$CHANGED_FILES" | wc -l) changed content file(s)"
            echo "has-changes=true" >> $GITHUB_OUTPUT
            echo "changed-content<<EOF" >> $GITHUB_OUTPUT
            echo "$CHANGED_FILES" >> $GITHUB_OUTPUT
            echo "EOF" >> $GITHUB_OUTPUT

            # Check if any shared content files were modified
            SHARED_CHANGES=$(echo "$CHANGED_FILES" | grep '^content/shared/' || true)
            if [[ -n "$SHARED_CHANGES" ]]; then
              echo "has-shared-content=true" >> $GITHUB_OUTPUT
              echo "🔄 Detected shared content changes: $SHARED_CHANGES"
            else
              echo "has-shared-content=false" >> $GITHUB_OUTPUT
            fi
          else
            echo "❌ No content changes detected"
            echo "has-changes=false" >> $GITHUB_OUTPUT
            echo "has-shared-content=false" >> $GITHUB_OUTPUT
          fi

      - name: Skip if no content changes
        if: steps.detect.outputs.has-changes == 'false'
        run: |
          echo "No content changes detected in this PR - skipping link check"
          echo "✅ **No content changes detected** - link check skipped" >> $GITHUB_STEP_SUMMARY

      - name: Setup Node.js
        if: steps.detect.outputs.has-changes == 'true'
        uses: actions/setup-node@v4
        with:
          node-version: '20'
          cache: 'yarn'

      - name: Install dependencies
        if: steps.detect.outputs.has-changes == 'true'
        run: yarn install --frozen-lockfile

      - name: Build Hugo site
        if: steps.detect.outputs.has-changes == 'true'
        run: npx hugo --minify

      - name: Download link-checker binary
        if: steps.detect.outputs.has-changes == 'true'
        run: |
          echo "Downloading link-checker binary from docs-v2 releases..."

          # Download from docs-v2's own releases (always accessible)
          curl -L -H "Accept: application/vnd.github+json" \
            -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
            -o link-checker-info.json \
            "https://api.github.com/repos/influxdata/docs-v2/releases/tags/link-checker-v1.2.4"

          # Extract download URL for linux binary
          DOWNLOAD_URL=$(jq -r '.assets[] | select(.name | test("link-checker.*linux")) | .url' link-checker-info.json)

          if [[ "$DOWNLOAD_URL" == "null" || -z "$DOWNLOAD_URL" ]]; then
            echo "❌ No linux binary found in release"
            echo "Available assets:"
            jq -r '.assets[].name' link-checker-info.json
            exit 1
          fi

          echo "📥 Downloading: $DOWNLOAD_URL"
          curl -L -H "Accept: application/octet-stream" \
            -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
            -o link-checker "$DOWNLOAD_URL"

          chmod +x link-checker
          ./link-checker --version

      - name: Verify link checker config exists
        if: steps.detect.outputs.has-changes == 'true'
        run: |
          if [[ ! -f .ci/link-checker/production.lycherc.toml ]]; then
            echo "❌ Configuration file .ci/link-checker/production.lycherc.toml not found"
            echo "Please copy production.lycherc.toml from docs-tooling/link-checker/"
            exit 1
          fi
          echo "✅ Using configuration: .ci/link-checker/production.lycherc.toml"

      - name: Map changed content to public files
        if: steps.detect.outputs.has-changes == 'true'
        id: mapping
        run: |
          echo "Mapping changed content files to public HTML files..."

          # Create temporary file with changed content files
          echo "${{ steps.detect.outputs.changed-content }}" > changed-files.txt

          # Map content files to public files
          PUBLIC_FILES=$(cat changed-files.txt | xargs -r ./link-checker map --existing-only)

          if [[ -n "$PUBLIC_FILES" ]]; then
            echo "Found affected public files:"
            echo "$PUBLIC_FILES"
            echo "public-files<<EOF" >> $GITHUB_OUTPUT
            echo "$PUBLIC_FILES" >> $GITHUB_OUTPUT
            echo "EOF" >> $GITHUB_OUTPUT

            # Count files for summary
            FILE_COUNT=$(echo "$PUBLIC_FILES" | wc -l)
            echo "file-count=$FILE_COUNT" >> $GITHUB_OUTPUT
          else
            echo "No public files found to check"
            echo "public-files=" >> $GITHUB_OUTPUT
            echo "file-count=0" >> $GITHUB_OUTPUT
          fi

      - name: Run link checker
        if: steps.detect.outputs.has-changes == 'true' && steps.mapping.outputs.public-files != ''
        id: link-check
        run: |
          echo "Checking links in ${{ steps.mapping.outputs.file-count }} affected files..."

          # Create temporary file with public files list
          echo "${{ steps.mapping.outputs.public-files }}" > public-files.txt

          # Run link checker with detailed JSON output
          set +e  # Don't fail immediately on error

          cat public-files.txt | xargs -r ./link-checker check \
            --config .ci/link-checker/production.lycherc.toml \
            --format json \
            --output link-check-results.json

          EXIT_CODE=$?

          if [[ -f link-check-results.json ]]; then
            # Parse results
            BROKEN_COUNT=$(jq -r '.summary.broken_count // 0' link-check-results.json)
            TOTAL_COUNT=$(jq -r '.summary.total_checked // 0' link-check-results.json)
            SUCCESS_RATE=$(jq -r '.summary.success_rate // 0' link-check-results.json)

            echo "broken-count=$BROKEN_COUNT" >> $GITHUB_OUTPUT
            echo "total-count=$TOTAL_COUNT" >> $GITHUB_OUTPUT
            echo "success-rate=$SUCCESS_RATE" >> $GITHUB_OUTPUT

            if [[ $BROKEN_COUNT -gt 0 ]]; then
              echo "❌ Found $BROKEN_COUNT broken links out of $TOTAL_COUNT total links"
              echo "check-result=failed" >> $GITHUB_OUTPUT
            else
              echo "✅ All $TOTAL_COUNT links are valid"
              echo "check-result=passed" >> $GITHUB_OUTPUT
            fi
          else
            echo "❌ Link check failed to generate results"
            echo "check-result=error" >> $GITHUB_OUTPUT
          fi

          exit $EXIT_CODE

      - name: Process and report results
        if: always() && steps.detect.outputs.has-changes == 'true' && steps.mapping.outputs.public-files != ''
        run: |
          if [[ -f link-check-results.json ]]; then
            # Create detailed error annotations for broken links
            if [[ "${{ steps.link-check.outputs.check-result }}" == "failed" ]]; then
              echo "Creating error annotations for broken links..."

              jq -r '.broken_links[]? |
                "::error file=\(.file // "unknown"),line=\(.line // 1)::Broken link: \(.url) - \(.error // "Unknown error")"' \
                link-check-results.json || true
            fi

            # Generate summary comment
            cat >> $GITHUB_STEP_SUMMARY << 'EOF'
          ## Link Check Results

          **Files Checked:** ${{ steps.mapping.outputs.file-count }}
          **Total Links:** ${{ steps.link-check.outputs.total-count }}
          **Broken Links:** ${{ steps.link-check.outputs.broken-count }}
          **Success Rate:** ${{ steps.link-check.outputs.success-rate }}%

          EOF

            if [[ "${{ steps.link-check.outputs.check-result }}" == "failed" ]]; then
              echo "❌ **Link check failed** - see annotations above for details" >> $GITHUB_STEP_SUMMARY
            else
              echo "✅ **All links are valid**" >> $GITHUB_STEP_SUMMARY
            fi
          else
            echo "⚠️ **Link check could not complete** - no results file generated" >> $GITHUB_STEP_SUMMARY
          fi

      - name: Upload detailed results
        if: always() && steps.detect.outputs.has-changes == 'true' && steps.mapping.outputs.public-files != ''
        uses: actions/upload-artifact@v4
        with:
          name: link-check-results
          path: |
            link-check-results.json
            changed-files.txt
            public-files.txt
          retention-days: 30
@ -1,148 +0,0 @@

# PR Link Validation Workflow
# Provides basic and parallel workflows
# with smart strategy selection based on change volume
name: PR Link Validation

on:
  pull_request:
    paths:
      - 'content/**/*.md'
      - 'content/**/*.html'
      - 'api-docs/**/*.yml'
      - 'assets/**/*.js'
      - 'layouts/**/*.html'

jobs:
  # TEMPORARILY DISABLED - Remove this condition to re-enable link validation
  disabled-check:
    if: false  # Set to true to re-enable the workflow
    runs-on: ubuntu-latest
    steps:
      - run: echo "Link validation is temporarily disabled"
  setup:
    name: Setup and Strategy Detection
    runs-on: ubuntu-latest
    if: false  # TEMPORARILY DISABLED - Remove this condition to re-enable
    outputs:
      strategy: ${{ steps.determine-strategy.outputs.strategy }}
      has-changes: ${{ steps.determine-strategy.outputs.has-changes }}
      matrix: ${{ steps.determine-strategy.outputs.matrix }}
      all-files: ${{ steps.changed-files.outputs.all_changed_files }}
      cache-hit-rate: ${{ steps.determine-strategy.outputs.cache-hit-rate }}
      cache-hits: ${{ steps.determine-strategy.outputs.cache-hits }}
      cache-misses: ${{ steps.determine-strategy.outputs.cache-misses }}
      original-file-count: ${{ steps.determine-strategy.outputs.original-file-count }}
      validation-file-count: ${{ steps.determine-strategy.outputs.validation-file-count }}
      cache-message: ${{ steps.determine-strategy.outputs.message }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Setup docs environment
        uses: ./.github/actions/setup-docs-env

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v41
        with:
          files: |
            content/**/*.md
            content/**/*.html
            api-docs/**/*.yml

      - name: Determine validation strategy
        id: determine-strategy
        run: |
          if [[ "${{ steps.changed-files.outputs.any_changed }}" != "true" ]]; then
            echo "No relevant files changed"
            echo "strategy=none" >> $GITHUB_OUTPUT
            echo "has-changes=false" >> $GITHUB_OUTPUT
            echo "matrix={\"include\":[]}" >> $GITHUB_OUTPUT
            echo "cache-hit-rate=100" >> $GITHUB_OUTPUT
            echo "cache-hits=0" >> $GITHUB_OUTPUT
            echo "cache-misses=0" >> $GITHUB_OUTPUT
            exit 0
          fi

          # Use our matrix generator with cache awareness
          files="${{ steps.changed-files.outputs.all_changed_files }}"

          echo "🔍 Analyzing ${files} for cache-aware validation..."

          # Generate matrix and capture outputs
          result=$(node .github/scripts/matrix-generator.js \
            --min-files-parallel 10 \
            --max-concurrent 5 \
            --output-format github \
            $files)

          # Parse all outputs from matrix generator
          while IFS='=' read -r key value; do
            case "$key" in
              strategy|has-changes|cache-hit-rate|cache-hits|cache-misses|original-file-count|validation-file-count|message)
                echo "$key=$value" >> $GITHUB_OUTPUT
                ;;
              matrix)
                echo "matrix=$value" >> $GITHUB_OUTPUT
                ;;
            esac
          done <<< "$result"

          # Extract values for logging
          strategy=$(echo "$result" | grep "^strategy=" | cut -d'=' -f2)
          cache_hit_rate=$(echo "$result" | grep "^cache-hit-rate=" | cut -d'=' -f2)
          cache_message=$(echo "$result" | grep "^message=" | cut -d'=' -f2-)

          echo "📊 Selected strategy: $strategy"
          if [[ -n "$cache_hit_rate" ]]; then
            echo "📈 Cache hit rate: ${cache_hit_rate}%"
          fi
          if [[ -n "$cache_message" ]]; then
            echo "$cache_message"
          fi

  validate:
    name: ${{ matrix.name }}
    needs: setup
    if: false  # TEMPORARILY DISABLED - Original condition: needs.setup.outputs.has-changes == 'true'
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix: ${{ fromJson(needs.setup.outputs.matrix) }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Setup docs environment
        uses: ./.github/actions/setup-docs-env

      - name: Validate links
        uses: ./.github/actions/validate-links
        with:
          files: ${{ matrix.files || needs.setup.outputs.all-files }}
          product-name: ${{ matrix.product }}
          cache-enabled: ${{ matrix.cacheEnabled || 'true' }}
          cache-key: link-validation-${{ hashFiles(matrix.files || needs.setup.outputs.all-files) }}
          timeout: 900

  report:
    name: Report Results
    needs: [setup, validate]
    if: false  # TEMPORARILY DISABLED - Original condition: always() && needs.setup.outputs.has-changes == 'true'
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup docs environment
        uses: ./.github/actions/setup-docs-env

      - name: Report broken links
        uses: ./.github/actions/report-broken-links
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          max-links-per-file: 20
@ -0,0 +1,68 @@

name: Sync Link Checker Binary from docs-tooling

on:
  workflow_dispatch:
    inputs:
      version:
        description: 'Link checker version to sync (e.g., v1.2.2)'
        required: true
        type: string

jobs:
  sync-binary:
    name: Sync link-checker binary from docs-tooling
    runs-on: ubuntu-latest

    steps:
      - name: Download binary from docs-tooling release
        run: |
          echo "Downloading link-checker ${{ inputs.version }} from docs-tooling..."

          # Download binary from docs-tooling release
          curl -L -H "Accept: application/octet-stream" \
            -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
            -o link-checker-linux-x86_64 \
            "https://github.com/influxdata/docs-tooling/releases/download/link-checker-${{ inputs.version }}/link-checker-linux-x86_64"

          # Download checksums
          curl -L -H "Accept: application/octet-stream" \
            -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
            -o checksums.txt \
            "https://github.com/influxdata/docs-tooling/releases/download/link-checker-${{ inputs.version }}/checksums.txt"

          # Verify downloads
          ls -la link-checker-linux-x86_64 checksums.txt

      - name: Create docs-v2 release
        run: |
          echo "Creating link-checker-${{ inputs.version }} release in docs-v2..."

          gh release create \
            --title "Link Checker Binary ${{ inputs.version }}" \
            --notes "Link validation tooling binary for docs-v2 GitHub Actions workflows.

          This binary is distributed from the docs-tooling repository release link-checker-${{ inputs.version }}.

          ### Usage in GitHub Actions

          The binary is automatically downloaded by docs-v2 workflows for link validation.

          ### Manual Usage

          \`\`\`bash
          # Download and make executable
          curl -L -o link-checker https://github.com/influxdata/docs-v2/releases/download/link-checker-${{ inputs.version }}/link-checker-linux-x86_64
          chmod +x link-checker

          # Verify installation
          ./link-checker --version
          \`\`\`

          ### Changes in ${{ inputs.version }}

          See the [docs-tooling release](https://github.com/influxdata/docs-tooling/releases/tag/link-checker-${{ inputs.version }}) for detailed changelog." \
            link-checker-${{ inputs.version }} \
            link-checker-linux-x86_64 \
            checksums.txt
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@ -3,11 +3,14 @@

public
.*.swp
node_modules
package-lock.json
.config*
**/.env*
*.log
/resources
.hugo_build.lock

# Content generation
/content/influxdb*/**/api/**/*.html
!api-docs/**/.config.yml
/api-docs/redoc-static.html*

@ -16,18 +19,22 @@ node_modules

!telegraf-build/templates
!telegraf-build/scripts
!telegraf-build/README.md

# CI/CD tool files
/cypress/downloads/*
/cypress/screenshots/*
/cypress/videos/*
.lycheecache
test-results.xml
/influxdb3cli-build-scripts/content
tmp

# IDE files
.vscode/*
!.vscode/launch.json
.idea
**/config.toml
package-lock.json
tmp

# Context files for LLMs and AI tools
# User context files for AI assistant tools
.context/*
!.context/README.md
@ -33,6 +33,9 @@ call_lefthook()

  then
    "$dir/node_modules/lefthook/bin/index.js" "$@"

  elif go tool lefthook -h >/dev/null 2>&1
  then
    go tool lefthook "$@"
  elif bundle exec lefthook -h >/dev/null 2>&1
  then
    bundle exec lefthook "$@"

@ -42,12 +45,21 @@ call_lefthook()

  elif pnpm lefthook -h >/dev/null 2>&1
  then
    pnpm lefthook "$@"
  elif swift package plugin lefthook >/dev/null 2>&1
  elif swift package lefthook >/dev/null 2>&1
  then
    swift package --disable-sandbox plugin lefthook "$@"
    swift package --build-path .build/lefthook --disable-sandbox lefthook "$@"
  elif command -v mint >/dev/null 2>&1
  then
    mint run csjones/lefthook-plugin "$@"
  elif uv run lefthook -h >/dev/null 2>&1
  then
    uv run lefthook "$@"
  elif mise exec -- lefthook -h >/dev/null 2>&1
  then
    mise exec -- lefthook "$@"
  elif devbox run lefthook -h >/dev/null 2>&1
  then
    devbox run lefthook "$@"
  else
    echo "Can't find lefthook in PATH"
  fi
@ -0,0 +1,20 @@

{
  "$schema": "https://raw.githubusercontent.com/modelcontextprotocol/modelcontextprotocol/refs/heads/main/schema/2025-06-18/schema.json",
  "description": "InfluxData documentation assistance via MCP server - Node.js execution",
  "mcpServers": {
    "influxdata": {
      "comment": "Use Node to run Docs MCP. To install and setup, see https://github.com/influxdata/docs-mcp-server",
      "type": "stdio",
      "command": "node",
      "args": [
        "${DOCS_MCP_SERVER_PATH}/dist/index.js"
      ],
      "env": {
        "DOCS_API_KEY_FILE": "${DOCS_API_KEY_FILE:-$HOME/.env.docs-kapa-api-key}",
        "DOCS_MODE": "external-only",
        "MCP_LOG_LEVEL": "${MCP_LOG_LEVEL:-info}",
        "NODE_ENV": "${NODE_ENV:-production}"
      }
    }
  }
}
@ -14,18 +14,8 @@

  },
  "vale.valeCLI.config": "${workspaceFolder}/.vale.ini",
  "vale.valeCLI.minAlertLevel": "warning",
  "github.copilot.chat.codeGeneration.useInstructionFiles": true,
  "github.copilot.chat.codeGeneration.instructions": [
    {
      "file": "${workspaceFolder}/.github/copilot-instructions.md",
    }
  ],
  "github.copilot.chat.pullRequestDescriptionGeneration.instructions": [
    {
      "file": "${workspaceFolder}/.github/copilot-instructions.md",
    }
  ],
  "cSpell.words": [
    "influxctl"
    "influxctl",
    "preconfigured"
  ]
}
CLAUDE.md
@ -24,15 +24,5 @@ formatting, and commonly used shortcodes.

See @TESTING.md for comprehensive testing information, including code block
testing, link validation, style linting, and advanced testing procedures.

See @.github/instructions/shortcodes-reference.instructions.md for detailed
information about shortcodes used in this project.

See @.github/instructions/frontmatter-reference.instructions.md for detailed
information about frontmatter used in this project.

See @.github/instructions/influxdb3-code-placeholders.instructions.md for using
placeholders in code samples and CLI commands.

See @api-docs/README.md for information about the API reference documentation, how to
generate it, and how to contribute to it.
TESTING.md
@ -121,96 +121,251 @@ Potential causes:

# This is ignored
```

## Link Validation Testing
## Link Validation with Link-Checker

Link validation uses Cypress for e2e browser-based testing against the Hugo site to ensure all internal and external links work correctly.
Link validation uses the `link-checker` tool to validate internal and external links in documentation files.

### Basic Usage

#### Installation

**Option 1: Build from source (macOS/local development)**

For local development on macOS, build the link-checker from source:

```bash
# Test specific files
yarn test:links content/influxdb3/core/**/*.md
# Clone and build link-checker
git clone https://github.com/influxdata/docs-tooling.git
cd docs-tooling/link-checker
cargo build --release

# Test all links (may take a long time)
yarn test:links

# Test by product (may take a long time)
yarn test:links:v3
yarn test:links:v2
yarn test:links:telegraf
yarn test:links:chronograf
yarn test:links:kapacitor
# Copy binary to your PATH or use directly
cp target/release/link-checker /usr/local/bin/
# OR use directly: ./target/release/link-checker
```

### How Link Validation Works
**Option 2: Download pre-built binary (GitHub Actions/Linux)**

The tests:
1. Start a Hugo development server
2. Navigate to each page in a browser
3. Check all links for validity
4. Report broken or invalid links
The link-checker binary is distributed via docs-v2 releases for reliable access from GitHub Actions workflows:

```bash
# Download Linux binary from docs-v2 releases
curl -L -o link-checker \
  https://github.com/influxdata/docs-v2/releases/download/link-checker-v1.0.0/link-checker-linux-x86_64
chmod +x link-checker

# Verify installation
./link-checker --version
```

> [!Note]
> Pre-built binaries are currently Linux x86_64 only. For macOS development, use Option 1 to build from source.

```bash
# Clone and build link-checker
git clone https://github.com/influxdata/docs-tooling.git
cd docs-tooling/link-checker
cargo build --release

# Copy binary to your PATH or use directly
cp target/release/link-checker /usr/local/bin/
```

#### Binary Release Process

**For maintainers:** To create a new link-checker release in docs-v2:

1. **Create release in docs-tooling** (builds and releases binary automatically):

   ```bash
   cd docs-tooling
   git tag link-checker-v1.2.x
   git push origin link-checker-v1.2.x
   ```

2. **Manually distribute to docs-v2** (required due to private repository access):

   ```bash
   # Download binary from docs-tooling release
   curl -L -H "Authorization: Bearer $(gh auth token)" \
     -o link-checker-linux-x86_64 \
     "https://github.com/influxdata/docs-tooling/releases/download/link-checker-v1.2.x/link-checker-linux-x86_64"

   curl -L -H "Authorization: Bearer $(gh auth token)" \
     -o checksums.txt \
     "https://github.com/influxdata/docs-tooling/releases/download/link-checker-v1.2.x/checksums.txt"

   # Create docs-v2 release
   gh release create \
     --repo influxdata/docs-v2 \
     --title "Link Checker Binary v1.2.x" \
     --notes "Link validation tooling binary for docs-v2 GitHub Actions workflows." \
     link-checker-v1.2.x \
     link-checker-linux-x86_64 \
     checksums.txt
   ```

3. **Update workflow reference** (if needed):

   ```bash
   # Update .github/workflows/pr-link-check.yml line 98 to use new version
   sed -i 's/link-checker-v[0-9.]*/link-checker-v1.2.x/' .github/workflows/pr-link-check.yml
   ```

> [!Note]
> The manual distribution is required because docs-tooling is a private repository and the default GitHub token doesn't have cross-repository access for private repos.

#### Core Commands

```bash
# Map content files to public HTML files
link-checker map content/path/to/file.md

# Check links in HTML files
link-checker check public/path/to/file.html

# Generate configuration file
link-checker config
```

### Link Resolution Behavior

The link-checker automatically handles relative link resolution based on the input type:

**Local Files → Local Resolution**
```bash
# When checking local files, relative links resolve to the local filesystem
link-checker check public/influxdb3/core/admin/scale-cluster/index.html
# Relative link /influxdb3/clustered/tags/kubernetes/ becomes:
# → /path/to/public/influxdb3/clustered/tags/kubernetes/index.html
```

**URLs → Production Resolution**
```bash
# When checking URLs, relative links resolve to the production site
link-checker check https://docs.influxdata.com/influxdb3/core/admin/scale-cluster/
# Relative link /influxdb3/clustered/tags/kubernetes/ becomes:
# → https://docs.influxdata.com/influxdb3/clustered/tags/kubernetes/
```

**Why This Matters**
- **Testing new content**: Tag pages generated locally will be found when testing local files
- **Production validation**: Production URLs validate against the live site
- **No false positives**: New content won't appear broken when testing locally before deployment

### Content Mapping Workflows

#### Scenario 1: Map and check InfluxDB 3 Core content

```bash
# Map Markdown files to HTML
link-checker map content/influxdb3/core/get-started/

# Check links in mapped HTML files
link-checker check public/influxdb3/core/get-started/
```

#### Scenario 2: Map and check shared CLI content

```bash
# Map shared content files
link-checker map content/shared/influxdb3-cli/

# Check the mapped output files
# (link-checker map outputs the HTML file paths)
link-checker map content/shared/influxdb3-cli/ | \
  xargs link-checker check
```

#### Scenario 3: Direct HTML checking

```bash
# Check HTML files directly without mapping
link-checker check public/influxdb3/core/get-started/
```

#### Combined workflow for changed files

```bash
# Check only files changed in the last commit
git diff --name-only HEAD~1 HEAD | grep '\.md$' | \
  xargs link-checker map | \
  xargs link-checker check
```

### Configuration Options

#### Local usage (default configuration)

```bash
# Uses default settings or test.lycherc.toml if present
link-checker check public/influxdb3/core/get-started/
```

#### Production usage (GitHub Actions)

```bash
# Use production configuration with comprehensive exclusions
link-checker check \
  --config .ci/link-checker/production.lycherc.toml \
  public/influxdb3/core/get-started/
```

### GitHub Actions Integration

#### Composite Action
**Automated Integration (docs-v2)**

The `.github/actions/validate-links/` composite action provides reusable link validation:
The docs-v2 repository includes automated link checking for pull requests:

- **Trigger**: Runs automatically on PRs that modify content files
- **Binary distribution**: Downloads latest pre-built binary from docs-v2 releases
- **Smart detection**: Only checks files affected by PR changes
- **Production config**: Uses optimized settings with exclusions for GitHub, social media, etc.
- **Results reporting**: Broken links reported as GitHub annotations with detailed summaries

The workflow automatically:
1. Detects content changes in PRs using GitHub Files API
2. Downloads latest link-checker binary from docs-v2 releases
3. Builds Hugo site and maps changed content to public HTML files
4. Runs link checking with production configuration
5. Reports results with annotations and step summaries
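The same pipeline can be approximated locally; a rough sketch (assumes a Hugo build in `./public`, the `link-checker` binary in the repository root, and `master` as the base branch):

```bash
# Build the site, then map and check only Markdown files changed on this branch
npx hugo --minify
git diff --name-only origin/master...HEAD | grep '^content/.*\.md$' | \
  xargs -r ./link-checker map --existing-only | \
  xargs -r ./link-checker check --config .ci/link-checker/production.lycherc.toml
```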
**Manual Integration (other repositories)**

For other repositories, you can integrate link checking manually:

```yaml
- uses: ./.github/actions/validate-links
  with:
    files: "content/influxdb3/core/file.md content/influxdb/v2/file2.md"
    product-name: "core"
    cache-enabled: "true"
    cache-key: "link-validation"
name: Link Check
on:
  pull_request:
    paths:
      - 'content/**/*.md'

jobs:
  link-check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Download link-checker
        run: |
          curl -L -o link-checker \
            https://github.com/influxdata/docs-tooling/releases/latest/download/link-checker-linux-x86_64
          chmod +x link-checker
          cp target/release/link-checker ../../link-checker
          cd ../..

      - name: Build Hugo site
        run: |
          npm install
          npx hugo --minify

      - name: Check changed files
        run: |
          git diff --name-only origin/main HEAD | \
            grep '\.md$' | \
            xargs ./link-checker map | \
            xargs ./link-checker check \
              --config .ci/link-checker/production.lycherc.toml
```

#### Matrix Generator

The `.github/scripts/matrix-generator.js` script provides intelligent strategy selection:

- **Sequential validation**: For small changes (< 10 files) or single-product changes
- **Parallel validation**: For large changes across multiple products (up to 5 concurrent jobs)

Test locally:

```bash
node .github/scripts/matrix-generator.js content/influxdb3/core/file1.md content/influxdb/v2/file2.md
```

Configuration options:
- `--max-concurrent <n>`: Maximum parallel jobs (default: 5)
- `--force-sequential`: Force sequential execution
- `--min-files-parallel <n>`: Minimum files for parallel (default: 10)

### Caching for Link Validation

Link validation supports caching to improve performance:

- **Cache location**: `.cache/link-validation/` (local), GitHub Actions cache (CI)
- **Cache keys**: Based on content file hashes
- **TTL**: 30 days by default, configurable

#### Cache Configuration Options

```bash
# Use 7-day cache for more frequent validation
yarn test:links --cache-ttl=7 content/influxdb3/**/*.md

# Use 1-day cache via environment variable
LINK_CACHE_TTL_DAYS=1 yarn test:links content/**/*.md

# Clean up expired cache entries
node .github/scripts/incremental-validator.js --cleanup
```

#### How Caching Works

- **Cache key**: Based on file path + content hash (file changes invalidate cache immediately)
- **External links**: Cached for the TTL period since URLs rarely change
- **Internal links**: Effectively cached until file content changes
- **Automatic cleanup**: Expired entries are removed on access and via `--cleanup`

## Style Linting (Vale)

Style linting uses [Vale](https://vale.sh/) to enforce documentation writing standards, branding guidelines, and vocabulary consistency.
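A minimal local run (the same invocation shown in the quick testing reference earlier):

```bash
docker compose run -T vale content/**/*.md
```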
@ -0,0 +1,2 @@

# API reference documentation instructions
See @.github/instructions/api-docs.instructions.md for the complete API reference docs editing guidelines and instructions for generating pages locally.
@ -31,6 +31,9 @@

      padding: 0;
      margin: 0;
    }
    #kapa-widget-container {
      font-family: 'Proxima Nova', sans-serif;
    }
  </style>
  {{#unless disableGoogleFont}}<link
    href="https://fonts.googleapis.com/css?family=Roboto+Mono:500,500i,700,700i|Roboto:400,400i,700,700i|Rubik:400,400i,500,500i,700,700i"

@ -41,6 +44,7 @@

  <script>
    // Load Kapa AI widget after DOM content is loaded
    document.addEventListener('DOMContentLoaded', function() {
      const fontFamily = 'Proxima Nova, sans-serif';
      const askAI = document.createElement('script');
      askAI.type = 'text/javascript';
      askAI.async = true;

@ -53,8 +57,8 @@

      askAI.setAttribute('data-project-logo', '/img/influx-logo-cubo-white.png');
      askAI.setAttribute('data-modal-disclaimer', 'This AI can access [documentation for InfluxDB, clients, and related tools](https://docs.influxdata.com). Information you submit is used in accordance with our [Privacy Policy](https://www.influxdata.com/legal/privacy-policy/).');
      askAI.setAttribute('data-modal-example-questions', 'How do I write and query data with the {{title}}?, How do I use client libraries for the {{title}}?');
      askAI.setAttribute('data-button-height', '65px');
      askAI.setAttribute('data-button-width', '65px');
      askAI.setAttribute('data-button-height', '50px');
      askAI.setAttribute('data-button-width', '50px');
      if (window.matchMedia('(max-width: 800px)').matches) {
        // For mobile devices (smaller than 600px)
        askAI.setAttribute('data-button-position-bottom', '130px');

@ -62,25 +66,23 @@

        // For larger screens
        askAI.setAttribute('data-button-position-bottom', '20px');
      }
      askAI.setAttribute('data-button-text-font-family', fontFamily);
      askAI.setAttribute('data-button-text-font-size', '12.8px');
      askAI.setAttribute('data-button-text', 'Ask AI');
      askAI.setAttribute('data-conversation-button-icons-only', 'true');
      askAI.setAttribute('data-font-family', 'Proxima Nova, sans-serif');
      askAI.setAttribute('data-font-family', fontFamily);
      askAI.setAttribute('data-modal-example-questions-col-span', '8');
      askAI.setAttribute('data-modal-full-screen-on-mobile', 'true');
      askAI.setAttribute('data-modal-header-bg-color', '#d30971');
      askAI.setAttribute('data-modal-header-border-bottom', 'none');
      askAI.setAttribute('data-modal-header-padding', '.5rem');
      askAI.setAttribute('data-modal-header-text-color', '#ffffff');
      askAI.setAttribute('data-modal-x-offset', '0');
      askAI.setAttribute('data-modal-size', '640px');
      askAI.setAttribute('data-modal-y-offset', '0');
      askAI.setAttribute('data-modal-with-overlay', 'false');
      askAI.setAttribute('data-modal-inner-flex-direction', 'column');
      askAI.setAttribute('data-modal-inner-justify-content', 'end');
      askAI.setAttribute('data-modal-inner-max-width', '600px');
      askAI.setAttribute('data-modal-inner-position-left', 'auto');
      askAI.setAttribute('data-modal-inner-position-right', '50px');
      askAI.setAttribute('data-modal-inner-position-bottom', 'calc(2.5rem + 25px)');
      askAI.setAttribute('data-modal-size', '640px');
      askAI.setAttribute('data-modal-title-color', '#fff');
      askAI.setAttribute('data-modal-title-font-size', '1.25rem');
      askAI.setAttribute('data-modal-lock-scroll', 'false');
@ -0,0 +1,4 @@

## JavaScript, TypeScript, and CSS in the documentation UI

See @.github/instructions/assets.instructions.md for the complete JavaScript, TypeScript, and SASS (CSS) development guidelines.
@ -45,8 +45,6 @@ function initializeChat({ onChatLoad, chatAttributes }) {

    modalOverrideOpenClassAskAi: 'ask-ai-open',
    modalSize: '640px',
    modalWithOverlay: 'false',
    modalInnerMaxWidth: '800px',
    modalXOffset: 'calc(100% - 800px - .5rem)',
    modalYOffset: '10vh',
    userAnalyticsFingerprintEnabled: 'true',
    fontFamily: 'Proxima Nova, sans-serif',
@ -43,7 +43,7 @@ function getStartDate() {

////////////////////////////////////////////////////////////////////////////////

// If the user has not set the startDate cookie, default the startDate to yesterday
// If the user has not set the startDate cookie, default startDate to yesterday
var startDate = getStartDate() || yesterday();

// Convert a time value to a Unix timestamp (seconds)
@ -109,6 +109,49 @@ const defaultTimes = [

  }, // 1641067200
];

// Helper function to update text while preserving code placeholder elements
function updateTextNode(node, times) {
  if (node.nodeType === Node.TEXT_NODE) {
    let text = node.textContent;
    times.forEach(function (x) {
      const oldDatePart = datePart(x.rfc3339.replace(/T.*$/, ''));
      const newDatePart = datePart(x.rfc3339_new.replace(/T.*$/, ''));
      const rfc3339Regex = new RegExp(
        `${oldDatePart.year}(.*?)${oldDatePart.month}(.*?)${oldDatePart.day}`,
        'g'
      );
      const rfc3339Repl = `${newDatePart.year}$1${newDatePart.month}$2${newDatePart.day}`;

      text = text
        .replaceAll(x.unix, x.unix_new)
        .replace(rfc3339Regex, rfc3339Repl);
    });
    node.textContent = text;
  }
}

// Recursively update timestamps in DOM while preserving structure
function updateTimestampsInElement(element, times) {
  // Skip code placeholder elements to preserve their functionality
  if (element.classList && element.classList.contains('code-placeholder')) {
    return;
  }

  // Skip elements with data-component attribute (preserves all components)
  if (element.hasAttribute && element.hasAttribute('data-component')) {
    return;
  }

  const childNodes = Array.from(element.childNodes);
  childNodes.forEach((child) => {
    if (child.nodeType === Node.TEXT_NODE) {
      updateTextNode(child, times);
    } else if (child.nodeType === Node.ELEMENT_NODE) {
      updateTimestampsInElement(child, times);
    }
  });
}

function updateTimestamps(newStartDate, seedTimes = defaultTimes) {
  // Update the times array with replacement times
  const times = seedTimes.map((x) => {
@ -129,40 +172,14 @@ function updateTimestamps(newStartDate, seedTimes = defaultTimes) {

    '.custom-timestamps table',
  ];

  // Update block elements while preserving DOM structure
  $(updateBlockElWhitelist.join()).each(function () {
    var wrapper = $(this)[0];

    times.forEach(function (x) {
      const oldDatePart = datePart(x.rfc3339.replace(/T.*$/, ''));
      const newDatePart = datePart(x.rfc3339_new.replace(/T.*$/, ''));
      const rfc3339Regex = new RegExp(
        `${oldDatePart.year}(.*?)${oldDatePart.month}(.*?)${oldDatePart.day}`,
        'g'
      );
      const rfc3339Repl = `${newDatePart.year}$1${newDatePart.month}$2${newDatePart.day}`;

      wrapper.innerHTML = wrapper.innerHTML
        .replaceAll(x.unix, x.unix_new)
        .replaceAll(rfc3339Regex, rfc3339Repl);
    });
    updateTimestampsInElement(this, times);
  });

  // Update span elements
  $('span.custom-timestamps').each(function () {
    var wrapper = $(this)[0];

    times.forEach(function (x) {
      const oldDatePart = datePart(x.rfc3339.replace(/T.*$/, ''));
      const newDatePart = datePart(x.rfc3339_new.replace(/T.*$/, ''));
      const rfc3339Regex = new RegExp(
        `${oldDatePart.year}-${oldDatePart.month}-${oldDatePart.day}`,
        'g'
      );
      const rfc3339Repl = `${newDatePart.year}-${newDatePart.month}-${newDatePart.day}`;

      wrapper.innerHTML = wrapper.innerHTML
        .replaceAll(x.unix, x.unix_new)
        .replaceAll(rfc3339Regex, rfc3339Repl);
    });
    updateTimestampsInElement(this, times);
  });

  // Create a new seed times array with new start time for next change
@@ -196,10 +213,11 @@ function CustomTimeTrigger({ component }) {
    prevArrow: '<',
  });

  //////////////////////////////////// ACTIONS ///////////////////////////////////
  /////////////////////////////////// ACTIONS //////////////////////////////////

  // Initial update to yesterdays date ON PAGE LOAD
  // Conditionally set the start date cookie it startDate is equal to the default value
  // Initial update to yesterday's date ON PAGE LOAD
  // Conditionally set the start date cookie if startDate is equal to the
  // default value
  let updatedTimes = updateTimestamps(startDate, defaultTimes);

  if (startDate === yesterday()) {
@@ -6,6 +6,7 @@ const PROPS = {
  style_domain: 'docs.influxdata.com',
};

// Get the user's theme preference
function getPreferredTheme() {
  return `${getPreference(PROPS.style_preference_name)}-theme`;
}
compose.yaml
@@ -307,6 +307,7 @@ services:
  influxdb3-core:
    container_name: influxdb3-core
    image: influxdb:3-core
    pull_policy: always
    # Set variables (except your auth token) for Core in the .env.3core file.
    env_file:
      - .env.3core
@@ -316,17 +317,21 @@ services:
      - influxdb3
      - serve
      - --node-id=node0
      - --log-filter=debug
      - --object-store=file
      - --data-dir=/var/lib/influxdb3/data
      - --plugin-dir=/var/lib/influxdb3/plugins
      - --log-filter=debug
      - --verbose
    volumes:
      - type: bind
        source: test/.influxdb3/core/data
        target: /var/lib/influxdb3/data
      - type: bind
        source: test/.influxdb3/core/plugins
        source: test/.influxdb3/plugins/influxdata
        target: /var/lib/influxdb3/plugins
      - type: bind
        source: test/.influxdb3/core/plugins
        target: /var/lib/influxdb3/plugins/custom
    environment:
      - INFLUXDB3_AUTH_TOKEN=/run/secrets/influxdb3-core-admin-token
    secrets:
@@ -334,6 +339,7 @@ services:
  influxdb3-enterprise:
    container_name: influxdb3-enterprise
    image: influxdb:3-enterprise
    pull_policy: always
    # Set license email and other variables (except your auth token) for Enterprise in the .env.3ent file.
    env_file:
      - .env.3ent
@@ -344,20 +350,23 @@ services:
      - serve
      - --node-id=node0
      - --cluster-id=cluster0
      - --log-filter=debug
      - --object-store=file
      - --data-dir=/var/lib/influxdb3/data
      - --plugin-dir=/var/lib/influxdb3/plugins
      - --log-filter=debug
      - --verbose
    environment:
      - INFLUXDB3_ENTERPRISE_LICENSE_EMAIL=${INFLUXDB3_ENTERPRISE_LICENSE_EMAIL}
      - INFLUXDB3_AUTH_TOKEN=/run/secrets/influxdb3-enterprise-admin-token
    volumes:
      - type: bind
        source: test/.influxdb3/enterprise/data
        target: /var/lib/influxdb3/data
      - type: bind
        source: test/.influxdb3/enterprise/plugins
        source: test/.influxdb3/plugins/influxdata
        target: /var/lib/influxdb3/plugins
      - type: bind
        source: test/.influxdb3/enterprise/plugins
        target: /var/lib/influxdb3/plugins/custom
    secrets:
      - influxdb3-enterprise-admin-token
  telegraf-pytest:
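The bind mounts above expect their host-side source directories to exist before the services start. A minimal sketch of preparing them, assuming Compose runs from the repository root and using the `source:` paths shown above:

```bash
# Create the host directories referenced by the bind mounts above
# (paths copied from the compose.yaml sources; adjust if yours differ).
mkdir -p test/.influxdb3/core/data \
         test/.influxdb3/enterprise/data \
         test/.influxdb3/core/plugins \
         test/.influxdb3/enterprise/plugins \
         test/.influxdb3/plugins/influxdata

# Start both services defined above.
docker compose up -d influxdb3-core influxdb3-enterprise
```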
@@ -0,0 +1,2 @@
# Frontmatter and Content Instructions
See @.github/instructions/content.instructions.md for the complete frontmatter reference and content guidelines.
@@ -6,7 +6,7 @@ description: >
  monitoring data and easily create alerting and automation rules.
menu:
  chronograf_v1:
    name: Chronograf v1.10
    name: Chronograf
    weight: 1
---
@@ -11,12 +11,9 @@ alt_links:
  v1: /influxdb/v1/about_the_project/release-notes/
---

## v1.12.x {date="TBD"}
<span id="v1.12.x"></span>

> [!Important]
> #### Pre-release documentation
>
> This release is not yet available. [**v{{% latest-patch %}}**](#v1118) is the latest InfluxDB Enterprise v1 release.
## v1.12.2 {date="2025-09-15"}

> [!Important]
> #### Upgrade meta nodes first
@@ -24,61 +21,40 @@ alt_links:
> When upgrading to InfluxDB Enterprise 1.12.1+, upgrade meta nodes before
> upgrading data nodes.

## Features
### Features

- Add additional log output when using
  [`influx_inspect buildtsi`](/enterprise_influxdb/v1/tools/influx_inspect/#buildtsi) to
  rebuild the TSI index.
<!-- TODO: Uncomment with 1.12.x release:
- Use [`influx_inspect export`](/enterprise_influxdb/v1/tools/influx_inspect/#export) with
  [`-tsmfile` option](/enterprise_influxdb/v1/tools/influx_inspect/#--tsmfile-tsm_file-) to
  export a single TSM file.
-->
<!-- TODO: Remove with 1.12.x release: -->
- Use [`influx_inspect export`](/enterprise_influxdb/v1/tools/influx_inspect/#export) with
  `-tsmfile` option to
  export a single TSM file.
- Add `-m` flag to the [`influxd-ctl show-shards` command](/enterprise_influxdb/v1/tools/influxd-ctl/show-shards/)
  to output inconsistent shards.
- Allow the specification of a write window for retention policies.
- Add `fluxQueryRespBytes` metric to the `/debug/vars` metrics endpoint.
- Log whenever meta gossip times exceed expiration.
<!-- TODO: Uncomment with 1.12.x release:
- Add [`query-log-path` configuration option](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#query-log-path)
  to data nodes.
- Add [`aggressive-points-per-block` configuration option](/influxdb/v1/administration/config/#aggressive-points-per-block)
  to prevent TSM files from not getting fully compacted.
-->
<!-- TODO: Remove with 1.12.x release: -->
- Add `query-log-path` configuration option to data nodes.
- Add `aggressive-points-per-block` configuration option to prevent TSM files from not getting fully compacted.
- Log TLS configuration settings on startup.
- Check for TLS certificate and private key permissions.
- Add a warning if the TLS certificate is expired.
- Add authentication to the Raft portal and add the following related _data_
  node configuration options:
  <!-- Uncomment with 1.12.x release
  - [`[meta].raft-portal-auth-required`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#raft-portal-auth-required)
  - [`[meta].raft-dialer-auth-required`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#raft-dialer-auth-required)
  -->
  <!-- TODO: Remove with 1.12.x release: -->
  - `[meta].raft-portal-auth-required`
  - `[meta].raft-dialer-auth-required`
- Improve error handling.
- InfluxQL updates:
  - Delete series by retention policy.

<!-- TODO: Uncomment with 1.12.x release:
- Allow retention policies to discard writes that fall within their range, but
  outside of [`FUTURE LIMIT`](/enterprise_influxdb/v1/query_language/manage-database/#future-limit)
  and [`PAST LIMIT`](/enterprise_influxdb/v1/query_language/manage-database/#past-limit).
-->
<!-- TODO: Remove with 1.12.x release: -->
- Allow retention policies to discard writes that fall within their range, but
  outside of `FUTURE LIMIT` and `PAST LIMIT`.

## Bug fixes
### Bug fixes

- Fixed SSH key usage for cloning PCL/HT.
- Log rejected writes to subscriptions.
- Update `xxhash` and avoid `stringtoslicebyte` in the cache.
- Prevent a panic when a shard group has no shards.
@@ -89,7 +65,7 @@ alt_links:
- Update the `/shard-status` API to return the correct result and use a
  consistent "idleness" definition for shards.

## Other
### Other

- Update Go to 1.23.5.
- Upgrade Flux to v0.196.1.
@@ -230,26 +206,24 @@ alt_links:

## v1.11.3 {date="2023-10-12"}

{{% warn %}}
#### Series file compaction on startup

With InfluxDB Enterprise v1.11.3, on startup, InfluxDB runs the
`influxd_inspect -compact-series-file` command to [compact series files](/enterprise_influxdb/v1/tools/influx_inspect/#--compact-series-file-) before data nodes are started.
Series files are stored in `_series` directories inside the
[InfluxDB data directory](/enterprise_influxdb/v1/concepts/file-system-layout/#data-node-file-system-layout). Default: `/var/lib/data/<db-name>/_series`

- InfluxDB Enterprise v1.11.4+ introduces a configuration setting to optionally
  compact series on startup.
- If any series files are corrupt, the `influx_inspect` or `influxd` processes on
  the data node may fail to start. In both cases, delete the series file
  directories before restarting the database. InfluxDB will automatically
  regenerate the deleted series files when the database is restarted.
- To check if series files are corrupt before starting the database, run the
  [`influx_inspect verify-seriesfile` command](/enterprise_influxdb/v1/tools/influx_inspect/#verify-seriesfile)
  while the database is off-line.
- If series files are large (20+ gigabytes), it may also be faster to delete the
  series file directories before starting the database.
{{% /warn %}}
> [!Important]
> #### Series file compaction on startup
>
> With InfluxDB Enterprise v1.11.3, on startup, InfluxDB runs the
> `influxd_inspect -compact-series-file` command to [compact series files](/enterprise_influxdb/v1/tools/influx_inspect/#--compact-series-file-) before data nodes are started.
> Series files are stored in `_series` directories inside the
> [InfluxDB data directory](/enterprise_influxdb/v1/concepts/file-system-layout/#data-node-file-system-layout). Default: `/var/lib/data/<db-name>/_series`
>
> - InfluxDB Enterprise v1.11.4+ introduces a configuration setting to optionally
>   compact series on startup.
> - If any series files are corrupt, the `influx_inspect` or `influxd` processes on
>   the data node may fail to start. In both cases, delete the series file directories and [rebuild the indexes](/enterprise_influxdb/v1/administration/upgrading/#rebuild-tsi-indexes) before restarting the database. InfluxDB automatically
>   regenerates the deleted series files when the database restarts.
> - To check if series files are corrupt before starting the database, run the
>   [`influx_inspect verify-seriesfile` command](/enterprise_influxdb/v1/tools/influx_inspect/#verify-seriesfile)
>   while the database is off-line.
> - If series files are large (20+ gigabytes), it may be faster to delete the
>   series file directories before starting the database.

### Bug Fixes
@@ -1181,7 +1155,8 @@ Please see the [InfluxDB OSS release notes](/influxdb/v1/about_the_project/relea

## v1.5.0 {date="2018-03-06"}

> ***Note:*** This release builds off of the 1.5 release of InfluxDB OSS. Please see the [InfluxDB OSS release
> [!Note]
> This release builds off of the 1.5 release of InfluxDB OSS. Please see the [InfluxDB OSS release
> notes](/influxdb/v1/about_the_project/release-notes/) for more information about the InfluxDB OSS release.

For highlights of the InfluxDB 1.5 release, see [What's new in InfluxDB 1.5](/influxdb/v1/about_the_project/whats_new/).
@@ -1679,9 +1679,10 @@ max-version = "tls1.3"
Default is `"tls1.3"`.

Minimum version of the TLS protocol that will be negotiated.
Valid values include: `tls1.0`, `tls1.1`, and `tls1.3`.
Valid values include: `tls1.0`, `tls1.1`, `tls1.2`, and `tls1.3`.
If not specified, `min-version` is the minimum TLS version specified in the [Go `crypto/tls` package](https://golang.org/pkg/crypto/tls/#pkg-constants).
In this example, `tls1.3` specifies the minimum version as TLS 1.3.

In the preceding example, `min-version = "tls1.3"` specifies the minimum version as TLS 1.3.

Environment variable: `INFLUXDB_TLS_MIN_VERSION`
@@ -1690,9 +1691,10 @@ Environment variable: `INFLUXDB_TLS_MIN_VERSION`
Default is `"tls1.3"`.

The maximum version of the TLS protocol that will be negotiated.
Valid values include: `tls1.0`, `tls1.1`, and `tls1.3`.
Valid values include: `tls1.0`, `tls1.1`, `tls1.2`, and `tls1.3`.
If not specified, `max-version` is the maximum TLS version specified in the [Go `crypto/tls` package](https://golang.org/pkg/crypto/tls/#pkg-constants).
In this example, `tls1.3` specifies the maximum version as TLS 1.3.

In the preceding example, `max-version = "tls1.3"` specifies the maximum version as TLS 1.3.

Environment variable: `INFLUXDB_TLS_MAX_VERSION`
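For example, both settings can be supplied through the environment variables named above instead of the configuration file; a minimal sketch, assuming a shell session that then starts `influxd`:

```bash
# Equivalent to setting min-version and max-version in the [tls] section.
export INFLUXDB_TLS_MIN_VERSION=tls1.2
export INFLUXDB_TLS_MAX_VERSION=tls1.3

# Start the daemon with the usual config; environment variables
# override the corresponding file settings.
influxd -config /etc/influxdb/influxdb.conf
```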
@@ -40,11 +40,20 @@ cluster, and they use the
[`influxd-ctl` tool](/enterprise_influxdb/v1/tools/influxd-ctl/) available on
all meta nodes.

{{% warn %}}
Before you begin, stop writing historical data to InfluxDB.
Historical data have timestamps that occur at anytime in the past.
Performing a rebalance while writing historical data can lead to data loss.
{{% /warn %}}
> [!Warning]
> #### Stop writing data before rebalancing
>
> Before you begin, stop writing historical data to InfluxDB.
> Historical data have timestamps that occur at any time in the past.
> Performing a rebalance while writing historical data can lead to data loss.

> [!Caution]
> #### Risks of rebalancing with future data
>
> Truncating shards that contain data with future timestamps (such as forecast or prediction data)
> can lead to overlapping shards and data duplication.
> For more information, see [`truncate-shards` and future data](/enterprise_influxdb/v1/tools/influxd-ctl/truncate-shards/#understand-the-risks-with-future-data)
> or [contact InfluxData support](https://support.influxdata.com).

## Rebalance Procedure 1: Rebalance a cluster to create space
@@ -61,18 +70,23 @@ data node to expand the total disk capacity of the cluster.
In the next steps, you will safely move shards from one of the two original data
nodes to the new data node.

### Step 1: Truncate Hot Shards
### Step 1: Truncate hot shards

Hot shards are shards that are currently receiving writes.
Hot shards are shards that currently receive writes.
Performing any action on a hot shard can lead to data inconsistency within the
cluster which requires manual intervention from the user.

To prevent data inconsistency, truncate hot shards before moving any shards
> [!Caution]
> #### Risks of rebalancing with future data
>
> Truncating shards that contain data with future timestamps (such as forecast or prediction data)
> can lead to overlapping shards and data duplication.
> For more information, see [`truncate-shards` and future data](/enterprise_influxdb/v1/tools/influxd-ctl/truncate-shards/#understand-the-risks-with-future-data)
> or [contact InfluxData support](https://support.influxdata.com).

To prevent data inconsistency, truncate shards before moving any shards
across data nodes.
The command below creates a new hot shard which is automatically distributed
across all data nodes in the cluster, and the system writes all new points to
that shard.
All previous writes are now stored in cold shards.
The following command truncates all hot shards and creates new shards to write data to:

```
influxd-ctl truncate-shards
@@ -84,10 +98,11 @@ The expected output of this command is:
Truncated shards.
```

Once you truncate the shards, you can work on redistributing the cold shards
without the threat of data inconsistency in the cluster.
Any hot or new shards are now evenly distributed across the cluster and require
no further intervention.
New shards are automatically distributed across all data nodes, and InfluxDB writes new points to them.
Previous writes are stored in cold shards.

After truncating shards, you can redistribute cold shards without data inconsistency.
Hot and new shards are evenly distributed and require no further intervention.

### Step 2: Identify Cold Shards
@@ -292,18 +307,23 @@ name duration shardGroupDuration replicaN default
autogen 0s 1h0m0s 3 #👍 true
```

### Step 2: Truncate Hot Shards
### Step 2: Truncate hot shards

Hot shards are shards that are currently receiving writes.
Hot shards are shards that currently receive writes.
Performing any action on a hot shard can lead to data inconsistency within the
cluster which requires manual intervention from the user.

To prevent data inconsistency, truncate hot shards before copying any shards
> [!Caution]
> #### Risks of rebalancing with future data
>
> Truncating shards that contain data with future timestamps (such as forecast or prediction data)
> can lead to overlapping shards and data duplication.
> For more information, see [`truncate-shards` and future data](/enterprise_influxdb/v1/tools/influxd-ctl/truncate-shards/#understand-the-risks-with-future-data)
> or [contact InfluxData support](https://support.influxdata.com).

To prevent data inconsistency, truncate shards before copying any shards
to the new data node.
The command below creates a new hot shard which is automatically distributed
across the three data nodes in the cluster, and the system writes all new points
to that shard.
All previous writes are now stored in cold shards.
The following command truncates all hot shards and creates new shards to write data to:

```
influxd-ctl truncate-shards
@@ -315,10 +335,11 @@ The expected output of this command is:
Truncated shards.
```

Once you truncate the shards, you can work on distributing the cold shards
without the threat of data inconsistency in the cluster.
Any hot or new shards are now automatically distributed across the cluster and
require no further intervention.
New shards are automatically distributed across all data nodes, and InfluxDB writes new points to them.
Previous writes are stored in cold shards.

After truncating shards, you can redistribute cold shards without data inconsistency.
Hot and new shards are evenly distributed and require no further intervention.

### Step 3: Identify Cold Shards
@@ -16,6 +16,7 @@ We recommend the following design guidelines for most use cases:
- [Where to store data (tag or field)](#where-to-store-data-tag-or-field)
- [Avoid too many series](#avoid-too-many-series)
- [Use recommended naming conventions](#use-recommended-naming-conventions)
- [Writing data with future timestamps](#writing-data-with-future-timestamps)
- [Shard Group Duration Management](#shard-group-duration-management)

## Where to store data (tag or field)
@@ -209,6 +210,38 @@ from(bucket:"<database>/<retention_policy>")
> SELECT mean("temp") FROM "weather_sensor" WHERE region = 'north'
```

## Writing data with future timestamps

When designing schemas for applications that write data with future timestamps--such as forecast data from machine learning models, predictions, or scheduled events--consider the following implications for InfluxDB Enterprise v1 cluster operations and data integrity.

### Understanding future data behavior

InfluxDB Enterprise v1 creates shards based on time ranges.
When you write data with future timestamps, InfluxDB creates shards that cover future time periods.

> [!Caution]
> #### Risks of rebalancing with future data
>
> Truncating shards that contain data with future timestamps (such as forecast or prediction data)
> can lead to overlapping shards and data duplication.
> For more information, see [`truncate-shards` and future data](/enterprise_influxdb/v1/tools/influxd-ctl/truncate-shards/#understand-the-risks-with-future-data)
> or [contact InfluxData support](https://support.influxdata.com).

### Use separate databases for future data

When planning for data that contains future timestamps, consider isolating it in dedicated databases to:

- Minimize impact on real-time data operations
- Allow targeted maintenance operations on current vs. future data
- Simplify backup and recovery strategies for different data types

```sql
# Example: Separate databases for different data types
CREATE DATABASE "realtime_metrics"
CREATE DATABASE "ml_forecasts"
CREATE DATABASE "scheduled_predictions"
```
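Once the dedicated databases exist, forecast points can be written to their own database so maintenance on real-time data never touches them. A sketch using the v1 write API, assuming a local node, GNU `date`, and a hypothetical `forecast_temp` measurement:

```bash
# Write one forecast point with a timestamp 7 days in the future
# (seconds precision) to the dedicated forecast database.
curl -i -XPOST "http://localhost:8086/write?db=ml_forecasts&precision=s" \
  --data-binary "forecast_temp,region=north value=21.5 $(date -d '+7 days' +%s)"
```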
## Shard group duration management

### Shard group duration overview
@@ -29,7 +29,7 @@ Certain configurations (e.g., 3 meta and 2 data node) provide high-availability
while making certain tradeoffs in query performance when compared to a single node.

Further increasing the number of nodes can improve performance in both respects.
For example, a cluster with 4 data nodes and a [replication factor](https://docs.influxdata.com/enterprise_influxdb/v1/concepts/glossary/#replication-factor)
For example, a cluster with 4 data nodes and a [replication factor](/enterprise_influxdb/v1/concepts/glossary/#replication-factor)
of 2 can support a higher volume of write traffic than a single node could.
It can also support a higher *query* workload, as the data is replicated
in two locations. Performance of the queries may be on par with a single
@@ -217,7 +217,7 @@ The temperature was ${string(v: lastReported._value)}°F."

The following sample data set represents fictional temperature metrics collected
from three locations.
It's formatted in [annotated CSV](https://v2.docs.influxdata.com/v2.0/reference/syntax/annotated-csv/) and imported
It's formatted in [annotated CSV](/influxdb/v2/reference/syntax/annotated-csv/) and imported
into the Flux query using the [`csv.from()` function](/flux/v0/stdlib/csv/from/).

Place the following at the beginning of your query to use the sample data:
@@ -18,7 +18,7 @@ Review configuration and hardware guidelines for InfluxDB Enterprise:
* [Recommended cluster configurations](#recommended-cluster-configurations)
* [Storage: type, amount, and configuration](#storage-type-amount-and-configuration)

For InfluxDB OSS instances, see [OSS hardware sizing guidelines](https://docs.influxdata.com/influxdb/v1/guides/hardware_sizing/).
For InfluxDB OSS instances, see [OSS hardware sizing guidelines](/influxdb/v1/guides/hardware_sizing/).

> **Disclaimer:** Your numbers may vary from recommended guidelines. Guidelines provide estimated benchmarks for implementing the most performant system for your business.
@@ -17,6 +17,14 @@ The `influxd-ctl truncate-shards` command truncates all shards that are currently
being written to (also known as "hot" shards) and creates new shards to write
new data to.

> [!Caution]
> #### Overlapping shards with forecast and future data
>
> Running `truncate-shards` on shards containing future timestamps can create
> overlapping shards with duplicate data points.
>
> [Understand the risks with future data](#understand-the-risks-with-future-data).

## Usage

```sh
@@ -40,3 +48,34 @@ _Also see [`influxd-ctl` global flags](/enterprise_influxdb/v1/tools/influxd-ctl
```bash
influxd-ctl truncate-shards -delay 3m
```

## Understand the risks with future data

> [!Important]
> If you need to rebalance shards that contain future data, contact [InfluxData support](https://www.influxdata.com/contact/) for assistance.

When you write data points with timestamps in the future (for example, forecast data from machine learning models),
the `truncate-shards` command behaves differently and can cause data duplication issues.

### How truncate-shards normally works

For shards containing current data:

1. The command creates an artificial stop point in the shard at the truncation timestamp
2. Creates a new shard starting from the truncation point
3. Example: A one-week shard (Sunday to Saturday) becomes:
   - Shard A: Sunday to truncation point (Wednesday 2pm)
   - Shard B: Truncation point (Wednesday 2pm) to Saturday

This works correctly because the meta nodes understand the boundaries and route queries appropriately.

### The problem with future data

For shards containing future timestamps:

1. The truncation doesn't cleanly split the shard at a point in time
2. Instead, it creates overlapping shards that cover the same time period
3. Example: If you're writing September forecast data in August:
   - Original shard: September 1-7
   - After truncation:
     - Shard A: September 1-7 (with data up to truncation)
     - Shard B: September 1-7 (for new data after truncation)
   - **Result**: Duplicate data points for the same timestamps
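Before truncating, it can help to check whether any shards already extend into the future; a sketch using the `influxd-ctl show-shards` command:

```bash
# List shards with their time ranges; shards whose end time is in the
# future hold forecast or prediction data and are affected by this risk.
influxd-ctl show-shards
```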
@@ -103,7 +103,7 @@ If you exceed your plan's [adjustable quotas or limits](/influxdb/cloud/account-

If you exceed the series cardinality limit, InfluxDB adds a rate limit event warning on the **Usage** page, and begins to reject write requests with new series. To start processing write requests again, do the following as needed:

- **Series cardinality limits**: If you exceed the series cardinality limit, see how to [resolve high series cardinality](https://docs.influxdata.com/influxdb/v2/write-data/best-practices/resolve-high-cardinality/).
- **Series cardinality limits**: If you exceed the series cardinality limit, see how to [resolve high series cardinality](/influxdb/v2/write-data/best-practices/resolve-high-cardinality/).
- **Free plan**: To raise rate limits, [upgrade to a Usage-based Plan](#upgrade-to-usage-based-plan).

#### Write and query limits (HTTP response code)
@@ -16,4 +16,4 @@ source: /shared/influxdb-v2/write-data/replication/replicate-data.md
---

<!-- The content of this file is at
// SOURCE content/shared/influxdb-v2/write-data/replication/replicate-data.md-->
// SOURCE content/shared/influxdb-v2/write-data/replication/replicate-data.md -->
@@ -13,49 +13,28 @@ alt_links:
  enterprise_v1: /enterprise_influxdb/v1/about-the-project/release-notes/
---

## v1.12.x {date="TBD"}

> [!Important]
> #### Pre-release documentation
>
> This release is not yet available. [**v{{% latest-patch %}}**](#v1118) is the latest InfluxDB v1 release.
## v1.12.2 {date="2025-09-15"}

## Features
### Features

- Add additional log output when using
  [`influx_inspect buildtsi`](/influxdb/v1/tools/influx_inspect/#buildtsi) to
  rebuild the TSI index.
<!-- TODO: Uncomment with 1.12.x release:
- Use [`influx_inspect export`](/influxdb/v1/tools/influx_inspect/#export) with
  [`-tsmfile` option](/influxdb/v1/tools/influx_inspect/#--tsmfile-tsm_file-) to
  export a single TSM file.
-->
<!-- TODO: Remove with 1.12.x release: -->
- Use [`influx_inspect export`](/influxdb/v1/tools/influx_inspect/#export) with
  `-tsmfile` option to
  export a single TSM file.

- Add `fluxQueryRespBytes` metric to the `/debug/vars` metrics endpoint.
<!-- TODO: Uncomment with 1.12.x release:
- Add [`aggressive-points-per-block` configuration option](/influxdb/v1/administration/config/#aggressive-points-per-block)
  to prevent TSM files from not getting fully compacted.
-->
<!-- TODO: Remove with 1.12.x release: -->
- Add `aggressive-points-per-block` configuration option
  to prevent TSM files from not getting fully compacted.
- Improve error handling.
- InfluxQL updates:
  - Delete series by retention policy.
<!-- TODO: Uncomment with 1.12.x release:
- Allow retention policies to discard writes that fall within their range, but
  outside of [`FUTURE LIMIT`](/influxdb/v1/query_language/manage-database/#future-limit)
  and [`PAST LIMIT`](/influxdb/v1/query_language/manage-database/#past-limit).
-->
<!-- TODO: Remove with 1.12.x release: -->
- Allow retention policies to discard writes that fall within their range, but
  outside of `FUTURE LIMIT` and `PAST LIMIT`.

## Bug fixes
### Bug fixes

- Log rejected writes to subscriptions.
- Update `xxhash` and avoid `stringtoslicebyte` in the cache.
@@ -65,7 +44,7 @@ alt_links:
- Ensure temporary files are removed after failed compactions.
- Do not panic on invalid multiple subqueries.

## Other
### Other

- Update Go to 1.23.5.
- Upgrade Flux to v0.196.1.
@@ -6,22 +6,12 @@ menu:
    name: Upgrade InfluxDB
    weight: 25
    parent: Administration
related:
  - /enterprise_influxdb/v1/guides/migration/
  - /enterprise_influxdb/v1/administration/upgrading/
---

We recommend enabling Time Series Index (TSI) (step 3 of Upgrade to InfluxDB 1.11.x). [Switch between TSM and TSI](#switch-index-types) as needed. To learn more about TSI, see:

- [Time Series Index (TSI) overview](/influxdb/v1/concepts/time-series-index/)
- [Time Series Index (TSI) details](/influxdb/v1/concepts/tsi-details/)

> **_Note:_** The default configuration continues to use TSM-based shards with in-memory indexes (as in earlier versions).

{{% note %}}
### Upgrade to InfluxDB Enterprise

To upgrade from InfluxDB OSS to InfluxDB Enterprise, [contact InfluxData Sales](https://www.influxdata.com/contact-sales/)
and see [Migrate to InfluxDB Enterprise](/enterprise_influxdb/v1/guides/migration/).
{{% /note %}}
Upgrade to the latest version of InfluxDB OSS v1.

## Upgrade to InfluxDB 1.11.x
@@ -29,7 +19,27 @@ and see [Migrate to InfluxDB Enterprise](/enterprise_influxdb/v1/guides/migratio

2. Migrate configuration file customizations from your existing configuration file to the InfluxDB 1.11.x [configuration file](/influxdb/v1/administration/config/). Add or modify your environment variables as needed.

3. To enable TSI in InfluxDB 1.11.x, complete the following steps:
> [!Important]
> #### Choose your index type
> InfluxDB 1.11.x supports two index types:
>
> - **Time Series Index (TSI)** - Recommended for most users. Removes RAM-based limits on series cardinality and provides better performance for high-cardinality datasets.
> - **In-memory index (inmem)** - Default option that maintains compatibility with earlier versions; series cardinality is limited by available system RAM.
>
> **When to use TSI:**
> - General purpose production instances.
> - Especially recommended for:
>   - High-cardinality datasets (many unique tag combinations)
>   - High memory usage or out-of-memory errors
>   - Large production deployments
>
> **When to use inmem:**
> - Small datasets when memory is not a constraint
> - Ephemeral deployments such as development or testing environments
>
> To learn more about TSI, see [Time Series Index overview](/influxdb/v1/concepts/time-series-index/) and [TSI details](/influxdb/v1/concepts/tsi-details/).

3. **Optional:** To enable TSI in InfluxDB 1.11.x, complete the following steps:

   1. If using the InfluxDB configuration file, find the `[data]` section, uncomment `index-version = "inmem"` and change the value to `tsi1`.
@@ -43,26 +53,36 @@ and see [Migrate to InfluxDB Enterprise](/enterprise_influxdb/v1/guides/migratio
   ```

   4. Build TSI by running the [influx_inspect buildtsi](/influxdb/v1/tools/influx_inspect/#buildtsi) command.
      {{% note %}}
      Run the `buildtsi` command using the user account that you are going to run the database as, or ensure that the permissions match afterward.
      {{% /note %}}
      > [!Important]
      > Run the `buildtsi` command using the user account that you are going to run the database as, or ensure that the permissions match afterward.

4. Restart the `influxdb` service.

> [!Tip]
> #### Switch index types anytime
>
> The default configuration continues to use TSM-based shards with in-memory indexes (as in earlier versions). You can [switch between TSI and inmem index types](#switch-index-types) at any time.
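A sketch of the configuration edit, index build, and restart from the command line, assuming default Linux paths and that the database runs as the `influxdb` user (both assumptions; adjust for your system):

```bash
# Enable TSI in the [data] section of the configuration file.
sudo sed -i 's/^# *index-version = "inmem"/index-version = "tsi1"/' /etc/influxdb/influxdb.conf

# Build the TSI index as the user the database runs as (see the note above).
sudo -u influxdb influx_inspect buildtsi \
  -datadir /var/lib/influxdb/data \
  -waldir /var/lib/influxdb/wal

# Restart the service.
sudo systemctl restart influxdb
```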
## Switch index types

Switch index types at any time by doing one of the following:
You can switch between index types at any time after upgrading:

- To switch from to `inmem` to `tsi1`, complete steps 3 and 4 above in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x).
- To switch from to `tsi1` to `inmem`, change `tsi1` to `inmem` by completing steps 3a-3c and 4 above in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x).
**Switch from inmem to TSI:**
- Complete steps 3 and 4 in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x)
- Recommended when experiencing high memory usage or out-of-memory errors with high-cardinality data

**Switch from TSI to inmem:**
- Change `tsi1` to `inmem` by completing steps 3a-3c and 4 in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x)
- Suitable for small datasets where memory is not a constraint

## Downgrade InfluxDB

To downgrade to an earlier version, complete the procedures above in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x), replacing the version numbers with the version that you want to downgrade to.
After downloading the release, migrating your configuration settings, and enabling TSI or TSM, make sure to [rebuild your index](/influxdb/v1/administration/rebuild-tsi-index/).

>**Note:** Some versions of InfluxDB may have breaking changes that impact your ability to upgrade and downgrade. For example, you cannot downgrade from InfluxDB 1.3 or later to an earlier version. Please review the applicable version of release notes to check for compatibility issues between releases.
> [!Warning]
> Some versions of InfluxDB may have breaking changes that impact your ability to upgrade and downgrade. For example, you cannot downgrade from InfluxDB 1.3 or later to an earlier version. Please review the applicable version of release notes to check for compatibility issues between releases.

## Upgrade InfluxDB Enterprise clusters
## Upgrade to InfluxDB Enterprise

See [Upgrading InfluxDB Enterprise clusters](/enterprise_influxdb/v1/administration/upgrading/).
To upgrade from InfluxDB OSS to InfluxDB Enterprise, [contact InfluxData Sales](https://www.influxdata.com/contact-sales/).
@@ -232,7 +232,7 @@ The temperature was ${string(v: lastReported._value)}°F."

The following sample data set represents fictional temperature metrics collected
from three locations.
It's formatted in [annotated CSV](https://v2.docs.influxdata.com/v2.0/reference/syntax/annotated-csv/) and imported
It's formatted in [annotated CSV](/influxdb/v2/reference/syntax/annotated-csv/) and imported
into the Flux query using the [`csv.from()` function](/flux/v0/stdlib/csv/from/).

Place the following at the beginning of your query to use the sample data:
@@ -75,8 +75,8 @@ For Ubuntu/Debian users, add the InfluxData repository with the following comman
# Primary key fingerprint: 24C9 75CB A61A 024E E1B6 3178 7C3D 5715 9FC2 F927
# Subkey fingerprint: 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E
wget -q https://repos.influxdata.com/influxdata-archive.key
gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null
echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list
gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/keyrings/influxdata-archive.gpg > /dev/null
echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list
```
{{% /code-tab-content %}}
@@ -86,8 +86,8 @@ echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repo
# Primary key fingerprint: 24C9 75CB A61A 024E E1B6 3178 7C3D 5715 9FC2 F927
# Subkey fingerprint: 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E
curl --silent --location -O https://repos.influxdata.com/influxdata-archive.key
gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null
echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list
gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/keyrings/influxdata-archive.gpg > /dev/null
echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list
```
{{% /code-tab-content %}}
{{< /code-tabs-wrapper >}}
@@ -20,7 +20,7 @@ Responses use standard HTTP response codes and JSON format.
To send API requests, you can use
the [InfluxDB v1 client libraries](/influxdb/v1/tools/api_client_libraries/),
the [InfluxDB v2 client libraries](/influxdb/v1/tools/api_client_libraries/),
[Telegraf](https://docs.influxdata.com/telegraf/v1/),
[Telegraf](/telegraf/v1/),
or the client of your choice.

{{% note %}}
@@ -112,7 +112,7 @@ _If `gpg` isn't available on your system, see
The following steps guide you through using GPG to verify InfluxDB
binary releases:

1. [Choose the InfluxData key-pair for your OS version](#choose-the-influxdata-key-pair-for-your-system).
1. [Choose the InfluxData key-pair for your OS version](#choose-the-influxdata-key-pair-for-your-os-version).
2. Download and import the InfluxData public key.

   `gpg --import` outputs to stderr.
@@ -354,8 +354,8 @@ To install {{% product-name %}} on Linux, do one of the following:
     | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' \
     && cat influxdata-archive.key \
     | gpg --dearmor \
     | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null \
     && echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' \
     | sudo tee /etc/apt/keyrings/influxdata-archive.gpg > /dev/null \
     && echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' \
     | sudo tee /etc/apt/sources.list.d/influxdata.list
   # Install influxdb
   sudo apt-get update && sudo apt-get install influxdb2
@@ -376,7 +376,7 @@ To install {{% product-name %}} on Linux, do one of the following:
   cat <<EOF | tee /etc/yum.repos.d/influxdata.repo
   [influxdata]
   name = InfluxData Repository - Stable
   baseurl = https://repos.influxdata.com/stable/${basearch}/main
   baseurl = https://repos.influxdata.com/stable/\${basearch}/main
   enabled = 1
   gpgcheck = 1
   gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-influxdata
@@ -473,7 +473,7 @@ _If necessary, adjust the example file paths and utilities for your system._
   https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_linux_arm64.tar.gz
   ```

2. [Choose the InfluxData key-pair for your OS version](#choose-the-influxdata-key-pair-for-your-system).
2. [Choose the InfluxData key-pair for your OS version](#choose-the-influxdata-key-pair-for-your-os-version).

3. {{< req text="Recommended:" color="magenta" >}}: Verify the authenticity of the downloaded binary--for example,
   enter the following command in your terminal.
@@ -675,7 +675,7 @@ data isn't deleted if you delete the container._
   flags for initial setup options and file system mounts.

   _If you don't specify InfluxDB initial setup options, you can
   [set up manually](#set-up-influxdb) later using the UI or CLI in a running
   [set up manually](/influxdb/v2/get-started/setup/) later using the UI or CLI in a running
   container._

{{% code-placeholders "ADMIN_(USERNAME|PASSWORD)|ORG_NAME|BUCKET_NAME" %}}
@@ -731,7 +731,8 @@ and _[Operator token](/influxdb/v2/admin/tokens/#operator-token)_, and logs to s

You can view the Operator token in the `/etc/influxdb2/influx-configs` file and
use it to authorize
[creating an All Access token](#optional-create-all-access-tokens).
[creating an All Access token](#examples).
For more information, see [API token types](/influxdb/v2/admin/tokens/#api-token-types).

_To run the InfluxDB container in
[detached mode](https://docs.docker.com/engine/reference/run/#detached-vs-foreground),
@@ -761,6 +762,13 @@ docker exec -it <CONTAINER_NAME> <CLI_NAME> <COMMAND>`

<!--pytest.mark.skip-->

```bash
# Create an All Access token
docker exec -it influxdb2 influx auth create \
  --all-access \
  --token OPERATOR_TOKEN
```

```bash
# List CLI configurations
docker exec -it influxdb2 influx config ls
@@ -643,7 +643,7 @@ to migrate InfluxDB key-value metadata schemas to earlier 2.x versions when nece

#### Telegraf

- Add the following new [Telegraf plugins](https://docs.influxdata.com/telegraf/v1/plugins/) to the Load Data page:
- Add the following new [Telegraf plugins](/telegraf/v1/plugins/) to the Load Data page:
  - Alibaba (Aliyun) CloudMonitor Service Statistics (`aliyuncms`)
  - AMD ROCm System Management Interface (SMI) (`amd_rocm_smi`)
  - Counter-Strike: Global Offensive (CS:GO) (`csgo`)
@@ -328,7 +328,7 @@ following levels:
- **L3**: 4 L2 files compacted together

Parquet files store data partitioned by time and optionally tags
_(see [Manage data partition](https://docs.influxdata.com/influxdb3/cloud-dedicated/admin/custom-partitions/))_.
_(see [Manage data partition](/influxdb3/cloud-dedicated/admin/custom-partitions/))_.
After four L0 files accumulate for a partition, they're eligible for compaction.
If the compactor is keeping up with the incoming write load, all compaction
events have exactly four files.
@@ -0,0 +1,65 @@
---
title: Undelete a table
description: >
  Use the [`influxctl table undelete` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/table/undelete/)
  to restore a previously deleted table in your {{< product-name omit=" Cluster" >}} cluster.
menu:
  influxdb3_cloud_dedicated:
    parent: Manage tables
weight: 204
list_code_example: |
  ```bash { placeholders="DATABASE_NAME|TABLE_ID" }
  influxctl table undelete DATABASE_NAME TABLE_ID
  ```
related:
  - /influxdb3/cloud-dedicated/reference/cli/influxctl/table/undelete/
  - /influxdb3/cloud-dedicated/admin/tables/delete/
  - /influxdb3/cloud-dedicated/admin/tokens/table/create/
---

Use the [`influxctl table undelete` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/table/undelete/)
to restore a previously deleted table in your {{< product-name omit=" Cluster" >}} cluster.

> [!Important]
> To undelete a table:
>
> - A new table with the same name cannot already exist.
> - You must have appropriate permissions to manage databases.

When you undelete a table, it is restored with the same partition template and
other settings as when it was deleted.

> [!Warning]
> Tables can only be undeleted for
> {{% show-in "cloud-dedicated" %}}approximately 14 days{{% /show-in %}}{{% show-in "clustered" %}}a configurable "hard-delete" grace period{{% /show-in %}}
> after they are deleted.
> After this grace period, all Parquet files associated with the deleted table
> are permanently removed and the table cannot be undeleted.

## Undelete a table using the influxctl CLI

```bash { placeholders="DATABASE_NAME|TABLE_ID" }
influxctl table undelete DATABASE_NAME TABLE_ID
```

Replace the following:

- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
  Name of the database associated with the deleted table
- {{% code-placeholder-key %}}`TABLE_ID`{{% /code-placeholder-key %}}:
  ID of the deleted table to restore

> [!Tip]
> #### View deleted table IDs
>
> To view the IDs of deleted tables, use the `influxctl table list` command with
> the `--filter-status=deleted` flag--for example:
>
> <!--pytest.mark.skip-->
>
> ```bash {placeholders="DATABASE_NAME" }
> influxctl table list --filter-status=deleted DATABASE_NAME
> ```
>
> Replace {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}
> with the name of the database associated with the table you want to undelete.
@@ -9,7 +9,7 @@ menu:
  influxdb3_cloud_dedicated:
    name: Use Grafana
    parent: Visualize data
influxdb3/cloud-dedicated/tags: [Flight client, query, visualization]
influxdb3/cloud-dedicated/tags: [query, visualization, Grafana]
aliases:
  - /influxdb3/cloud-dedicated/query-data/tools/grafana/
  - /influxdb3/cloud-dedicated/query-data/sql/execute-queries/grafana/
@ -20,199 +20,7 @@ alt_links:
|
|||
cloud: /influxdb/cloud/tools/grafana/
|
||||
core: /influxdb3/core/visualize-data/grafana/
|
||||
enterprise: /influxdb3/enterprise/visualize-data/grafana/
|
||||
source: /content/shared/v3-process-data/visualize/grafana.md
|
||||
---
|
||||
|
||||
Use [Grafana](https://grafana.com/) to query and visualize data stored in
|
||||
{{% product-name %}}.
|
||||
|
||||
> [Grafana] enables you to query, visualize, alert on, and explore your metrics,
|
||||
> logs, and traces wherever they are stored.
|
||||
> [Grafana] provides you with tools to turn your time-series database (TSDB)
|
||||
> data into insightful graphs and visualizations.
|
||||
>
|
||||
> {{% cite %}}-- [Grafana documentation](https://grafana.com/docs/grafana/latest/introduction/){{% /cite %}}
|
||||
|
||||
<!-- TOC -->
|
||||
|
||||
- [Install Grafana or login to Grafana Cloud](#install-grafana-or-login-to-grafana-cloud)
|
||||
- [InfluxDB data source](#influxdb-data-source)
|
||||
- [Create an InfluxDB data source](#create-an-influxdb-data-source)
|
||||
- [Query InfluxDB with Grafana](#query-influxdb-with-grafana)
|
||||
- [Build visualizations with Grafana](#build-visualizations-with-grafana)
|
||||
|
||||
<!-- /TOC -->
|
||||
|
||||
## Install Grafana or login to Grafana Cloud
|
||||
|
||||
If using the open source version of **Grafana**, follow the
|
||||
[Grafana installation instructions](https://grafana.com/docs/grafana/latest/setup-grafana/installation/)
|
||||
to install Grafana for your operating system.
|
||||
If using **Grafana Cloud**, login to your Grafana Cloud instance.
|
||||
|
||||
## InfluxDB data source
|
||||
|
||||
The InfluxDB data source plugin is included in the Grafana core distribution.
|
||||
Use the plugin to query and visualize data stored in {{< product-name >}} with
|
||||
both InfluxQL and SQL.
|
||||
|
||||
> [!Note]
|
||||
> #### Grafana 10.3+
|
||||
>
|
||||
> The instructions below are for **Grafana 10.3+** which introduced the newest
|
||||
> version of the InfluxDB core plugin.
|
||||
> The updated plugin includes **SQL support** for InfluxDB 3-based products such
|
||||
> as {{< product-name >}}.
|
||||
|
||||
## Create an InfluxDB data source
|
||||
|
||||
1. In your Grafana user interface (UI), navigate to **Data Sources**.
|
||||
2. Click **Add new data source**.
|
||||
3. Search for and select the **InfluxDB** plugin.
|
||||
4. Provide a name for your data source.
|
||||
5. Under **Query Language**, select either **SQL** or **InfluxQL**:
|
||||
|
||||
{{< tabs-wrapper >}}
|
||||
{{% tabs %}}
|
||||
[SQL](#)
|
||||
[InfluxQL](#)
|
||||
{{% /tabs %}}
|
||||
{{% tab-content %}}
|
||||
<!--------------------------------- BEGIN SQL --------------------------------->
|
||||
|
||||
When creating an InfluxDB data source that uses SQL to query data:
|
||||
|
||||
1. Under **HTTP**:
|
||||
|
||||
- **URL**: Provide your {{% product-name omit=" Clustered" %}} cluster URL
|
||||
using the HTTPS protocol:
|
||||
|
||||
```
|
||||
https://{{< influxdb/host >}}
|
||||
```
|
||||
|
||||
2. Under **InfluxDB Details**:
|
||||
|
||||
- **Database**: Provide a default database name to query.
|
||||
- **Token**: Provide a [database token](/influxdb3/cloud-dedicated/admin/tokens/#database-tokens)
|
||||
with read access to the databases you want to query.
|
||||
|
||||
3. Click **Save & test**.
|
||||
|
||||
{{< img-hd src="/img/influxdb3/cloud-dedicated-grafana-influxdb-data-source-sql.png" alt="Grafana InfluxDB data source for InfluxDB Cloud Serverless that uses SQL" />}}
|
||||
|
||||
<!---------------------------------- END SQL ---------------------------------->
|
||||
{{% /tab-content %}}
|
||||
{{% tab-content %}}
|
||||
<!------------------------------- BEGIN INFLUXQL ------------------------------>
|
||||
|
||||
When creating an InfluxDB data source that uses InfluxQL to query data:
|
||||
|
||||
1. Under **HTTP**:
|
||||
|
||||
- **URL**: Provide your {{% product-name %}} cluster URL
|
||||
using the HTTPS protocol:
|
||||
|
||||
```
|
||||
https://{{< influxdb/host >}}
|
||||
```
|
||||
|
||||
2. Under **InfluxDB Details**:
|
||||
|
||||
- **Database**: Provide a default database name to query.
|
||||
- **User**: Provide an arbitrary string.
|
||||
_This credential is ignored when querying {{% product-name %}}, but it cannot be empty._
|
||||
- **Password**: Provide a [database token](/influxdb3/cloud-dedicated/admin/tokens/#database-tokens)
|
||||
with read access to the databases you want to query.
|
||||
- **HTTP Method**: Choose one of the available HTTP request methods to use when querying data:
|
||||
|
||||
- **POST** ({{< req text="Recommended" >}})
|
||||
- **GET**
|
||||
|
||||
3. Click **Save & test**.
|
||||
|
||||
{{< img-hd src="/img/influxdb3/cloud-dedicated-grafana-influxdb-data-source-influxql.png" alt="Grafana InfluxDB data source for InfluxDB Cloud Serverless using InfluxQL" />}}
|
||||
|
||||
<!-------------------------------- END INFLUXQL ------------------------------->
|
||||
{{% /tab-content %}}
|
||||
{{< /tabs-wrapper >}}

## Query InfluxDB with Grafana

After you [configure and save an InfluxDB data source](#create-an-influxdb-data-source),
use Grafana to build, run, and inspect queries against your InfluxDB database.

{{< tabs-wrapper >}}
{{% tabs %}}
[SQL](#)
[InfluxQL](#)
{{% /tabs %}}
{{% tab-content %}}
<!--------------------------------- BEGIN SQL --------------------------------->

> [!Note]
> {{% sql/sql-schema-intro %}}
> To learn more, see [Query Data](/influxdb3/cloud-dedicated/query-data/sql/).

1. Click **Explore**.
2. In the dropdown, select the saved InfluxDB data source to query.
3. Use the SQL query form to build your query:
   - **Table**: Select the measurement to query.
   - **Column**: Select one or more fields and tags to return as columns in query results.

     With SQL, select the `time` column to include timestamps with the data.
     Grafana relies on the `time` column to correctly graph time series data.

   - _**Optional:**_ Toggle **filter** to generate **WHERE** clause statements.
     - **WHERE**: Configure condition expressions to include in the `WHERE` clause.

   - _**Optional:**_ Toggle **group** to generate **GROUP BY** clause statements.

     - **GROUP BY**: Select columns to group by.
       If you include an aggregation function in the **SELECT** list,
       you must group by one or more of the queried columns.
       SQL returns the aggregation for each group.

   - {{< req text="Recommended" color="green" >}}:
     Toggle **order** to generate **ORDER BY** clause statements.

     - **ORDER BY**: Select columns to sort by.
       You can sort by time and multiple fields or tags.
       To sort in descending order, select **DESC**.

4. {{< req text="Recommended" color="green" >}}: Change format to **Time series**.
   - Use the **Format** dropdown to change the format of the query results.
     For example, to visualize the query results as a time series, select **Time series**.

5. Click **Run query** to execute the query.
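
As a rough cross-check, the following sketch runs a query like the one the form
builds, using the `influxctl` CLI instead of Grafana. The `home` measurement and
its `room` and `temp` columns are hypothetical; replace the placeholders with
your own values:

```bash
# Query the last hour of data, newest first (hypothetical schema).
influxctl query \
  --database DATABASE_NAME \
  --token DATABASE_TOKEN \
  "SELECT time, room, temp
   FROM home
   WHERE time >= now() - INTERVAL '1 hour'
   ORDER BY time DESC"
```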

<!---------------------------------- END SQL ---------------------------------->
{{% /tab-content %}}
{{% tab-content %}}
<!------------------------------- BEGIN INFLUXQL ------------------------------>

1. Click **Explore**.
2. In the dropdown, select the **InfluxDB** data source that you want to query.
3. Use the InfluxQL query form to build your query:
   - **FROM**: Select the measurement that you want to query.
   - **WHERE**: To filter the query results, enter a conditional expression.
   - **SELECT**: Select fields to query and an aggregate function to apply to each.
     The aggregate function is applied to each time interval defined in the
     `GROUP BY` clause.
   - **GROUP BY**: By default, Grafana groups data by time to downsample results
     and improve query performance.
     You can also add other tags to group by.
4. Click **Run query** to execute the query.
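
For reference, the selections above assemble into an InfluxQL statement roughly
like the one below (hypothetical measurement and field names), which you can
also test directly against the v1-compatible `/query` endpoint:

```bash
# Mean temperature per room in 10-minute windows over the last hour
# (hypothetical schema; replace the placeholder credentials).
curl --get "https://{{< influxdb/host >}}/query" \
  --user "grafana:DATABASE_TOKEN" \
  --data-urlencode "db=DATABASE_NAME" \
  --data-urlencode "q=SELECT mean(temp) FROM home WHERE time > now() - 1h GROUP BY time(10m), room"
```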

<!-------------------------------- END INFLUXQL ------------------------------->
{{% /tab-content %}}
{{< /tabs-wrapper >}}

{{< youtube "rSsouoNsNDs" >}}

To learn about query management and inspection in Grafana, see the
[Grafana Explore documentation](https://grafana.com/docs/grafana/latest/explore/).

## Build visualizations with Grafana

For a comprehensive walk-through of creating visualizations with
Grafana, see the [Grafana documentation](https://grafana.com/docs/grafana/latest/).
<!-- SOURCE: /content/shared/v3-process-data/visualize/grafana.md -->

@@ -176,8 +176,8 @@ To download the Linux `influxctl` package, do one of the following:
# Primary key fingerprint: 24C9 75CB A61A 024E E1B6 3178 7C3D 5715 9FC2 F927
# Subkey fingerprint: 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E
wget -q https://repos.influxdata.com/influxdata-archive.key
gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null
echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list
gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/keyrings/influxdata-archive.gpg > /dev/null
echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list

sudo apt-get update && sudo apt-get install influxctl
```
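
After the install completes, a quick sanity check confirms the binary is on
your `PATH`:

```bash
influxctl version
```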

@@ -0,0 +1,14 @@
---
title: influxctl table undelete
description: >
  The `influxctl table undelete` command undeletes a previously deleted
  table in an {{% product-name omit=" Clustered" %}} cluster.
menu:
  influxdb3_cloud_dedicated:
    parent: influxctl table
weight: 301
metadata: [influxctl 2.10.4+]
source: /shared/influxctl/table/undelete.md
---

<!-- //SOURCE content/shared/influxctl/table/undelete.md -->

@@ -1,7 +1,8 @@
---
title: InfluxDB Cloud Dedicated data durability
description: >
  InfluxDB Cloud Dedicated replicates all time series data in the storage tier across
  Data written to {{% product-name %}} progresses through multiple stages to ensure durability, optimized performance and storage, and efficient querying. Configuration options at each stage affect system behavior, balancing reliability and resource usage.
  {{% product-name %}} replicates all time series data in the storage tier across
  multiple availability zones within a cloud region and automatically creates backups
  that can be used to restore data in the event of a node failure or data corruption.
weight: 102

@@ -13,73 +14,7 @@ influxdb3/cloud-dedicated/tags: [backups, internals]
related:
  - https://docs.aws.amazon.com/AmazonS3/latest/userguide/DataDurability.html, AWS S3 Data Durability
  - /influxdb3/cloud-dedicated/reference/internals/storage-engine/
source: /shared/v3-distributed-internals-reference/durability.md
---

{{< product-name >}} writes data to multiple Write-Ahead-Log (WAL) files on local
storage and retains WALs until the data is persisted to Parquet files in object storage.
Parquet data files in object storage are redundantly stored on multiple devices
across a minimum of three availability zones in a cloud region.

## Data storage

In {{< product-name >}}, all measurements are stored in
[Apache Parquet](https://parquet.apache.org/) files that represent a
point-in-time snapshot of the data. The Parquet files are immutable and are
never replaced nor modified. Parquet files are stored in object storage and
referenced in the [Catalog](/influxdb3/cloud-dedicated/reference/internals/storage-engine/#catalog), which InfluxDB uses to find the appropriate Parquet files for a particular set of data.

### Data deletion

When data is deleted or expires (reaches the database's [retention period](/influxdb3/cloud-dedicated/reference/internals/data-retention/#database-retention-period)), InfluxDB performs the following steps:

1. Marks the associated Parquet files as deleted in the catalog.
2. Filters out data marked for deletion from all queries.
3. Retains Parquet files marked for deletion in object storage for approximately 30 days after the youngest data in the file ages out of retention.

## Data ingest

When data is written to {{< product-name >}}, InfluxDB first writes the data to a
Write-Ahead-Log (WAL) on locally attached storage on the [Ingester](/influxdb3/cloud-dedicated/reference/internals/storage-engine/#ingester) node before
acknowledging the write request. After acknowledging the write request, the
Ingester holds the data in memory temporarily and then writes the contents of
the WAL to Parquet files in object storage and updates the [Catalog](/influxdb3/cloud-dedicated/reference/internals/storage-engine/#catalog) to
reference the newly created Parquet files. If an Ingester node is gracefully shut
down (for example, during a new software deployment), it flushes the contents of
the WAL to the Parquet files before shutting down.

## Backups

{{< product-name >}} implements the following data backup strategies:

- **Backup of WAL file**: The WAL file is written on locally attached storage.
  If an ingester process fails, the new ingester simply reads the WAL file on
  startup and continues normal operation. WAL files are maintained until their
  contents have been written to the Parquet files in object storage.
  For added protection, ingesters can be configured for write replication, where
  each measurement is written to two different WAL files before acknowledging
  the write.

- **Backup of Parquet files**: Parquet files are stored in object storage where
  they are redundantly stored on multiple devices across a minimum of three
  availability zones in a cloud region. Parquet files associated with each
  database are kept in object storage for the duration of the database retention period
  plus an additional time period (approximately 30 days).

- **Backup of catalog**: InfluxData keeps a transaction log of all recent updates
  to the [InfluxDB catalog](/influxdb3/cloud-dedicated/reference/internals/storage-engine/#catalog) and generates a daily backup of
  the catalog. Backups are preserved for at least 30 days in object storage across a minimum
  of three availability zones.

## Recovery

InfluxData can perform the following recovery operations:

- **Recovery after ingester failure**: If an ingester fails, a new ingester is
  started up and reads from the WAL file for the recently ingested data.

- **Recovery of Parquet files**: {{< product-name >}} uses the provided object
  storage data durability to recover Parquet files.

- **Recovery of the catalog**: InfluxData can restore the [Catalog](/influxdb3/cloud-dedicated/reference/internals/storage-engine/#catalog) to
  the most recent daily backup and then reapply any transactions
  that occurred since the interruption.
<!--// SOURCE - content/shared/v3-distributed-internals-reference/durability.md -->

@@ -67,7 +67,7 @@ by periodically creating, recording, and writing test data into test buckets.
The service periodically executes queries to ensure the data hasn't been lost or corrupted.
A separate instance of this service lives within each {{% product-name %}} cluster.
Additionally, the service creates out-of-band backups in
[line protocol](https://docs.influxdata.com/influxdb/cloud/reference/syntax/line-protocol/),
[line protocol](/influxdb/cloud/reference/syntax/line-protocol/),
and ensures the backup data matches the data on disk.

## Cloud infrastructure

@@ -14,5 +14,5 @@ source: /shared/sql-reference/functions/_index.md
---

<!--
The content of this page is at /content/shared/sql-reference/functions/_index.md
// SOURCE content/shared/sql-reference/functions/_index.md
-->

@@ -15,5 +15,5 @@ source: /shared/sql-reference/functions/aggregate.md
---

<!--
The content of this page is at /content/shared/sql-reference/functions/aggregate.md
// SOURCE content/shared/sql-reference/functions/aggregate.md
-->

@@ -0,0 +1,17 @@
---
title: SQL array functions
list_title: Array functions
description: >
  Use array functions to create and operate on Arrow arrays or lists in SQL queries.
menu:
  influxdb3_cloud_dedicated:
    name: Array
    parent: sql-functions
weight: 309

source: /shared/sql-reference/functions/array.md
---

<!--
// SOURCE content/shared/sql-reference/functions/array.md
-->

@@ -14,6 +14,5 @@ source: /shared/sql-reference/functions/binary-string.md
---

<!--
The content of this page is at
// SOURCE /content/shared/sql-reference/functions/binary-string.md
// SOURCE content/shared/sql-reference/functions/binary-string.md
-->

@@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/conditional.md
---

<!--
The content of this page is at /content/shared/sql-reference/functions/conditional.md
// SOURCE content/shared/sql-reference/functions/conditional.md
-->

@@ -8,12 +8,11 @@ menu:
  influxdb3_cloud_dedicated:
    name: Hashing
    parent: sql-functions
weight: 309
weight: 313

source: /shared/sql-reference/functions/hashing.md
---

<!--
The content for this page is at
// SOURCE /content/shared/sql-reference/functions/hashing.md
// SOURCE content/shared/sql-reference/functions/hashing.md
-->

@@ -0,0 +1,17 @@
---
title: SQL map functions
list_title: Map functions
description: >
  Use map functions to create and operate on Arrow maps in SQL queries.
menu:
  influxdb3_cloud_dedicated:
    name: Map
    parent: sql-functions
weight: 310

source: /shared/sql-reference/functions/map.md
---

<!--
// SOURCE content/shared/sql-reference/functions/map.md
-->

@@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/math.md
---

<!--
The content of this page is at /content/shared/sql-reference/functions/math.md
// SOURCE content/shared/sql-reference/functions/math.md
-->

@@ -7,11 +7,11 @@ menu:
  influxdb3_cloud_dedicated:
    name: Miscellaneous
    parent: sql-functions
weight: 310
weight: 314

source: /shared/sql-reference/functions/misc.md
---

<!--
The content of this page is at /content/shared/sql-reference/functions/misc.md
// SOURCE content/shared/sql-reference/functions/misc.md
-->

@@ -7,12 +7,12 @@ menu:
  influxdb3_cloud_dedicated:
    name: Regular expression
    parent: sql-functions
weight: 308
weight: 312
influxdb3/cloud-dedicated/tags: [regular expressions, sql]

source: /shared/sql-reference/functions/regular-expression.md
---

<!--
The content of this page is at /content/shared/sql-reference/functions/regular-expression.md
// SOURCE content/shared/sql-reference/functions/regular-expression.md
-->

@@ -15,5 +15,5 @@ source: /shared/sql-reference/functions/selector.md
---

<!--
The content of this page is at /content/shared/sql-reference/functions/selector.md
// SOURCE content/shared/sql-reference/functions/selector.md
-->

@@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/string.md
---

<!--
The content of this page is at /content/shared/sql-reference/functions/string.md
// SOURCE content/shared/sql-reference/functions/string.md
-->

@@ -0,0 +1,17 @@
---
title: SQL struct functions
list_title: Struct functions
description: >
  Use struct functions to create Arrow structs in SQL queries.
menu:
  influxdb3_cloud_dedicated:
    name: Struct
    parent: sql-functions
weight: 311

source: /shared/sql-reference/functions/struct.md
---

<!--
// SOURCE content/shared/sql-reference/functions/struct.md
-->

@@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/time-and-date.md
---

<!--
The content of this page is at /content/shared/sql-reference/functions/time-and-date.md
// SOURCE content/shared/sql-reference/functions/time-and-date.md
-->

@@ -8,11 +8,11 @@ menu:
  influxdb3_cloud_dedicated:
    name: Window
    parent: sql-functions
weight: 309
weight: 315

source: /shared/sql-reference/functions/window.md
---

<!--
The content for this page is at content/shared/sql-reference/functions/window.md
// SOURCE content/shared/sql-reference/functions/window.md
-->

@@ -416,9 +416,23 @@ The following example creates sample data for two series (the combination of mea

### Avoid sending duplicate data

Use Telegraf and the [Dedup processor plugin](/telegraf/v1/plugins/#processor-dedup) to filter data whose field values are exact repetitions of previous values.
When writing duplicate points (points with the same timestamp and tag set),
InfluxDB deduplicates the data by creating a union of the duplicate points.
Deduplicating your data can reduce your write payload size and resource usage.
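
For example, if the same series key and timestamp is written twice (a minimal
sketch with hypothetical values), InfluxDB stores a single point whose fields
are the union of both writes:

```bash
# Both lines share the series key (home,room=Kitchen) and the timestamp,
# so InfluxDB keeps one point: hum=35.9 survives from the first write and
# temp typically resolves to 23.0, the most recent write (but see the
# note below about write ordering).
cat <<'EOF' > duplicate-points.lp
home,room=Kitchen temp=21.0,hum=35.9 1641024000000000000
home,room=Kitchen temp=23.0 1641024000000000000
EOF
```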

> [!Important]
> #### Write ordering for duplicate points
>
> InfluxDB attempts to honor write ordering for duplicate points, with the most
> recently written point taking precedence. However, when data is flushed from
> the in-memory buffer to Parquet files—typically every 15 minutes, but
> sometimes sooner—this ordering is not guaranteed if duplicate points are flushed
> at the same time. As a result, the last written duplicate point may not always
> be retained in storage.

Use Telegraf and the [Dedup processor plugin](/telegraf/v1/plugins/#processor-dedup)
to filter data whose field values are exact repetitions of previous values.

The following example shows how to use Telegraf to remove points that repeat field values, and then write the data to InfluxDB:

1. In your terminal, enter the following command to create the sample data file and calculate the number of seconds between the earliest timestamp and _now_.

@@ -21,211 +21,7 @@ alt_links:
  cloud: /influxdb/cloud/tools/grafana/
  core: /influxdb3/core/visualize-data/grafana/
  enterprise: /influxdb3/enterprise/visualize-data/grafana/
source: /content/shared/v3-process-data/visualize/grafana.md
---

Use [Grafana](https://grafana.com/) to query and visualize data stored in
{{% product-name %}}.

> [Grafana] enables you to query, visualize, alert on, and explore your metrics,
> logs, and traces wherever they are stored.
> [Grafana] provides you with tools to turn your time-series database (TSDB)
> data into insightful graphs and visualizations.
>
> {{% cite %}}-- [Grafana documentation](https://grafana.com/docs/grafana/latest/introduction/){{% /cite %}}

<!-- TOC -->

- [Install Grafana or log in to Grafana Cloud](#install-grafana-or-log-in-to-grafana-cloud)
- [InfluxDB data source](#influxdb-data-source)
- [Create an InfluxDB data source](#create-an-influxdb-data-source)
- [Query InfluxDB with Grafana](#query-influxdb-with-grafana)
- [Build visualizations with Grafana](#build-visualizations-with-grafana)

<!-- /TOC -->

## Install Grafana or log in to Grafana Cloud

If using the open source version of **Grafana**, follow the
[Grafana installation instructions](https://grafana.com/docs/grafana/latest/setup-grafana/installation/)
to install Grafana for your operating system.
If using **Grafana Cloud**, log in to your Grafana Cloud instance.

## InfluxDB data source

The InfluxDB data source plugin is included in the Grafana core distribution.
Use the plugin to query and visualize data stored in {{< product-name >}} with
both InfluxQL and SQL.

> [!Note]
> #### Grafana 10.3+
>
> The instructions below are for **Grafana 10.3+**, which introduced the newest
> version of the InfluxDB core plugin.
> The updated plugin includes **SQL support** for InfluxDB 3-based products such
> as {{< product-name >}}.

## Create an InfluxDB data source

Which data source you create depends on which query language you want to use to
query {{% product-name %}}:

1. In your Grafana user interface (UI), navigate to **Data Sources**.
2. Click **Add new data source**.
3. Search for and select the **InfluxDB** plugin.
4. Provide a name for your data source.
5. Under **Query Language**, select either **SQL** or **InfluxQL**:

{{< tabs-wrapper >}}
{{% tabs %}}
[SQL](#)
[InfluxQL](#)
{{% /tabs %}}
{{% tab-content %}}
<!--------------------------------- BEGIN SQL --------------------------------->

When creating an InfluxDB data source that uses SQL to query data:

1. Under **HTTP**:

   - **URL**: Provide your [{{% product-name %}} region URL](/influxdb3/cloud-serverless/reference/regions/)
     using the HTTPS protocol:

     ```
     https://{{< influxdb/host >}}
     ```

2. Under **InfluxDB Details**:

   - **Database**: Provide a default bucket name to query.
     In {{< product-name >}}, a bucket functions as a database.
   - **Token**: Provide an [API token](/influxdb3/cloud-serverless/admin/tokens/)
     with read access to the buckets you want to query.

3. Click **Save & test**.

{{< img-hd src="/img/influxdb3/cloud-serverless-grafana-influxdb-data-source-sql.png" alt="Grafana InfluxDB data source for InfluxDB Cloud Serverless that uses SQL" />}}

<!---------------------------------- END SQL ---------------------------------->
{{% /tab-content %}}
{{% tab-content %}}
<!------------------------------- BEGIN INFLUXQL ------------------------------>

When creating an InfluxDB data source that uses InfluxQL to query data:

> [!Note]
> #### Map databases and retention policies to buckets
>
> To query {{% product-name %}} with InfluxQL, first map database and retention policy
> (DBRP) combinations to your InfluxDB Cloud buckets. For more information, see
> [Map databases and retention policies to buckets](/influxdb3/cloud-serverless/query-data/influxql/dbrp/).
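
For reference, a DBRP mapping can be created with the `influx` CLI. The
database and retention policy names below are hypothetical; replace
`BUCKET_ID` with the ID of the bucket to map:

```bash
# Map database "home_db" and retention policy "autogen" to a bucket,
# and make this mapping the default DBRP for the database.
influx v1 dbrp create \
  --db home_db \
  --rp autogen \
  --bucket-id BUCKET_ID \
  --default
```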

1. Under **HTTP**:

   - **URL**: Provide your [{{% product-name %}} region URL](/influxdb3/cloud-serverless/reference/regions/)
     using the HTTPS protocol:

     ```
     https://{{< influxdb/host >}}
     ```

2. Under **InfluxDB Details**:

   - **Database**: Provide a database name to query.
     Use the database name that is mapped to your InfluxDB bucket.
   - **User**: Provide an arbitrary string.
     _This credential is ignored when querying {{% product-name %}}, but it cannot be empty._
   - **Password**: Provide an [API token](/influxdb3/cloud-serverless/admin/tokens/)
     with read access to the buckets you want to query.
   - **HTTP Method**: Choose one of the available HTTP request methods to use when querying data:

     - **POST** ({{< req text="Recommended" >}})
     - **GET**

3. Click **Save & test**.

{{< img-hd src="/img/influxdb3/cloud-serverless-grafana-influxdb-data-source-influxql.png" alt="Grafana InfluxDB data source for InfluxDB Cloud Serverless using InfluxQL" />}}
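
As with the SQL data source, you can verify the token and the DBRP mapping
outside Grafana. A minimal sketch with placeholder values; it sends the token
in an `Authorization: Token` header, which the v1-compatible `/query` endpoint
also accepts:

```bash
# Hypothetical placeholders: DATABASE_NAME is the DBRP database name,
# API_TOKEN is a token with read access to the mapped bucket.
curl --get "https://{{< influxdb/host >}}/query" \
  --header "Authorization: Token API_TOKEN" \
  --data-urlencode "db=DATABASE_NAME" \
  --data-urlencode "q=SHOW MEASUREMENTS"
```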

<!-------------------------------- END INFLUXQL ------------------------------->
{{% /tab-content %}}
{{< /tabs-wrapper >}}

## Query InfluxDB with Grafana

After you [configure and save an InfluxDB data source](#create-an-influxdb-data-source),
use Grafana to build, run, and inspect queries against your InfluxDB bucket.

{{< tabs-wrapper >}}
{{% tabs %}}
[SQL](#)
[InfluxQL](#)
{{% /tabs %}}
{{% tab-content %}}
<!--------------------------------- BEGIN SQL --------------------------------->

> [!Note]
> {{% sql/sql-schema-intro %}}
> To learn more, see [Query Data](/influxdb3/cloud-serverless/query-data/sql/).

1. Click **Explore**.
2. In the dropdown, select the saved InfluxDB data source to query.
3. Use the SQL query form to build your query:
   - **Table**: Select the measurement to query.
   - **Column**: Select one or more fields and tags to return as columns in query results.

     With SQL, select the `time` column to include timestamps with the data.
     Grafana relies on the `time` column to correctly graph time series data.

   - _**Optional:**_ Toggle **filter** to generate **WHERE** clause statements.
     - **WHERE**: Configure condition expressions to include in the `WHERE` clause.

   - _**Optional:**_ Toggle **group** to generate **GROUP BY** clause statements.

     - **GROUP BY**: Select columns to group by.
       If you include an aggregation function in the **SELECT** list,
       you must group by one or more of the queried columns.
       SQL returns the aggregation for each group.

   - {{< req text="Recommended" color="green" >}}:
     Toggle **order** to generate **ORDER BY** clause statements.

     - **ORDER BY**: Select columns to sort by.
       You can sort by time and multiple fields or tags.
       To sort in descending order, select **DESC**.

4. {{< req text="Recommended" color="green" >}}: Change format to **Time series**.
   - Use the **Format** dropdown to change the format of the query results.
     For example, to visualize the query results as a time series, select **Time series**.

5. Click **Run query** to execute the query.

<!---------------------------------- END SQL ---------------------------------->
{{% /tab-content %}}
{{% tab-content %}}
<!------------------------------- BEGIN INFLUXQL ------------------------------>

1. Click **Explore**.
2. In the dropdown, select the **InfluxDB** data source that you want to query.
3. Use the InfluxQL query form to build your query:
   - **FROM**: Select the measurement that you want to query.
   - **WHERE**: To filter the query results, enter a conditional expression.
   - **SELECT**: Select fields to query and an aggregate function to apply to each.
     The aggregate function is applied to each time interval defined in the
     `GROUP BY` clause.
   - **GROUP BY**: By default, Grafana groups data by time to downsample results
     and improve query performance.
     You can also add other tags to group by.
4. Click **Run query** to execute the query.

<!-------------------------------- END INFLUXQL ------------------------------->
{{% /tab-content %}}
{{< /tabs-wrapper >}}

{{< youtube "rSsouoNsNDs" >}}

To learn about query management and inspection in Grafana, see the
[Grafana Explore documentation](https://grafana.com/docs/grafana/latest/explore/).

## Build visualizations with Grafana

For a comprehensive walk-through of creating visualizations with
Grafana, see the [Grafana documentation](https://grafana.com/docs/grafana/latest/).
<!-- SOURCE: /content/shared/v3-process-data/visualize/grafana.md -->

@@ -27,7 +27,7 @@ point-in-time snapshot of the data. The Parquet files are immutable and are
never replaced nor modified. Parquet files are stored in object storage.

<span id="influxdb-catalog"></span>
The _InfluxDB catalog_ is a relational, PostreSQL-compatible database that
The _InfluxDB catalog_ is a relational, PostgreSQL-compatible database that
contains references to all Parquet files in object storage and is used as an
index to find the appropriate Parquet files for a particular set of data.

@@ -14,5 +14,5 @@ source: /shared/sql-reference/functions/_index.md
---

<!--
The content of this page is at /content/shared/sql-reference/functions/_index.md
-->
// SOURCE content/shared/sql-reference/functions/_index.md
-->

@@ -15,5 +15,5 @@ source: /shared/sql-reference/functions/aggregate.md
---

<!--
The content of this page is at /content/shared/sql-reference/functions/aggregate.md
// SOURCE content/shared/sql-reference/functions/aggregate.md
-->

@@ -0,0 +1,17 @@
---
title: SQL array functions
list_title: Array functions
description: >
  Use array functions to create and operate on Arrow arrays or lists in SQL queries.
menu:
  influxdb3_cloud_serverless:
    name: Array
    parent: sql-functions
weight: 309

source: /shared/sql-reference/functions/array.md
---

<!--
// SOURCE content/shared/sql-reference/functions/array.md
-->

@@ -14,6 +14,5 @@ source: /shared/sql-reference/functions/binary-string.md
---

<!--
The content of this page is at
// SOURCE /content/shared/sql-reference/functions/binary-string.md
// SOURCE content/shared/sql-reference/functions/binary-string.md
-->

@@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/conditional.md
---

<!--
The content of this page is at /content/shared/sql-reference/functions/conditional.md
// SOURCE content/shared/sql-reference/functions/conditional.md
-->

@@ -8,12 +8,11 @@ menu:
  influxdb3_cloud_serverless:
    name: Hashing
    parent: sql-functions
weight: 309
weight: 313

source: /shared/sql-reference/functions/hashing.md
---

<!--
The content for this page is at
// SOURCE /content/shared/sql-reference/functions/hashing.md
// SOURCE content/shared/sql-reference/functions/hashing.md
-->

@@ -0,0 +1,17 @@
---
title: SQL map functions
list_title: Map functions
description: >
  Use map functions to create and operate on Arrow maps in SQL queries.
menu:
  influxdb3_cloud_serverless:
    name: Map
    parent: sql-functions
weight: 310

source: /shared/sql-reference/functions/map.md
---

<!--
// SOURCE content/shared/sql-reference/functions/map.md
-->

@@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/math.md
---

<!--
The content of this page is at /content/shared/sql-reference/functions/math.md
-->
// SOURCE content/shared/sql-reference/functions/math.md
-->

@@ -7,11 +7,11 @@ menu:
  influxdb3_cloud_serverless:
    name: Miscellaneous
    parent: sql-functions
weight: 310
weight: 314

source: /shared/sql-reference/functions/misc.md
---

<!--
The content of this page is at /content/shared/sql-reference/functions/misc.md
-->
// SOURCE content/shared/sql-reference/functions/misc.md
-->

@@ -7,12 +7,12 @@ menu:
  influxdb3_cloud_serverless:
    name: Regular expression
    parent: sql-functions
weight: 308
weight: 312
influxdb3/cloud-serverless/tags: [regular expressions, sql]

source: /shared/sql-reference/functions/regular-expression.md
---

<!--
The content of this page is at /content/shared/sql-reference/functions/regular-expression.md
-->
// SOURCE content/shared/sql-reference/functions/regular-expression.md
-->

@@ -15,5 +15,5 @@ source: /shared/sql-reference/functions/selector.md
---

<!--
The content of this page is at /content/shared/sql-reference/functions/selector.md
// SOURCE content/shared/sql-reference/functions/selector.md
-->

@@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/string.md
---

<!--
The content of this page is at /content/shared/sql-reference/functions/string.md
// SOURCE content/shared/sql-reference/functions/string.md
-->

@@ -0,0 +1,17 @@
---
title: SQL struct functions
list_title: Struct functions
description: >
  Use struct functions to create Arrow structs in SQL queries.
menu:
  influxdb3_cloud_serverless:
    name: Struct
    parent: sql-functions
weight: 311

source: /shared/sql-reference/functions/struct.md
---

<!--
// SOURCE content/shared/sql-reference/functions/struct.md
-->

@@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/time-and-date.md
---

<!--
The content of this page is at /content/shared/sql-reference/functions/time-and-date.md
// SOURCE content/shared/sql-reference/functions/time-and-date.md
-->

@@ -8,11 +8,11 @@ menu:
  influxdb3_cloud_serverless:
    name: Window
    parent: sql-functions
weight: 309
weight: 315

source: /shared/sql-reference/functions/window.md
---

<!--
The content for this page is at content/shared/sql-reference/functions/window.md
// SOURCE content/shared/sql-reference/functions/window.md
-->

@@ -430,9 +430,23 @@ The following example creates sample data for two series (the combination of mea

### Avoid sending duplicate data

Use Telegraf and the [Dedup processor plugin](/telegraf/v1/plugins/#processor-dedup) to filter data whose field values are exact repetitions of previous values.
When writing duplicate points (points with the same timestamp and tag set),
InfluxDB deduplicates the data by creating a union of the duplicate points.
Deduplicating your data can reduce your write payload size and resource usage.

> [!Important]
> #### Write ordering for duplicate points
>
> InfluxDB attempts to honor write ordering for duplicate points, with the most
> recently written point taking precedence. However, when data is flushed from
> the in-memory buffer to Parquet files—typically every 15 minutes, but
> sometimes sooner—this ordering is not guaranteed if duplicate points are flushed
> at the same time. As a result, the last written duplicate point may not always
> be retained in storage.

Use Telegraf and the [Dedup processor plugin](/telegraf/v1/plugins/#processor-dedup)
to filter data whose field values are exact repetitions of previous values.

The following example shows how to use Telegraf to remove points that repeat field values, and then write the data to InfluxDB:

1. In your terminal, enter the following command to create the sample data file and calculate the number of seconds between the earliest timestamp and _now_.

@@ -8,9 +8,11 @@ menu:
    parent: Administer InfluxDB Clustered
    name: Scale your cluster
weight: 207
influxdb3/clustered/tags: [scale]
influxdb3/clustered/tags: [scale, performance, Kubernetes]
related:
  - /influxdb3/clustered/reference/internals/storage-engine/
  - /influxdb3/clustered/write-data/best-practices/data-lifecycle/
  - /influxdb3/clustered/query-data/troubleshoot-and-optimize/optimize-queries/
  - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits, Kubernetes resource requests and limits
---

@@ -559,11 +561,14 @@ concurrency demands or reaches the hardware limits of your underlying nodes.

### Compactor

- **Recommended**: Maintain **1 Compactor pod** and use [vertical scaling](#vertical-scaling) (especially
  increasing the available CPU) for the Compactor.
- **Recommended**: Maintain **1 Compactor pod** and use [vertical scaling](#vertical-scaling) for the Compactor.
  Scale CPU and memory resources together, as compactor concurrency settings scale based on memory, not CPU count.
- Because compaction is a compute-heavy process, horizontal scaling increases compaction throughput, but not as
  efficiently as vertical scaling.

> [!Important]
> When scaling the Compactor, scale CPU and memory resources together.
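
Before scaling, it can help to see what the Compactor currently requests. The
following is a sketch; the label selector and the `influxdb` namespace are
assumptions that may differ in your deployment:

```bash
# List Compactor pods with their CPU and memory requests and limits
# (assumed label selector and namespace; adjust for your cluster).
kubectl get pods --namespace influxdb \
  --selector app.kubernetes.io/component=compactor \
  --output jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[0].resources}{"\n"}{end}'
```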

### Garbage collector

The [Garbage collector](/influxdb3/clustered/reference/internals/storage-engine/#garbage-collector) is a lightweight process that typically doesn't require

@@ -0,0 +1,66 @@
---
title: Undelete a table
description: >
  Use the [`influxctl table undelete` command](/influxdb3/clustered/reference/cli/influxctl/table/undelete/)
  to restore a previously deleted table in your {{< product-name omit=" Cluster" >}} cluster.
menu:
  influxdb3_clustered:
    parent: Manage tables
weight: 204
list_code_example: |
  ```bash { placeholders="DATABASE_NAME|TABLE_ID" }
  influxctl table undelete DATABASE_NAME TABLE_ID
  ```
related:
  - /influxdb3/clustered/reference/cli/influxctl/table/undelete/
  - /influxdb3/clustered/admin/tables/delete/
  - /influxdb3/clustered/admin/tokens/table/create/
draft: true # hide until next clustered release
---

Use the [`influxctl table undelete` command](/influxdb3/clustered/reference/cli/influxctl/table/undelete/)
to restore a previously deleted table in your {{< product-name omit=" Cluster" >}} cluster.

> [!Important]
> To undelete a table:
>
> - A table with the same name must not already exist.
> - You must have appropriate permissions to manage databases.

When you undelete a table, it is restored with the same partition template and
other settings as when it was deleted.

> [!Warning]
> Tables can only be undeleted for
> {{% show-in "cloud-dedicated" %}}approximately 14 days{{% /show-in %}}{{% show-in "clustered" %}}a configurable "hard-delete" grace period{{% /show-in %}}
> after they are deleted.
> After this grace period, all Parquet files associated with the deleted table
> are permanently removed and the table cannot be undeleted.

## Undelete a table using the influxctl CLI

```bash { placeholders="DATABASE_NAME|TABLE_ID" }
influxctl table undelete DATABASE_NAME TABLE_ID
```

Replace the following:

- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
  Name of the database associated with the deleted table
- {{% code-placeholder-key %}}`TABLE_ID`{{% /code-placeholder-key %}}:
  ID of the deleted table to restore

> [!Tip]
> #### View deleted table IDs
>
> To view the IDs of deleted tables, use the `influxctl table list` command with
> the `--filter-status=deleted` flag. For example:
>
> <!--pytest.mark.skip-->
>
> ```bash { placeholders="DATABASE_NAME" }
> influxctl table list --filter-status=deleted DATABASE_NAME
> ```
>
> Replace {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}
> with the name of the database associated with the table you want to undelete.

@@ -16,13 +16,13 @@ aliases:
  - /influxdb3/clustered/install/licensing/
---

Install your InfluxDB Clustered license in your cluster to authorize the use
of the InfluxDB Clustered software.
Install your {{% product-name %}} license in your cluster to authorize the use
of the {{% product-name %}} software.

## Install your InfluxDB license
## Install your {{% product-name %}} license

1. If you haven't already,
   [request an InfluxDB Clustered license](https://influxdata.com/contact-sales).
   [request an {{% product-name %}} license](https://influxdata.com/contact-sales).
2. InfluxData provides you with a `license.yml` file that encapsulates your
   license token as a custom Kubernetes resource.
3. Use `kubectl` to apply and create the `License` resource in your InfluxDB

@@ -34,28 +34,28 @@ of the InfluxDB Clustered software.
kubectl apply --filename license.yml --namespace influxdb
```

InfluxDB Clustered detects the `License` resource and extracts the credentials
into a secret required by InfluxDB Clustered Kubernetes pods.
{{% product-name %}} detects the `License` resource and extracts the credentials
into a secret required by {{% product-name %}} Kubernetes pods.
Pods validate the license secret both at startup and periodically (roughly once
per hour) while running.

## Upgrade from a non-licensed release

If you are currently using a non-licensed preview release of InfluxDB Clustered
If you are currently using a non-licensed preview release of {{% product-name %}}
and want to upgrade to a licensed release, do the following:

1. [Install an InfluxDB license](#install-your-influxdb-license)
1. [Install an {{% product-name %}} license](#install-your-influxdb-clustered-license)
2. If you [use the `AppInstance` resource configuration](/influxdb3/clustered/install/set-up-cluster/configure-cluster/directly/)
   to configure your cluster, in your `myinfluxdb.yml`, update the package
   version defined in `spec.package.image` to use a licensed release.

   If using the InfluxDB Clustered Helm chart, update the `image.tag` property
   If using the {{% product-name %}} Helm chart, update the `image.tag` property
   in your `values.yaml` to use a licensed release.

> [!Warning]
> #### Upgrade to checkpoint releases first
>
> When upgrading InfluxDB Clustered, always upgrade to each
> When upgrading {{% product-name %}}, always upgrade to each
> [checkpoint release](/influxdb3/clustered/admin/upgrade/#checkpoint-releases)
> first, before proceeding to newer versions.
> Upgrading past a checkpoint release without first upgrading to it may result in

@@ -103,6 +103,33 @@ the version number to upgrade to.
After you have activated your license, use the following signals to verify the
license is active and functioning.

In your commands, replace the following:

- {{% code-placeholder-key %}}`NAMESPACE`{{% /code-placeholder-key %}}:
  your [InfluxDB namespace](/influxdb3/clustered/install/set-up-cluster/configure-cluster/#create-a-namespace-for-influxdb)
- {{% code-placeholder-key %}}`POD_NAME`{{% /code-placeholder-key %}}:
  your [InfluxDB Kubernetes pod](/influxdb3/clustered/install/set-up-cluster/deploy/#inspect-cluster-pods)

### Verify database components

After you [install your license](#install-your-influxdb-clustered-license),
run the following command to check that database pods start up and are in the
`Running` state:

<!--pytest.mark.skip-->

```bash
kubectl get pods -l app=iox --namespace influxdb
```

If a `Pod` fails to start, run the following command to view pod information:

<!--pytest.mark.skip-->

```sh { placeholders="POD_NAME" }
kubectl describe pod POD_NAME --namespace influxdb
```

### Verify the `Secret` exists

Run the following command to verify that the licensing activation created a

@@ -116,7 +143,8 @@ kubectl get secret iox-license --namespace influxdb

If the secret doesn't exist,
[view `license-controller` logs](#view-license-controller-logs) for more
information or errors.
information or errors. For troubleshooting guidance, see
[Manage your {{% product-name %}} license](/influxdb3/clustered/admin/licensing/).

### View `license-controller` logs

@@ -130,7 +158,20 @@ following command:
kubectl logs deployment/license-controller --namespace influxdb
```

For more information about InfluxDB Clustered licensing, see
[Manage your InfluxDB Clustered license](/influxdb3/clustered/admin/licensing/)
## Renew your license

> [!Tip]
> Before your license expires, your InfluxData sales representative will
> contact you about license renewal.
> You may also contact your sales representative at any time.

If you have an expired license, follow the same process to [install your renewed license](#install-your-influxdb-clustered-license) using the new `license.yml` file provided by InfluxData.

> [!Important]
> #### Recover from an expired license
>
> If your license has already expired and your cluster pods are in a `CrashLoopBackOff` state, applying a valid renewed license restores normal operation. For more information about license enforcement and recovery, see [Manage your {{% product-name %}} license](/influxdb3/clustered/admin/licensing/).

For more information about {{% product-name %}} licensing, including license enforcement, grace periods, and detailed troubleshooting, see
[Manage your {{% product-name %}} license](/influxdb3/clustered/admin/licensing/).

{{< page-nav prev="/influxdb3/clustered/install/set-up-cluster/configure-cluster/" prevText="Configure your cluster" next="/influxdb3/clustered/install/set-up-cluster/deploy/" nextText="Deploy your cluster" keepTab=true >}}

@@ -9,7 +9,7 @@ menu:
  influxdb3_clustered:
    name: Use Grafana
    parent: Visualize data
influxdb3/clustered/tags: [query, visualization]
influxdb3/clustered/tags: [query, visualization, Grafana]
aliases:
  - /influxdb3/clustered/query-data/tools/grafana/
  - /influxdb3/clustered/query-data/sql/execute-queries/grafana/

@@ -20,195 +20,7 @@ alt_links:
  cloud: /influxdb/cloud/tools/grafana/
  core: /influxdb3/core/visualize-data/grafana/
  enterprise: /influxdb3/enterprise/visualize-data/grafana/
source: /content/shared/v3-process-data/visualize/grafana.md
---

Use [Grafana](https://grafana.com/) to query and visualize data stored in
{{% product-name %}}.

> [Grafana] enables you to query, visualize, alert on, and explore your metrics,
> logs, and traces wherever they are stored.
> [Grafana] provides you with tools to turn your time-series database (TSDB)
> data into insightful graphs and visualizations.
>
> {{% cite %}}-- [Grafana documentation](https://grafana.com/docs/grafana/latest/introduction/){{% /cite %}}

- [Install Grafana or log in to Grafana Cloud](#install-grafana-or-log-in-to-grafana-cloud)
- [InfluxDB data source](#influxdb-data-source)
- [Create an InfluxDB data source](#create-an-influxdb-data-source)
- [Query InfluxDB with Grafana](#query-influxdb-with-grafana)
- [Build visualizations with Grafana](#build-visualizations-with-grafana)

## Install Grafana or log in to Grafana Cloud

If using the open source version of **Grafana**, follow the
[Grafana installation instructions](https://grafana.com/docs/grafana/latest/setup-grafana/installation/)
to install Grafana for your operating system.
If using **Grafana Cloud**, log in to your Grafana Cloud instance.

## InfluxDB data source

The InfluxDB data source plugin is included in the Grafana core distribution.
Use the plugin to query and visualize data stored in {{< product-name >}} with
both InfluxQL and SQL.

> [!Note]
> #### Grafana 10.3+
>
> The instructions below are for **Grafana 10.3+**, which introduced the newest
> version of the InfluxDB core plugin.
> The updated plugin includes **SQL support** for InfluxDB 3-based products such
> as {{< product-name >}}.

## Create an InfluxDB data source

1. In your Grafana user interface (UI), navigate to **Data Sources**.
2. Click **Add new data source**.
3. Search for and select the **InfluxDB** plugin.
4. Provide a name for your data source.
5. Under **Query Language**, select either **SQL** or **InfluxQL**:

{{< tabs-wrapper >}}
{{% tabs %}}
[SQL](#)
[InfluxQL](#)
{{% /tabs %}}
{{% tab-content %}}
<!--------------------------------- BEGIN SQL --------------------------------->

When creating an InfluxDB data source that uses SQL to query data:

1. Under **HTTP**:

   - **URL**: Provide your {{% product-name omit=" Clustered" %}} cluster URL
     using the HTTPS protocol:

     ```
     https://{{< influxdb/host >}}
     ```

2. Under **InfluxDB Details**:

   - **Database**: Provide a default [database](/influxdb3/clustered/admin/databases/) name to query.
   - **Token**: Provide a [database token](/influxdb3/clustered/admin/tokens/#database-tokens)
     with read access to the databases you want to query.

3. Click **Save & test**.

{{< img-hd src="/img/influxdb3/clustered-grafana-influxdb-data-source-sql.png" alt="Grafana InfluxDB data source for InfluxDB Clustered that uses SQL" />}}
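
If the test fails, the database name or token are the usual suspects. A quick
check outside Grafana, sketched with hypothetical placeholder values:

```bash
# Confirm the database exists, then confirm the token can read from it.
influxctl database list
influxctl query --database DATABASE_NAME --token DATABASE_TOKEN "SELECT 1"
```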

<!---------------------------------- END SQL ---------------------------------->
{{% /tab-content %}}
{{% tab-content %}}
<!------------------------------- BEGIN INFLUXQL ------------------------------>

When creating an InfluxDB data source that uses InfluxQL to query data:

1. Under **HTTP**:

   - **URL**: Provide your [{{% product-name %}} region URL](/influxdb3/clustered/reference/regions/)
     using the HTTPS protocol:

     ```
     https://{{< influxdb/host >}}
     ```

2. Under **InfluxDB Details**:

   - **Database**: Provide a default [database](/influxdb3/clustered/admin/databases/) name to query.
   - **User**: Provide an arbitrary string.
     _This credential is ignored when querying {{% product-name %}}, but it cannot be empty._
   - **Password**: Provide a [database token](/influxdb3/clustered/admin/tokens/#database-tokens)
     with read access to the databases you want to query.
   - **HTTP Method**: Choose one of the available HTTP request methods to use when querying data:

     - **POST** ({{< req text="Recommended" >}})
     - **GET**

3. Click **Save & test**.

{{< img-hd src="/img/influxdb3/clustered-grafana-influxdb-data-source-influxql.png" alt="Grafana InfluxDB data source for InfluxDB Clustered using InfluxQL" />}}

<!-------------------------------- END INFLUXQL ------------------------------->
{{% /tab-content %}}
{{< /tabs-wrapper >}}

## Query InfluxDB with Grafana

After you [configure and save an InfluxDB data source](#create-an-influxdb-data-source),
use Grafana to build, run, and inspect queries against your InfluxDB database.

{{< tabs-wrapper >}}
{{% tabs %}}
[SQL](#)
[InfluxQL](#)
{{% /tabs %}}
{{% tab-content %}}
<!--------------------------------- BEGIN SQL --------------------------------->

> [!Note]
> {{% sql/sql-schema-intro %}}
> To learn more, see [Query Data](/influxdb3/clustered/query-data/sql/).

1. Click **Explore**.
2. In the dropdown, select the saved InfluxDB data source to query.
3. Use the SQL query form to build your query:
   - **Table**: Select the measurement to query.
   - **Column**: Select one or more fields and tags to return as columns in query results.

     With SQL, select the `time` column to include timestamps with the data.
     Grafana relies on the `time` column to correctly graph time series data.

   - _**Optional:**_ Toggle **filter** to generate **WHERE** clause statements.
     - **WHERE**: Configure condition expressions to include in the `WHERE` clause.

   - _**Optional:**_ Toggle **group** to generate **GROUP BY** clause statements.

     - **GROUP BY**: Select columns to group by.
       If you include an aggregation function in the **SELECT** list,
       you must group by one or more of the queried columns.
       SQL returns the aggregation for each group.

   - {{< req text="Recommended" color="green" >}}:
     Toggle **order** to generate **ORDER BY** clause statements.

     - **ORDER BY**: Select columns to sort by.
       You can sort by time and multiple fields or tags.
       To sort in descending order, select **DESC**.

4. {{< req text="Recommended" color="green" >}}: Change format to **Time series**.
   - Use the **Format** dropdown to change the format of the query results.
     For example, to visualize the query results as a time series, select **Time series**.

5. Click **Run query** to execute the query.

<!---------------------------------- END SQL ---------------------------------->
{{% /tab-content %}}
{{% tab-content %}}
<!------------------------------- BEGIN INFLUXQL ------------------------------>

1. Click **Explore**.
2. In the dropdown, select the **InfluxDB** data source that you want to query.
3. Use the InfluxQL query form to build your query:
   - **FROM**: Select the measurement that you want to query.
   - **WHERE**: To filter the query results, enter a conditional expression.
   - **SELECT**: Select fields to query and an aggregate function to apply to each.
     The aggregate function is applied to each time interval defined in the
     `GROUP BY` clause.
   - **GROUP BY**: By default, Grafana groups data by time to downsample results
     and improve query performance.
     You can also add other tags to group by.
4. Click **Run query** to execute the query.

<!-------------------------------- END INFLUXQL ------------------------------->
{{% /tab-content %}}
{{< /tabs-wrapper >}}

{{< youtube "rSsouoNsNDs" >}}

To learn about query management and inspection in Grafana, see the
[Grafana Explore documentation](https://grafana.com/docs/grafana/latest/explore/).

## Build visualizations with Grafana

For a comprehensive walk-through of creating visualizations with
Grafana, see the [Grafana documentation](https://grafana.com/docs/grafana/latest/).
<!-- SOURCE: /content/shared/v3-process-data/visualize/grafana.md -->

@@ -166,8 +166,8 @@ To download the Linux `influxctl` package, do one of the following:
# Primary key fingerprint: 24C9 75CB A61A 024E E1B6 3178 7C3D 5715 9FC2 F927
# Subkey fingerprint: 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E
wget -q https://repos.influxdata.com/influxdata-archive.key
gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null
echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list
gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/keyrings/influxdata-archive.gpg > /dev/null
echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list
```

{{% /code-tab-content %}}

@@ -0,0 +1,15 @@
---
title: influxctl table undelete
description: >
  The `influxctl table undelete` command undeletes a previously deleted
  table in an {{% product-name omit=" Clustered" %}} cluster.
menu:
  influxdb3_clustered:
    parent: influxctl table
weight: 301
metadata: [influxctl 2.10.4+]
source: /shared/influxctl/table/undelete.md
draft: true # hide until next clustered release
---

<!-- //SOURCE content/shared/influxctl/table/undelete.md -->

@@ -0,0 +1,17 @@
---
title: InfluxDB Clustered data durability
description: >
  Data written to {{% product-name %}} progresses through multiple stages to ensure durability, optimized performance and storage, and efficient querying. Configuration options at each stage affect system behavior, balancing reliability and resource usage.
weight: 102
menu:
  influxdb3_clustered:
    name: Data durability
    parent: InfluxDB internals
influxdb3/clustered/tags: [backups, internals]
related:
  - https://docs.aws.amazon.com/AmazonS3/latest/userguide/DataDurability.html, AWS S3 Data Durability
  - /influxdb3/clustered/reference/internals/storage-engine/
source: /shared/v3-distributed-internals-reference/durability.md
---

<!--// SOURCE - content/shared/v3-distributed-internals-reference/durability.md -->

@@ -62,7 +62,7 @@ by periodically creating, recording, and writing test data into test buckets.
The service periodically executes queries to ensure the data hasn't been lost or corrupted.
A separate instance of this service lives within each InfluxDB cluster.
Additionally, the service creates out-of-band backups in
[line protocol](https://docs.influxdata.com/influxdb/cloud/reference/syntax/line-protocol/),
[line protocol](/influxdb/cloud/reference/syntax/line-protocol/),
and ensures the backup data matches the data on disk.

## Cloud infrastructure

@@ -229,7 +229,7 @@ User accounts can be created by InfluxData on the InfluxDB Clustered system via
User accounts can create database tokens with data read and/or write permissions.
API requests from custom applications require a database token with sufficient permissions.
For more information on the types of tokens and ways to create them, see
[Manage tokens](https://docs.influxdata.com/influxdb3/clustered/admin/tokens/).
[Manage tokens](/influxdb3/clustered/admin/tokens/).

### Role-based access controls (RBAC)

Some files were not shown because too many files have changed in this diff.