From 68bcb0f3c3d9b47d32bcee743e81143d02012e26 Mon Sep 17 00:00:00 2001 From: thrasherht Date: Sat, 22 Mar 2025 00:32:32 -0400 Subject: [PATCH 001/122] Update _index.md Escape ${basearch} variable to prevent bash from interpreting it. --- content/influxdb/v2/install/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb/v2/install/_index.md b/content/influxdb/v2/install/_index.md index c27d355f4..29c6251ba 100644 --- a/content/influxdb/v2/install/_index.md +++ b/content/influxdb/v2/install/_index.md @@ -377,7 +377,7 @@ To install {{% product-name %}} on Linux, do one of the following: cat < Date: Wed, 30 Jul 2025 14:39:02 -0700 Subject: [PATCH 002/122] feat: Add complete GET /query endpoint --- .../v1-compatibility/swaggerV1Compat.yml | 135 ++++++++++++++++++ 1 file changed, 135 insertions(+) diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index 744692e6d..b43a670cf 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -136,6 +136,141 @@ paths: schema: $ref: '#/components/schemas/Error' /query: + get: + operationId: GetV1ExecuteQuery + tags: + - Query + summary: Query using the InfluxDB v1 HTTP API + parameters: + - $ref: '#/components/parameters/TraceSpan' + - $ref: '#/components/parameters/AuthUserV1' + - $ref: '#/components/parameters/AuthPassV1' + - in: header + name: Accept + schema: + type: string + description: Specifies how query results should be encoded in the response. **Note:** With `application/csv`, query results include epoch timestamps instead of RFC3339 timestamps. + default: application/json + enum: + - application/json + - application/csv + - text/csv + - application/x-msgpack + - in: header + name: Accept-Encoding + description: The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand. + schema: + type: string + description: Specifies that the query response in the body should be encoded with gzip or not encoded with identity. + default: identity + enum: + - gzip + - identity + - in: query + name: chunked + description: | + If true, the response is divided into chunks of size `chunk_size`. + schema: + type: boolean + default: false + - in: query + name: chunk_size + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + schema: + type: integer + default: 10000 + - in: query + name: db + schema: + type: string + required: true + description: Bucket to query. + - in: query + name: pretty + description: | + If true, the JSON response is formatted in a human-readable format. + schema: + type: boolean + default: false + - in: query + name: q + description: Defines the influxql query to run. + required: true + schema: + type: string + - in: query + name: rp + schema: + type: string + description: Retention policy name. + - name: epoch + description: | + Formats timestamps as unix (epoch) timestamps with the specified precision + instead of RFC3339 timestamps with nanosecond precision. + in: query + schema: + type: string + enum: + - h + - m + - s + - ms + - u + - µ + - ns + responses: + '200': + description: Query results + headers: + Content-Encoding: + description: The Content-Encoding entity header is used to compress the media-type. 
When present, its value indicates which encodings were applied to the entity-body + schema: + type: string + description: Specifies that the response in the body is encoded with gzip or not encoded with identity. + default: identity + enum: + - gzip + - identity + Trace-Id: + description: The Trace-Id header reports the request's trace ID, if one was generated. + schema: + type: string + description: Specifies the request's trace ID. + content: + application/csv: + schema: + $ref: '#/components/schemas/InfluxQLCSVResponse' + text/csv: + schema: + $ref: '#/components/schemas/InfluxQLCSVResponse' + application/json: + schema: + $ref: '#/components/schemas/InfluxQLResponse' + examples: + influxql-chunk_size_2: + value: | + {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:55Z",90,"1"],["2016-05-19T18:37:56Z",90,"1"]],"partial":true}],"partial":true}]} + {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:57Z",90,"1"],["2016-05-19T18:37:58Z",90,"1"]]}]}]} + application/x-msgpack: + schema: + type: string + format: binary + '429': + description: Token is temporarily over quota. The Retry-After header describes when to try the read again. + headers: + Retry-After: + description: A non-negative decimal integer indicating the seconds to delay after the response is received. + schema: + type: integer + format: int32 + default: + description: Error processing query + content: + application/json: + schema: + $ref: '#/components/schemas/Error' post: operationId: PostQueryV1 tags: From 9117e7ab3515fdde1aa18cc0230d2a1669d38236 Mon Sep 17 00:00:00 2001 From: meelahme Date: Wed, 30 Jul 2025 14:48:52 -0700 Subject: [PATCH 003/122] feat: Add complete GET /query endpoint to cluster, dedicated, and serverless --- .../v1-compatibility/swaggerV1Compat.yml | 135 ++++++++++++++++++ .../v1-compatibility/swaggerV1Compat.yml | 135 ++++++++++++++++++ 2 files changed, 270 insertions(+) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index 55f91d971..ba5a4dff2 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -137,6 +137,141 @@ paths: schema: $ref: '#/components/schemas/Error' /query: + get: + operationId: GetV1ExecuteQuery + tags: + - Query + summary: Query using the InfluxDB v1 HTTP API + parameters: + - $ref: '#/components/parameters/TraceSpan' + - $ref: '#/components/parameters/AuthUserV1' + - $ref: '#/components/parameters/AuthPassV1' + - in: header + name: Accept + schema: + type: string + description: Specifies how query results should be encoded in the response. **Note:** With `application/csv`, query results include epoch timestamps instead of RFC3339 timestamps. + default: application/json + enum: + - application/json + - application/csv + - text/csv + - application/x-msgpack + - in: header + name: Accept-Encoding + description: The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand. + schema: + type: string + description: Specifies that the query response in the body should be encoded with gzip or not encoded with identity. 
+ default: identity + enum: + - gzip + - identity + - in: query + name: chunked + description: | + If true, the response is divided into chunks of size `chunk_size`. + schema: + type: boolean + default: false + - in: query + name: chunk_size + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + schema: + type: integer + default: 10000 + - in: query + name: db + schema: + type: string + required: true + description: Bucket to query. + - in: query + name: pretty + description: | + If true, the JSON response is formatted in a human-readable format. + schema: + type: boolean + default: false + - in: query + name: q + description: Defines the influxql query to run. + required: true + schema: + type: string + - in: query + name: rp + schema: + type: string + description: Retention policy name. + - name: epoch + description: | + Formats timestamps as unix (epoch) timestamps with the specified precision + instead of RFC3339 timestamps with nanosecond precision. + in: query + schema: + type: string + enum: + - h + - m + - s + - ms + - u + - µ + - ns + responses: + '200': + description: Query results + headers: + Content-Encoding: + description: The Content-Encoding entity header is used to compress the media-type. When present, its value indicates which encodings were applied to the entity-body + schema: + type: string + description: Specifies that the response in the body is encoded with gzip or not encoded with identity. + default: identity + enum: + - gzip + - identity + Trace-Id: + description: The Trace-Id header reports the request's trace ID, if one was generated. + schema: + type: string + description: Specifies the request's trace ID. + content: + application/csv: + schema: + $ref: '#/components/schemas/InfluxQLCSVResponse' + text/csv: + schema: + $ref: '#/components/schemas/InfluxQLCSVResponse' + application/json: + schema: + $ref: '#/components/schemas/InfluxQLResponse' + examples: + influxql-chunk_size_2: + value: | + {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:55Z",90,"1"],["2016-05-19T18:37:56Z",90,"1"]],"partial":true}],"partial":true}]} + {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:57Z",90,"1"],["2016-05-19T18:37:58Z",90,"1"]]}]}]} + application/x-msgpack: + schema: + type: string + format: binary + '429': + description: Token is temporarily over quota. The Retry-After header describes when to try the read again. + headers: + Retry-After: + description: A non-negative decimal integer indicating the seconds to delay after the response is received. 
+ schema: + type: integer + format: int32 + default: + description: Error processing query + content: + application/json: + schema: + $ref: '#/components/schemas/Error' post: operationId: PostQueryV1 tags: diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index 36c3e08b0..b822af222 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -136,6 +136,141 @@ paths: schema: $ref: '#/components/schemas/Error' /query: + get: + operationId: GetV1ExecuteQuery + tags: + - Query + summary: Query using the InfluxDB v1 HTTP API + parameters: + - $ref: '#/components/parameters/TraceSpan' + - $ref: '#/components/parameters/AuthUserV1' + - $ref: '#/components/parameters/AuthPassV1' + - in: header + name: Accept + schema: + type: string + description: Specifies how query results should be encoded in the response. **Note:** With `application/csv`, query results include epoch timestamps instead of RFC3339 timestamps. + default: application/json + enum: + - application/json + - application/csv + - text/csv + - application/x-msgpack + - in: header + name: Accept-Encoding + description: The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand. + schema: + type: string + description: Specifies that the query response in the body should be encoded with gzip or not encoded with identity. + default: identity + enum: + - gzip + - identity + - in: query + name: chunked + description: | + If true, the response is divided into chunks of size `chunk_size`. + schema: + type: boolean + default: false + - in: query + name: chunk_size + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + schema: + type: integer + default: 10000 + - in: query + name: db + schema: + type: string + required: true + description: Bucket to query. + - in: query + name: pretty + description: | + If true, the JSON response is formatted in a human-readable format. + schema: + type: boolean + default: false + - in: query + name: q + description: Defines the influxql query to run. + required: true + schema: + type: string + - in: query + name: rp + schema: + type: string + description: Retention policy name. + - name: epoch + description: | + Formats timestamps as unix (epoch) timestamps with the specified precision + instead of RFC3339 timestamps with nanosecond precision. + in: query + schema: + type: string + enum: + - h + - m + - s + - ms + - u + - µ + - ns + responses: + '200': + description: Query results + headers: + Content-Encoding: + description: The Content-Encoding entity header is used to compress the media-type. When present, its value indicates which encodings were applied to the entity-body + schema: + type: string + description: Specifies that the response in the body is encoded with gzip or not encoded with identity. + default: identity + enum: + - gzip + - identity + Trace-Id: + description: The Trace-Id header reports the request's trace ID, if one was generated. + schema: + type: string + description: Specifies the request's trace ID. 
+ content: + application/csv: + schema: + $ref: '#/components/schemas/InfluxQLCSVResponse' + text/csv: + schema: + $ref: '#/components/schemas/InfluxQLCSVResponse' + application/json: + schema: + $ref: '#/components/schemas/InfluxQLResponse' + examples: + influxql-chunk_size_2: + value: | + {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:55Z",90,"1"],["2016-05-19T18:37:56Z",90,"1"]],"partial":true}],"partial":true}]} + {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:57Z",90,"1"],["2016-05-19T18:37:58Z",90,"1"]]}]}]} + application/x-msgpack: + schema: + type: string + format: binary + '429': + description: Token is temporarily over quota. The Retry-After header describes when to try the read again. + headers: + Retry-After: + description: A non-negative decimal integer indicating the seconds to delay after the response is received. + schema: + type: integer + format: int32 + default: + description: Error processing query + content: + application/json: + schema: + $ref: '#/components/schemas/Error' post: operationId: PostQueryV1 tags: From dbcd8dde73519e23cac4604bc7d7352df7cf93eb Mon Sep 17 00:00:00 2001 From: meelahme Date: Wed, 30 Jul 2025 15:34:42 -0700 Subject: [PATCH 004/122] docs(api): enhance POST /query with JSON chunk parameter support --- .../v1-compatibility/swaggerV1Compat.yml | 32 +++++++++++++++++++ .../v1-compatibility/swaggerV1Compat.yml | 32 +++++++++++++++++++ .../v1-compatibility/swaggerV1Compat.yml | 32 +++++++++++++++++++ 3 files changed, 96 insertions(+) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index ba5a4dff2..45dcee9e6 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -283,6 +283,38 @@ paths: text/plain: schema: type: string + application/json: + schema: + type: object + properties: + db: + type: string + description: Bucket to query. + q: + description: Defines the influxql query to run. + type: string + chunked: + description: | + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: | + A unix timestamp precision. + type: string + enum: + - h + - m + - s + - ms + - u + - µ + - ns parameters: - $ref: '#/components/parameters/TraceSpan' - $ref: '#/components/parameters/AuthUserV1' diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index b43a670cf..16ac580f3 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -282,6 +282,38 @@ paths: text/plain: schema: type: string + application/json: + schema: + type: object + properties: + db: + type: string + description: Bucket to query. + q: + description: Defines the influxql query to run. + type: string + chunked: + description: | + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. 
+ This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: | + A unix timestamp precision. + type: string + enum: + - h + - m + - s + - ms + - u + - µ + - ns parameters: - $ref: '#/components/parameters/TraceSpan' - $ref: '#/components/parameters/AuthUserV1' diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index b822af222..08f4a617e 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -282,6 +282,38 @@ paths: text/plain: schema: type: string + application/json: + schema: + type: object + properties: + db: + type: string + description: Bucket to query. + q: + description: Defines the influxql query to run. + type: string + chunked: + description: | + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: | + A unix timestamp precision. + type: string + enum: + - h + - m + - s + - ms + - u + - µ + - ns parameters: - $ref: '#/components/parameters/TraceSpan' - $ref: '#/components/parameters/AuthUserV1' From 05e74088bf7f575276092e05fb0df58a4f17d3f2 Mon Sep 17 00:00:00 2001 From: Gary Fowler <97983559+garylfowler@users.noreply.github.com> Date: Wed, 30 Jul 2025 14:41:45 -1000 Subject: [PATCH 005/122] Update get-started.md Since we now allow other types of keys than Open AI, removing the word Open from this page. --- content/influxdb3/explorer/get-started.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb3/explorer/get-started.md b/content/influxdb3/explorer/get-started.md index 244191130..863ecbfec 100644 --- a/content/influxdb3/explorer/get-started.md +++ b/content/influxdb3/explorer/get-started.md @@ -86,7 +86,7 @@ To use {{% product-name %}} to query data from InfluxDB 3, navigate to The _Data Explorer_ lets you explore the schema of your database and automatically builds SQL queries by either selecting columns in the _Schema Browser_ or by using _Natural Language_ with -the {{% product-name %}} OpenAI integration. +the {{% product-name %}} AI integration. For this getting started guide, use the Schema Browser to build a SQL query that returns data from the newly written sample data set. 
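For example, selecting the `room` and `temp` columns in the Schema Browser produces a query along these lines (a sketch; the measurement and column names assume the `home` sample data set written earlier in the guide):

```sql
-- Sketch: the kind of SQL the Schema Browser builds from column selections
SELECT room, temp, time
FROM home
WHERE time >= now() - INTERVAL '1 day'
ORDER BY time
```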
From 67c2d19e727c601a6f8a8fa7f020bd0585bae097 Mon Sep 17 00:00:00 2001 From: meelahme Date: Thu, 31 Jul 2025 11:56:20 -0700 Subject: [PATCH 006/122] docs(api): adding api examples to DVC and LVC --- .../distinct-value-cache/create.md | 53 ++++++++++++++++++ .../distinct-value-cache/query.md | 32 +++++++++++ .../distinct-value-cache/show.md | 39 +++++++++++++ .../last-value-cache/create.md | 55 ++++++++++++++++++- .../last-value-cache/delete.md | 26 +++++++++ .../influxdb3-admin/last-value-cache/show.md | 38 +++++++++++++ 6 files changed, 242 insertions(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/create.md b/content/shared/influxdb3-admin/distinct-value-cache/create.md index c897c0dbf..a11489f63 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/create.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/create.md @@ -69,6 +69,59 @@ influxdb3 create distinct_cache \ {{% /show-in %}} +## Use the HTTP API + +You can also create a Distinct Value Cache using the [InfluxDB v3 HTTP API](/influxdb3/version/api/v3/). Send a `POST` request to the `/api/v3/configure/distinct_cache` endpoint. + +{{% code-placeholders "(DATABASE|TABLE|DVC)_NAME|AUTH_TOKEN|COLUMNS|MAX_(CARDINALITY|AGE)" %}} + +```bash +curl -X POST "https://localhost:8181/api/v3/configure/distinct_cache" \ + -H "Authorization: Bearer AUTH_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "db": "DATABASE_NAME", + "table": "TABLE_NAME", + "name": "DVC_NAME", + "columns": ["COLUMNS"], + "max_cardinality": MAX_CARDINALITY, + "max_age": MAX_AGE + }' +``` + +{{% /code-placeholders %}} + +### Example + +```bash +curl -X POST "https://localhost:8181/api/v3/configure/distinct_cache" \ + -H "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" \ + -H "Content-Type: application/json" \ + -d '{ + "db": "example-db", + "table": "wind_data", + "name": "windDistinctCache", + "columns": ["country", "county", "city"], + "max_cardinality": 10000, + "max_age": 86400 + }' +``` + +**Response codes:** + +- `201` : Success. The distinct cache has been created. +- `204` : Not created. A distinct cache with this configuration already exists. +- `400` : Bad request. + + +> [!Note] +> #### API parameter differences +> +> - **Columns format**: The API uses a JSON array (`["country", "county", "city"]`) +> instead of the CLI's comma-delimited format (`country,county,city`). +> - **Maximum age format**: The API uses seconds (`86400`) instead of the CLI's +> [humantime format](https://docs.rs/humantime/latest/humantime/fn.parse_duration.html) (`24h`, `1 day`). + Replace the following: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: diff --git a/content/shared/influxdb3-admin/distinct-value-cache/query.md b/content/shared/influxdb3-admin/distinct-value-cache/query.md index 55e0ce4d0..4fbd41e97 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/query.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/query.md @@ -31,3 +31,35 @@ FROM WHERE country = 'Spain' ``` + +## Use the HTTP API + +You can query cached data using the [InfluxDB v3 SQL query API](/influxdb3/version/api/v3/). Send a `POST` request to the `/api/v3/query_sql` endpoint. 
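+
+The SQL passed in the request's `q` field calls the
+[`distinct_cache()`](/influxdb3/version/reference/sql/functions/cache/#distinct_cache)
+table function. For example (a sketch, assuming the `windDistinctCache` created earlier):
+
+```sql
+-- Sketch: cached columns from the windDistinctCache example above
+SELECT country, county, city FROM distinct_cache('wind_data', 'windDistinctCache')
+```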
+ +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TABLE_NAME|CACHE_NAME" %}} + +```bash +curl -X POST "https://localhost:8181/api/v3/query_sql" \ + -H "Authorization: Bearer AUTH_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "db": "DATABASE_NAME", + "q": "SELECT * FROM last_cache('\''TABLE_NAME'\'', '\''CACHE_NAME'\'')", + "format": "json" + }' +``` + +{{% /code-placeholders %}} + +## Example with WHERE clause + +```bash +curl -X POST "https://localhost:8181/api/v3/query_sql" \ + -H "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" \ + -H "Content-Type: application/json" \ + -d '{ + "db": "example-db", + "q": "SELECT room, temp FROM last_cache('\''home'\'', '\''homeCache'\'') WHERE room = '\''Kitchen'\''", + "format": "json" + }' +``` diff --git a/content/shared/influxdb3-admin/distinct-value-cache/show.md b/content/shared/influxdb3-admin/distinct-value-cache/show.md index 0de0e2ac0..cc778fcf2 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/show.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/show.md @@ -67,3 +67,42 @@ In the examples above, replace the following: - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{< product-name >}} {{% show-in "enterprise" %}}admin {{% /show-in %}} authentication token + +## Use the HTTP API + +You can query cache information using the [InfluxDB v3 SQL query API](/influxdb3/version/api/v3/). Send a `POST` request to the `/api/v3/query_sql` endpoint. + +### Query all caches + +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} + +```bash +curl -X POST "https://localhost:8181/api/v3/query_sql" \ + -H "Authorization: Bearer AUTH_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "db": "DATABASE_NAME", + "q": "SELECT * FROM system.last_caches", + "format": "json" + }' + ``` + +{{% /code-placeholders %}} + +## Query specific cache details + +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|CACHE_NAME" %}} + +```bash +curl -X POST "https://localhost:8181/api/v3/query_sql" \ + -H "Authorization: Bearer AUTH_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "db": "DATABASE_NAME", + "q": "SELECT * FROM system.last_caches WHERE name = '\''CACHE_NAME'\''", + "format": "json" + }' +``` + +{{% /code-placeholders %}} + diff --git a/content/shared/influxdb3-admin/last-value-cache/create.md b/content/shared/influxdb3-admin/last-value-cache/create.md index febc66f83..873e64214 100644 --- a/content/shared/influxdb3-admin/last-value-cache/create.md +++ b/content/shared/influxdb3-admin/last-value-cache/create.md @@ -80,6 +80,59 @@ influxdb3 create last_cache \ {{% /show-in %}} +## Use the HTTP API + +You can also create a Last Value Cache using the [InfluxDB v3 HTTP API](/influxdb3/version/api/v3/). Send a `POST` request to the `/api/v3/configure/last_cache` endpoint. 
+ +{{% code-placeholders "(DATABASE|TABLE|LVC)_NAME|AUTH_TOKEN|(KEY|VALUE)_COLUMNS|COUNT|TTL" %}} + +```bash +curl -X POST "https://localhost:8181/api/v3/configure/last_cache" \ + -H "Authorization: Bearer AUTH_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "db": "DATABASE_NAME", + "table": "TABLE_NAME", + "name": "LVC_NAME", + "key_columns": ["KEY_COLUMNS"], + "value_columns": ["VALUE_COLUMNS"], + "count": COUNT, + "ttl": TTL + }' + ``` + + {{% /code-placeholders %}} + + ### Example + +```bash + curl -X POST "https://localhost:8181/api/v3/configure/last_cache" \ + -H "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" \ + -H "Content-Type: application/json" \ + -d '{ + "db": "example-db", + "table": "home", + "name": "homeLastCache", + "key_columns": ["room", "wall"], + "value_columns": ["temp", "hum", "co"], + "count": 5, + "ttl": 14400 + }' +``` + +**Response codes:** + +- `201` : Success. Last cache created. +- `400` : Bad request. +- `401` : Unauthorized. +- `404` : Cache not found. +- `409` : Cache already exists. + +> [!Note] +> #### API parameter differences +> Column format: The API uses JSON arrays (["room", "wall"]) instead of the CLI's comma-delimited format (room,wall). +> TTL format: The API uses seconds (14400) instead of the CLI's humantime format (4h, 4 hours). + Replace the following: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: @@ -116,4 +169,4 @@ The cache imports the distinct values from the table and starts caching them. > > The LVC is stored in memory, so it's important to consider the size and persistence > of the cache. For more information, see -> [Important things to know about the Last Value Cache](/influxdb3/version/admin/last-value-cache/#important-things-to-know-about-the-last-value-cache). +> [Important things to know about the Last Value Cache](/influxdb3/version/admin/last-value-cache/#important-things-to-know-about-the-last-value-cache) diff --git a/content/shared/influxdb3-admin/last-value-cache/delete.md b/content/shared/influxdb3-admin/last-value-cache/delete.md index b06ba5eb9..3ea2261aa 100644 --- a/content/shared/influxdb3-admin/last-value-cache/delete.md +++ b/content/shared/influxdb3-admin/last-value-cache/delete.md @@ -23,6 +23,32 @@ influxdb3 delete last_cache \ ``` {{% /code-placeholders %}} +## Use the HTTP API + +You can also delete a Last Value Cache using the [InfluxDB v3 HTTP API](/influxdb3/version/api/v3/). Send a `DELETE` request to the `/api/v3/configure/last_cache` endpoint with query parameters. + +{{% code-placeholders "(DATABASE|TABLE|LVC)_NAME|AUTH_TOKEN" %}} + +```bash +curl -X DELETE "https://localhost:8181/api/v3/configure/last_cache?db=DATABASE_NAME&table=TABLE_NAME&name=LVC_NAME" \ + -H "Authorization: Bearer AUTH_TOKEN" +{{% /code-placeholders %}} +``` + +## Example + +```bash +curl -X DELETE "https://localhost:8181/api/v3/configure/last_cache?db=example-db&table=home&name=homeLastCache" \ + -H "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" +``` + +**Response codes:** + +- `200` : Success. The last cache has been deleted. +- `400` : Bad request. +- `401` : Unauthorized. +- `404` : Cache not found. 
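+
+To confirm the deletion, query the `system.last_caches` system table and check that
+the cache no longer appears (a sketch; the host, token, and database are placeholders):
+
+```bash
+# Sketch: list the remaining Last Value Caches after the delete
+curl -X POST "https://localhost:8181/api/v3/query_sql" \
+  -H "Authorization: Bearer AUTH_TOKEN" \
+  -H "Content-Type: application/json" \
+  -d '{
+    "db": "example-db",
+    "q": "SELECT * FROM system.last_caches",
+    "format": "json"
+  }'
+```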
+ Replace the following: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: diff --git a/content/shared/influxdb3-admin/last-value-cache/show.md b/content/shared/influxdb3-admin/last-value-cache/show.md index cf0aa7019..c9838a9f9 100644 --- a/content/shared/influxdb3-admin/last-value-cache/show.md +++ b/content/shared/influxdb3-admin/last-value-cache/show.md @@ -66,3 +66,41 @@ In the examples above, replace the following: - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{< product-name >}} {{% show-in "enterprise" %}}admin {{% /show-in %}} authentication token + +## Use the HTTP API + +You can query cache information using the [InfluxDB v3 SQL query API](/influxdb3/version/api/v3/). Send a `POST` request to the `/api/v3/query_sql` endpoint. + +### Query all caches + +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} + +```bash +curl -X POST "https://localhost:8181/api/v3/query_sql" \ + -H "Authorization: Bearer AUTH_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "db": "DATABASE_NAME", + "q": "SELECT * FROM system.last_caches", + "format": "json" + }' + ``` + +{{% /code-placeholders %}} + +## Query specific cache details + +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|CACHE_NAME" %}} + +```bash +curl -X POST "https://localhost:8181/api/v3/query_sql" \ + -H "Authorization: Bearer AUTH_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "db": "DATABASE_NAME", + "q": "SELECT * FROM system.last_caches WHERE name = '\''CACHE_NAME'\''", + "format": "json" + }' +``` + +{{% /code-placeholders %}} \ No newline at end of file From 8a74c7da12f4c9427d2cd25ef46154620e6aefec Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Thu, 31 Jul 2025 12:02:28 -0700 Subject: [PATCH 007/122] Update content/shared/influxdb3-admin/distinct-value-cache/show.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- content/shared/influxdb3-admin/distinct-value-cache/show.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/show.md b/content/shared/influxdb3-admin/distinct-value-cache/show.md index cc778fcf2..f02229d14 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/show.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/show.md @@ -82,7 +82,7 @@ curl -X POST "https://localhost:8181/api/v3/query_sql" \ -H "Content-Type: application/json" \ -d '{ "db": "DATABASE_NAME", - "q": "SELECT * FROM system.last_caches", + "q": "SELECT * FROM system.distinct_caches", "format": "json" }' ``` From d2904df598feacecffc8f7b1547dbac9af149feb Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Thu, 31 Jul 2025 12:02:40 -0700 Subject: [PATCH 008/122] Update content/shared/influxdb3-admin/distinct-value-cache/show.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- content/shared/influxdb3-admin/distinct-value-cache/show.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/show.md b/content/shared/influxdb3-admin/distinct-value-cache/show.md index f02229d14..306ad3099 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/show.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/show.md @@ -99,7 +99,7 @@ curl -X POST "https://localhost:8181/api/v3/query_sql" \ -H "Content-Type: application/json" \ -d '{ "db": "DATABASE_NAME", - "q": "SELECT * FROM 
system.last_caches WHERE name = '\''CACHE_NAME'\''", + "q": "SELECT * FROM system.distinct_caches WHERE name = '\''CACHE_NAME'\''", "format": "json" }' ``` From faa973a86c29efc02542fab6cfb10a5c42ea8027 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Thu, 31 Jul 2025 12:02:52 -0700 Subject: [PATCH 009/122] Update content/shared/influxdb3-admin/last-value-cache/delete.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- content/shared/influxdb3-admin/last-value-cache/delete.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/content/shared/influxdb3-admin/last-value-cache/delete.md b/content/shared/influxdb3-admin/last-value-cache/delete.md index 3ea2261aa..dbd090fe1 100644 --- a/content/shared/influxdb3-admin/last-value-cache/delete.md +++ b/content/shared/influxdb3-admin/last-value-cache/delete.md @@ -32,8 +32,6 @@ You can also delete a Last Value Cache using the [InfluxDB v3 HTTP API](/influxd ```bash curl -X DELETE "https://localhost:8181/api/v3/configure/last_cache?db=DATABASE_NAME&table=TABLE_NAME&name=LVC_NAME" \ -H "Authorization: Bearer AUTH_TOKEN" -{{% /code-placeholders %}} -``` ## Example From 5419f10fa5f59884fa187352043a620f5cc34ed9 Mon Sep 17 00:00:00 2001 From: meelahme Date: Thu, 31 Jul 2025 12:47:15 -0700 Subject: [PATCH 010/122] fix(docs): close code-placeholders shortcode in LVC delete guide --- content/shared/influxdb3-admin/last-value-cache/create.md | 2 +- content/shared/influxdb3-admin/last-value-cache/delete.md | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/content/shared/influxdb3-admin/last-value-cache/create.md b/content/shared/influxdb3-admin/last-value-cache/create.md index 873e64214..db892cea6 100644 --- a/content/shared/influxdb3-admin/last-value-cache/create.md +++ b/content/shared/influxdb3-admin/last-value-cache/create.md @@ -169,4 +169,4 @@ The cache imports the distinct values from the table and starts caching them. > > The LVC is stored in memory, so it's important to consider the size and persistence > of the cache. For more information, see -> [Important things to know about the Last Value Cache](/influxdb3/version/admin/last-value-cache/#important-things-to-know-about-the-last-value-cache) +> [Important things to know about the Last Value Cache.](/influxdb3/version/admin/last-value-cache/#important-things-to-know-about-the-last-value-cache) diff --git a/content/shared/influxdb3-admin/last-value-cache/delete.md b/content/shared/influxdb3-admin/last-value-cache/delete.md index dbd090fe1..23e0765cd 100644 --- a/content/shared/influxdb3-admin/last-value-cache/delete.md +++ b/content/shared/influxdb3-admin/last-value-cache/delete.md @@ -28,10 +28,11 @@ influxdb3 delete last_cache \ You can also delete a Last Value Cache using the [InfluxDB v3 HTTP API](/influxdb3/version/api/v3/). Send a `DELETE` request to the `/api/v3/configure/last_cache` endpoint with query parameters. 
{{% code-placeholders "(DATABASE|TABLE|LVC)_NAME|AUTH_TOKEN" %}} - ```bash curl -X DELETE "https://localhost:8181/api/v3/configure/last_cache?db=DATABASE_NAME&table=TABLE_NAME&name=LVC_NAME" \ -H "Authorization: Bearer AUTH_TOKEN" +``` +{{% /code-placeholders %}} ## Example From 105a06b73bf3e22a6efa7081c9ad75c4d31650b5 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Thu, 31 Jul 2025 23:21:23 -0700 Subject: [PATCH 011/122] Update api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index 08f4a617e..dfba93fa6 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -196,7 +196,7 @@ paths: default: false - in: query name: q - description: Defines the influxql query to run. + description: Defines the InfluxQL query to run. required: true schema: type: string From ed7fa9016dcca75936cd1ab21c08bd7a9fd58167 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Thu, 31 Jul 2025 23:21:31 -0700 Subject: [PATCH 012/122] Update api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index dfba93fa6..751b96ea9 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -290,7 +290,7 @@ paths: type: string description: Bucket to query. q: - description: Defines the influxql query to run. + description: Defines the InfluxQL query to run. type: string chunked: description: | From aa766edd7037656b972f2202c42d67fbec30ff49 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Thu, 31 Jul 2025 23:21:38 -0700 Subject: [PATCH 013/122] Update api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../cloud-serverless/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index 16ac580f3..cdbcf785a 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -196,7 +196,7 @@ paths: default: false - in: query name: q - description: Defines the influxql query to run. + description: Defines the InfluxQL query to run. 
required: true schema: type: string From e9bbdf1823bad2af01ddbda7174c185fe5936f4f Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Thu, 31 Jul 2025 23:21:50 -0700 Subject: [PATCH 014/122] Update api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../cloud-serverless/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index cdbcf785a..858effb50 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -290,7 +290,7 @@ paths: type: string description: Bucket to query. q: - description: Defines the influxql query to run. + description: Defines the InfluxQL query to run. type: string chunked: description: | From e7723172a5af617c589de83efed3719f59b15c9e Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Thu, 31 Jul 2025 23:21:58 -0700 Subject: [PATCH 015/122] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../cloud-dedicated/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index 45dcee9e6..fdf030435 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -197,7 +197,7 @@ paths: default: false - in: query name: q - description: Defines the influxql query to run. + description: Defines the InfluxQL query to run. required: true schema: type: string From 33ace566326678fdcd45ce262c05d4be44226b25 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Thu, 31 Jul 2025 23:22:06 -0700 Subject: [PATCH 016/122] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../cloud-dedicated/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index fdf030435..9c843ef9f 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -291,7 +291,7 @@ paths: type: string description: Bucket to query. q: - description: Defines the influxql query to run. + description: Defines the InfluxQL query to run. 
type: string chunked: description: | From 7140e46f45901b608d2eff3b368a907ac7959fd4 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:17:27 -0700 Subject: [PATCH 017/122] Update content/shared/influxdb3-admin/distinct-value-cache/query.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/distinct-value-cache/query.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/query.md b/content/shared/influxdb3-admin/distinct-value-cache/query.md index 4fbd41e97..d6656fa9b 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/query.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/query.md @@ -44,7 +44,7 @@ curl -X POST "https://localhost:8181/api/v3/query_sql" \ -H "Content-Type: application/json" \ -d '{ "db": "DATABASE_NAME", - "q": "SELECT * FROM last_cache('\''TABLE_NAME'\'', '\''CACHE_NAME'\'')", + "q": "SELECT * FROM distinct_cache('\''TABLE_NAME'\'', '\''CACHE_NAME'\'')", "format": "json" }' ``` From 0e58bb864ac9c4ca837724e0862b09850d087665 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:18:11 -0700 Subject: [PATCH 018/122] Update content/shared/influxdb3-admin/distinct-value-cache/query.md Co-authored-by: Jason Stirnaman --- .../shared/influxdb3-admin/distinct-value-cache/query.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/query.md b/content/shared/influxdb3-admin/distinct-value-cache/query.md index d6656fa9b..63349341b 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/query.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/query.md @@ -34,7 +34,11 @@ WHERE ## Use the HTTP API -You can query cached data using the [InfluxDB v3 SQL query API](/influxdb3/version/api/v3/). Send a `POST` request to the `/api/v3/query_sql` endpoint. +To use the HTTP API to query cached data, send a `GET` or `POST` request to the `/api/v3/query_sql` endpoint and include the [`distinct_cache()`](/influxdb3/version/reference/sql/functions/cache/#distinct_cache) function in your query. + +{{% api-endpoint method="GET" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/GetExecuteQuerySQL" %}} + +{{% api-endpoint method="POST" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/PostExecuteQuerySQL" %}} {{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TABLE_NAME|CACHE_NAME" %}} From f39af3f15a10bd1929593fc9289314eff78eb0d5 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:18:24 -0700 Subject: [PATCH 019/122] Update content/shared/influxdb3-admin/distinct-value-cache/show.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/distinct-value-cache/show.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/show.md b/content/shared/influxdb3-admin/distinct-value-cache/show.md index 306ad3099..2541d606a 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/show.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/show.md @@ -70,7 +70,11 @@ In the examples above, replace the following: ## Use the HTTP API -You can query cache information using the [InfluxDB v3 SQL query API](/influxdb3/version/api/v3/). Send a `POST` request to the `/api/v3/query_sql` endpoint. 
+To use the HTTP API to query and output cache information from the system table, send a `GET` or `POST` request to the `/api/v3/query_sql` endpoint. + +{{% api-endpoint method="GET" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/GetExecuteQuerySQL" %}} + +{{% api-endpoint method="POST" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/PostExecuteQuerySQL" %}} ### Query all caches From 0f5056e239df597958a9540e1bb7aceec6a63518 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:18:32 -0700 Subject: [PATCH 020/122] Update content/shared/influxdb3-admin/last-value-cache/create.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/last-value-cache/create.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/last-value-cache/create.md b/content/shared/influxdb3-admin/last-value-cache/create.md index db892cea6..720963e79 100644 --- a/content/shared/influxdb3-admin/last-value-cache/create.md +++ b/content/shared/influxdb3-admin/last-value-cache/create.md @@ -82,7 +82,9 @@ influxdb3 create last_cache \ ## Use the HTTP API -You can also create a Last Value Cache using the [InfluxDB v3 HTTP API](/influxdb3/version/api/v3/). Send a `POST` request to the `/api/v3/configure/last_cache` endpoint. +To use the HTTP API to create a Last Value Cache, send a `POST` request to the `/api/v3/configure/last_cache` endpoint. + +{{% api-endpoint method="POST" endpoint="/api/v3/configure/last_cache" api-ref="/influxdb3/version/api/v3/#operation/PostConfigureLastCache" %}} {{% code-placeholders "(DATABASE|TABLE|LVC)_NAME|AUTH_TOKEN|(KEY|VALUE)_COLUMNS|COUNT|TTL" %}} From 99715ce70dc08469570261257a19e097b66c20b3 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:18:46 -0700 Subject: [PATCH 021/122] Update content/shared/influxdb3-admin/last-value-cache/show.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/last-value-cache/show.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/content/shared/influxdb3-admin/last-value-cache/show.md b/content/shared/influxdb3-admin/last-value-cache/show.md index c9838a9f9..9fe027e86 100644 --- a/content/shared/influxdb3-admin/last-value-cache/show.md +++ b/content/shared/influxdb3-admin/last-value-cache/show.md @@ -77,9 +77,8 @@ You can query cache information using the [InfluxDB v3 SQL query API](/influxdb3 ```bash curl -X POST "https://localhost:8181/api/v3/query_sql" \ - -H "Authorization: Bearer AUTH_TOKEN" \ - -H "Content-Type: application/json" \ - -d '{ + --header "Authorization: Bearer AUTH_TOKEN" \ + --json '{ "db": "DATABASE_NAME", "q": "SELECT * FROM system.last_caches", "format": "json" From 9c9f2c69995e9c8db32779758e991803e47e866b Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:18:56 -0700 Subject: [PATCH 022/122] Update content/shared/influxdb3-admin/last-value-cache/show.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/last-value-cache/show.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/content/shared/influxdb3-admin/last-value-cache/show.md b/content/shared/influxdb3-admin/last-value-cache/show.md index 9fe027e86..27289a6db 100644 --- a/content/shared/influxdb3-admin/last-value-cache/show.md +++ b/content/shared/influxdb3-admin/last-value-cache/show.md @@ -93,9 +93,8 
@@ curl -X POST "https://localhost:8181/api/v3/query_sql" \ ```bash curl -X POST "https://localhost:8181/api/v3/query_sql" \ - -H "Authorization: Bearer AUTH_TOKEN" \ - -H "Content-Type: application/json" \ - -d '{ + --header "Authorization: Bearer AUTH_TOKEN" \ + --json '{ "db": "DATABASE_NAME", "q": "SELECT * FROM system.last_caches WHERE name = '\''CACHE_NAME'\''", "format": "json" From c281a6e10191ab4d681edfe63e99c2192b3f5bf7 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:19:14 -0700 Subject: [PATCH 023/122] Update content/shared/influxdb3-admin/distinct-value-cache/create.md Co-authored-by: Jason Stirnaman --- .../shared/influxdb3-admin/distinct-value-cache/create.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/create.md b/content/shared/influxdb3-admin/distinct-value-cache/create.md index a11489f63..229fd665a 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/create.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/create.md @@ -95,9 +95,8 @@ curl -X POST "https://localhost:8181/api/v3/configure/distinct_cache" \ ```bash curl -X POST "https://localhost:8181/api/v3/configure/distinct_cache" \ - -H "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" \ - -H "Content-Type: application/json" \ - -d '{ + --header "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" \ + --json '{ "db": "example-db", "table": "wind_data", "name": "windDistinctCache", From a4023bae6dcad70a715e9376a32aee94793aeab8 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:19:25 -0700 Subject: [PATCH 024/122] Update content/shared/influxdb3-admin/distinct-value-cache/show.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/distinct-value-cache/show.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/show.md b/content/shared/influxdb3-admin/distinct-value-cache/show.md index 2541d606a..3326a8467 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/show.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/show.md @@ -99,9 +99,8 @@ curl -X POST "https://localhost:8181/api/v3/query_sql" \ ```bash curl -X POST "https://localhost:8181/api/v3/query_sql" \ - -H "Authorization: Bearer AUTH_TOKEN" \ - -H "Content-Type: application/json" \ - -d '{ + --header "Authorization: Bearer AUTH_TOKEN" \ + --json '{ "db": "DATABASE_NAME", "q": "SELECT * FROM system.distinct_caches WHERE name = '\''CACHE_NAME'\''", "format": "json" From aad8d899aea7d2e9061b4c0ebf79162cd3eabf1f Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:19:40 -0700 Subject: [PATCH 025/122] Update content/shared/influxdb3-admin/distinct-value-cache/query.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/distinct-value-cache/query.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/query.md b/content/shared/influxdb3-admin/distinct-value-cache/query.md index 63349341b..952ce9d39 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/query.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/query.md @@ -59,9 +59,8 @@ curl -X POST "https://localhost:8181/api/v3/query_sql" \ ```bash curl -X POST "https://localhost:8181/api/v3/query_sql" \ - -H 
"Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" \ - -H "Content-Type: application/json" \ - -d '{ + --header "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" \ + --json '{ "db": "example-db", "q": "SELECT room, temp FROM last_cache('\''home'\'', '\''homeCache'\'') WHERE room = '\''Kitchen'\''", "format": "json" From 866ee9b11d26acfea5bcfe6e2ffdb31f72cfa8bc Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:21:05 -0700 Subject: [PATCH 026/122] Update content/shared/influxdb3-admin/distinct-value-cache/create.md Co-authored-by: Jason Stirnaman --- .../shared/influxdb3-admin/distinct-value-cache/create.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/create.md b/content/shared/influxdb3-admin/distinct-value-cache/create.md index 229fd665a..d0e89dc92 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/create.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/create.md @@ -77,9 +77,8 @@ You can also create a Distinct Value Cache using the [InfluxDB v3 HTTP API](/inf ```bash curl -X POST "https://localhost:8181/api/v3/configure/distinct_cache" \ - -H "Authorization: Bearer AUTH_TOKEN" \ - -H "Content-Type: application/json" \ - -d '{ + --header "Authorization: Bearer AUTH_TOKEN" \ + --json '{ "db": "DATABASE_NAME", "table": "TABLE_NAME", "name": "DVC_NAME", From 0384cad10091c6841438ee84b7e5eeff507ed006 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:24:38 -0700 Subject: [PATCH 027/122] Update content/shared/influxdb3-admin/distinct-value-cache/query.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/distinct-value-cache/query.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/query.md b/content/shared/influxdb3-admin/distinct-value-cache/query.md index 952ce9d39..9ec48d4ee 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/query.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/query.md @@ -44,9 +44,8 @@ To use the HTTP API to query cached data, send a `GET` or `POST` request to the ```bash curl -X POST "https://localhost:8181/api/v3/query_sql" \ - -H "Authorization: Bearer AUTH_TOKEN" \ - -H "Content-Type: application/json" \ - -d '{ + --header "Authorization: Bearer AUTH_TOKEN" \ + --json '{ "db": "DATABASE_NAME", "q": "SELECT * FROM distinct_cache('\''TABLE_NAME'\'', '\''CACHE_NAME'\'')", "format": "json" From 057de1230b225ef0fd5b287433ea26d21bc79320 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:24:48 -0700 Subject: [PATCH 028/122] Update content/shared/influxdb3-admin/distinct-value-cache/show.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/distinct-value-cache/show.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/show.md b/content/shared/influxdb3-admin/distinct-value-cache/show.md index 3326a8467..fd825711d 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/show.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/show.md @@ -82,9 +82,8 @@ To use the HTTP API to query and output cache information from the system table, ```bash curl -X POST "https://localhost:8181/api/v3/query_sql" \ - -H "Authorization: Bearer AUTH_TOKEN" \ - -H "Content-Type: 
application/json" \ - -d '{ + --header "Authorization: Bearer AUTH_TOKEN" \ + --json '{ "db": "DATABASE_NAME", "q": "SELECT * FROM system.distinct_caches", "format": "json" From 419fda92b5b0b95d39a97d23872b3ff14f938d28 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:25:19 -0700 Subject: [PATCH 029/122] Update content/shared/influxdb3-admin/last-value-cache/create.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/last-value-cache/create.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/content/shared/influxdb3-admin/last-value-cache/create.md b/content/shared/influxdb3-admin/last-value-cache/create.md index 720963e79..209aefcf7 100644 --- a/content/shared/influxdb3-admin/last-value-cache/create.md +++ b/content/shared/influxdb3-admin/last-value-cache/create.md @@ -90,9 +90,8 @@ To use the HTTP API to create a Last Value Cache, send a `POST` request to the ` ```bash curl -X POST "https://localhost:8181/api/v3/configure/last_cache" \ - -H "Authorization: Bearer AUTH_TOKEN" \ - -H "Content-Type: application/json" \ - -d '{ + --header "Authorization: Bearer AUTH_TOKEN" \ + --json '{ "db": "DATABASE_NAME", "table": "TABLE_NAME", "name": "LVC_NAME", From a5c19e60cde28af2aea0747b4f2cb50436da4c4e Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:26:14 -0700 Subject: [PATCH 030/122] Update content/shared/influxdb3-admin/last-value-cache/create.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/last-value-cache/create.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/content/shared/influxdb3-admin/last-value-cache/create.md b/content/shared/influxdb3-admin/last-value-cache/create.md index 209aefcf7..5e57de077 100644 --- a/content/shared/influxdb3-admin/last-value-cache/create.md +++ b/content/shared/influxdb3-admin/last-value-cache/create.md @@ -108,9 +108,8 @@ curl -X POST "https://localhost:8181/api/v3/configure/last_cache" \ ```bash curl -X POST "https://localhost:8181/api/v3/configure/last_cache" \ - -H "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" \ - -H "Content-Type: application/json" \ - -d '{ + --header "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" \ + --json '{ "db": "example-db", "table": "home", "name": "homeLastCache", From 5ea874998c24601145606e9353e387136b0bb0b5 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:26:27 -0700 Subject: [PATCH 031/122] Update content/shared/influxdb3-admin/last-value-cache/delete.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/last-value-cache/delete.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/last-value-cache/delete.md b/content/shared/influxdb3-admin/last-value-cache/delete.md index 23e0765cd..ba3d55b9b 100644 --- a/content/shared/influxdb3-admin/last-value-cache/delete.md +++ b/content/shared/influxdb3-admin/last-value-cache/delete.md @@ -25,7 +25,9 @@ influxdb3 delete last_cache \ ## Use the HTTP API -You can also delete a Last Value Cache using the [InfluxDB v3 HTTP API](/influxdb3/version/api/v3/). Send a `DELETE` request to the `/api/v3/configure/last_cache` endpoint with query parameters. +To use the HTTP API to delete a Last Value Cache, send a `DELETE` request to the `/api/v3/configure/last_cache` endpoint with query parameters. 
+ +{{% api-endpoint method="DELETE" endpoint="/api/v3/configure/last_cache" api-ref="/influxdb3/core/api/v3/#operation/DeleteConfigureLastCache" %}} {{% code-placeholders "(DATABASE|TABLE|LVC)_NAME|AUTH_TOKEN" %}} ```bash From 5e465a412163c25843fd1314bdbb082874d985ea Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:26:40 -0700 Subject: [PATCH 032/122] Update content/shared/influxdb3-admin/last-value-cache/delete.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/last-value-cache/delete.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/last-value-cache/delete.md b/content/shared/influxdb3-admin/last-value-cache/delete.md index ba3d55b9b..4b7563084 100644 --- a/content/shared/influxdb3-admin/last-value-cache/delete.md +++ b/content/shared/influxdb3-admin/last-value-cache/delete.md @@ -32,7 +32,7 @@ To use the HTTP API to delete a Last Value Cache, send a `DELETE` request to the {{% code-placeholders "(DATABASE|TABLE|LVC)_NAME|AUTH_TOKEN" %}} ```bash curl -X DELETE "https://localhost:8181/api/v3/configure/last_cache?db=DATABASE_NAME&table=TABLE_NAME&name=LVC_NAME" \ - -H "Authorization: Bearer AUTH_TOKEN" + --header "Authorization: Bearer AUTH_TOKEN" ``` {{% /code-placeholders %}} From 61fe70ad8ed698e56ab6dc24da1209da527e6435 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:26:50 -0700 Subject: [PATCH 033/122] Update content/shared/influxdb3-admin/last-value-cache/delete.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/last-value-cache/delete.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/last-value-cache/delete.md b/content/shared/influxdb3-admin/last-value-cache/delete.md index 4b7563084..8f61adaa6 100644 --- a/content/shared/influxdb3-admin/last-value-cache/delete.md +++ b/content/shared/influxdb3-admin/last-value-cache/delete.md @@ -40,7 +40,7 @@ curl -X DELETE "https://localhost:8181/api/v3/configure/last_cache?db=DATABASE_N ```bash curl -X DELETE "https://localhost:8181/api/v3/configure/last_cache?db=example-db&table=home&name=homeLastCache" \ - -H "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" + --header "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" ``` **Response codes:** From 0f46f108cacddcee695b31829dd19aa875cadcea Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:27:00 -0700 Subject: [PATCH 034/122] Update content/shared/influxdb3-admin/last-value-cache/show.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/last-value-cache/show.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/content/shared/influxdb3-admin/last-value-cache/show.md b/content/shared/influxdb3-admin/last-value-cache/show.md index 27289a6db..623e1c57f 100644 --- a/content/shared/influxdb3-admin/last-value-cache/show.md +++ b/content/shared/influxdb3-admin/last-value-cache/show.md @@ -69,9 +69,13 @@ In the examples above, replace the following: ## Use the HTTP API -You can query cache information using the [InfluxDB v3 SQL query API](/influxdb3/version/api/v3/). Send a `POST` request to the `/api/v3/query_sql` endpoint. +To use the HTTP API to query and output cache information from the system table, send a `GET` or `POST` request to the `/api/v3/query_sql` endpoint. 
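For the `GET` variant mentioned above, the same fields can be passed as URL query parameters instead of a JSON body. A minimal sketch, assuming the `db`, `q`, and `format` parameters mirror the JSON body fields shown in these hunks (host, token, and names are placeholders):

```bash
# --get converts each --data-urlencode pair into a URL query parameter
curl --get "https://localhost:8181/api/v3/query_sql" \
  --header "Authorization: Bearer AUTH_TOKEN" \
  --data-urlencode "db=DATABASE_NAME" \
  --data-urlencode "q=SELECT * FROM system.distinct_caches" \
  --data-urlencode "format=json"
```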
-### Query all caches +{{% api-endpoint method="GET" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/GetExecuteQuerySQL" %}} + +{{% api-endpoint method="POST" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/PostExecuteQuerySQL" %}} + +### Query all last value caches {{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} From 36085a80ea90b218210113a1016c16c356d9f1a1 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:42:13 -0700 Subject: [PATCH 035/122] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../cloud-dedicated/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index 9c843ef9f..024ecaaf6 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -289,7 +289,7 @@ paths: properties: db: type: string - description: Bucket to query. + description: Database to query from. q: description: Defines the InfluxQL query to run. type: string From 7cc0c38df6be582540f0da7cd656e6197df3ae40 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:42:28 -0700 Subject: [PATCH 036/122] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../cloud-dedicated/v1-compatibility/swaggerV1Compat.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index 024ecaaf6..7c661ce47 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -290,6 +290,10 @@ paths: db: type: string description: Database to query from. + rp: + description: | + The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention). + type: string q: description: Defines the InfluxQL query to run. 
type: string From 6ba77d4808f9a413b5f065abed19144ae4f78951 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:42:36 -0700 Subject: [PATCH 037/122] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../cloud-dedicated/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index 7c661ce47..c2fd7a753 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -138,7 +138,7 @@ paths: $ref: '#/components/schemas/Error' /query: get: - operationId: GetV1ExecuteQuery + operationId: GetQueryV1 tags: - Query summary: Query using the InfluxDB v1 HTTP API From 6efb013714df8e42bdd0d6707ffecb2a539ab5c4 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:43:13 -0700 Subject: [PATCH 038/122] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../cloud-dedicated/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index c2fd7a753..d8c5df5f9 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -187,7 +187,7 @@ paths: schema: type: string required: true - description: Bucket to query. + description: Database to query from. - in: query name: pretty description: | From 6a20bb037218f175a1858551903fd7c17365ff9e Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:43:22 -0700 Subject: [PATCH 039/122] Update api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../cloud-serverless/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index 858effb50..bf1029d56 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -186,7 +186,7 @@ paths: schema: type: string required: true - description: Bucket to query. + description: The database to query from. 
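For context, a request against the v1-compatibility `GET /query` endpoint that this `db` parameter belongs to might look like the following sketch (host, token, database, and query are placeholders; this assumes token authentication via the `Authorization` header):

```bash
# Query the v1-compatibility endpoint with InfluxQL
curl --get "https://INFLUXDB_HOST/query" \
  --header "Authorization: Token API_TOKEN" \
  --data-urlencode "db=DATABASE_NAME" \
  --data-urlencode "q=SELECT * FROM home WHERE time > now() - 1h"
```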
- in: query name: pretty description: | From 3b738ec6180849c506508d7ec6a6e48b4632cd78 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:43:29 -0700 Subject: [PATCH 040/122] Update api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index 751b96ea9..0c89e064e 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -186,7 +186,7 @@ paths: schema: type: string required: true - description: Bucket to query. + description: The database to query from. - in: query name: pretty description: | From 13087e49d0ab60d221a9db34498d84ef4df56dda Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:46:56 -0700 Subject: [PATCH 041/122] Update content/shared/influxdb3-admin/distinct-value-cache/create.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/distinct-value-cache/create.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/create.md b/content/shared/influxdb3-admin/distinct-value-cache/create.md index d0e89dc92..560208c34 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/create.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/create.md @@ -71,7 +71,9 @@ influxdb3 create distinct_cache \ ## Use the HTTP API -You can also create a Distinct Value Cache using the [InfluxDB v3 HTTP API](/influxdb3/version/api/v3/). Send a `POST` request to the `/api/v3/configure/distinct_cache` endpoint. +To use the HTTP API to create a Distinct Value Cache, send a `POST` request to the `/api/v3/configure/distinct_cache` endpoint. + +{{% api-endpoint method="POST" endpoint="/api/v3/configure/distinct_cache" api-ref="/influxdb3/version/api/v3/#operation/PostConfigureDistinctCache" %}} {{% code-placeholders "(DATABASE|TABLE|DVC)_NAME|AUTH_TOKEN|COLUMNS|MAX_(CARDINALITY|AGE)" %}} From 76af7669ee3378c980903b6f80db29da0a2190b7 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 16:45:45 -0700 Subject: [PATCH 042/122] Update api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../cloud-serverless/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index bf1029d56..43b84af62 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -288,7 +288,7 @@ paths: properties: db: type: string - description: Bucket to query. + description: The database to query from. q: description: Defines the InfluxQL query to run. 
type: string From 7436f0fbd862e9e70e1d2b040a3739fe8473ef4c Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Mon, 4 Aug 2025 14:37:18 -0700 Subject: [PATCH 043/122] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../cloud-dedicated/v1-compatibility/swaggerV1Compat.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index d8c5df5f9..e76225b56 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -294,6 +294,10 @@ paths: description: | The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention). type: string + rp: + description: | + The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention). + type: string q: description: Defines the InfluxQL query to run. type: string From c6a11dbb089ed40d4d0d4ccab01bd2062f7a5bf0 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Mon, 4 Aug 2025 14:37:47 -0700 Subject: [PATCH 044/122] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../cloud-dedicated/v1-compatibility/swaggerV1Compat.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index e76225b56..db77d64f1 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -298,6 +298,10 @@ paths: description: | The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention). type: string + rp: + description: | + The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention). + type: string q: description: Defines the InfluxQL query to run. type: string From 3b60c7f253a2765a718a7ba6f70b42e92cfbd7bc Mon Sep 17 00:00:00 2001 From: meelahme Date: Tue, 5 Aug 2025 13:21:16 -0700 Subject: [PATCH 045/122] docs: updating swaggerVwith rp, operationID, and bucket changed to database --- .../v1-compatibility/swaggerV1Compat.yml | 14 +++----------- .../v1-compatibility/swaggerV1Compat.yml | 8 ++++++-- .../clustered/v1-compatibility/swaggerV1Compat.yml | 8 ++++++-- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index db77d64f1..16491b315 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -66,7 +66,7 @@ paths: schema: type: string required: true - description: Bucket to write to. 
If none exists, InfluxDB creates a bucket with a default 3-day retention policy. + description: Database to write to. If none exists, InfluxDB creates a database with a default 3-day retention policy. - in: query name: rp schema: @@ -187,7 +187,7 @@ paths: schema: type: string required: true - description: Database to query from. + description: The database to query from. - in: query name: pretty description: | @@ -294,14 +294,6 @@ paths: description: | The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention). type: string - rp: - description: | - The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention). - type: string - rp: - description: | - The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention). - type: string q: description: Defines the InfluxQL query to run. type: string @@ -363,7 +355,7 @@ paths: schema: type: string required: true - description: Bucket to query. + description: Database to query. - in: query name: rp schema: diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index 43b84af62..e599a77ce 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -65,7 +65,7 @@ paths: schema: type: string required: true - description: Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy. + description: Database to write to. If none exists, InfluxDB creates a database with a default 3-day retention policy. - in: query name: rp schema: @@ -137,7 +137,7 @@ paths: $ref: '#/components/schemas/Error' /query: get: - operationId: GetV1ExecuteQuery + operationId: GetQueryV1 tags: - Query summary: Query using the InfluxDB v1 HTTP API @@ -289,6 +289,10 @@ paths: db: type: string description: The database to query from. + rp: + description: | + The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-serverless/admin/databases/create/#influxql-dbrp-naming-convention). + type: string q: description: Defines the InfluxQL query to run. type: string diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index 0c89e064e..a189e53f9 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -65,7 +65,7 @@ paths: schema: type: string required: true - description: Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy. + description: Database to write to. If none exists, InfluxDB creates a database with a default 3-day retention policy. - in: query name: rp schema: @@ -137,7 +137,7 @@ paths: $ref: '#/components/schemas/Error' /query: get: - operationId: GetV1ExecuteQuery + operationId: GetQueryV1 tags: - Query summary: Query using the InfluxDB v1 HTTP API @@ -289,6 +289,10 @@ paths: db: type: string description: Bucket to query. 
+        rp:
+          description: |
+            The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-serverless/admin/databases/create/#influxql-dbrp-naming-convention).
+          type: string
         q:
           description: Defines the InfluxQL query to run.
           type: string

From 3c2f475751be06c628863b785698a0fc32d03d3b Mon Sep 17 00:00:00 2001
From: meelahme
Date: Tue, 5 Aug 2025 13:39:31 -0700
Subject: [PATCH 046/122] docs: fixing wrong indentation

---
 .../influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml
index a189e53f9..c8a797bb7 100644
--- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml
+++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml
@@ -289,7 +289,7 @@ paths:
           db:
             type: string
             description: Bucket to query.
-           rp:
+          rp:
            description: |
              The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-serverless/admin/databases/create/#influxql-dbrp-naming-convention).
            type: string

From 4a6cbb38dd712b53eec9938c378c373549e040ce Mon Sep 17 00:00:00 2001
From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com>
Date: Tue, 5 Aug 2025 13:43:53 -0700
Subject: [PATCH 047/122] Update
 api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 .../influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml
index c8a797bb7..ffe39b4fc 100644
--- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml
+++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml
@@ -288,7 +288,7 @@ paths:
         properties:
           db:
             type: string
-            description: Bucket to query.
+            description: Database to query.
           rp:
            description: |
              The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-serverless/admin/databases/create/#influxql-dbrp-naming-convention).

From 7f17176865fc6f980bdce71d26aac8be2d877389 Mon Sep 17 00:00:00 2001
From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com>
Date: Tue, 5 Aug 2025 13:44:08 -0700
Subject: [PATCH 048/122] Update
 api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 .../cloud-dedicated/v1-compatibility/swaggerV1Compat.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml
index 16491b315..d3d067a62 100644
--- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml
+++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml
@@ -289,7 +289,7 @@ paths:
         properties:
           db:
             type: string
-            description: Database to query from.
+            description: The database to query from.
           rp:
            description: |
              The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention).
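To make the `db`/`rp` behavior documented in these hunks concrete: on Cloud Dedicated and Clustered, the two parameters are assumed to concatenate into the full database name, so the following two sketches target different databases (host and token are placeholders):

```bash
# Targets the database named "mydb"
curl --get "https://cluster-id.a.influxdb.io/query" \
  --header "Authorization: Token DATABASE_TOKEN" \
  --data-urlencode "db=mydb" \
  --data-urlencode "q=SHOW MEASUREMENTS"

# Targets the database named "mydb/autogen"
curl --get "https://cluster-id.a.influxdb.io/query" \
  --header "Authorization: Token DATABASE_TOKEN" \
  --data-urlencode "db=mydb" \
  --data-urlencode "rp=autogen" \
  --data-urlencode "q=SHOW MEASUREMENTS"
```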
From 5ef4d2e1e4fe0e2199f49e0b058fc854ecb30894 Mon Sep 17 00:00:00 2001
From: meelahme
Date: Tue, 5 Aug 2025 13:52:18 -0700
Subject: [PATCH 049/122] docs: updating rp for cluster and serverless

---
 .../influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml
index c8a797bb7..d6f473311 100644
--- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml
+++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml
@@ -291,7 +291,7 @@ paths:
             description: Bucket to query.
           rp:
             description: |
-              The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-serverless/admin/databases/create/#influxql-dbrp-naming-convention).
+              The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/clustered/admin/databases/create/#influxql-dbrp-naming-convention).
             type: string
           q:
             description: Defines the InfluxQL query to run.

From ae96f0154ede3ab81b76229286256f5e23b86941 Mon Sep 17 00:00:00 2001
From: meelahme
Date: Tue, 5 Aug 2025 14:02:39 -0700
Subject: [PATCH 050/122] docs: updates to cloud-serverless rp

---
 .../cloud-serverless/v1-compatibility/swaggerV1Compat.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml
index e599a77ce..c3fc9f285 100644
--- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml
+++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml
@@ -291,7 +291,7 @@ paths:
             description: The database to query from.
           rp:
             description: |
-              The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-serverless/admin/databases/create/#influxql-dbrp-naming-convention).
+              The retention policy to query data from.
             type: string
           q:
             description: Defines the InfluxQL query to run.

From a2157763c9d589c3c94fabe5c6548ae452ffe3f9 Mon Sep 17 00:00:00 2001
From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com>
Date: Tue, 5 Aug 2025 14:38:10 -0700
Subject: [PATCH 051/122] Update
 api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 .../cloud-serverless/v1-compatibility/swaggerV1Compat.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml
index c3fc9f285..9b65dfd27 100644
--- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml
+++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml
@@ -288,7 +288,7 @@ paths:
         properties:
           db:
             type: string
-            description: The database to query from.
+            description: Database to query.
           rp:
             description: |
              The retention policy to query data from.
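The `db` and `rp` properties edited above belong to the `POST /query` request body. As a sketch of how a client might supply them as form data (host, token, and names are placeholders; this assumes the endpoint accepts an `application/x-www-form-urlencoded` body):

```bash
# POST form of the v1 /query endpoint, with db, rp, and q in the body
curl -X POST "https://INFLUXDB_HOST/query" \
  --header "Authorization: Token API_TOKEN" \
  --header "Content-Type: application/x-www-form-urlencoded" \
  --data-urlencode "db=DATABASE_NAME" \
  --data-urlencode "rp=RETENTION_POLICY" \
  --data-urlencode "q=SELECT * FROM home LIMIT 10"
```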
From 65e80cad92e53f3c9c999b60fc2925297ad8917a Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Tue, 5 Aug 2025 14:38:21 -0700 Subject: [PATCH 052/122] Update api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../cloud-serverless/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index 9b65dfd27..b1bcb5e09 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -290,7 +290,7 @@ paths: type: string description: Database to query. rp: - description: | + description: | The retention policy to query data from. type: string q: From 9ee11d3d41ba64421d97f548dc1043efa3294906 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Tue, 5 Aug 2025 14:38:30 -0700 Subject: [PATCH 053/122] Update api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index 9741acb5a..2b6305f4c 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -290,7 +290,7 @@ paths: type: string description: Database to query. rp: - description: | + description: | The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/clustered/admin/databases/create/#influxql-dbrp-naming-convention). type: string q: From 6fb6454aa26437349ab7153f643dee69301f643d Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Wed, 6 Aug 2025 09:54:12 -0700 Subject: [PATCH 054/122] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../v1-compatibility/swaggerV1Compat.yml | 21 ++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index d3d067a62..c429fc406 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -205,7 +205,26 @@ paths: name: rp schema: type: string - description: Retention policy name. + description: | + The retention policy name for InfluxQL compatibility + + Optional parameter that, when combined with the db parameter, forms the complete database name to query. In InfluxDB Cloud Dedicated, databases can be named using the + database_name/retention_policy_name convention for InfluxQL compatibility. 
+ + When a request specifies both `db` and `rp`, Cloud Dedicated combines them as `db/rp` to target the database--for example: + + - If `db=mydb` and `rp=autogen`, the query targets the database named `mydb/autogen` + - If only `db=mydb` is provided (no `rp`), the query targets the database named `mydb` + + Unlike InfluxDB v1 and Cloud Serverless, Cloud Dedicated does not use DBRP mappings or separate retention policy objects. This parameter exists solely for v1 API + compatibility and database naming conventions. + + _Note: The retention policy name does not control data retention in Cloud Dedicated. Data retention is determined by the database's **retention period** setting._ + + ### Related + + - [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention) + - [InfluxQL data retention policy mapping differences](/influxdb3/cloud-serverless/guides/prototype-evaluation/#influxql-data-retention-policy-mapping-differences) - name: epoch description: | Formats timestamps as unix (epoch) timestamps with the specified precision From ce5d9292c542d85cc1261595e35571fc234d8220 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Wed, 6 Aug 2025 09:54:39 -0700 Subject: [PATCH 055/122] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../v1-compatibility/swaggerV1Compat.yml | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index c429fc406..f4e16bc45 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -66,7 +66,22 @@ paths: schema: type: string required: true - description: Database to write to. If none exists, InfluxDB creates a database with a default 3-day retention policy. + description: | + The database to write to. + + **Database targeting:** In Cloud Dedicated, databases can be named using the `database_name/retention_policy_name` convention for InfluxQL compatibility. Cloud Dedicated does not use DBRP mappings. The db and rp parameters are used to construct the target database name following this naming convention. + + **Auto-creation behavior:** Cloud Dedicated requires databases to be created before writing data. The v1 `/write` API does not automatically create databases. If the specified + database does not exist, the write request will fail. + + Authentication: Requires a valid API token with _write_ permissions for the target database. 
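A companion sketch for the write-path description above: writing line protocol through the v1 `/write` endpoint, with `db` and `rp` selecting the target database (host, token, and the measurement data are placeholders):

```bash
# Write one line-protocol point with second-precision timestamps
curl -X POST "https://cluster-id.a.influxdb.io/write?db=mydb&rp=autogen&precision=s" \
  --header "Authorization: Token DATABASE_TOKEN" \
  --data-binary 'home,room=Kitchen temp=22.5 1641024000'
```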
+ + ### Related + + - [Write data to InfluxDB Cloud Dedicated](/influxdb3/cloud-dedicated/write-data/) + - [Manage databases in InfluxDB Cloud Dedicated](/influxdb3/cloud-dedicated/admin/databases/) + - [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention) + - [InfluxQL data retention policy mapping differences](/influxdb3/cloud-serverless/guides/prototype-evaluation/#influxql-data-retention-policy-mapping-differences) - in: query name: rp schema: From 89db6cdd51ba67439d109e22276a011dde20c65c Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Wed, 6 Aug 2025 09:55:09 -0700 Subject: [PATCH 056/122] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../v1-compatibility/swaggerV1Compat.yml | 21 ++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index f4e16bc45..6a9750253 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -326,7 +326,26 @@ paths: description: The database to query from. rp: description: | - The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention). + The retention policy name for InfluxQL compatibility + + Optional parameter that, when combined with the db parameter, forms the complete database name to query. In InfluxDB Cloud Dedicated, databases can be named using the + database_name/retention_policy_name convention for InfluxQL compatibility. + + When a request specifies both `db` and `rp`, Cloud Dedicated combines them as `db/rp` to target the database--for example: + + - If `db=mydb` and `rp=autogen`, the query targets the database named `mydb/autogen` + - If only `db=mydb` is provided (no `rp`), the query targets the database named `mydb` + + Unlike InfluxDB v1 and Cloud Serverless, Cloud Dedicated does not use DBRP mappings or separate retention policy objects. This parameter exists solely for v1 API + compatibility and database naming conventions. + + _Note: The retention policy name does not control data retention in Cloud Dedicated. Data retention is determined by the database's **retention period** setting._ + + ### Related + + - [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention) + - [Migrate data from InfluxDB 1.x to Cloud Dedicated](/influxdb3/cloud-dedicated/guides/migrate-data/migrate-1x-to-cloud-dedicated/) + - [InfluxQL data retention policy mapping differences](/influxdb3/cloud-serverless/guides/prototype-evaluation/#influxql-data-retention-policy-mapping-differences) type: string q: description: Defines the InfluxQL query to run. 
From bc890e4d6d71783e6e3c4d119ceffb15c7a9bcbf Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Wed, 6 Aug 2025 09:55:21 -0700 Subject: [PATCH 057/122] Update api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../v1-compatibility/swaggerV1Compat.yml | 27 ++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index b1bcb5e09..db48db47f 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -186,7 +186,32 @@ paths: schema: type: string required: true - description: The database to query from. + description: | + The database name for InfluxQL queries + + Required parameter that specifies the database to query via DBRP (Database Retention Policy) mapping. In Cloud Serverless, this parameter is used together with DBRP + mappings to identify which bucket to query. + + The `db` parameter (optionally combined with `rp`) must have an existing DBRP mapping that points to a bucket. Without a valid DBRP mapping, queries will fail with an + authorization error. + + **DBRP mapping requirements:** + - A DBRP mapping must exist before querying + - Mappings can be created automatically when writing data with the v1 API (if your token has permissions) + - Mappings can be created manually using the InfluxDB CLI or API + + ### Examples + - `db=mydb` - uses the default DBRP mapping for `mydb` + - `db=mydb` with `rp=weekly` - uses the DBRP mapping for `mydb/weekly` + + _Note: Unlike the v1 `/write` endpoint which can auto-create buckets and mappings, the `/query` endpoint requires pre-existing DBRP mappings. The actual data is stored in and + queried from the bucket that the DBRP mapping points to._ + + ### Related + + - [Use the InfluxDB v1 query API and InfluxQL in Cloud Serverless](/influxdb3/cloud-serverless/query-data/execute-queries/v1-http/) + - [Map v1 databases and retention policies to buckets in Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v1/#map-v1-databases-and-retention-policies-to-buckets) + - [Migrate from InfluxDB 1.x to Cloud Serverless](/influxdb3/cloud-serverless/guides/migrate-data/migrate-1x-to-serverless/) - in: query name: pretty description: | From a69fea8a007e2dae25af25593c365f3201e1d0ef Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Wed, 6 Aug 2025 09:55:33 -0700 Subject: [PATCH 058/122] Update api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../v1-compatibility/swaggerV1Compat.yml | 27 ++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index db48db47f..1e18adbf7 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -313,7 +313,32 @@ paths: properties: db: type: string - description: Database to query. + description: | + The database name for InfluxQL queries + + Required parameter that specifies the database to query via DBRP (Database Retention Policy) mapping. 
In Cloud Serverless, this parameter is used together with DBRP + mappings to identify which bucket to query. + + The `db` parameter (optionally combined with `rp`) must have an existing DBRP mapping that points to a bucket. Without a valid DBRP mapping, queries will fail with an + authorization error. + + **DBRP mapping requirements:** + - A DBRP mapping must exist before querying + - Mappings can be created automatically when writing data with the v1 API (if your token has permissions) + - Mappings can be created manually using the InfluxDB CLI or API + + ### Examples + - `db=mydb` - uses the default DBRP mapping for `mydb` + - `db=mydb` with `rp=weekly` - uses the DBRP mapping for `mydb/weekly` + + _Note: Unlike the v1 `/write` endpoint which can auto-create buckets and mappings, the `/query` endpoint requires pre-existing DBRP mappings. The actual data is stored in and + queried from the bucket that the DBRP mapping points to._ + + ### Related + + - [Execute InfluxQL queries using the v1 API](/influxdb3/cloud-serverless/query-data/execute-queries/influxql/api/v1-http/) + - [Map v1 databases and retention policies to buckets in Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v1/#map-v1-databases-and-retention-policies-to-buckets) + - [Manage DBRP mappings in Cloud Serverless](/influxdb3/cloud-serverless/admin/dbrp/) rp: description: | The retention policy to query data from. From c6bd7bb726208d45c65893791997f4262065f9f5 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Wed, 6 Aug 2025 09:55:44 -0700 Subject: [PATCH 059/122] Update api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../v1-compatibility/swaggerV1Compat.yml | 22 ++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index 1e18adbf7..08a07ef8b 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -341,7 +341,27 @@ paths: - [Manage DBRP mappings in Cloud Serverless](/influxdb3/cloud-serverless/admin/dbrp/) rp: description: | - The retention policy to query data from. + The retention policy name for InfluxQL queries + + Optional parameter that specifies the retention policy to use when querying data with InfluxQL. In Cloud Serverless, this parameter works with DBRP (Database Retention + Policy) mappings to identify the target bucket. + + When provided together with the `db` parameter, Cloud Serverless uses the DBRP mapping to determine which bucket to query. The combination of `db` and `rp` must have an + existing DBRP mapping that points to a bucket. If no `rp` is specified, Cloud Serverless uses the default retention policy mapping for the database. + + Requirements: A DBRP mapping must exist for the db/rp combination before you can query data. DBRP mappings can be created: + - Automatically when writing data with the v1 API (if your token has sufficient permissions) + - Manually using the InfluxDB CLI or API + + Example: If `db=mydb` and `rp=weekly`, the query uses the DBRP mapping for `mydb/weekly` to determine which bucket to query. + + _Note: The retention policy name is used only for DBRP mapping. 
Actual data retention is controlled by the target bucket's retention period setting, not by the retention policy name._ + + ### Related + + - [Execute InfluxQL queries using the v1 API](/influxdb3/cloud-serverless/query-data/execute-queries/influxql/api/v1-http/) + - [Map v1 databases and retention policies to buckets in Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v1/#map-v1-databases-and-retention-policies-to-buckets) + - [Manage DBRP mappings in Cloud Serverless](/influxdb3/cloud-serverless/admin/dbrp/) type: string q: description: Defines the InfluxQL query to run. From 4d77cde02ea4f0fa0f4ced8669d5d35ab7350266 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Wed, 6 Aug 2025 09:55:55 -0700 Subject: [PATCH 060/122] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../v1-compatibility/swaggerV1Compat.yml | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index 6a9750253..128021d19 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -323,7 +323,29 @@ paths: properties: db: type: string - description: The database to query from. + description: | + The database name for InfluxQL queries. + + Required parameter that specifies the database to query. + In InfluxDB Cloud Dedicated, this can be either: + - A simple database name (for example, `mydb`) + - The database portion of a `database_name/retention_policy_name` naming convention (used together with the `rp` parameter) + + When used alone, `db` specifies the complete database name to query. When used with the `rp` parameter, they combine to form the full database name as `db/rp`--for example, if `db=mydb` and `rp=autogen`, the query targets the database named `mydb/autogen`. + + Unlike InfluxDB Cloud Serverless, Cloud Dedicated does not use DBRP mappings. The database name directly corresponds to an existing database in your Cloud Dedicated cluster. + + Examples: + - `db=mydb` - queries the database named `mydb` + - `db=mydb` with `rp=autogen` - queries the database named `mydb/autogen` + + _Note: The specified database must exist in your Cloud Dedicated cluster. 
Queries will fail if the database does not exist._ + + ### Related + + - [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention) + - [Migrate data from InfluxDB 1.x to Cloud Dedicated](/influxdb3/cloud-dedicated/guides/migrate-data/migrate-1x-to-cloud-dedicated/) + - [InfluxQL data retention policy mapping differences between InfluxDB Cloud Dedicated and Cloud Serverless](/influxdb3/cloud-serverless/guides/prototype-evaluation/#influxql-data-retention-policy-mapping-differences) rp: description: | The retention policy name for InfluxQL compatibility From 33cae0b3e2154587d92959eebad9c362c82353ac Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Wed, 6 Aug 2025 09:56:05 -0700 Subject: [PATCH 061/122] Update api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../v1-compatibility/swaggerV1Compat.yml | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index 2b6305f4c..2fb6d81fc 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -291,7 +291,29 @@ paths: description: Database to query. rp: description: | - The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/clustered/admin/databases/create/#influxql-dbrp-naming-convention). + The retention policy name for InfluxQL compatibility + + Optional parameter that, when combined with the db parameter, forms the complete database name to query. In InfluxDB Clustered, databases can be named using the + database_name/retention_policy_name convention for InfluxQL compatibility. + + When a request specifies both `db` and `rp`, InfluxDB Clustered combines them as `db/rp` to target the database--for example: + + - If `db=mydb` and `rp=autogen`, the query targets the database named `mydb/autogen` + - If only `db=mydb` is provided (no `rp`), the query targets the database named `mydb` + + Unlike InfluxDB v1 and Cloud Serverless, InfluxDB Clustered does not use DBRP mappings or separate retention policy objects. This parameter exists solely for v1 API + compatibility and database naming conventions. + + Note: The retention policy name does not control data retention in InfluxDB Clustered. Data retention is determined by the database's _retention period_ setting. + + ### Related + + - [Use the v1 query API and InfluxQL to query data in InfluxDB Clustered](/influxdb3/clustered/query-data/execute-queries/influxdb-v1-api/) + - [Use the InfluxDB v1 API with InfluxDB Clustered](/influxdb3/clustered/guides/api-compatibility/v1/) + - [Manage databases in InfluxDB Clustered](/influxdb3/clustered/admin/databases/) + - [InfluxQL DBRP naming convention in InfluxDB Clustered](/influxdb3/clustered/admin/databases/create/#influxql-dbrp-naming-convention) + - [Migrate data from InfluxDB v1 to InfluxDB Clustered](/influxdb3/clustered/guides/migrate-data/migrate-1x-to-clustered/) + ``` type: string q: description: Defines the InfluxQL query to run. 
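Several of the descriptions above note that Cloud Serverless DBRP mappings can be created manually with the CLI. A hedged sketch using the `influx v1 dbrp create` command (the bucket ID and names are placeholders; verify the flags against your installed `influx` CLI version):

```bash
# Map database "mydb" and retention policy "weekly" to an existing bucket,
# and make this the default mapping for "mydb"
influx v1 dbrp create \
  --db mydb \
  --rp weekly \
  --bucket-id 0000000000000000 \
  --default
```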
From 4f0a7181efa34a4705560c9114da09c91f1fbe26 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Wed, 6 Aug 2025 09:56:21 -0700 Subject: [PATCH 062/122] Update api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../v1-compatibility/swaggerV1Compat.yml | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index 2fb6d81fc..a2f8693c9 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -65,7 +65,23 @@ paths: schema: type: string required: true - description: Database to write to. If none exists, InfluxDB creates a database with a default 3-day retention policy. + description: | + The database to write to. + + **Database targeting:** In InfluxDB Clustered, databases can be named using the `database_name/retention_policy_name` convention for InfluxQL compatibility. InfluxDB Clustered does not use DBRP mappings. The db and rp parameters are used to construct the target database name following this naming convention. + + **Auto-creation behavior:** InfluxDB Clustered requires databases to be created before writing data. The v1 `/write` API does not automatically create databases. If the specified + database does not exist, the write request will fail. + + Authentication: Requires a valid API token with _write_ permissions for the target database. + + ### Related + + - [Write data to InfluxDB Clustered](/influxdb3/clustered/write-data/) + - [Use the InfluxDB v1 API with InfluxDB Clustered](/influxdb3/clustered/guides/api-compatibility/v1/) + - [Manage databases in InfluxDB Clustered](/influxdb3/clustered/admin/databases/) + - [InfluxQL DBRP naming convention in InfluxDB Clustered](/influxdb3/clustered/admin/databases/create/#influxql-dbrp-naming-convention) + - [Migrate data from InfluxDB v1 to InfluxDB Clustered](/influxdb3/clustered/guides/migrate-data/migrate-1x-to-clustered/) - in: query name: rp schema: From beb7bb2261ea0ce49279747b802a450a466c4c15 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Thu, 7 Aug 2025 13:13:56 -0700 Subject: [PATCH 063/122] Update api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../v1-compatibility/swaggerV1Compat.yml | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index 08a07ef8b..2ff111180 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -229,7 +229,29 @@ paths: name: rp schema: type: string - description: Retention policy name. + description: | + The retention policy name for InfluxQL queries + + Optional parameter that specifies the retention policy to use when querying data with InfluxQL. In Cloud Serverless, this parameter works with DBRP (Database Retention + Policy) mappings to identify the target bucket. + + When provided together with the `db` parameter, Cloud Serverless uses the DBRP mapping to determine which bucket to query. 
The combination of `db` and `rp` must have an + existing DBRP mapping that points to a bucket. If no `rp` is specified, Cloud Serverless uses the default retention policy mapping for the database. + + Requirements: A DBRP mapping must exist for the db/rp combination before you can query data. DBRP mappings can be created: + - Automatically when writing data with the v1 API (if your token has sufficient permissions) + - Manually using the InfluxDB CLI or API + + Example: If `db=mydb` and `rp=weekly`, the query uses the DBRP mapping for `mydb/weekly` to determine which bucket to query. + + _Note: The retention policy name is used only for DBRP mapping. Actual data retention is controlled by the target bucket's retention period setting, not by the retention + policy name._ + + ### Related + + - [Use the InfluxDB v1 query API and InfluxQL in Cloud Serverless](/influxdb3/cloud-serverless/query-data/execute-queries/v1-http/) + - [Map v1 databases and retention policies to buckets in Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v1/#map-v1-databases-and-retention-policies-to-buckets) + - [Migrate from InfluxDB 1.x to Cloud Serverless](/influxdb3/cloud-serverless/guides/migrate-data/migrate-1x-to-serverless/) - name: epoch description: | Formats timestamps as unix (epoch) timestamps with the specified precision From 1846623a278cf59cdee3eefd54a465c3586e8335 Mon Sep 17 00:00:00 2001 From: Abhishek Saharn <102726227+asaharn@users.noreply.github.com> Date: Mon, 11 Aug 2025 20:19:24 +0530 Subject: [PATCH 064/122] Updated Description for MS Fabric --- data/telegraf_plugins.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/data/telegraf_plugins.yml b/data/telegraf_plugins.yml index 0f9ee3fe4..ac89b5be7 100644 --- a/data/telegraf_plugins.yml +++ b/data/telegraf_plugins.yml @@ -2940,8 +2940,8 @@ output: Explorer](https://docs.microsoft.com/en-us/azure/data-explorer), [Azure Synapse Data Explorer](https://docs.microsoft.com/en-us/azure/synapse-analytics/data-explorer/data-explorer-overview), - and [Real time analytics in - Fabric](https://learn.microsoft.com/en-us/fabric/real-time-analytics/overview) + and [Real-Time Intelligence in + Fabric](https://learn.microsoft.com/fabric/real-time-intelligence/overview) services. 
Azure Data Explorer is a distributed, columnar store, purpose built for From 0aa345572b4e91773475e2dab6e54fd6b7c9b398 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 13 Aug 2025 14:19:54 +0000 Subject: [PATCH 067/122] Update APT signing key location from /etc/apt/trusted.gpg.d to /etc/apt/keyrings Co-authored-by: jstirnaman <212227+jstirnaman@users.noreply.github.com> --- content/influxdb/v1/introduction/install.md | 8 ++++---- content/influxdb/v2/install/_index.md | 4 ++-- .../cloud-dedicated/reference/cli/influxctl/_index.md | 4 ++-- .../influxdb3/clustered/reference/cli/influxctl/_index.md | 4 ++-- content/telegraf/v1/install.md | 8 ++++---- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/content/influxdb/v1/introduction/install.md b/content/influxdb/v1/introduction/install.md index 690799f4e..7aa6a7320 100644 --- a/content/influxdb/v1/introduction/install.md +++ b/content/influxdb/v1/introduction/install.md @@ -75,8 +75,8 @@ For Ubuntu/Debian users, add the InfluxData repository with the following comman # Primary key fingerprint: 24C9 75CB A61A 024E E1B6 3178 7C3D 5715 9FC2 F927 # Subkey fingerprint: 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E wget -q https://repos.influxdata.com/influxdata-archive.key -gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null -echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list +gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/keyrings/influxdata-archive.gpg > /dev/null +echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list ``` {{% /code-tab-content %}} @@ -86,8 +86,8 @@ echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repo # Primary key fingerprint: 24C9 75CB A61A 024E E1B6 3178 7C3D 5715 9FC2 F927 # Subkey fingerprint: 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E curl --silent --location -O https://repos.influxdata.com/influxdata-archive.key -gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null -echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list +gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/keyrings/influxdata-archive.gpg > /dev/null +echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list ``` {{% /code-tab-content %}} {{< /code-tabs-wrapper >}} diff --git a/content/influxdb/v2/install/_index.md b/content/influxdb/v2/install/_index.md index 0932a613f..2a7b4bc3a 100644 --- 
a/content/influxdb/v2/install/_index.md +++ b/content/influxdb/v2/install/_index.md @@ -354,8 +354,8 @@ To install {{% product-name %}} on Linux, do one of the following: | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' \ && cat influxdata-archive.key \ | gpg --dearmor \ - | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null \ - && echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' \ + | sudo tee /etc/apt/keyrings/influxdata-archive.gpg > /dev/null \ + && echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' \ | sudo tee /etc/apt/sources.list.d/influxdata.list # Install influxdb sudo apt-get update && sudo apt-get install influxdb2 diff --git a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/_index.md b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/_index.md index a8557c472..ff50c38df 100644 --- a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/_index.md +++ b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/_index.md @@ -176,8 +176,8 @@ To download the Linux `influxctl` package, do one of the following: # Primary key fingerprint: 24C9 75CB A61A 024E E1B6 3178 7C3D 5715 9FC2 F927 # Subkey fingerprint: 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E wget -q https://repos.influxdata.com/influxdata-archive.key -gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null -echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list +gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/keyrings/influxdata-archive.gpg > /dev/null +echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list sudo apt-get update && sudo apt-get install influxctl ``` diff --git a/content/influxdb3/clustered/reference/cli/influxctl/_index.md b/content/influxdb3/clustered/reference/cli/influxctl/_index.md index bb7b97175..953d016e9 100644 --- a/content/influxdb3/clustered/reference/cli/influxctl/_index.md +++ b/content/influxdb3/clustered/reference/cli/influxctl/_index.md @@ -166,8 +166,8 @@ To download the Linux `influxctl` package, do one of the following: # Primary key fingerprint: 24C9 75CB A61A 024E E1B6 3178 7C3D 5715 9FC2 F927 # Subkey fingerprint: 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E wget -q https://repos.influxdata.com/influxdata-archive.key -gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null -echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list +gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee 
/etc/apt/keyrings/influxdata-archive.gpg > /dev/null +echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list ``` {{% /code-tab-content %}} diff --git a/content/telegraf/v1/install.md b/content/telegraf/v1/install.md index 21c0d2175..9aa40d9cd 100644 --- a/content/telegraf/v1/install.md +++ b/content/telegraf/v1/install.md @@ -180,8 +180,8 @@ gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 \ | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' \ && cat influxdata-archive.key \ | gpg --dearmor \ -| sudo tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null \ -&& echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' \ +| sudo tee /etc/apt/keyrings/influxdata-archive.gpg > /dev/null \ +&& echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' \ | sudo tee /etc/apt/sources.list.d/influxdata.list sudo apt-get update && sudo apt-get install telegraf ``` @@ -198,8 +198,8 @@ gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive_compat.key | grep -q '^fpr:\+9D539D90D3328DC7D6C8D3B9D8FF8E1F7DF8B07E:$' \ && cat influxdata-archive_compat.key \ | gpg --dearmor \ -| sudo tee /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg > /dev/null -echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' \ +| sudo tee /etc/apt/keyrings/influxdata-archive_compat.gpg > /dev/null +echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' \ | sudo tee /etc/apt/sources.list.d/influxdata.list sudo apt-get update && sudo apt-get install telegraf ``` From 97c02baf20b3371f22bceb4f192006ce936e9518 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 13 Aug 2025 14:40:54 +0000 Subject: [PATCH 068/122] Complete comprehensive GitHub Copilot instructions with validated build and test processes Co-authored-by: jstirnaman <212227+jstirnaman@users.noreply.github.com> --- .github/copilot-instructions.md | 443 ++++++++++++++++++++------------ 1 file changed, 273 insertions(+), 170 deletions(-) diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index ffa9b01d0..99059b56f 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -1,134 +1,283 @@ -# Instructions for InfluxData Documentation +# InfluxData Documentation Repository (docs-v2) -## Purpose and scope +Always follow these instructions first and fallback to additional search and context gathering only when the information provided here is incomplete or found to be in error. -Help document InfluxData products by creating clear, accurate technical content with proper code examples, frontmatter, and formatting. +## Working Effectively -## Documentation structure +### Bootstrap, Build, and Test the Repository + +Execute these commands in order to set up a complete working environment: + +1. **Install Node.js dependencies** (takes ~4 seconds): + + ```bash + # Skip Cypress binary download due to network restrictions in CI environments + CYPRESS_INSTALL_BINARY=0 yarn install + ``` + +2. **Build the static site** (takes ~75 seconds, NEVER CANCEL - set timeout to 180+ seconds): + + ```bash + npx hugo --quiet + ``` + +3. 
**Start the development server** (builds in ~92 seconds, NEVER CANCEL - set timeout to 150+ seconds): + + ```bash + npx hugo server --bind 0.0.0.0 --port 1313 + ``` + + - Access at: http://localhost:1313/ + - Serves 5,359+ pages and 441 static files + - Auto-rebuilds on file changes + +4. **Alternative Docker development setup** (use if local Hugo fails): + ```bash + docker compose up local-dev + ``` + **Note**: May fail in restricted network environments due to Alpine package manager issues. + +### Testing (CRITICAL: NEVER CANCEL long-running tests) + +#### Code Block Testing (takes 5-15 minutes per product, NEVER CANCEL - set timeout to 30+ minutes): + +```bash +# Build test environment first (takes ~30 seconds, may fail due to network restrictions) +docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest . + +# Test all products (takes 15-45 minutes total) +yarn test:codeblocks:all + +# Test specific products +yarn test:codeblocks:cloud +yarn test:codeblocks:v2 +yarn test:codeblocks:telegraf +``` + +#### Link Validation (takes 10-30 minutes, NEVER CANCEL - set timeout to 45+ minutes): + +```bash +# Test all links (very long-running) +yarn test:links + +# Test specific files/products (faster) +yarn test:links content/influxdb3/core/**/*.md +yarn test:links:v3 +yarn test:links:v2 +``` + +#### Style Linting (takes 30-60 seconds): + +```bash +# Basic Vale linting +docker compose run -T vale content/**/*.md + +# Product-specific linting with custom configurations +docker compose run -T vale --config=content/influxdb3/cloud-dedicated/.vale.ini --minAlertLevel=error content/influxdb3/cloud-dedicated/**/*.md +``` + +#### JavaScript and CSS Linting (takes 5-10 seconds): + +```bash +yarn eslint assets/js/**/*.js +yarn prettier --check "**/*.{css,js,ts,jsx,tsx}" +``` + +### Pre-commit Hooks (automatically run, can be skipped if needed): + +```bash +# Run all pre-commit checks manually +yarn lint + +# Skip pre-commit hooks if necessary (not recommended) +git commit -m "message" --no-verify +``` + +## Validation Scenarios + +Always test these scenarios after making changes to ensure full functionality: + +### 1. Documentation Rendering Test + +```bash +# Start Hugo server +npx hugo server --bind 0.0.0.0 --port 1313 + +# Verify key pages load correctly (200 status) +curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/influxdb3/core/ +curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/influxdb/v2/ +curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/telegraf/v1/ + +# Verify content contains expected elements +curl -s http://localhost:1313/influxdb3/core/ | grep -i "influxdb" +``` + +### 2. Build Output Validation + +```bash +# Verify build completes successfully +npx hugo --quiet + +# Check build output exists and has reasonable size (~529MB) +ls -la public/ +du -sh public/ + +# Verify key files exist +file public/index.html +file public/influxdb3/core/index.html +``` + +### 3. 
Shortcode and Formatting Test + +```bash +# Test shortcode examples page +yarn test:links content/example.md +``` + +## Repository Structure and Key Locations + +### Content Organization + +- **InfluxDB 3**: `/content/influxdb3/` (core, enterprise, cloud-dedicated, cloud-serverless, clustered, explorer) +- **InfluxDB v2**: `/content/influxdb/` (v2, cloud, enterprise_influxdb, v1) +- **Telegraf**: `/content/telegraf/v1/` +- **Other tools**: `/content/kapacitor/`, `/content/chronograf/`, `/content/flux/` +- **Shared content**: `/content/shared/` +- **Examples**: `/content/example.md` (comprehensive shortcode reference) + +### Configuration Files + +- **Hugo config**: `/config/_default/` +- **Package management**: `package.json`, `yarn.lock` +- **Docker**: `compose.yaml`, `Dockerfile.pytest` +- **Git hooks**: `lefthook.yml` +- **Testing**: `cypress.config.js`, `pytest.ini` (in test directories) +- **Linting**: `.vale.ini`, `.prettierrc.yaml`, `eslint.config.js` + +### Build and Development + +- **Hugo binary**: Available via `npx hugo` (version 0.148.2+) +- **Static assets**: `/assets/` (JavaScript, CSS, images) +- **Build output**: `/public/` (generated, ~529MB) +- **Layouts**: `/layouts/` (Hugo templates) +- **Data files**: `/data/` (YAML/JSON data for templates) + +## Technology Stack + +- **Static Site Generator**: Hugo (0.148.2+ extended) +- **Package Manager**: Yarn (1.22.22+) with Node.js (20.19.4+) +- **Testing Framework**: + - Pytest with pytest-codeblocks (for code examples) + - Cypress (for link validation and E2E tests) + - Vale (for style and writing guidelines) +- **Containerization**: Docker with Docker Compose +- **Linting**: ESLint, Prettier, Vale +- **Git Hooks**: Lefthook + +## Common Tasks and Build Times + +### Time Expectations (CRITICAL - NEVER CANCEL) + +- **Dependency installation**: 4 seconds +- **Hugo static build**: 75 seconds (NEVER CANCEL - timeout: 180+ seconds) +- **Hugo server startup**: 92 seconds (NEVER CANCEL - timeout: 150+ seconds) +- **Code block tests**: 5-15 minutes per product (NEVER CANCEL - timeout: 30+ minutes) +- **Link validation**: 10-30 minutes (NEVER CANCEL - timeout: 45+ minutes) +- **Style linting**: 30-60 seconds +- **Docker image build**: 30+ seconds (may fail due to network restrictions) + +### Network Connectivity Issues + +In restricted environments, these commands may fail due to external dependency downloads: + +- `docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest .` (InfluxData repositories, HashiCorp repos) +- `docker compose up local-dev` (Alpine package manager) +- Cypress binary installation (use `CYPRESS_INSTALL_BINARY=0`) + +Document these limitations but proceed with available functionality. + +### Validation Commands for CI + +Always run these before committing changes: + +```bash +# Format and lint code +yarn prettier --write "**/*.{css,js,ts,jsx,tsx}" +yarn eslint assets/js/**/*.js + +# Test Hugo build +npx hugo --quiet + +# Test development server startup +timeout 150 npx hugo server --bind 0.0.0.0 --port 1313 & +sleep 120 +curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/ +pkill hugo +``` + +## Key Projects in This Codebase + +1. **InfluxDB 3 Documentation** (Core, Enterprise, Cloud variants) +2. **InfluxDB v2 Documentation** (OSS and Cloud) +3. **Telegraf Documentation** (agent and plugins) +4. **Supporting Tools Documentation** (Kapacitor, Chronograf, Flux) +5. **API Reference Documentation** (`/api-docs/`) +6. 
**Shared Documentation Components** (`/content/shared/`) + +## Important Locations for Frequent Tasks + +- **Shortcode reference**: `/content/example.md` +- **Contributing guide**: `CONTRIBUTING.md` +- **Testing guide**: `TESTING.md` +- **Product configurations**: `/data/products.yml` +- **Vale style rules**: `/.ci/vale/styles/` +- **GitHub workflows**: `/.github/workflows/` +- **Test scripts**: `/test/scripts/` +- **Hugo layouts**: `/layouts/` +- **CSS/JS assets**: `/assets/` + +## Content Guidelines and Style + +### Documentation Structure - **Product version data**: `/data/products.yml` -- **InfluxData products**: - - InfluxDB 3 Explorer - - Documentation source path: `/content/influxdb3/explorer` - - Published for the web: https://docs.influxdata.com/influxdb3/explorer/ - - InfluxDB 3 Core - - Documentation source path: `/content/influxdb3/core` - - Published for the web: https://docs.influxdata.com/influxdb3/core/ - - Code repositories: https://github.com/influxdata/influxdb, https://github.com/influxdata/influxdb3_core - - InfluxDB 3 Enterprise - - Documentation source path: `/content/influxdb3/enterprise` - - Published for the web: https://docs.influxdata.com/influxdb3/enterprise/ - - Code repositories: https://github.com/influxdata/influxdb, https://github.com/influxdata/influxdb3_enterprise - - InfluxDB Cloud Dedicated - - Documentation source path: `/content/influxdb3/cloud-dedicated` - - Published for the web: https://docs.influxdata.com/influxdb3/cloud-dedicated/ - - Code repository: https://github.com/influxdata/influxdb - - InfluxDB Cloud Serverless - - Documentation source path: `/content/influxdb3/cloud-serverless` - - Published for the web: https://docs.influxdata.com/influxdb3/cloud-serverless/ - - Code repository: https://github.com/influxdata/idpe - - InfluxDB Cloud v2 (TSM) - - Documentation source path: `/content/influxdb/cloud` - - Published for the web: https://docs.influxdata.com/influxdb/cloud/ - - Code repository: https://github.com/influxdata/idpe - - InfluxDB Clustered - - Documentation source path: `/content/influxdb3/clustered` - - Published for the web: https://docs.influxdata.com/influxdb3/clustered/ - - Code repository: https://github.com/influxdata/influxdb - - InfluxDB Enterprise v1 (1.x) - - Documentation source path: `/content/influxdb/enterprise_influxdb` - - Published for the web: https://docs.influxdata.com/enterprise_influxdb/v1/ - - Code repository: https://github.com/influxdata/influxdb - - InfluxDB OSS 1.x - - Documentation source path: `/content/influxdb/v1` - - Published for the web: https://docs.influxdata.com/influxdb/v1/ - - Code repository: https://github.com/influxdata/influxdb - - InfluxDB OSS 2.x - - Documentation source path: `/content/influxdb/v2` - - Published for the web: https://docs.influxdata.com/influxdb/v2/ - - Code repository: https://github.com/influxdata/influxdb - - Telegraf - - Documentation source path: `/content/telegraf/v1` - - Published for the web: https://docs.influxdata.com/telegraf/v1/ - - Code repository: https://github.com/influxdata/telegraf - - Kapacitor - - Documentation source path: `/content/kapacitor/v1` - - Published for the web: https://docs.influxdata.com/kapacitor/v1/ - - Code repository: https://github.com/influxdata/kapacitor - - Chronograf - - Documentation source path: `/content/chronograf/v1` - - Published for the web: https://docs.influxdata.com/chronograf/v1/ - - Code repository: https://github.com/influxdata/chronograf - - Flux - - Documentation source path: `/content/flux/v0` - - Published for 
the web: https://docs.influxdata.com/flux/v0/ - - Code repository: https://github.com/influxdata/flux -- **InfluxData-supported tools**: - - InfluxDB API client libraries - - Code repositories: https://github.com/InfluxCommunity - - InfluxDB 3 processing engine plugins - - Code repository: https://github.com/influxdata/influxdb3_plugins - **Query Languages**: SQL, InfluxQL, Flux (use appropriate language per product version) - **Documentation Site**: https://docs.influxdata.com -- **Repository**: https://github.com/influxdata/docs-v2 - **Framework**: Hugo static site generator -## Abbreviations and shortcuts - -- `gdd`: Google Developer Documentation style -- `3core`: InfluxDB 3 Core -- `3ent`: InfluxDB 3 Enterprise - -## Style guidelines +### Style Guidelines - Follow Google Developer Documentation style guidelines -- For API references, follow YouTube Data API style - Use semantic line feeds (one sentence per line) - Format code examples to fit within 80 characters -- Command line examples: - - Should be formatted as code blocks - - Should use long options (e.g., `--option` instead of `-o`) -- Use cURL for API examples - - Format to fit within 80 characters - - Should use `--data-urlencode` for query parameters - - Should use `--header` for headers -- Use only h2-h6 headings in content (h1 comes from frontmatter title properties) -- Use sentence case for headings -- Use GitHub callout syntax +- Use long options in command line examples (`--option` instead of `-o`) +- Use GitHub callout syntax for notes and warnings - Image naming: `project/version-context-description.png` -- Use appropriate product names and versions consistently -- Follow InfluxData vocabulary guidelines -## Markdown and shortcodes +### Markdown and Shortcodes -- Include proper frontmatter for Markdown pages in `content/**/*.md` (except for - shared content files in `content/shared/`): +Include proper frontmatter for all content pages: - ```yaml - title: # Page title (h1) - seotitle: # SEO title - list_title: # Title for article lists - description: # SEO description - menu: - product_version: - weight: # Page order (1-99, 101-199, etc.) - ``` -- Follow the shortcode examples in `content/example.md` and the documentation - for docs-v2 contributors in `CONTRIBUTING.md` -- Use provided shortcodes correctly: - - Notes/warnings: `{{% note %}}`, `{{% warn %}}` - - Product-specific: `{{% enterprise %}}`, `{{% cloud %}}` - - Tabbed content: `{{< tabs-wrapper >}}`, `{{% tabs %}}`, `{{% tab-content %}}` - - Tabbed content for code examples (without additional text): `{{< code-tabs-wrapper >}}`, `{{% code-tabs %}}`, `{{% code-tab-content %}}` - - Version links: `{{< latest >}}`, `{{< latest-patch >}}` - - API endpoints: `{{< api-endpoint >}}` - - Required elements: `{{< req >}}` - - Navigation: `{{< page-nav >}}` - - Diagrams: `{{< diagram >}}`, `{{< filesystem-diagram >}}` +```yaml +title: # Page title (h1) +seotitle: # SEO title +description: # SEO description +menu: + product_version: +weight: # Page order (1-99, 101-199, etc.) 
+``` -## Code examples and testing +Key shortcodes (see `/content/example.md` for full reference): -- Provide complete, working examples with proper testing annotations: +- Notes/warnings: `{{% note %}}`, `{{% warn %}}` +- Tabbed content: `{{< tabs-wrapper >}}`, `{{% tabs %}}`, `{{% tab-content %}}` +- Code examples: `{{< code-tabs-wrapper >}}`, `{{% code-tabs %}}`, `{{% code-tab-content %}}` +- Required elements: `{{< req >}}` +- API endpoints: `{{< api-endpoint >}}` + +### Code Examples and Testing + +Provide complete, working examples with pytest annotations: ```python print("Hello, world!") @@ -140,67 +289,21 @@ print("Hello, world!") Hello, world! ``` -- CLI command example: +## Troubleshooting Common Issues -```sh -influx query 'from(bucket:"example") |> range(start:-1h)' -``` +1. **"Pytest collected 0 items"**: Use `python` (not `py`) for code block language identifiers +2. **Hugo build errors**: Check `/config/_default/` for configuration issues +3. **Docker build failures**: Expected in restricted networks - document and continue with local Hugo +4. **Cypress installation failures**: Use `CYPRESS_INSTALL_BINARY=0 yarn install` +5. **Link validation slow**: Use file-specific testing: `yarn test:links content/specific-file.md` +6. **Vale linting errors**: Check `.ci/vale/styles/config/vocabularies` for accepted/rejected terms - - -``` -Table: keys: [_start, _stop, _field, _measurement] - _start:time _stop:time _field:string _measurement:string _time:time _value:float ------------------------------- ------------------------------ ---------------------- ---------------------- ------------------------------ ---------------------------- -``` - -- Include necessary environment variables -- Show proper credential handling for authenticated commands - -## API documentation - -- `/api-docs` contains OpenAPI spec files used for API reference documentation -- Follow OpenAPI specification patterns -- Match REST API examples to current implementation -- Include complete request/response examples -- Document required headers and authentication - -## Versioning and product differentiation - -- Clearly distinguish between different InfluxDB versions (1.x, 2.x, 3.x) -- Use correct terminology for each product variant -- Apply appropriate UI descriptions and screenshots -- Reference appropriate query language per version - -## Development tools - -- Vale.sh linter for style checking - - Configuration file: `.vale.ini` -- Docker for local development and testing -- pytest and pytest-codeblocks for validating code examples -- Use cypress for testing documentation UI and links -- Prettier for code formatting -- ESLint for JavaScript and TypeScript linting -- Lefthook (NPM package) for managing pre-commit hooks for quality assurance - -## Code style - -- Use modern JavaScript (ES6+) syntax - -## Related repositories - -- **Internal documentation assistance requests**: https://github.com/influxdata/DAR/issues Documentation - -## Additional instruction files +## Additional Instruction Files For specific workflows and content types, also refer to: -- **InfluxDB 3 code placeholders**: `.github/instructions/influxdb3-code-placeholders.instructions.md` - Guidelines for placeholder formatting, descriptions, and shortcode usage in InfluxDB 3 documentation -- **Contributing guidelines**: `.github/instructions/contributing.instructions.md` - Detailed style guidelines, shortcode usage, frontmatter requirements, and development workflows -- **Content-specific instructions**: Check `.github/instructions/` directory 
for specialized guidelines covering specific documentation patterns and requirements +- **InfluxDB 3 code placeholders**: `.github/instructions/influxdb3-code-placeholders.instructions.md` +- **Contributing guidelines**: `.github/instructions/contributing.instructions.md` +- **Content-specific instructions**: Check `.github/instructions/` directory -## Integration with specialized instructions - -When working on InfluxDB 3 documentation (Core/Enterprise), prioritize the placeholder guidelines from `influxdb3-code-placeholders.instructions.md`. - -For general documentation structure, shortcodes, and development workflows, follow the comprehensive guidelines in `contributing.instructions.md`. +Remember: This is a large documentation site with complex build processes. Patience with build times is essential, and NEVER CANCEL long-running operations. From 963d0a8d218cd2cb7a5cd90f89b87fb5d327df09 Mon Sep 17 00:00:00 2001 From: Peter Barnett Date: Thu, 14 Aug 2025 11:29:44 -0400 Subject: [PATCH 069/122] docs: add usage telemetry documentation --- content/influxdb3/core/reference/telemetry.md | 18 ++++ .../enterprise/reference/telemetry.md | 18 ++++ content/shared/influxdb3-admin/telemetry.md | 97 +++++++++++++++++++ 3 files changed, 133 insertions(+) create mode 100644 content/influxdb3/core/reference/telemetry.md create mode 100644 content/influxdb3/enterprise/reference/telemetry.md create mode 100644 content/shared/influxdb3-admin/telemetry.md diff --git a/content/influxdb3/core/reference/telemetry.md b/content/influxdb3/core/reference/telemetry.md new file mode 100644 index 000000000..fae9427e3 --- /dev/null +++ b/content/influxdb3/core/reference/telemetry.md @@ -0,0 +1,18 @@ +--- +title: Usage telemetry +seotitle: InfluxDB Core usage telemetry +description: > + InfluxDB Core can collect and send usage telemetry data to help improve the + product. +menu: + influxdb3_core: + parent: Reference +weight: 108 +influxdb3/core/tags: [telemetry, monitoring, metrics, observability] +source: /shared/influxdb3-admin/telemetry.md +--- + + \ No newline at end of file diff --git a/content/influxdb3/enterprise/reference/telemetry.md b/content/influxdb3/enterprise/reference/telemetry.md new file mode 100644 index 000000000..779896464 --- /dev/null +++ b/content/influxdb3/enterprise/reference/telemetry.md @@ -0,0 +1,18 @@ +--- +title: Usage telemetry +seotitle: InfluxDB Enterprise usage telemetry +description: > + InfluxDB Enterprise can collect and send usage telemetry data to help improve the + product. +menu: + influxdb3_enterprise: + parent: Reference +weight: 108 +influxdb3/enterprise/tags: [telemetry, monitoring, metrics, observability] +source: /shared/influxdb3-admin/telemetry.md +--- + + \ No newline at end of file diff --git a/content/shared/influxdb3-admin/telemetry.md b/content/shared/influxdb3-admin/telemetry.md new file mode 100644 index 000000000..90639f993 --- /dev/null +++ b/content/shared/influxdb3-admin/telemetry.md @@ -0,0 +1,97 @@ +InfluxDB 3 can collect and send usage telemetry data to help improve the product. This page describes what telemetry data is collected, when it's collected, how it's transmitted, and how to disable it. 
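This page describes what telemetry data is collected, when it's collected, how it's transmitted, and how to disable it.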
+ +## What data is collected + +{{< product-name >}} collects the following telemetry data: + +### System metrics + +- **CPU utilization**: Process-specific CPU usage (min, max, average) +- **Memory usage**: Process memory consumption in MB (min, max, average) +- **Cores**: Number of CPU cores in use +- **OS**: Operating system information +- **Version**: {{< product-name >}} version +- **Uptime**: Server uptime in seconds + +### Write metrics + +- **Write requests**: Number of write operations (min, max, average, hourly sum) +- **Write lines**: Number of lines written (min, max, average, hourly sum) +- **Write bytes**: Amount of data written in MB (min, max, average, hourly sum) + +### Query metrics + +- **Query requests**: Number of query operations (min, max, average, hourly sum) + +### Storage metrics + +- **Parquet file count**: Number of Parquet files (when available) +- **Parquet file size**: Total size of Parquet files in MB (when available) +- **Parquet row count**: Total number of rows in Parquet files (when available) + +### Processing engine metrics + +- **WAL triggers**: Write-Ahead Log trigger counts (when available) +- **Schedule triggers**: Scheduled processing trigger counts (when available) +- **Request triggers**: Request-based processing trigger counts (when available) + +### Instance information + +- **Instance ID**: Unique identifier for the server instance +- **Cluster UUID**: Unique identifier for the cluster (same as catalog UUID) +- **Storage type**: Type of object storage being used +{{% show-in "core" %}} +- **Product type**: "Core" +{{% /show-in %}} +{{% show-in "enterprise" %}} +- **Product type**: "Enterprise" +{{% /show-in %}} + +## Collection frequency + +- **System metrics** (CPU, memory): Collected every 60 seconds +- **Write and query metrics**: Collected per operation, rolled up every 60 seconds +- **Storage and processing engine metrics**: Collected at snapshot time (when available) +- **Instance information**: Static data collected once + +Telemetry data is transmitted once per hour. + +## Disable telemetry + +Disables sending telemetry data to InfluxData. + +**Default:** `false` + +| influxdb3 flag | Environment variable | +| :------------- | :------------------- | +| `--disable-telemetry-upload` | `INFLUXDB3_TELEMETRY_DISABLE_UPLOAD` | + +#### Command line flag +```sh +influxdb3 serve --disable-telemetry-upload +``` + +#### Environment variable +```sh +export INFLUXDB3_TELEMETRY_DISABLE_UPLOAD=true +``` + +When telemetry is disabled, no usage data is collected or transmitted. + +## Data handling + +The telemetry data is used by InfluxData to: + +- Understand product usage patterns +- Improve product performance and reliability +- Prioritize feature development +- Identify and resolve issues + +No personally identifiable information (PII) is collected. 
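As noted in the opt-out section above, the same setting can be passed through the environment when InfluxDB 3 runs in a container. The following is a minimal, hypothetical sketch; the image tag, port, and serve options are assumptions to adapt to your own deployment:

```sh
# Hypothetical: start a container with telemetry reporting disabled
docker run -d \
  --name influxdb3 \
  -p 8181:8181 \
  -e INFLUXDB3_TELEMETRY_DISABLE_UPLOAD=true \
  influxdb:3-core \
  influxdb3 serve --node-id node0 --object-store memory
```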
+ +## Privacy and security + +- All telemetry data is transmitted securely via HTTPS +- No database contents, queries, or user data is collected +- Only operational metrics and system information is transmitted +- Data collection follows InfluxData's privacy policy \ No newline at end of file From 32cdfb533c85b6ea9575f2f140e23ed2ac4f74fe Mon Sep 17 00:00:00 2001 From: Peter Barnett Date: Thu, 14 Aug 2025 11:44:25 -0400 Subject: [PATCH 070/122] fix: small clarity changes and easier reading --- content/shared/influxdb3-admin/telemetry.md | 42 +++++++++------------ 1 file changed, 17 insertions(+), 25 deletions(-) diff --git a/content/shared/influxdb3-admin/telemetry.md b/content/shared/influxdb3-admin/telemetry.md index 90639f993..09a4bd10a 100644 --- a/content/shared/influxdb3-admin/telemetry.md +++ b/content/shared/influxdb3-admin/telemetry.md @@ -6,8 +6,8 @@ InfluxDB 3 can collect and send usage telemetry data to help improve the product ### System metrics -- **CPU utilization**: Process-specific CPU usage (min, max, average) -- **Memory usage**: Process memory consumption in MB (min, max, average) +- **CPU utilization**: Process-specific CPU usage +- **Memory usage**: Process memory consumption in MB - **Cores**: Number of CPU cores in use - **OS**: Operating system information - **Version**: {{< product-name >}} version @@ -15,30 +15,30 @@ InfluxDB 3 can collect and send usage telemetry data to help improve the product ### Write metrics -- **Write requests**: Number of write operations (min, max, average, hourly sum) -- **Write lines**: Number of lines written (min, max, average, hourly sum) -- **Write bytes**: Amount of data written in MB (min, max, average, hourly sum) +- **Write requests**: Number of write operations +- **Write lines**: Number of lines written +- **Write bytes**: Amount of data written in MB ### Query metrics -- **Query requests**: Number of query operations (min, max, average, hourly sum) +- **Query requests**: Number of query operations ### Storage metrics -- **Parquet file count**: Number of Parquet files (when available) -- **Parquet file size**: Total size of Parquet files in MB (when available) -- **Parquet row count**: Total number of rows in Parquet files (when available) +- **Parquet file count**: Number of Parquet files +- **Parquet file size**: Total size of Parquet files in MB +- **Parquet row count**: Total number of rows in Parquet files ### Processing engine metrics -- **WAL triggers**: Write-Ahead Log trigger counts (when available) -- **Schedule triggers**: Scheduled processing trigger counts (when available) -- **Request triggers**: Request-based processing trigger counts (when available) +- **WAL triggers**: Write-Ahead Log trigger counts +- **Schedule triggers**: Scheduled processing trigger counts +- **Request triggers**: Request-based processing trigger counts ### Instance information - **Instance ID**: Unique identifier for the server instance -- **Cluster UUID**: Unique identifier for the cluster (same as catalog UUID) +- **Cluster UUID**: Unique identifier for the cluster - **Storage type**: Type of object storage being used {{% show-in "core" %}} - **Product type**: "Core" @@ -80,18 +80,10 @@ When telemetry is disabled, no usage data is collected or transmitted. ## Data handling -The telemetry data is used by InfluxData to: - -- Understand product usage patterns -- Improve product performance and reliability -- Prioritize feature development -- Identify and resolve issues - -No personally identifiable information (PII) is collected. 
+The telemetry data is used by InfluxData to understand product usage patterns, improve product performance and reliability, prioritize feature development, and identify/resolve issues. No personally identifiable information (PII) is collected. ## Privacy and security -- All telemetry data is transmitted securely via HTTPS -- No database contents, queries, or user data is collected -- Only operational metrics and system information is transmitted -- Data collection follows InfluxData's privacy policy \ No newline at end of file +All telemetry data is transmitted securely via HTTPS. No database contents, queries, or user data is collected; only operational metrics and system information is transmitted. + +All data collection follows InfluxData's privacy policy. \ No newline at end of file From 4d5d5092347b1cbaf59dd1e70bf12b4491e874d8 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Wed, 23 Jul 2025 15:17:43 -0600 Subject: [PATCH 071/122] fix(monolith): add configurable db, table, and column limits to enterprise --- .../enterprise/reference/config-options.md | 2 +- .../influxdb3-admin/databases/_index.md | 29 +++++++---- .../influxdb3-admin/databases/create.md | 5 ++ .../shared/influxdb3-cli/config-options.md | 50 ++++++++++++++++--- 4 files changed, 69 insertions(+), 17 deletions(-) diff --git a/content/influxdb3/enterprise/reference/config-options.md b/content/influxdb3/enterprise/reference/config-options.md index cab8a5a77..313ceb3f4 100644 --- a/content/influxdb3/enterprise/reference/config-options.md +++ b/content/influxdb3/enterprise/reference/config-options.md @@ -13,4 +13,4 @@ source: /shared/influxdb3-cli/config-options.md \ No newline at end of file +--> diff --git a/content/shared/influxdb3-admin/databases/_index.md b/content/shared/influxdb3-admin/databases/_index.md index 73d148aae..f3f59aef7 100644 --- a/content/shared/influxdb3-admin/databases/_index.md +++ b/content/shared/influxdb3-admin/databases/_index.md @@ -13,7 +13,7 @@ stored. Each database can contain multiple tables. > **If coming from InfluxDB v2, InfluxDB Cloud (TSM), or InfluxDB Cloud Serverless**, > _database_ and _bucket_ are synonymous. - +The _maximum_ retention period is infinite (`none`) meaning data does not expire +and will never be removed by the retention enforcement service. +{{% /show-in %}} ## Database, table, and column limits @@ -40,9 +39,11 @@ never be removed by the retention enforcement service. **Maximum number of tables across all databases**: {{% influxdb3/limit "table" %}} {{< product-name >}} limits the number of tables you can have across _all_ -databases to {{% influxdb3/limit "table" %}}. There is no specific limit on how -many tables you can have in an individual database, as long as the total across -all databases is below the limit. +databases to {{% influxdb3/limit "table" %}}{{% show-in "enterprise" %}} by default{{% /show-in %}}. +{{% show-in "enterprise" %}}You can configure the table limit using the +[`--num-table-limit` configuration option](/influxdb3/enterprise/reference/config-options/#num-table-limit).{{% /show-in %}} +InfluxDB doesn't limit how many tables you can have in an individual database, +as long as the total across all databases is below the limit. Having more tables affects your {{% product-name %}} installation in the following ways: @@ -64,7 +65,8 @@ persists data to Parquet files. Each `PUT` request incurs a monetary cost and increases the operating cost of {{< product-name >}}. 
{{% /expand %}} -{{% expand "**More work for the compactor** _(Enterprise only)_ View more info" %}} +{{% show-in "enterprise" %}} +{{% expand "**More work for the compactor** View more info" %}} To optimize storage over time, InfluxDB 3 Enterprise has a compactor that routinely compacts Parquet files. @@ -72,6 +74,7 @@ With more tables and Parquet files to compact, the compactor may need to be scal to keep up with demand, adding to the operating cost of InfluxDB 3 Enterprise. {{% /expand %}} +{{% /show-in %}} {{< /expand-wrapper >}} ### Column limit @@ -80,11 +83,17 @@ to keep up with demand, adding to the operating cost of InfluxDB 3 Enterprise. Each row must include a time column, with the remaining columns representing tags and fields. -As a result, a table can have one time column and up to {{% influxdb3/limit "column" -1 %}} +As a result,{{% show-in "enterprise" %}} by default,{{% /show-in %}} a table can +have one time column and up to {{% influxdb3/limit "column" -1 %}} _combined_ field and tag columns. If you attempt to write to a table and exceed the column limit, the write request fails and InfluxDB returns an error. +{{% show-in "enterprise" %}} +You can configure the maximum number of columns per +table using the [`num-total-columns-per-table-limit` configuration option](/influxdb3/enterprise/reference/config-options/#num-total-columns-per-table-limit). +{{% /show-in %}} + Higher numbers of columns has the following side-effects: {{< expand-wrapper >}} diff --git a/content/shared/influxdb3-admin/databases/create.md b/content/shared/influxdb3-admin/databases/create.md index bbac8fa26..fd9546174 100644 --- a/content/shared/influxdb3-admin/databases/create.md +++ b/content/shared/influxdb3-admin/databases/create.md @@ -130,7 +130,12 @@ database_name/retention_policy_name ## Database limit +{{% show-in "enterprise" %}} +**Default maximum number of databases**: {{% influxdb3/limit "database" %}} +{{% /show-in %}} +{{% show-in "core" %}} **Maximum number of databases**: {{% influxdb3/limit "database" %}} +{{% /show-in %}} _For more information about {{< product-name >}} database, table, and column limits, see [Database, table, and column limits](/influxdb3/version/admin/databases/#database-table-and-column-limits)._ diff --git a/content/shared/influxdb3-cli/config-options.md b/content/shared/influxdb3-cli/config-options.md index 268b3ac93..13d98106c 100644 --- a/content/shared/influxdb3-cli/config-options.md +++ b/content/shared/influxdb3-cli/config-options.md @@ -53,6 +53,10 @@ influxdb3 serve - [tls-minimum-versions](#tls-minimum-version) - [without-auth](#without-auth) - [disable-authz](#disable-authz) +{{% show-in "enterprise" %}} + - [num-database-limit](#num-database-limit) + - [num-table-limit](#num-table-limit) + - [num-total-columns-per-table-limit](#num-total-columns-per-table-limit){{% /show-in %}} - [AWS](#aws) - [aws-access-key-id](#aws-access-key-id) - [aws-secret-access-key](#aws-secret-access-key) @@ -204,7 +208,7 @@ This value must be different than the [`--node-id`](#node-id) value. #### data-dir -For the `file` object store, defines the location InfluxDB 3 uses to store files locally. +For the `file` object store, defines the location {{< product-name >}} uses to store files locally. Required when using the `file` [object store](#object-store). | influxdb3 serve option | Environment variable | @@ -216,7 +220,7 @@ Required when using the `file` [object store](#object-store). 
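For example, a minimal sketch that pairs the two options (the node ID and path are placeholders, and InfluxDB 3 Enterprise additionally requires `--cluster-id`):

```sh
# Minimal local startup using the file object store
influxdb3 serve \
  --node-id node0 \
  --object-store file \
  --data-dir ~/.influxdb3/data
```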
{{% show-in "enterprise" %}} #### license-email -Specifies the email address to associate with your InfluxDB 3 Enterprise license +Specifies the email address to associate with your {{< product-name >}} license and automatically responds to the interactive email prompt when the server starts. This option is mutually exclusive with [license-file](#license-file). @@ -228,7 +232,7 @@ This option is mutually exclusive with [license-file](#license-file). #### license-file -Specifies the path to a license file for InfluxDB 3 Enterprise. When provided, the license +Specifies the path to a license file for {{< product-name >}}. When provided, the license file's contents are used instead of requesting a new license. This option is mutually exclusive with [license-email](#license-email). @@ -361,10 +365,44 @@ The server processes all requests without requiring tokens or authentication. Optionally disable authz by passing in a comma separated list of resources. Valid values are `health`, `ping`, and `metrics`. -| influxdb3 serve option | Environment variable | -| :--------------------- | :----------------------- | -| `--disable-authz` | `INFLUXDB3_DISABLE_AUTHZ`| +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------------ | +| `--disable-authz` | `INFLUXDB3_DISABLE_AUTHZ` | +{{% show-in "enterprise" %}} +--- + +#### num-database-limit + +Limits the total number of active databases. +Default is {{% influxdb3/limit "database" %}}. + +| influxdb3 serve option | Environment variable | +| :---------------------- | :---------------------------------------- | +| `--num-database-limit` | `INFLUXDB3_ENTERPRISE_NUM_DATABASE_LIMIT` | + +--- + +#### num-table-limit + +Limits the total number of active tables across all databases. +Default is {{% influxdb3/limit "table" %}}. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------------------------- | +| `--num-table-limit` | `INFLUXDB3_ENTERPRISE_NUM_TABLE_LIMIT` | + +--- + +#### num-total-columns-per-table-limit + +Limits the total number of columns per table. +Default is {{% influxdb3/limit "column" %}}. + +| influxdb3 serve option | Environment variable | +| :------------------------------------ | :------------------------------------------------------- | +| `--num-total-columns-per-table-limit` | `INFLUXDB3_ENTERPRISE_NUM_TOTAL_COLUMNS_PER_TABLE_LIMIT` | +{{% /show-in %}} --- ### AWS From 5492ba9eef7eef1bbee0912acbcbcc49d872bfa6 Mon Sep 17 00:00:00 2001 From: Peter Barnett Date: Thu, 14 Aug 2025 11:52:59 -0400 Subject: [PATCH 072/122] fix: remember the 3 --- content/influxdb3/core/reference/telemetry.md | 4 ++-- content/influxdb3/enterprise/reference/telemetry.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/content/influxdb3/core/reference/telemetry.md b/content/influxdb3/core/reference/telemetry.md index fae9427e3..14eb0692e 100644 --- a/content/influxdb3/core/reference/telemetry.md +++ b/content/influxdb3/core/reference/telemetry.md @@ -1,8 +1,8 @@ --- title: Usage telemetry -seotitle: InfluxDB Core usage telemetry +seotitle: InfluxDB 3 Core usage telemetry description: > - InfluxDB Core can collect and send usage telemetry data to help improve the + InfluxDB 3 Core can collect and send usage telemetry data to help improve the product. 
menu: influxdb3_core: diff --git a/content/influxdb3/enterprise/reference/telemetry.md b/content/influxdb3/enterprise/reference/telemetry.md index 779896464..3b3fef879 100644 --- a/content/influxdb3/enterprise/reference/telemetry.md +++ b/content/influxdb3/enterprise/reference/telemetry.md @@ -1,8 +1,8 @@ --- title: Usage telemetry -seotitle: InfluxDB Enterprise usage telemetry +seotitle: InfluxDB 3 Enterprise usage telemetry description: > - InfluxDB Enterprise can collect and send usage telemetry data to help improve the + InfluxDB 3 Enterprise can collect and send usage telemetry data to help improve the product. menu: influxdb3_enterprise: From f23026982b681658d658f8a50719a2a7f98e591d Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Thu, 14 Aug 2025 14:41:05 -0500 Subject: [PATCH 073/122] chore(monolith): Move telemetry reference to shared/influxdb3-reference. Revise description, disable, and product names. --- content/influxdb3/core/reference/telemetry.md | 9 +++++---- content/influxdb3/enterprise/reference/telemetry.md | 9 +++++---- .../telemetry.md | 8 ++++++-- 3 files changed, 16 insertions(+), 10 deletions(-) rename content/shared/{influxdb3-admin => influxdb3-reference}/telemetry.md (83%) diff --git a/content/influxdb3/core/reference/telemetry.md b/content/influxdb3/core/reference/telemetry.md index 14eb0692e..91002fb2c 100644 --- a/content/influxdb3/core/reference/telemetry.md +++ b/content/influxdb3/core/reference/telemetry.md @@ -2,17 +2,18 @@ title: Usage telemetry seotitle: InfluxDB 3 Core usage telemetry description: > - InfluxDB 3 Core can collect and send usage telemetry data to help improve the - product. + InfluxData collects telemetry data to help improve the {{< product-name >}}. + Learn what data {{< product-name >}} collects and sends to InfluxData, how it's used, and + how you can opt out. menu: influxdb3_core: parent: Reference weight: 108 influxdb3/core/tags: [telemetry, monitoring, metrics, observability] -source: /shared/influxdb3-admin/telemetry.md +source: /shared/influxdb3-reference/telemetry.md --- \ No newline at end of file diff --git a/content/influxdb3/enterprise/reference/telemetry.md b/content/influxdb3/enterprise/reference/telemetry.md index 3b3fef879..6ebb4ac6b 100644 --- a/content/influxdb3/enterprise/reference/telemetry.md +++ b/content/influxdb3/enterprise/reference/telemetry.md @@ -2,17 +2,18 @@ title: Usage telemetry seotitle: InfluxDB 3 Enterprise usage telemetry description: > - InfluxDB 3 Enterprise can collect and send usage telemetry data to help improve the - product. + InfluxData collects telemetry data to help improve the {{< product-name >}}. + Learn what data {{< product-name >}} collects and sends to InfluxData, how it's used, and + how you can opt out. menu: influxdb3_enterprise: parent: Reference weight: 108 influxdb3/enterprise/tags: [telemetry, monitoring, metrics, observability] -source: /shared/influxdb3-admin/telemetry.md +source: /shared/influxdb3-reference/telemetry.md --- \ No newline at end of file diff --git a/content/shared/influxdb3-admin/telemetry.md b/content/shared/influxdb3-reference/telemetry.md similarity index 83% rename from content/shared/influxdb3-admin/telemetry.md rename to content/shared/influxdb3-reference/telemetry.md index 09a4bd10a..4f8a5589f 100644 --- a/content/shared/influxdb3-admin/telemetry.md +++ b/content/shared/influxdb3-reference/telemetry.md @@ -1,4 +1,6 @@ -InfluxDB 3 can collect and send usage telemetry data to help improve the product. 
This page describes what telemetry data is collected, when it's collected, how it's transmitted, and how to disable it. +InfluxData collects information, or _telemetry data_, about the usage of {{% product-name %}} to help improve the product. +Learn what data {{% product-name %}} collects and sends to InfluxData, how it's used, and +how you can opt out. ## What data is collected @@ -58,7 +60,9 @@ Telemetry data is transmitted once per hour. ## Disable telemetry -Disables sending telemetry data to InfluxData. +To "opt-out" of collecting and sending {{% product-name %}} telemetry data, +include the `--disable-telemetry-upload` flag or set the `INFLUXDB3_TELEMETRY_DISABLE_UPLOAD` environment variable +when starting {{% product-name %}}. **Default:** `false` From 144de5785c78d95f4825ca9571b386dae6a3f2e8 Mon Sep 17 00:00:00 2001 From: meelahme Date: Thu, 14 Aug 2025 14:58:03 -0700 Subject: [PATCH 074/122] minor updates to requestBody to fix Ci build error --- .../influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index a2f8693c9..7735c655d 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -329,10 +329,10 @@ paths: - [Manage databases in InfluxDB Clustered](/influxdb3/clustered/admin/databases/) - [InfluxQL DBRP naming convention in InfluxDB Clustered](/influxdb3/clustered/admin/databases/create/#influxql-dbrp-naming-convention) - [Migrate data from InfluxDB v1 to InfluxDB Clustered](/influxdb3/clustered/guides/migrate-data/migrate-1x-to-clustered/) - ``` type: string q: - description: Defines the InfluxQL query to run. + description: | + Defines the InfluxQL query to run. type: string chunked: description: | From 34416a1d37734a188f7b8e46ed60757c43cedf30 Mon Sep 17 00:00:00 2001 From: Jakub Bednar Date: Fri, 15 Aug 2025 08:42:39 +0200 Subject: [PATCH 075/122] Release Chronograf v1.10.8 --- content/chronograf/v1/about_the_project/release-notes.md | 6 ++++++ data/products.yml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/content/chronograf/v1/about_the_project/release-notes.md b/content/chronograf/v1/about_the_project/release-notes.md index 5cc972a4f..eae49effd 100644 --- a/content/chronograf/v1/about_the_project/release-notes.md +++ b/content/chronograf/v1/about_the_project/release-notes.md @@ -10,6 +10,12 @@ aliases: - /chronograf/v1/about_the_project/release-notes-changelog/ --- +## v1.10.8 {date="2025-08-15"} + +### Bug Fixes + +- Fix missing retention policies on the Databases page. + ## v1.10.7 {date="2025-04-15"} ### Bug Fixes diff --git a/data/products.yml b/data/products.yml index 44b530c43..e707fd718 100644 --- a/data/products.yml +++ b/data/products.yml @@ -157,7 +157,7 @@ chronograf: versions: [v1] latest: v1.10 latest_patches: - v1: 1.10.7 + v1: 1.10.8 ai_sample_questions: - How do I configure Chronograf for InfluxDB v1? - How do I create a dashboard in Chronograf? 
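After upgrading, a quick check confirms the new patch version. This assumes the `chronograf` binary is on your `PATH`, and the exact output format may differ:

```sh
chronograf --version
```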
From 3b5385812a55f7d1ab56bc89bd1860e6b2900c51 Mon Sep 17 00:00:00 2001
From: Jason Stirnaman
Date: Fri, 15 Aug 2025 10:14:29 -0500
Subject: [PATCH 076/122] Apply suggestions from code review

---
 .github/copilot-instructions.md | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md
index 99059b56f..d0fc9113f 100644
--- a/.github/copilot-instructions.md
+++ b/.github/copilot-instructions.md
@@ -217,12 +217,14 @@ pkill hugo
 
 ## Key Projects in This Codebase
 
-1. **InfluxDB 3 Documentation** (Core, Enterprise, Cloud variants)
-2. **InfluxDB v2 Documentation** (OSS and Cloud)
-3. **Telegraf Documentation** (agent and plugins)
-4. **Supporting Tools Documentation** (Kapacitor, Chronograf, Flux)
-5. **API Reference Documentation** (`/api-docs/`)
-6. **Shared Documentation Components** (`/content/shared/`)
+1. **InfluxDB 3 Documentation** (Core, Enterprise, Clustered, Cloud Dedicated, Cloud Serverless, and InfluxDB 3 plugins for Core and Enterprise)
+2. **InfluxDB 3 Explorer** (UI)
+3. **InfluxDB v2 Documentation** (OSS and Cloud)
+4. **InfluxDB v1 Documentation** (OSS and Enterprise)
+5. **Telegraf Documentation** (agent and plugins)
+6. **Supporting Tools Documentation** (Kapacitor, Chronograf, Flux)
+7. **API Reference Documentation** (`/api-docs/`)
+8. **Shared Documentation Components** (`/content/shared/`)
 
 ## Important Locations for Frequent Tasks
 
@@ -233,7 +235,7 @@ pkill hugo
 - **Vale style rules**: `/.ci/vale/styles/`
 - **GitHub workflows**: `/.github/workflows/`
 - **Test scripts**: `/test/scripts/`
-- **Hugo layouts**: `/layouts/`
+- **Hugo layouts and shortcodes**: `/layouts/`
 - **CSS/JS assets**: `/assets/`
 
 ## Content Guidelines and Style
 
@@ -269,7 +271,7 @@ weight: # Page order (1-99, 101-199, etc.)
Key shortcodes (see `/content/example.md` for full reference): -- Notes/warnings: `{{% note %}}`, `{{% warn %}}` +- Notes/warnings (GitHub syntax): `> [!Note]`, `> [!Warning]` - Tabbed content: `{{< tabs-wrapper >}}`, `{{% tabs %}}`, `{{% tab-content %}}` - Code examples: `{{< code-tabs-wrapper >}}`, `{{% code-tabs %}}`, `{{% code-tab-content %}}` - Required elements: `{{< req >}}` From 06fff5868f45b461a064d9032f3974ec3451f8ad Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Fri, 15 Aug 2025 15:27:14 -0500 Subject: [PATCH 077/122] feat(v1): update documentation for the upcoming InfluxDB v1.12 and Enterprise v1.12 release **Keep this commit for the upcoming v1.12 release** - Label upcoming features as v1.12.0+: - Update configuration documentation for data nodes - Update database management documentation - Update influx_inspect tool documentation - Update show-shards documentation - Revert product version references to 1.11.8 in products.yml --- .../v1/administration/configure/config-data-nodes.md | 4 ++-- .../v1/query_language/manage-database.md | 4 ++-- content/enterprise_influxdb/v1/tools/influx_inspect.md | 4 ++-- .../enterprise_influxdb/v1/tools/influxd-ctl/show-shards.md | 6 +++--- content/influxdb/v1/query_language/manage-database.md | 4 ++-- content/influxdb/v1/tools/influx_inspect.md | 4 ++-- data/products.yml | 6 +++--- 7 files changed, 16 insertions(+), 16 deletions(-) diff --git a/content/enterprise_influxdb/v1/administration/configure/config-data-nodes.md b/content/enterprise_influxdb/v1/administration/configure/config-data-nodes.md index ecddbd49c..6295ac3d5 100644 --- a/content/enterprise_influxdb/v1/administration/configure/config-data-nodes.md +++ b/content/enterprise_influxdb/v1/administration/configure/config-data-nodes.md @@ -326,7 +326,7 @@ Very useful for troubleshooting, but will log any sensitive data contained withi Environment variable: `INFLUXDB_DATA_QUERY_LOG_ENABLED` -#### query-log-path +#### query-log-path {metadata="v1.12.0+"} Default is `""`. @@ -352,7 +352,7 @@ The following is an example of a `logrotate` configuration: ``` Environment variable: `INFLUXDB_DATA_QUERY_LOG_PATH` - +--> #### wal-fsync-delay Default is `"0s"`. diff --git a/content/enterprise_influxdb/v1/query_language/manage-database.md b/content/enterprise_influxdb/v1/query_language/manage-database.md index c70c1cb52..4a1f09b7a 100644 --- a/content/enterprise_influxdb/v1/query_language/manage-database.md +++ b/content/enterprise_influxdb/v1/query_language/manage-database.md @@ -306,7 +306,7 @@ See [Shard group duration management](/enterprise_influxdb/v1/concepts/schema_and_data_layout/#shard-group-duration-management) for recommended configurations. -##### `PAST LIMIT` +##### `PAST LIMIT` {metadata="v1.12.0+"} The `PAST LIMIT` clause defines a time boundary before and relative to _now_ in which points written to the retention policy are accepted. If a point has a @@ -317,7 +317,7 @@ For example, if a write request tries to write data to a retention policy with a `PAST LIMIT 6h` and there are points in the request with timestamps older than 6 hours, those points are rejected. -##### `FUTURE LIMIT` +##### `FUTURE LIMIT` {metadata="v1.12.0+"} The `FUTURE LIMIT` clause defines a time boundary after and relative to _now_ in which points written to the retention policy are accepted. 
If a point has a diff --git a/content/enterprise_influxdb/v1/tools/influx_inspect.md b/content/enterprise_influxdb/v1/tools/influx_inspect.md index 08ba93390..8e0e6ee15 100644 --- a/content/enterprise_influxdb/v1/tools/influx_inspect.md +++ b/content/enterprise_influxdb/v1/tools/influx_inspect.md @@ -453,7 +453,7 @@ Default value is `$HOME/.influxdb/wal`. See the [file system layout](/enterprise_influxdb/v1/concepts/file-system-layout/#file-system-layout) for InfluxDB on your system. -##### [ `-tsmfile ` ] +##### [ `-tsmfile ` ] {metadata="v1.12.0+"} Path to a single tsm file to export. This requires both `-database` and `-retention` to be specified. @@ -472,7 +472,7 @@ influx_inspect export -compress influx_inspect export -database DATABASE_NAME -retention RETENTION_POLICY ``` -##### Export data from a single TSM file +##### Export data from a single TSM file {metadata="v1.12.0+"} ```bash influx_inspect export \ diff --git a/content/enterprise_influxdb/v1/tools/influxd-ctl/show-shards.md b/content/enterprise_influxdb/v1/tools/influxd-ctl/show-shards.md index cc3451615..b87f7bea2 100644 --- a/content/enterprise_influxdb/v1/tools/influxd-ctl/show-shards.md +++ b/content/enterprise_influxdb/v1/tools/influxd-ctl/show-shards.md @@ -44,6 +44,8 @@ ID Database Retention Policy Desired Replicas Shard Group Start {{% /expand %}} {{< /expand-wrapper >}} +#### Show inconsistent shards {metadata="v1.12.0+"} + You can also use the `-m` flag to output "inconsistent" shards which are shards that are either in metadata but not on disk or on disk but not in metadata. @@ -52,10 +54,8 @@ that are either in metadata but not on disk or on disk but not in metadata. | Flag | Description | | :--- | :-------------------------------- | | `-v` | Return detailed shard information | -| `-m` | Return inconsistent shards | +| `-m` | Return inconsistent shards | {{% caption %}} _Also see [`influxd-ctl` global flags](/enterprise_influxdb/v1/tools/influxd-ctl/#influxd-ctl-global-flags)._ {{% /caption %}} - -## Examples diff --git a/content/influxdb/v1/query_language/manage-database.md b/content/influxdb/v1/query_language/manage-database.md index bbda3c443..554b8b871 100644 --- a/content/influxdb/v1/query_language/manage-database.md +++ b/content/influxdb/v1/query_language/manage-database.md @@ -307,7 +307,7 @@ See [Shard group duration management](/influxdb/v1/concepts/schema_and_data_layout/#shard-group-duration-management) for recommended configurations. -##### `PAST LIMIT` +##### `PAST LIMIT` {metadata="v1.12.0+"} The `PAST LIMIT` clause defines a time boundary before and relative to _now_ in which points written to the retention policy are accepted. If a point has a @@ -318,7 +318,7 @@ For example, if a write request tries to write data to a retention policy with a `PAST LIMIT 6h` and there are points in the request with timestamps older than 6 hours, those points are rejected. -##### `FUTURE LIMIT` +##### `FUTURE LIMIT` {metadata="v1.12.0+"} The `FUTURE LIMIT` clause defines a time boundary after and relative to _now_ in which points written to the retention policy are accepted. If a point has a diff --git a/content/influxdb/v1/tools/influx_inspect.md b/content/influxdb/v1/tools/influx_inspect.md index 5c4bdb543..1bdbb5f18 100644 --- a/content/influxdb/v1/tools/influx_inspect.md +++ b/content/influxdb/v1/tools/influx_inspect.md @@ -449,7 +449,7 @@ Default value is `$HOME/.influxdb/wal`. See the [file system layout](/influxdb/v1/concepts/file-system-layout/#file-system-layout) for InfluxDB on your system. 
-##### [ `-tsmfile ` ] +##### [ `-tsmfile ` ] {metadata="v1.12.0+"} Path to a single tsm file to export. This requires both `-database` and `-retention` to be specified. @@ -468,7 +468,7 @@ influx_inspect export -compress influx_inspect export -database DATABASE_NAME -retention RETENTION_POLICY ``` -##### Export data from a single TSM file +##### Export data from a single TSM file {metadata="v1.12.0+"} ```bash influx_inspect export \ diff --git a/data/products.yml b/data/products.yml index e707fd718..62b027b66 100644 --- a/data/products.yml +++ b/data/products.yml @@ -100,7 +100,7 @@ influxdb: latest: v2.7 latest_patches: v2: 2.7.12 - v1: 1.12.1 + v1: 1.11.8 latest_cli: v2: 2.7.5 ai_sample_questions: @@ -183,9 +183,9 @@ enterprise_influxdb: menu_category: self-managed list_order: 5 versions: [v1] - latest: v1.12 + latest: v1.11 latest_patches: - v1: 1.12.1 + v1: 1.11.8 ai_sample_questions: - How can I configure my InfluxDB v1 Enterprise server? - How do I replicate data between InfluxDB v1 Enterprise and OSS? From 63bc1fcc8107c3b71f81defbf4f46da8ec3ab3c4 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Fri, 15 Aug 2025 15:31:17 -0500 Subject: [PATCH 078/122] fix(v1): v1.12 release notes and related changes are pre-release documentation\ **Revert this commit for the v1.12 release**\ - Remove links from release notes to upcoming 1.12.x features - Add callout to explain that v1.12 isn't yet available --- .../v1/about-the-project/release-notes.md | 33 +++++++++++++++++-- .../v1/about_the_project/release-notes.md | 25 +++++++++++++- 2 files changed, 55 insertions(+), 3 deletions(-) diff --git a/content/enterprise_influxdb/v1/about-the-project/release-notes.md b/content/enterprise_influxdb/v1/about-the-project/release-notes.md index aae7eecd2..b6db183f6 100644 --- a/content/enterprise_influxdb/v1/about-the-project/release-notes.md +++ b/content/enterprise_influxdb/v1/about-the-project/release-notes.md @@ -1,5 +1,5 @@ --- -title: InfluxDB Enterprise 1.11 release notes +title: InfluxDB Enterprise v1 release notes description: > Important changes and what's new in each version InfluxDB Enterprise. menu: @@ -7,9 +7,16 @@ menu: name: Release notes weight: 10 parent: About the project +alt_links: + v1: /influxdb/v1/about_the_project/release-notes/ --- -## v1.12.1 {date="2025-06-26"} +## v1.12.x {date="TBD"} + +> [!Important] +> #### Pre-release documentation +> +> This release is not yet available. [**v{{% latest-patch %}}**](#v1118) is the latest InfluxDB Enterprise v1 release. > [!Important] > #### Upgrade meta nodes first @@ -22,31 +29,53 @@ menu: - Add additional log output when using [`influx_inspect buildtsi`](/enterprise_influxdb/v1/tools/influx_inspect/#buildtsi) to rebuild the TSI index. + + +- Use [`influx_inspect export`](/enterprise_influxdb/v1/tools/influx_inspect/#export) with + `-tsmfile` option to + export a single TSM file. - Add `-m` flag to the [`influxd-ctl show-shards` command](/enterprise_influxdb/v1/tools/influxd-ctl/show-shards/) to output inconsistent shards. - Allow the specification of a write window for retention policies. - Add `fluxQueryRespBytes` metric to the `/debug/vars` metrics endpoint. - Log whenever meta gossip times exceed expiration. + + +- Add `query-log-path` configuration option to data nodes. +- Add `aggressive-points-per-block` configuration option to prevent TSM files from not getting fully compacted. - Log TLS configuration settings on startup. - Check for TLS certificate and private key permissions. - Add a warning if the TLS certificate is expired. 
- Add authentication to the Raft portal and add the following related _data_ node configuration options: + + + - `[meta].raft-portal-auth-required` + - `[meta].raft-dialer-auth-required` - Improve error handling. - InfluxQL updates: - Delete series by retention policy. + + + + - Allow retention policies to discard writes that fall within their range, but + outside of `FUTURE LIMIT` and `PAST LIMIT`. ## Bug fixes diff --git a/content/influxdb/v1/about_the_project/release-notes.md b/content/influxdb/v1/about_the_project/release-notes.md index 3ce948f4b..fcf9dc9ec 100644 --- a/content/influxdb/v1/about_the_project/release-notes.md +++ b/content/influxdb/v1/about_the_project/release-notes.md @@ -10,27 +10,50 @@ aliases: - /influxdb/v1/about_the_project/releasenotes-changelog/ alt_links: v2: /influxdb/v2/reference/release-notes/influxdb/ + enterprise_v1: /enterprise_influxdb/v1/about-the-project/release-notes/ --- -## v1.12.1 {date="2025-06-26"} +## v1.12.x {date="TBD"} + +> [!Important] +> #### Pre-release documentation +> +> This release is not yet available. [**v{{% latest-patch %}}**](#v1118) is the latest InfluxDB v1 release. ## Features - Add additional log output when using [`influx_inspect buildtsi`](/influxdb/v1/tools/influx_inspect/#buildtsi) to rebuild the TSI index. + + +- Use [`influx_inspect export`](/influxdb/v1/tools/influx_inspect/#export) with + `-tsmfile` option to + export a single TSM file. + - Add `fluxQueryRespBytes` metric to the `/debug/vars` metrics endpoint. + + +- Add `aggressive-points-per-block` configuration option + to prevent TSM files from not getting fully compacted. - Improve error handling. - InfluxQL updates: - Delete series by retention policy. + + + - Allow retention policies to discard writes that fall within their range, but + outside of `FUTURE LIMIT` and `PAST LIMIT`. 
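The two clauses together define a write-acceptance window around _now_. A minimal sketch of how they might be combined in one statement (the database name, policy name, and durations here are hypothetical, and the clause placement assumes `PAST LIMIT`/`FUTURE LIMIT` append to the standard `CREATE RETENTION POLICY` syntax described in the manage-database documentation above):

```bash
# Hypothetical policy: accept only points timestamped between 6 hours in
# the past and 1 hour in the future; writes outside that window are rejected.
influx -execute 'CREATE RETENTION POLICY "write_window" ON "mydb"
  DURATION 30d REPLICATION 1
  PAST LIMIT 6h FUTURE LIMIT 1h'
```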
## Bug fixes From e10340b6ecbab0f5ee3e96adafd4d33cab558b3b Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Thu, 14 Aug 2025 23:14:51 -0500 Subject: [PATCH 079/122] chore(ci): Add config for new link validation tool (docs-tooling/link-checker) chore(ci): Replaced PR link validation workflow with new workflow from docs-tooling/link-checker/.github-workflows-link-check.yml chore: organize .gitignore test: add content to trigger link-checker workflow This small change tests the pr-link-check.yml workflow feat: update link-checker workflow and documentation - Add production config with corrected User-Agent placement - Remove old link validation actions (replaced by link-checker) fix: update link-checker workflow configuration - Update Node.js version to 20 for dependency compatibility feat: use pre-built link-checker binary from docs-tooling releases - Replace building from source with downloading from releases - Use GitHub API to get latest release and binary - Maintain same artifact structure for downstream job fix: improve change detection in pr-link-check workflow - Use GitHub API for reliable PR file detection - Add debug output to show all changed files - Fix conditional logic for when jobs should run docs: update TESTING.md with binary distribution and automated GitHub Actions integration - Document pre-built binary download as recommended installation method - Explain automated PR link checking workflow for docs-v2 - Replace manual GitHub Actions example with automated integration details - Remove exaggerated language and specify actual exclusion types fix(ci): download link-checker binary from docs-v2 releases - Change binary source from private docs-tooling to public docs-v2 releases - Fixes GitHub Actions permission issues accessing private repos - Binary is now stored as a release asset on docs-v2 itself test: add test file with valid links to verify workflow passes test: remove temporary test file for link checker workflow The test file was only needed to verify the workflow functionality and should not be part of the documentation. 
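The "GitHub API for reliable PR file detection" fix described above boils down to one Files API call plus a filter; a minimal standalone sketch (the repository, PR number, and token are placeholders):

```bash
# List the files changed in a pull request and keep only content Markdown
# files -- the same endpoint and jq filter the pr-link-check.yml workflow uses.
curl -s -H "Authorization: token $GITHUB_TOKEN" \
  "https://api.github.com/repos/influxdata/docs-v2/pulls/123/files" |
  jq -r '.[].filename' | grep '^content/.*\.md$'
```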
docs: update TESTING.md to document docs-v2 binary distribution - Change primary installation method to download from docs-v2 releases - Explain that binary distribution enables reliable GitHub Actions access - Update automated workflow description to reflect docs-v2 release usage - Maintain build-from-source as alternative option refactor(ci): combine workflow into single job for cleaner PR display - Merge detect-changes, build-site, and download-link-checker into single job - All setup steps now run conditionally within one job - Cleaner PR display shows only 'Check links in affected files' - Maintains all functionality with improved UX fix(ci): exclude problematic URLs from link checking - Add reddit.com exclusions (blocks bots) - Add support.influxdata.com exclusion (SSL certificate issues in CI) - Prevents false positive failures in automated link checking --- .ci/link-checker/default.lycherc.toml | 66 +++++ .ci/link-checker/production.lycherc.toml | 108 ++++++++ .../actions/report-broken-links/action.yml | 103 -------- .github/actions/validate-links/action.yml | 106 -------- .github/workflows/pr-link-check.yml | 241 ++++++++++++++++++ .github/workflows/pr-link-validation.yml | 148 ----------- .gitignore | 13 +- TESTING.md | 219 ++++++++++------ content/influxdb3/core/get-started/_index.md | 1 + 9 files changed, 572 insertions(+), 433 deletions(-) create mode 100644 .ci/link-checker/default.lycherc.toml create mode 100644 .ci/link-checker/production.lycherc.toml delete mode 100644 .github/actions/report-broken-links/action.yml delete mode 100644 .github/actions/validate-links/action.yml create mode 100644 .github/workflows/pr-link-check.yml delete mode 100644 .github/workflows/pr-link-validation.yml diff --git a/.ci/link-checker/default.lycherc.toml b/.ci/link-checker/default.lycherc.toml new file mode 100644 index 000000000..22f97a0f9 --- /dev/null +++ b/.ci/link-checker/default.lycherc.toml @@ -0,0 +1,66 @@ +# Lychee link checker configuration +# Generated by link-checker +[lychee] +# Performance settings + +# Maximum number of retries for failed checks + +max_retries = 3 + +# Timeout for each link check (in seconds) +timeout = 30 + +# Maximum number of concurrent checks +max_concurrency = 128 + +skip_code_blocks = false + +# HTTP settings +# Identify the tool to external services +user_agent = "Mozilla/5.0 (compatible; link-checker)" + +# Accept these HTTP status codes as valid +accept = [200, 201, 202, 203, 204, 206, 301, 302, 303, 304, +307, 308] + +# Skip these URL schemes +scheme = ["file", "mailto", "tel"] + +# Exclude patterns (regex supported) +exclude = [ + # Localhost URLs + "^https?://localhost", + "^https?://127\\.0\\.0\\.1", + + # Common CI/CD environments + "^https?://.*\\.local", + + # Example domains used in documentation + "^https?://example\\.(com|org|net)", + + # Placeholder URLs from code block filtering + "https://example.com/REMOVED_FROM_CODE_BLOCK", + "example.com/INLINE_CODE_URL", + + # URLs that require authentication + "^https?://.*\\.slack\\.com", + "^https?://.*\\.atlassian\\.net", + + # GitHub URLs (often fail due to rate limiting and bot + # detection) + "^https?://github\\.com", + + # Common documentation placeholders + "YOUR_.*", + "REPLACE_.*", + "<.*>", +] + +# Request headers +[headers] +# Add custom headers here if needed +# "Authorization" = "Bearer $GITHUB_TOKEN" + +# Cache settings +cache = true +max_cache_age = "1d" \ No newline at end of file diff --git a/.ci/link-checker/production.lycherc.toml b/.ci/link-checker/production.lycherc.toml new 
file mode 100644 index 000000000..9b8be5aa3 --- /dev/null +++ b/.ci/link-checker/production.lycherc.toml @@ -0,0 +1,108 @@ +# Production Link Checker Configuration for InfluxData docs-v2 +# Optimized for performance, reliability, and reduced false positives +[lychee] +# Performance settings + +# Maximum number of retries for failed checks + +max_retries = 3 + +# Timeout for each link check (in seconds) +timeout = 30 + +# Maximum number of concurrent checks +max_concurrency = 128 + +skip_code_blocks = false + +# HTTP settings +# Identify the tool to external services +"User-Agent" = "Mozilla/5.0 (compatible; influxdata-link-checker/1.0; +https://github.com/influxdata/docs-v2)" +accept = [200, 201, 202, 203, 204, 206, 301, 302, 303, 304, 307, 308] + +# Skip these URL schemes +scheme = ["mailto", "tel"] + +# Performance optimizations +cache = true +max_cache_age = "1h" + +# Retry configuration for reliability +include_verbatim = false + +# Exclusion patterns for docs-v2 (regex supported) +exclude = [ + # Localhost URLs + "^https?://localhost", + "^https?://127\\.0\\.0\\.1", + + # Common CI/CD environments + "^https?://.*\\.local", + + # Example domains used in documentation + "^https?://example\\.(com|org|net)", + + # Placeholder URLs from code block filtering + "https://example.com/REMOVED_FROM_CODE_BLOCK", + "example.com/INLINE_CODE_URL", + + # URLs that require authentication + "^https?://.*\\.slack\\.com", + "^https?://.*\\.atlassian\\.net", + + # GitHub URLs (often fail due to rate limiting and bot + # detection) + "^https?://github\\.com", + + # Social media URLs (often block bots) + "^https?://reddit\\.com", + "^https?://.*\\.reddit\\.com", + + # InfluxData support URLs (certificate/SSL issues in CI) + "^https?://support\\.influxdata\\.com", + + # Common documentation placeholders + "YOUR_.*", + "REPLACE_.*", + "<.*>", +] + +# Request headers +[headers] +# Add custom headers here if needed +# "Authorization" = "Bearer $GITHUB_TOKEN" +"Accept" = "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8" +"Accept-Language" = "en-US,en;q=0.5" +"Accept-Encoding" = "gzip, deflate" +"DNT" = "1" +"Connection" = "keep-alive" +"Upgrade-Insecure-Requests" = "1" + +[ci] +# CI-specific settings + +[ci.github_actions] +output_format = "json" +create_annotations = true +fail_fast = false +max_annotations = 50 # Limit to avoid overwhelming PR comments + +[ci.performance] +# Performance tuning for CI environment +parallel_requests = 32 +connection_timeout = 10 +read_timeout = 30 + +# Resource limits +max_memory_mb = 512 +max_execution_time_minutes = 10 + +[reporting] +# Report configuration +include_fragments = false +verbose = false +no_progress = true # Disable progress bar in CI + +# Summary settings +show_success_count = true +show_skipped_count = true \ No newline at end of file diff --git a/.github/actions/report-broken-links/action.yml b/.github/actions/report-broken-links/action.yml deleted file mode 100644 index 9e95e5605..000000000 --- a/.github/actions/report-broken-links/action.yml +++ /dev/null @@ -1,103 +0,0 @@ -name: 'Report Broken Links' -description: 'Downloads broken link reports, generates PR comment, and posts results' - -inputs: - github-token: - description: 'GitHub token for posting comments' - required: false - default: ${{ github.token }} - max-links-per-file: - description: 'Maximum links to show per file in comment' - required: false - default: '20' - include-success-message: - description: 'Include success message when no broken links found' - required: false - 
default: 'true' - -outputs: - has-broken-links: - description: 'Whether broken links were found (true/false)' - value: ${{ steps.generate-comment.outputs.has-broken-links }} - broken-link-count: - description: 'Number of broken links found' - value: ${{ steps.generate-comment.outputs.broken-link-count }} - -runs: - using: 'composite' - steps: - - name: Download broken link reports - uses: actions/download-artifact@v4 - with: - path: reports - continue-on-error: true - - - name: Generate PR comment - id: generate-comment - run: | - # Generate comment using our script - node .github/scripts/comment-generator.js \ - --max-links ${{ inputs.max-links-per-file }} \ - ${{ inputs.include-success-message == 'false' && '--no-success' || '' }} \ - --output-file comment.md \ - reports/ || echo "No reports found or errors occurred" - - # Check if comment file was created and has content - if [[ -f comment.md && -s comment.md ]]; then - echo "comment-generated=true" >> $GITHUB_OUTPUT - - # Count broken links by parsing the comment - broken_count=$(grep -o "Found [0-9]* broken link" comment.md | grep -o "[0-9]*" || echo "0") - echo "broken-link-count=$broken_count" >> $GITHUB_OUTPUT - - # Check if there are actually broken links (not just a success comment) - if [[ "$broken_count" -gt 0 ]]; then - echo "has-broken-links=true" >> $GITHUB_OUTPUT - else - echo "has-broken-links=false" >> $GITHUB_OUTPUT - fi - else - echo "has-broken-links=false" >> $GITHUB_OUTPUT - echo "broken-link-count=0" >> $GITHUB_OUTPUT - echo "comment-generated=false" >> $GITHUB_OUTPUT - fi - shell: bash - - - name: Post PR comment - if: steps.generate-comment.outputs.comment-generated == 'true' - uses: actions/github-script@v7 - with: - github-token: ${{ inputs.github-token }} - script: | - const fs = require('fs'); - - if (fs.existsSync('comment.md')) { - const comment = fs.readFileSync('comment.md', 'utf8'); - - if (comment.trim()) { - await github.rest.issues.createComment({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - body: comment - }); - } - } - - - name: Report validation results - run: | - has_broken_links="${{ steps.generate-comment.outputs.has-broken-links }}" - broken_count="${{ steps.generate-comment.outputs.broken-link-count }}" - - if [ "$has_broken_links" = "true" ]; then - echo "::error::❌ Link validation failed: Found $broken_count broken link(s)" - echo "Check the PR comment for detailed broken link information" - exit 1 - else - echo "::notice::✅ Link validation passed successfully" - echo "All links in the changed files are valid" - if [ "${{ steps.generate-comment.outputs.comment-generated }}" = "true" ]; then - echo "PR comment posted with validation summary and cache statistics" - fi - fi - shell: bash \ No newline at end of file diff --git a/.github/actions/validate-links/action.yml b/.github/actions/validate-links/action.yml deleted file mode 100644 index cf180556c..000000000 --- a/.github/actions/validate-links/action.yml +++ /dev/null @@ -1,106 +0,0 @@ -name: 'Validate Links' -description: 'Runs e2e browser-based link validation tests against Hugo site using Cypress' - -inputs: - files: - description: 'Space-separated list of files to validate' - required: true - product-name: - description: 'Product name for reporting (optional)' - required: false - default: '' - cache-enabled: - description: 'Enable link validation caching' - required: false - default: 'true' - cache-key: - description: 'Cache key prefix for this validation run' - required: false - 
default: 'link-validation' - timeout: - description: 'Test timeout in seconds' - required: false - default: '900' - -outputs: - failed: - description: 'Whether validation failed (true/false)' - value: ${{ steps.validate.outputs.failed }} - -runs: - using: 'composite' - steps: - - name: Restore link validation cache - if: inputs.cache-enabled == 'true' - uses: actions/cache@v4 - with: - path: .cache/link-validation - key: ${{ inputs.cache-key }}-${{ runner.os }}-${{ hashFiles('content/**/*.md', 'content/**/*.html') }} - restore-keys: | - ${{ inputs.cache-key }}-${{ runner.os }}- - ${{ inputs.cache-key }}- - - - name: Run link validation - shell: bash - run: | - # Set CI-specific environment variables - export CI=true - export GITHUB_ACTIONS=true - export NODE_OPTIONS="--max-old-space-size=4096" - - # Set test runner timeout for Hugo shutdown - export HUGO_SHUTDOWN_TIMEOUT=5000 - - # Add timeout to prevent hanging (timeout command syntax: timeout DURATION COMMAND) - timeout ${{ inputs.timeout }}s node cypress/support/run-e2e-specs.js ${{ inputs.files }} \ - --spec cypress/e2e/content/article-links.cy.js || { - exit_code=$? - - # Handle timeout specifically - if [ $exit_code -eq 124 ]; then - echo "::error::Link validation timed out after ${{ inputs.timeout }} seconds" - echo "::notice::This may indicate Hugo server startup issues or very slow link validation" - else - echo "::error::Link validation failed with exit code $exit_code" - fi - - # Check for specific error patterns and logs (but don't dump full content) - if [ -f /tmp/hugo_server.log ]; then - echo "Hugo server log available for debugging" - fi - - if [ -f hugo.log ]; then - echo "Additional Hugo log available for debugging" - fi - - if [ -f /tmp/broken_links_report.json ]; then - # Only show summary, not full report (full report is uploaded as artifact) - broken_count=$(grep -o '"url":' /tmp/broken_links_report.json | wc -l || echo "0") - echo "Broken links report contains $broken_count entries" - fi - - exit $exit_code - } - - # Report success if we get here - echo "::notice::✅ Link validation completed successfully" - echo "No broken links detected in the tested files" - - - name: Upload logs on failure - if: failure() - uses: actions/upload-artifact@v4 - with: - name: validation-logs-${{ inputs.product-name && inputs.product-name || 'default' }} - path: | - hugo.log - /tmp/hugo_server.log - if-no-files-found: ignore - - - - name: Upload broken links report - if: always() - uses: actions/upload-artifact@v4 - with: - name: broken-links-report${{ inputs.product-name && format('-{0}', inputs.product-name) || '' }} - path: /tmp/broken_links_report.json - if-no-files-found: ignore \ No newline at end of file diff --git a/.github/workflows/pr-link-check.yml b/.github/workflows/pr-link-check.yml new file mode 100644 index 000000000..b0764089a --- /dev/null +++ b/.github/workflows/pr-link-check.yml @@ -0,0 +1,241 @@ +name: Link Check PR Changes + +on: + pull_request: + paths: + - 'content/**/*.md' + - 'data/**/*.yml' + - 'layouts/**/*.html' + types: [opened, synchronize, reopened] + +jobs: + link-check: + name: Check links in affected files + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Detect content changes + id: detect + run: | + echo "🔍 Detecting changes between ${{ github.base_ref }} and ${{ github.sha }}" + + # For PRs, use the GitHub Files API to get changed files + if [[ "${{ github.event_name }}" == "pull_request" ]]; then + echo "Using GitHub 
API to detect PR changes..." + curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.number }}/files" \ + | jq -r '.[].filename' > all_changed_files.txt + else + echo "Using git diff to detect changes..." + git diff --name-only ${{ github.event.before }}..${{ github.sha }} > all_changed_files.txt + fi + + # Filter for content markdown files + CHANGED_FILES=$(grep '^content/.*\.md$' all_changed_files.txt || true) + + echo "📁 All changed files:" + cat all_changed_files.txt + echo "" + echo "📝 Content markdown files:" + echo "$CHANGED_FILES" + + if [[ -n "$CHANGED_FILES" ]]; then + echo "✅ Found $(echo "$CHANGED_FILES" | wc -l) changed content file(s)" + echo "has-changes=true" >> $GITHUB_OUTPUT + echo "changed-content<<EOF" >> $GITHUB_OUTPUT + echo "$CHANGED_FILES" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + + # Check if any shared content files were modified + SHARED_CHANGES=$(echo "$CHANGED_FILES" | grep '^content/shared/' || true) + if [[ -n "$SHARED_CHANGES" ]]; then + echo "has-shared-content=true" >> $GITHUB_OUTPUT + echo "🔄 Detected shared content changes: $SHARED_CHANGES" + else + echo "has-shared-content=false" >> $GITHUB_OUTPUT + fi + else + echo "❌ No content changes detected" + echo "has-changes=false" >> $GITHUB_OUTPUT + echo "has-shared-content=false" >> $GITHUB_OUTPUT + fi + + - name: Skip if no content changes + if: steps.detect.outputs.has-changes == 'false' + run: | + echo "No content changes detected in this PR - skipping link check" + echo "✅ **No content changes detected** - link check skipped" >> $GITHUB_STEP_SUMMARY + + - name: Setup Node.js + if: steps.detect.outputs.has-changes == 'true' + uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'yarn' + + - name: Install dependencies + if: steps.detect.outputs.has-changes == 'true' + run: yarn install --frozen-lockfile + + - name: Build Hugo site + if: steps.detect.outputs.has-changes == 'true' + run: npx hugo --minify + + - name: Download link-checker binary + if: steps.detect.outputs.has-changes == 'true' + run: | + echo "Downloading link-checker binary from docs-v2 releases..." + + # Download from docs-v2's own releases (always accessible) + curl -L -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ + -o link-checker-info.json \ + "https://api.github.com/repos/influxdata/docs-v2/releases/tags/link-checker-v1.0.0" + + # Extract download URL for linux binary + DOWNLOAD_URL=$(jq -r '.assets[] | select(.name | test("link-checker.*linux")) | .url' link-checker-info.json) + + if [[ "$DOWNLOAD_URL" == "null" || -z "$DOWNLOAD_URL" ]]; then + echo "❌ No linux binary found in release" + echo "Available assets:" + jq -r '.assets[].name' link-checker-info.json + exit 1 + fi + + echo "📥 Downloading: $DOWNLOAD_URL" + curl -L -H "Accept: application/octet-stream" \ + -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ + -o link-checker "$DOWNLOAD_URL" + + chmod +x link-checker + ./link-checker --version + + - name: Verify link checker config exists + if: steps.detect.outputs.has-changes == 'true' + run: | + if [[ !
-f .ci/link-checker/production.lycherc.toml ]]; then + echo "❌ Configuration file .ci/link-checker/production.lycherc.toml not found" + echo "Please copy production.lycherc.toml from docs-tooling/link-checker/" + exit 1 + fi + echo "✅ Using configuration: .ci/link-checker/production.lycherc.toml" + + - name: Map changed content to public files + if: steps.detect.outputs.has-changes == 'true' + id: mapping + run: | + echo "Mapping changed content files to public HTML files..." + + # Create temporary file with changed content files + echo "${{ steps.detect.outputs.changed-content }}" > changed-files.txt + + # Map content files to public files + PUBLIC_FILES=$(cat changed-files.txt | xargs -r ./link-checker map --existing-only) + + if [[ -n "$PUBLIC_FILES" ]]; then + echo "Found affected public files:" + echo "$PUBLIC_FILES" + echo "public-files<<EOF" >> $GITHUB_OUTPUT + echo "$PUBLIC_FILES" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + + # Count files for summary + FILE_COUNT=$(echo "$PUBLIC_FILES" | wc -l) + echo "file-count=$FILE_COUNT" >> $GITHUB_OUTPUT + else + echo "No public files found to check" + echo "public-files=" >> $GITHUB_OUTPUT + echo "file-count=0" >> $GITHUB_OUTPUT + fi + + - name: Run link checker + if: steps.detect.outputs.has-changes == 'true' && steps.mapping.outputs.public-files != '' + id: link-check + run: | + echo "Checking links in ${{ steps.mapping.outputs.file-count }} affected files..." + + # Create temporary file with public files list + echo "${{ steps.mapping.outputs.public-files }}" > public-files.txt + + # Run link checker with detailed JSON output + set +e # Don't fail immediately on error + + cat public-files.txt | xargs -r ./link-checker check \ + --config .ci/link-checker/production.lycherc.toml \ + --format json \ + --output link-check-results.json + + EXIT_CODE=$? + + if [[ -f link-check-results.json ]]; then + # Parse results + BROKEN_COUNT=$(jq -r '.summary.broken_count // 0' link-check-results.json) + TOTAL_COUNT=$(jq -r '.summary.total_checked // 0' link-check-results.json) + SUCCESS_RATE=$(jq -r '.summary.success_rate // 0' link-check-results.json) + + echo "broken-count=$BROKEN_COUNT" >> $GITHUB_OUTPUT + echo "total-count=$TOTAL_COUNT" >> $GITHUB_OUTPUT + echo "success-rate=$SUCCESS_RATE" >> $GITHUB_OUTPUT + + if [[ $BROKEN_COUNT -gt 0 ]]; then + echo "❌ Found $BROKEN_COUNT broken links out of $TOTAL_COUNT total links" + echo "check-result=failed" >> $GITHUB_OUTPUT + else + echo "✅ All $TOTAL_COUNT links are valid" + echo "check-result=passed" >> $GITHUB_OUTPUT + fi + else + echo "❌ Link check failed to generate results" + echo "check-result=error" >> $GITHUB_OUTPUT + fi + + exit $EXIT_CODE + + - name: Process and report results + if: always() && steps.detect.outputs.has-changes == 'true' && steps.mapping.outputs.public-files != '' + run: | + if [[ -f link-check-results.json ]]; then + # Create detailed error annotations for broken links + if [[ "${{ steps.link-check.outputs.check-result }}" == "failed" ]]; then + echo "Creating error annotations for broken links..." + + jq -r '.broken_links[]?
| + "::error file=\(.file // "unknown"),line=\(.line // 1)::Broken link: \(.url) - \(.error // "Unknown error")"' \ + link-check-results.json || true + fi + + # Generate summary comment + cat >> $GITHUB_STEP_SUMMARY << 'EOF' + ## Link Check Results + + **Files Checked:** ${{ steps.mapping.outputs.file-count }} + **Total Links:** ${{ steps.link-check.outputs.total-count }} + **Broken Links:** ${{ steps.link-check.outputs.broken-count }} + **Success Rate:** ${{ steps.link-check.outputs.success-rate }}% + + EOF + + if [[ "${{ steps.link-check.outputs.check-result }}" == "failed" ]]; then + echo "❌ **Link check failed** - see annotations above for details" >> $GITHUB_STEP_SUMMARY + else + echo "✅ **All links are valid**" >> $GITHUB_STEP_SUMMARY + fi + else + echo "⚠️ **Link check could not complete** - no results file generated" >> $GITHUB_STEP_SUMMARY + fi + + - name: Upload detailed results + if: always() && steps.detect.outputs.has-changes == 'true' && steps.mapping.outputs.public-files != '' + uses: actions/upload-artifact@v4 + with: + name: link-check-results + path: | + link-check-results.json + changed-files.txt + public-files.txt + retention-days: 30 \ No newline at end of file diff --git a/.github/workflows/pr-link-validation.yml b/.github/workflows/pr-link-validation.yml deleted file mode 100644 index 8d6a8a735..000000000 --- a/.github/workflows/pr-link-validation.yml +++ /dev/null @@ -1,148 +0,0 @@ -# PR Link Validation Workflow -# Provides basic and parallel workflows -# with smart strategy selection based on change volume -name: PR Link Validation - -on: - pull_request: - paths: - - 'content/**/*.md' - - 'content/**/*.html' - - 'api-docs/**/*.yml' - - 'assets/**/*.js' - - 'layouts/**/*.html' - -jobs: - # TEMPORARILY DISABLED - Remove this condition to re-enable link validation - disabled-check: - if: false # Set to true to re-enable the workflow - runs-on: ubuntu-latest - steps: - - run: echo "Link validation is temporarily disabled" - setup: - name: Setup and Strategy Detection - runs-on: ubuntu-latest - if: false # TEMPORARILY DISABLED - Remove this condition to re-enable - outputs: - strategy: ${{ steps.determine-strategy.outputs.strategy }} - has-changes: ${{ steps.determine-strategy.outputs.has-changes }} - matrix: ${{ steps.determine-strategy.outputs.matrix }} - all-files: ${{ steps.changed-files.outputs.all_changed_files }} - cache-hit-rate: ${{ steps.determine-strategy.outputs.cache-hit-rate }} - cache-hits: ${{ steps.determine-strategy.outputs.cache-hits }} - cache-misses: ${{ steps.determine-strategy.outputs.cache-misses }} - original-file-count: ${{ steps.determine-strategy.outputs.original-file-count }} - validation-file-count: ${{ steps.determine-strategy.outputs.validation-file-count }} - cache-message: ${{ steps.determine-strategy.outputs.message }} - steps: - - name: Checkout - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Setup docs environment - uses: ./.github/actions/setup-docs-env - - - name: Get changed files - id: changed-files - uses: tj-actions/changed-files@v41 - with: - files: | - content/**/*.md - content/**/*.html - api-docs/**/*.yml - - - name: Determine validation strategy - id: determine-strategy - run: | - if [[ "${{ steps.changed-files.outputs.any_changed }}" != "true" ]]; then - echo "No relevant files changed" - echo "strategy=none" >> $GITHUB_OUTPUT - echo "has-changes=false" >> $GITHUB_OUTPUT - echo "matrix={\"include\":[]}" >> $GITHUB_OUTPUT - echo "cache-hit-rate=100" >> $GITHUB_OUTPUT - echo "cache-hits=0" >> $GITHUB_OUTPUT 
- echo "cache-misses=0" >> $GITHUB_OUTPUT - exit 0 - fi - - # Use our matrix generator with cache awareness - files="${{ steps.changed-files.outputs.all_changed_files }}" - - echo "🔍 Analyzing ${files} for cache-aware validation..." - - # Generate matrix and capture outputs - result=$(node .github/scripts/matrix-generator.js \ - --min-files-parallel 10 \ - --max-concurrent 5 \ - --output-format github \ - $files) - - # Parse all outputs from matrix generator - while IFS='=' read -r key value; do - case "$key" in - strategy|has-changes|cache-hit-rate|cache-hits|cache-misses|original-file-count|validation-file-count|message) - echo "$key=$value" >> $GITHUB_OUTPUT - ;; - matrix) - echo "matrix=$value" >> $GITHUB_OUTPUT - ;; - esac - done <<< "$result" - - # Extract values for logging - strategy=$(echo "$result" | grep "^strategy=" | cut -d'=' -f2) - cache_hit_rate=$(echo "$result" | grep "^cache-hit-rate=" | cut -d'=' -f2) - cache_message=$(echo "$result" | grep "^message=" | cut -d'=' -f2-) - - echo "📊 Selected strategy: $strategy" - if [[ -n "$cache_hit_rate" ]]; then - echo "📈 Cache hit rate: ${cache_hit_rate}%" - fi - if [[ -n "$cache_message" ]]; then - echo "$cache_message" - fi - - validate: - name: ${{ matrix.name }} - needs: setup - if: false # TEMPORARILY DISABLED - Original condition: needs.setup.outputs.has-changes == 'true' - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.setup.outputs.matrix) }} - steps: - - name: Checkout - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Setup docs environment - uses: ./.github/actions/setup-docs-env - - - name: Validate links - uses: ./.github/actions/validate-links - with: - files: ${{ matrix.files || needs.setup.outputs.all-files }} - product-name: ${{ matrix.product }} - cache-enabled: ${{ matrix.cacheEnabled || 'true' }} - cache-key: link-validation-${{ hashFiles(matrix.files || needs.setup.outputs.all-files) }} - timeout: 900 - - report: - name: Report Results - needs: [setup, validate] - if: false # TEMPORARILY DISABLED - Original condition: always() && needs.setup.outputs.has-changes == 'true' - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Setup docs environment - uses: ./.github/actions/setup-docs-env - - - name: Report broken links - uses: ./.github/actions/report-broken-links - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - max-links-per-file: 20 \ No newline at end of file diff --git a/.gitignore b/.gitignore index 0d9d333c3..32765da72 100644 --- a/.gitignore +++ b/.gitignore @@ -3,11 +3,14 @@ public .*.swp node_modules +package-lock.json .config* **/.env* *.log /resources .hugo_build.lock + +# Content generation /content/influxdb*/**/api/**/*.html !api-docs/**/.config.yml /api-docs/redoc-static.html* @@ -16,18 +19,22 @@ node_modules !telegraf-build/templates !telegraf-build/scripts !telegraf-build/README.md + +# CI/CD tool files /cypress/downloads/* /cypress/screenshots/* /cypress/videos/* +.lycheecache test-results.xml /influxdb3cli-build-scripts/content +tmp + +# IDE files .vscode/* !.vscode/launch.json .idea **/config.toml -package-lock.json -tmp -# Context files for LLMs and AI tools +# User context files for AI assistant tools .context/* !.context/README.md diff --git a/TESTING.md b/TESTING.md index 44a5006ae..e0a2f6f78 100644 --- a/TESTING.md +++ b/TESTING.md @@ -121,96 +121,169 @@ Potential causes: # This is ignored ``` -## Link Validation Testing +## Link Validation with Link-Checker -Link validation uses Cypress for e2e 
browser-based testing against the Hugo site to ensure all internal and external links work correctly. +Link validation uses the `link-checker` tool to validate internal and external links in documentation files. ### Basic Usage +#### Installation + +**Option 1: Download from docs-v2 releases (recommended)** + +The link-checker binary is distributed via docs-v2 releases for reliable access from GitHub Actions workflows: + ```bash -# Test specific files -yarn test:links content/influxdb3/core/**/*.md +# Download binary from docs-v2 releases +curl -L -o link-checker \ + https://github.com/influxdata/docs-v2/releases/download/link-checker-v1.0.0/link-checker-linux-x86_64 +chmod +x link-checker -# Test all links (may take a long time) -yarn test:links - -# Test by product (may take a long time) -yarn test:links:v3 -yarn test:links:v2 -yarn test:links:telegraf -yarn test:links:chronograf -yarn test:links:kapacitor +# Verify installation +./link-checker --version ``` -### How Link Validation Works +**Option 2: Build from source** -The tests: -1. Start a Hugo development server -2. Navigate to each page in a browser -3. Check all links for validity -4. Report broken or invalid links +```bash +# Clone and build link-checker +git clone https://github.com/influxdata/docs-tooling.git +cd docs-tooling/link-checker +cargo build --release + +# Copy binary to your PATH or use directly +cp target/release/link-checker /usr/local/bin/ +``` + +#### Core Commands + +```bash +# Map content files to public HTML files +link-checker map content/path/to/file.md + +# Check links in HTML files +link-checker check public/path/to/file.html + +# Generate configuration file +link-checker config +``` + +### Content Mapping Workflows + +#### Scenario 1: Map and check InfluxDB 3 Core content + +```bash +# Map Markdown files to HTML +link-checker map content/influxdb3/core/get-started/ + +# Check links in mapped HTML files +link-checker check public/influxdb3/core/get-started/ +``` + +#### Scenario 2: Map and check shared CLI content + +```bash +# Map shared content files +link-checker map content/shared/influxdb3-cli/ + +# Check the mapped output files +# (link-checker map outputs the HTML file paths) +link-checker map content/shared/influxdb3-cli/ | \ + xargs link-checker check +``` + +#### Scenario 3: Direct HTML checking + +```bash +# Check HTML files directly without mapping +link-checker check public/influxdb3/core/get-started/ +``` + +#### Combined workflow for changed files + +```bash +# Check only files changed in the last commit +git diff --name-only HEAD~1 HEAD | grep '\.md$' | \ + xargs link-checker map | \ + xargs link-checker check +``` + +### Configuration Options + +#### Local usage (default configuration) + +```bash +# Uses default settings or test.lycherc.toml if present +link-checker check public/influxdb3/core/get-started/ +``` + +#### Production usage (GitHub Actions) + +```bash +# Use production configuration with comprehensive exclusions +link-checker check \ + --config .ci/link-checker/production.lycherc.toml \ + public/influxdb3/core/get-started/ +``` ### GitHub Actions Integration -#### Composite Action +**Automated Integration (docs-v2)** -The `.github/actions/validate-links/` composite action provides reusable link validation: +The docs-v2 repository includes automated link checking for pull requests: + +- **Trigger**: Runs automatically on PRs that modify content files +- **Binary distribution**: Downloads latest pre-built binary from docs-v2 releases +- **Smart detection**: Only checks files 
affected by PR changes +- **Production config**: Uses optimized settings with exclusions for GitHub, social media, etc. +- **Results reporting**: Broken links reported as GitHub annotations with detailed summaries + +The workflow automatically: +1. Detects content changes in PRs using GitHub Files API +2. Downloads latest link-checker binary from docs-v2 releases +3. Builds Hugo site and maps changed content to public HTML files +4. Runs link checking with production configuration +5. Reports results with annotations and step summaries + +**Manual Integration (other repositories)** + +For other repositories, you can integrate link checking manually: ```yaml -- uses: ./.github/actions/validate-links - with: - files: "content/influxdb3/core/file.md content/influxdb/v2/file2.md" - product-name: "core" - cache-enabled: "true" - cache-key: "link-validation" +name: Link Check +on: + pull_request: + paths: + - 'content/**/*.md' + +jobs: + link-check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Download link-checker + run: | + curl -L -o link-checker \ + https://github.com/influxdata/docs-v2/releases/download/link-checker-v1.0.0/link-checker-linux-x86_64 + chmod +x link-checker + + - name: Build Hugo site + run: | + npm install + npx hugo --minify + + - name: Check changed files + run: | + git diff --name-only origin/main HEAD | \ + grep '\.md$' | \ + xargs ./link-checker map | \ + xargs ./link-checker check \ + --config .ci/link-checker/production.lycherc.toml ``` -#### Matrix Generator - -The `.github/scripts/matrix-generator.js` script provides intelligent strategy selection: - -- **Sequential validation**: For small changes (< 10 files) or single-product changes - -- **Parallel validation**: For large changes across multiple products (up to 5 concurrent jobs) - -Test locally: - -```bash -node .github/scripts/matrix-generator.js content/influxdb3/core/file1.md content/influxdb/v2/file2.md -``` - -Configuration options: -- `--max-concurrent <n>`: Maximum parallel jobs (default: 5) -- `--force-sequential`: Force sequential execution -- `--min-files-parallel <n>`: Minimum files for parallel (default: 10) - -### Caching for Link Validation - -Link validation supports caching to improve performance: - -- **Cache location**: `.cache/link-validation/` (local), GitHub Actions cache (CI) -- **Cache keys**: Based on content file hashes -- **TTL**: 30 days by default, configurable - -#### Cache Configuration Options - -```bash -# Use 7-day cache for more frequent validation -yarn test:links --cache-ttl=7 content/influxdb3/**/*.md - -# Use 1-day cache via environment variable -LINK_CACHE_TTL_DAYS=1 yarn test:links content/**/*.md - -# Clean up expired cache entries -node .github/scripts/incremental-validator.js --cleanup -``` - -#### How Caching Works - -- **Cache key**: Based on file path + content hash (file changes invalidate cache immediately) -- **External links**: Cached for the TTL period since URLs rarely change -- **Internal links**: Effectively cached until file content changes -- **Automatic cleanup**: Expired entries are removed on access and via `--cleanup` - ## Style Linting (Vale) Style linting uses [Vale](https://vale.sh/) to enforce documentation writing standards, branding guidelines, and vocabulary consistency.
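One thing the TESTING.md changes above describe but don't show is the shape of the JSON results file that CI parses. A sketch of producing and inspecting it locally (the flags and field names mirror the `pr-link-check.yml` workflow; the target path is only an example):

```bash
# Run a check with JSON output, then pull out the summary fields the
# workflow reports: total_checked, broken_count, and success_rate.
./link-checker check \
  --config .ci/link-checker/production.lycherc.toml \
  --format json --output link-check-results.json \
  public/influxdb3/core/get-started/
jq '.summary | {total_checked, broken_count, success_rate}' \
  link-check-results.json
```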
diff --git a/content/influxdb3/core/get-started/_index.md b/content/influxdb3/core/get-started/_index.md index 16398f32f..72cbc7746 100644 --- a/content/influxdb3/core/get-started/_index.md +++ b/content/influxdb3/core/get-started/_index.md @@ -18,6 +18,7 @@ prepend: | > [!Note] > InfluxDB 3 Core is purpose-built for real-time data monitoring and recent data. > InfluxDB 3 Enterprise builds on top of Core with support for historical data + > analysis and extended features. > querying, high availability, read replicas, and more. > Enterprise will soon unlock > enhanced security, row-level deletions, an administration UI, and more. From 0546d66ac04b0adc7dc0f86d9061fb332cdd93ff Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Sun, 17 Aug 2025 09:53:10 -0500 Subject: [PATCH 080/122] ci: link-checker known positive test with existing broken link, platform-specific instructions --- TESTING.md | 22 ++++++++++++++++--- .../influxql/functions/transformations.md | 4 ++-- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/TESTING.md b/TESTING.md index e0a2f6f78..dba8f892e 100644 --- a/TESTING.md +++ b/TESTING.md @@ -129,12 +129,27 @@ Link validation uses the `link-checker` tool to validate internal and external l #### Installation -**Option 1: Download from docs-v2 releases (recommended)** +**Option 1: Build from source (macOS/local development)** + +For local development on macOS, build the link-checker from source: + +```bash +# Clone and build link-checker +git clone https://github.com/influxdata/docs-tooling.git +cd docs-tooling/link-checker +cargo build --release + +# Copy binary to your PATH or use directly +cp target/release/link-checker /usr/local/bin/ +# OR use directly: ./target/release/link-checker +``` + +**Option 2: Download pre-built binary (GitHub Actions/Linux)** The link-checker binary is distributed via docs-v2 releases for reliable access from GitHub Actions workflows: ```bash -# Download binary from docs-v2 releases +# Download Linux binary from docs-v2 releases curl -L -o link-checker \ https://github.com/influxdata/docs-v2/releases/download/link-checker-v1.0.0/link-checker-linux-x86_64 chmod +x link-checker @@ -143,7 +158,8 @@ chmod +x link-checker ./link-checker --version ``` -**Option 2: Build from source** +> [!Note] +> Pre-built binaries are currently Linux x86_64 only. For macOS development, use Option 1 to build from source. ```bash # Clone and build link-checker diff --git a/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md b/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md index 433562e74..d3ce61442 100644 --- a/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md +++ b/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md @@ -704,7 +704,7 @@ name: data ## ATAN2() -Returns the the arctangent of `y/x` in radians. +Returns the arctangent of `y/x` in radians. ### Basic syntax @@ -1609,7 +1609,7 @@ SELECT DERIVATIVE( ([ * | | // ]) [ , The advanced syntax requires a [`GROUP BY time()` clause](/influxdb/version/query-data/influxql/explore-data/group-by/#group-by-time-intervals) and a nested InfluxQL function. The query first calculates the results for the nested function at the specified `GROUP BY time()` interval and then applies the `DERIVATIVE()` function to those results. -The `unit` argument is an integer followed by a [duration](//influxdb/version/reference/glossary/#duration) and it is optional. 
+The `unit` argument is an integer followed by a [duration](///influxdb/version/reference/glossary/#duration) and it is optional. If the query does not specify the `unit` the `unit` defaults to the `GROUP BY time()` interval. Note that this behavior is different from the [basic syntax's](#basic-syntax-1) default behavior. From 98735f4bef2fe2605a5956b62026d23e51f64348 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 18 Aug 2025 10:21:10 -0500 Subject: [PATCH 081/122] test link-checker, positive test --- .../query-data/influxql/functions/transformations.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md b/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md index d3ce61442..7fbdd3a0a 100644 --- a/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md +++ b/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md @@ -1609,7 +1609,7 @@ SELECT DERIVATIVE( ([ * | | // ]) [ , The advanced syntax requires a [`GROUP BY time()` clause](/influxdb/version/query-data/influxql/explore-data/group-by/#group-by-time-intervals) and a nested InfluxQL function. The query first calculates the results for the nested function at the specified `GROUP BY time()` interval and then applies the `DERIVATIVE()` function to those results. -The `unit` argument is an integer followed by a [duration](///influxdb/version/reference/glossary/#duration) and it is optional. +The `unit` argument is an integer followed by a [duration](//influxdb/version/reference/glossary/#duration) and it is optional. If the query does not specify the `unit` the `unit` defaults to the `GROUP BY time()` interval. Note that this behavior is different from the [basic syntax's](#basic-syntax-1) default behavior. From f5df3cb6f06bc74305daf55e0f867650f2a758db Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 18 Aug 2025 10:28:09 -0500 Subject: [PATCH 082/122] fix(v2): broken link --- .../query-data/influxql/functions/transformations.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md b/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md index 7fbdd3a0a..48046fc66 100644 --- a/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md +++ b/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md @@ -1609,7 +1609,7 @@ SELECT DERIVATIVE( ([ * | | // ]) [ , The advanced syntax requires a [`GROUP BY time()` clause](/influxdb/version/query-data/influxql/explore-data/group-by/#group-by-time-intervals) and a nested InfluxQL function. The query first calculates the results for the nested function at the specified `GROUP BY time()` interval and then applies the `DERIVATIVE()` function to those results. -The `unit` argument is an integer followed by a [duration](//influxdb/version/reference/glossary/#duration) and it is optional. +The `unit` argument is an integer followed by a [duration](/influxdb/version/reference/glossary/#duration) and it is optional. If the query does not specify the `unit` the `unit` defaults to the `GROUP BY time()` interval. Note that this behavior is different from the [basic syntax's](#basic-syntax-1) default behavior. 
From a8578bb0af0ff8b8d0fe961ca4e3bdb4cbc42ab1 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 18 Aug 2025 10:51:57 -0500 Subject: [PATCH 083/122] chore(ci): Removes old Cypress link checker test code --- cypress.config.js | 100 ------- cypress/e2e/content/article-links.cy.js | 370 ------------------------ cypress/e2e/content/example.cy.js | 0 cypress/support/link-cache.js | 215 -------------- cypress/support/link-reporter.js | 310 -------------------- cypress/support/run-e2e-specs.js | 54 +--- lefthook.yml | 10 - package.json | 9 - 8 files changed, 5 insertions(+), 1063 deletions(-) delete mode 100644 cypress/e2e/content/article-links.cy.js delete mode 100644 cypress/e2e/content/example.cy.js delete mode 100644 cypress/support/link-cache.js delete mode 100644 cypress/support/link-reporter.js diff --git a/cypress.config.js b/cypress.config.js index d7ffed8fc..5148f60ec 100644 --- a/cypress.config.js +++ b/cypress.config.js @@ -2,14 +2,6 @@ import { defineConfig } from 'cypress'; import { cwd as _cwd } from 'process'; import * as fs from 'fs'; import * as yaml from 'js-yaml'; -import { - BROKEN_LINKS_FILE, - FIRST_BROKEN_LINK_FILE, - initializeReport, - readBrokenLinksReport, - saveCacheStats, - saveValidationStrategy, -} from './cypress/support/link-reporter.js'; export default defineConfig({ e2e: { @@ -88,98 +80,6 @@ export default defineConfig({ } }, - // Broken links reporting tasks - initializeBrokenLinksReport() { - return initializeReport(); - }, - - // Special case domains are now handled directly in the test without additional reporting - // This task is kept for backward compatibility but doesn't do anything special - reportSpecialCaseLink(linkData) { - console.log( - `✅ Expected status code: ${linkData.url} (status: ${linkData.status}) is valid for this domain` - ); - return true; - }, - - reportBrokenLink(linkData) { - try { - // Validate link data - if (!linkData || !linkData.url || !linkData.page) { - console.error('Invalid link data provided'); - return false; - } - - // Read current report - const report = readBrokenLinksReport(); - - // Find or create entry for this page - let pageReport = report.find((r) => r.page === linkData.page); - if (!pageReport) { - pageReport = { page: linkData.page, links: [] }; - report.push(pageReport); - } - - // Check if link is already in the report to avoid duplicates - const isDuplicate = pageReport.links.some( - (link) => link.url === linkData.url && link.type === linkData.type - ); - - if (!isDuplicate) { - // Add the broken link to the page's report - pageReport.links.push({ - url: linkData.url, - status: linkData.status, - type: linkData.type, - linkText: linkData.linkText, - }); - - // Write updated report back to file - fs.writeFileSync( - BROKEN_LINKS_FILE, - JSON.stringify(report, null, 2) - ); - - // Store first broken link if not already recorded - const firstBrokenLinkExists = - fs.existsSync(FIRST_BROKEN_LINK_FILE) && - fs.readFileSync(FIRST_BROKEN_LINK_FILE, 'utf8').trim() !== ''; - - if (!firstBrokenLinkExists) { - // Store first broken link with complete information - const firstBrokenLink = { - url: linkData.url, - status: linkData.status, - type: linkData.type, - linkText: linkData.linkText, - page: linkData.page, - time: new Date().toISOString(), - }; - - fs.writeFileSync( - FIRST_BROKEN_LINK_FILE, - JSON.stringify(firstBrokenLink, null, 2) - ); - - console.error( - `🔴 FIRST BROKEN LINK: ${linkData.url} (${linkData.status}) - ${linkData.type} on page ${linkData.page}` - ); - } - - // Log the broken link 
immediately to console - console.error( - `❌ BROKEN LINK: ${linkData.url} (${linkData.status}) - ${linkData.type} on page ${linkData.page}` - ); - } - - return true; - } catch (error) { - console.error(`Error reporting broken link: ${error.message}`); - // Even if there's an error, we want to ensure the test knows there was a broken link - return true; - } - }, - // Cache and incremental validation tasks saveCacheStatistics(stats) { try { diff --git a/cypress/e2e/content/article-links.cy.js b/cypress/e2e/content/article-links.cy.js deleted file mode 100644 index 0ce8d4677..000000000 --- a/cypress/e2e/content/article-links.cy.js +++ /dev/null @@ -1,370 +0,0 @@ -/// - -describe('Article', () => { - let subjects = Cypress.env('test_subjects') - ? Cypress.env('test_subjects') - .split(',') - .filter((s) => s.trim() !== '') - : []; - - // Cache will be checked during test execution at the URL level - - // Always use HEAD for downloads to avoid timeouts - const useHeadForDownloads = true; - - // Set up initialization for tests - before(() => { - // Initialize the broken links report - cy.task('initializeBrokenLinksReport'); - - // Clean up expired cache entries - cy.task('cleanupCache').then((cleaned) => { - if (cleaned > 0) { - cy.log(`🧹 Cleaned up ${cleaned} expired cache entries`); - } - }); - }); - - // Display cache statistics after all tests complete - after(() => { - cy.task('getCacheStats').then((stats) => { - cy.log('📊 Link Validation Cache Statistics:'); - cy.log(` • Cache hits: ${stats.hits}`); - cy.log(` • Cache misses: ${stats.misses}`); - cy.log(` • New entries stored: ${stats.stores}`); - cy.log(` • Hit rate: ${stats.hitRate}`); - cy.log(` • Total validations: ${stats.total}`); - - if (stats.total > 0) { - const message = stats.hits > 0 - ? 
`✨ Cache optimization saved ${stats.hits} link validations` - : '🔄 No cache hits - all links were validated fresh'; - cy.log(message); - } - - // Save cache statistics for the reporter to display - cy.task('saveCacheStatsForReporter', { - hitRate: parseFloat(stats.hitRate.replace('%', '')), - cacheHits: stats.hits, - cacheMisses: stats.misses, - totalValidations: stats.total, - newEntriesStored: stats.stores, - cleanups: stats.cleanups - }); - }); - }); - - // Helper function to identify download links - function isDownloadLink(href) { - // Check for common download file extensions - const downloadExtensions = [ - '.pdf', - '.zip', - '.tar.gz', - '.tgz', - '.rar', - '.exe', - '.dmg', - '.pkg', - '.deb', - '.rpm', - '.xlsx', - '.csv', - '.doc', - '.docx', - '.ppt', - '.pptx', - ]; - - // Check for download domains or paths - const downloadDomains = ['dl.influxdata.com', 'downloads.influxdata.com']; - - // Check if URL contains a download extension - const hasDownloadExtension = downloadExtensions.some((ext) => - href.toLowerCase().endsWith(ext) - ); - - // Check if URL is from a download domain - const isFromDownloadDomain = downloadDomains.some((domain) => - href.toLowerCase().includes(domain) - ); - - // Return true if either condition is met - return hasDownloadExtension || isFromDownloadDomain; - } - - // Helper function for handling failed links - function handleFailedLink(url, status, type, redirectChain = '', linkText = '', pageUrl = '') { - // Report the broken link - cy.task('reportBrokenLink', { - url: url + redirectChain, - status, - type, - linkText, - page: pageUrl, - }); - - // Throw error for broken links - throw new Error( - `BROKEN ${type.toUpperCase()} LINK: ${url} (status: ${status})${redirectChain} on ${pageUrl}` - ); - } - - // Helper function to test a link with cache integration - function testLink(href, linkText = '', pageUrl) { - // Check cache first - return cy.task('isLinkCached', href).then((isCached) => { - if (isCached) { - cy.log(`✅ Cache hit: ${href}`); - return cy.task('getLinkCache', href).then((cachedResult) => { - if (cachedResult && cachedResult.result && cachedResult.result.status >= 400) { - // Cached result shows this link is broken - handleFailedLink(href, cachedResult.result.status, cachedResult.result.type || 'cached', '', linkText, pageUrl); - } - // For successful cached results, just return - no further action needed - }); - } else { - // Not cached, perform actual validation - return performLinkValidation(href, linkText, pageUrl); - } - }); - } - - // Helper function to perform actual link validation and cache the result - function performLinkValidation(href, linkText = '', pageUrl) { - // Common request options for both methods - const requestOptions = { - failOnStatusCode: true, - timeout: 15000, // Increased timeout for reliability - followRedirect: true, // Explicitly follow redirects - retryOnNetworkFailure: true, // Retry on network issues - retryOnStatusCodeFailure: true, // Retry on 5xx errors - }; - - - if (useHeadForDownloads && isDownloadLink(href)) { - cy.log(`** Testing download link with HEAD: ${href} **`); - return cy.request({ - method: 'HEAD', - url: href, - ...requestOptions, - }).then((response) => { - // Prepare result for caching - const result = { - status: response.status, - type: 'download', - timestamp: new Date().toISOString() - }; - - // Check final status after following any redirects - if (response.status >= 400) { - const redirectInfo = - response.redirects && response.redirects.length > 0 - ? 
` (redirected to: ${response.redirects.join(' -> ')})` - : ''; - - // Cache the failed result - cy.task('setLinkCache', { url: href, result }); - handleFailedLink(href, response.status, 'download', redirectInfo, linkText, pageUrl); - } else { - // Cache the successful result - cy.task('setLinkCache', { url: href, result }); - } - }); - } else { - cy.log(`** Testing link: ${href} **`); - return cy.request({ - url: href, - ...requestOptions, - }).then((response) => { - // Prepare result for caching - const result = { - status: response.status, - type: 'regular', - timestamp: new Date().toISOString() - }; - - if (response.status >= 400) { - const redirectInfo = - response.redirects && response.redirects.length > 0 - ? ` (redirected to: ${response.redirects.join(' -> ')})` - : ''; - - // Cache the failed result - cy.task('setLinkCache', { url: href, result }); - handleFailedLink(href, response.status, 'regular', redirectInfo, linkText, pageUrl); - } else { - // Cache the successful result - cy.task('setLinkCache', { url: href, result }); - } - }); - } - } - - // Test setup validation - it('Test Setup Validation', function () { - cy.log(`📋 Test Configuration:`); - cy.log(` • Test subjects: ${subjects.length}`); - cy.log(` • Cache: URL-level caching with 30-day TTL`); - cy.log(` • Link validation: Internal, anchor, and allowed external links`); - - cy.log('✅ Test setup validation completed'); - }); - - subjects.forEach((subject) => { - it(`${subject} has valid internal links`, function () { - - // Add error handling for page visit failures - cy.visit(`${subject}`, { timeout: 20000 }).then(() => { - cy.log(`✅ Successfully loaded page: ${subject}`); - }); - - // Test internal links - cy.get('article, .api-content').then(($article) => { - // Find links without failing the test if none are found - const $links = $article.find('a[href^="/"]'); - if ($links.length === 0) { - cy.log('No internal links found on this page'); - return; - } - - cy.log(`🔍 Testing ${$links.length} internal links on ${subject}`); - - // Now test each link - cy.wrap($links).each(($a) => { - const href = $a.attr('href'); - const linkText = $a.text().trim(); - - try { - testLink(href, linkText, subject); - } catch (error) { - cy.log(`❌ Error testing link ${href}: ${error.message}`); - throw error; // Re-throw to fail the test - } - }); - }); - }); - - it(`${subject} has valid anchor links`, function () { - - cy.visit(`${subject}`).then(() => { - cy.log(`✅ Successfully loaded page for anchor testing: ${subject}`); - }); - - // Define selectors for anchor links to ignore, such as behavior triggers - const ignoreLinks = ['.tabs a[href^="#"]', '.code-tabs a[href^="#"]']; - - const anchorSelector = - 'a[href^="#"]:not(' + ignoreLinks.join('):not(') + ')'; - - cy.get('article, .api-content').then(($article) => { - const $anchorLinks = $article.find(anchorSelector); - if ($anchorLinks.length === 0) { - cy.log('No anchor links found on this page'); - return; - } - - cy.log(`🔗 Testing ${$anchorLinks.length} anchor links on ${subject}`); - - cy.wrap($anchorLinks).each(($a) => { - const href = $a.prop('href'); - const linkText = $a.text().trim(); - - if (href && href.length > 1) { - // Get just the fragment part - const url = new URL(href); - const anchorId = url.hash.substring(1); // Remove the # character - - if (!anchorId) { - cy.log(`Skipping empty anchor in ${href}`); - return; - } - - // Use DOM to check if the element exists - cy.window().then((win) => { - const element = win.document.getElementById(anchorId); - if (!element) { - 
cy.task('reportBrokenLink', { - url: `#${anchorId}`, - status: 404, - type: 'anchor', - linkText, - page: subject, - }); - cy.log(`⚠️ Missing anchor target: #${anchorId}`); - } - }); - } - }); - }); - }); - - it(`${subject} has valid external links`, function () { - - // Check if we should skip external links entirely - if (Cypress.env('skipExternalLinks') === true) { - cy.log( - 'Skipping all external links as configured by skipExternalLinks' - ); - return; - } - - cy.visit(`${subject}`).then(() => { - cy.log( - `✅ Successfully loaded page for external link testing: ${subject}` - ); - }); - - // Define allowed external domains to test - const allowedExternalDomains = ['github.com', 'kapa.ai']; - - // Test external links - cy.get('article, .api-content').then(($article) => { - // Find links without failing the test if none are found - const $links = $article.find('a[href^="http"]'); - if ($links.length === 0) { - cy.log('No external links found on this page'); - return; - } - - cy.log(`🔍 Found ${$links.length} total external links on ${subject}`); - - // Filter links to only include allowed domains - const $allowedLinks = $links.filter((_, el) => { - const href = el.getAttribute('href'); - try { - const url = new URL(href); - return allowedExternalDomains.some( - (domain) => - url.hostname === domain || url.hostname.endsWith(`.${domain}`) - ); - } catch (urlError) { - cy.log(`⚠️ Invalid URL found: ${href}`); - return false; - } - }); - - if ($allowedLinks.length === 0) { - cy.log('No links to allowed external domains found on this page'); - cy.log(` • Allowed domains: ${allowedExternalDomains.join(', ')}`); - return; - } - - cy.log( - `🌐 Testing ${$allowedLinks.length} links to allowed external domains` - ); - cy.wrap($allowedLinks).each(($a) => { - const href = $a.attr('href'); - const linkText = $a.text().trim(); - - try { - testLink(href, linkText, subject); - } catch (error) { - cy.log(`❌ Error testing external link ${href}: ${error.message}`); - throw error; - } - }); - }); - }); - }); -}); diff --git a/cypress/e2e/content/example.cy.js b/cypress/e2e/content/example.cy.js deleted file mode 100644 index e69de29bb..000000000 diff --git a/cypress/support/link-cache.js b/cypress/support/link-cache.js deleted file mode 100644 index 1a54a6e41..000000000 --- a/cypress/support/link-cache.js +++ /dev/null @@ -1,215 +0,0 @@ -/** - * Link Cache Manager for Cypress Tests - * Manages caching of link validation results at the URL level - */ - -import fs from 'fs'; -import path from 'path'; -import crypto from 'crypto'; - -const CACHE_VERSION = 'v2'; -const CACHE_KEY_PREFIX = 'link-validation'; -const LOCAL_CACHE_DIR = path.join(process.cwd(), '.cache', 'link-validation'); - -/** - * Cache manager for individual link validation results - */ -export class LinkCacheManager { - constructor(options = {}) { - this.localCacheDir = options.localCacheDir || LOCAL_CACHE_DIR; - - // Configurable cache TTL - default 30 days - this.cacheTTLDays = - options.cacheTTLDays || parseInt(process.env.LINK_CACHE_TTL_DAYS) || 30; - this.maxAge = this.cacheTTLDays * 24 * 60 * 60 * 1000; - - this.ensureLocalCacheDir(); - - // Track cache statistics - this.stats = { - hits: 0, - misses: 0, - stores: 0, - cleanups: 0 - }; - } - - ensureLocalCacheDir() { - if (!fs.existsSync(this.localCacheDir)) { - fs.mkdirSync(this.localCacheDir, { recursive: true }); - } - } - - /** - * Generate cache key for a URL - * @param {string} url - The URL to cache - * @returns {string} Cache key - */ - generateCacheKey(url) { - const urlHash = 
crypto - .createHash('sha256') - .update(url) - .digest('hex') - .substring(0, 16); - return `${CACHE_KEY_PREFIX}-${CACHE_VERSION}-${urlHash}`; - } - - /** - * Get cache file path for a URL - * @param {string} url - The URL - * @returns {string} File path - */ - getCacheFilePath(url) { - const cacheKey = this.generateCacheKey(url); - return path.join(this.localCacheDir, `${cacheKey}.json`); - } - - /** - * Check if a URL's validation result is cached - * @param {string} url - The URL to check - * @returns {Object|null} Cached result or null - */ - get(url) { - const cacheFile = this.getCacheFilePath(url); - - if (!fs.existsSync(cacheFile)) { - this.stats.misses++; - return null; - } - - try { - const content = fs.readFileSync(cacheFile, 'utf8'); - const cached = JSON.parse(content); - - // TTL check - const age = Date.now() - new Date(cached.cachedAt).getTime(); - - if (age > this.maxAge) { - fs.unlinkSync(cacheFile); - this.stats.misses++; - this.stats.cleanups++; - return null; - } - - this.stats.hits++; - return cached; - } catch (error) { - // Clean up corrupted cache - try { - fs.unlinkSync(cacheFile); - this.stats.cleanups++; - } catch (cleanupError) { - // Ignoring cleanup errors as they are non-critical, but logging for visibility - console.warn(`Failed to clean up corrupted cache file: ${cleanupError.message}`); - } - this.stats.misses++; - return null; - } - } - - /** - * Store validation result for a URL - * @param {string} url - The URL - * @param {Object} result - Validation result - * @returns {boolean} True if successfully cached, false otherwise - */ - set(url, result) { - const cacheFile = this.getCacheFilePath(url); - - const cacheData = { - url, - result, - cachedAt: new Date().toISOString(), - ttl: new Date(Date.now() + this.maxAge).toISOString() - }; - - try { - fs.writeFileSync(cacheFile, JSON.stringify(cacheData, null, 2)); - this.stats.stores++; - return true; - } catch (error) { - console.warn(`Failed to cache validation result for ${url}: ${error.message}`); - return false; - } - } - - /** - * Check if a URL is cached and valid - * @param {string} url - The URL to check - * @returns {boolean} True if cached and valid - */ - isCached(url) { - return this.get(url) !== null; - } - - /** - * Get cache statistics - * @returns {Object} Cache statistics - */ - getStats() { - const total = this.stats.hits + this.stats.misses; - const hitRate = total > 0 ? 
(this.stats.hits / total * 100).toFixed(1) : 0; - - return { - ...this.stats, - total, - hitRate: `${hitRate}%` - }; - } - - /** - * Clean up expired cache entries - * @returns {number} Number of entries cleaned up - */ - cleanup() { - let cleaned = 0; - - try { - const files = fs.readdirSync(this.localCacheDir); - const cacheFiles = files.filter(file => - file.startsWith(CACHE_KEY_PREFIX) && file.endsWith('.json') - ); - - for (const file of cacheFiles) { - const filePath = path.join(this.localCacheDir, file); - - try { - const content = fs.readFileSync(filePath, 'utf8'); - const cached = JSON.parse(content); - - const age = Date.now() - new Date(cached.cachedAt).getTime(); - - if (age > this.maxAge) { - fs.unlinkSync(filePath); - cleaned++; - } - } catch (error) { - console.warn(`Failed to process cache file "${filePath}": ${error.message}`); - // Remove corrupted files - fs.unlinkSync(filePath); - cleaned++; - } - } - } catch (error) { - console.warn(`Cache cleanup failed: ${error.message}`); - } - - this.stats.cleanups += cleaned; - return cleaned; - } -} - -/** - * Cypress task helper to integrate cache with Cypress tasks - */ -export const createCypressCacheTasks = (options = {}) => { - const cache = new LinkCacheManager(options); - - return { - getLinkCache: (url) => cache.get(url), - setLinkCache: ({ url, result }) => cache.set(url, result), - isLinkCached: (url) => cache.isCached(url), - getCacheStats: () => cache.getStats(), - cleanupCache: () => cache.cleanup() - }; -}; \ No newline at end of file diff --git a/cypress/support/link-reporter.js b/cypress/support/link-reporter.js deleted file mode 100644 index fa514c7ef..000000000 --- a/cypress/support/link-reporter.js +++ /dev/null @@ -1,310 +0,0 @@ -/** - * Broken Links Reporter - * Handles collecting, storing, and reporting broken links found during tests - */ -import fs from 'fs'; - -export const BROKEN_LINKS_FILE = '/tmp/broken_links_report.json'; -export const FIRST_BROKEN_LINK_FILE = '/tmp/first_broken_link.json'; -const SOURCES_FILE = '/tmp/test_subjects_sources.json'; -const CACHE_STATS_FILE = '/tmp/cache_statistics.json'; -const VALIDATION_STRATEGY_FILE = '/tmp/validation_strategy.json'; - -/** - * Reads the broken links report from the file system - * @returns {Array} Parsed report data or empty array if file doesn't exist - */ -export function readBrokenLinksReport() { - if (!fs.existsSync(BROKEN_LINKS_FILE)) { - return []; - } - - try { - const fileContent = fs.readFileSync(BROKEN_LINKS_FILE, 'utf8'); - - // Check if the file is empty or contains only an empty array - if (!fileContent || fileContent.trim() === '' || fileContent === '[]') { - return []; - } - - // Try to parse the JSON content - try { - const parsedContent = JSON.parse(fileContent); - - // Ensure the parsed content is an array - if (!Array.isArray(parsedContent)) { - console.error('Broken links report is not an array'); - return []; - } - - return parsedContent; - } catch (parseErr) { - console.error( - `Error parsing broken links report JSON: ${parseErr.message}` - ); - return []; - } - } catch (err) { - console.error(`Error reading broken links report: ${err.message}`); - return []; - } -} - -/** - * Reads the sources mapping file - * @returns {Object} A mapping from URLs to their source files - */ -function readSourcesMapping() { - try { - if (fs.existsSync(SOURCES_FILE)) { - const sourcesData = JSON.parse(fs.readFileSync(SOURCES_FILE, 'utf8')); - return sourcesData.reduce((acc, item) => { - if (item.url && item.source) { - acc[item.url] = 
item.source; - } - return acc; - }, {}); - } - } catch (err) { - console.warn(`Warning: Could not read sources mapping: ${err.message}`); - } - return {}; -} - -/** - * Read cache statistics from file - * @returns {Object|null} Cache statistics or null if not found - */ -function readCacheStats() { - try { - if (fs.existsSync(CACHE_STATS_FILE)) { - const content = fs.readFileSync(CACHE_STATS_FILE, 'utf8'); - return JSON.parse(content); - } - } catch (err) { - console.warn(`Warning: Could not read cache stats: ${err.message}`); - } - return null; -} - -/** - * Read validation strategy from file - * @returns {Object|null} Validation strategy or null if not found - */ -function readValidationStrategy() { - try { - if (fs.existsSync(VALIDATION_STRATEGY_FILE)) { - const content = fs.readFileSync(VALIDATION_STRATEGY_FILE, 'utf8'); - return JSON.parse(content); - } - } catch (err) { - console.warn(`Warning: Could not read validation strategy: ${err.message}`); - } - return null; -} - -/** - * Save cache statistics for reporting - * @param {Object} stats - Cache statistics to save - */ -export function saveCacheStats(stats) { - try { - fs.writeFileSync(CACHE_STATS_FILE, JSON.stringify(stats, null, 2)); - } catch (err) { - console.warn(`Warning: Could not save cache stats: ${err.message}`); - } -} - -/** - * Save validation strategy for reporting - * @param {Object} strategy - Validation strategy to save - */ -export function saveValidationStrategy(strategy) { - try { - fs.writeFileSync( - VALIDATION_STRATEGY_FILE, - JSON.stringify(strategy, null, 2) - ); - } catch (err) { - console.warn(`Warning: Could not save validation strategy: ${err.message}`); - } -} - -/** - * Formats and displays the broken links report to the console - * @param {Array} brokenLinksReport - The report data to display - * @returns {number} The total number of broken links found - */ -export function displayBrokenLinksReport(brokenLinksReport = null) { - // If no report provided, read from file - if (!brokenLinksReport) { - brokenLinksReport = readBrokenLinksReport(); - } - - // Read cache statistics and validation strategy - const cacheStats = readCacheStats(); - const validationStrategy = readValidationStrategy(); - - // Display cache performance first - if (cacheStats) { - console.log('\n📊 Link Validation Cache Performance:'); - console.log('======================================='); - console.log(`Cache hit rate: ${cacheStats.hitRate}%`); - console.log(`Cache hits: ${cacheStats.cacheHits}`); - console.log(`Cache misses: ${cacheStats.cacheMisses}`); - console.log(`Total validations: ${cacheStats.totalValidations || cacheStats.cacheHits + cacheStats.cacheMisses}`); - console.log(`New entries stored: ${cacheStats.newEntriesStored || 0}`); - - if (cacheStats.cleanups > 0) { - console.log(`Expired entries cleaned: ${cacheStats.cleanups}`); - } - - if (cacheStats.totalValidations > 0) { - const message = cacheStats.cacheHits > 0 - ? 
`✨ Cache optimization saved ${cacheStats.cacheHits} link validations` - : '🔄 No cache hits - all links were validated fresh'; - console.log(message); - } - - if (validationStrategy) { - console.log(`Files analyzed: ${validationStrategy.total}`); - console.log( - `Links needing validation: ${validationStrategy.newLinks.length}` - ); - } - console.log(''); // Add spacing after cache stats - } - - // Check both the report and first broken link file to determine if we have broken links - const firstBrokenLink = readFirstBrokenLink(); - - // Only report "no broken links" if both checks pass - if ( - (!brokenLinksReport || brokenLinksReport.length === 0) && - !firstBrokenLink - ) { - console.log('\n✅ No broken links detected in the validation report'); - return 0; - } - - // Special case: check if the single broken link file could be missing from the report - if ( - firstBrokenLink && - (!brokenLinksReport || brokenLinksReport.length === 0) - ) { - console.error( - '\n⚠️ Warning: First broken link record exists but no links in the report.' - ); - console.error('This could indicate a reporting issue.'); - } - - // Load sources mapping - const sourcesMapping = readSourcesMapping(); - - // Print a prominent header - console.error('\n\n' + '='.repeat(80)); - console.error(' 🚨 BROKEN LINKS DETECTED 🚨 '); - console.error('='.repeat(80)); - - // Show first failing link if available - if (firstBrokenLink) { - console.error('\n🔴 FIRST FAILING LINK:'); - console.error(` URL: ${firstBrokenLink.url}`); - console.error(` Status: ${firstBrokenLink.status}`); - console.error(` Type: ${firstBrokenLink.type}`); - console.error(` Page: ${firstBrokenLink.page}`); - if (firstBrokenLink.linkText) { - console.error( - ` Link text: "${firstBrokenLink.linkText.substring(0, 50)}${firstBrokenLink.linkText.length > 50 ? '...' : ''}"` - ); - } - console.error('-'.repeat(40)); - } - - let totalBrokenLinks = 0; - - brokenLinksReport.forEach((report) => { - console.error(`\n📄 PAGE: ${report.page}`); - - // Add source information if available - const source = sourcesMapping[report.page]; - if (source) { - console.error(` PAGE CONTENT SOURCE: ${source}`); - } - - console.error('-'.repeat(40)); - - report.links.forEach((link) => { - console.error(`• ${link.url}`); - console.error(` - Status: ${link.status}`); - console.error(` - Type: ${link.type}`); - if (link.linkText) { - console.error( - ` - Link text: "${link.linkText.substring(0, 50)}${link.linkText.length > 50 ? '...' 
: ''}"` - ); - } - console.error(''); - totalBrokenLinks++; - }); - }); - - // Print a prominent summary footer - console.error('='.repeat(80)); - console.error(`📊 TOTAL BROKEN LINKS FOUND: ${totalBrokenLinks}`); - console.error('='.repeat(80) + '\n'); - - return totalBrokenLinks; -} - -/** - * Reads the first broken link info from the file system - * @returns {Object|null} First broken link data or null if not found - */ -export function readFirstBrokenLink() { - if (!fs.existsSync(FIRST_BROKEN_LINK_FILE)) { - return null; - } - - try { - const fileContent = fs.readFileSync(FIRST_BROKEN_LINK_FILE, 'utf8'); - - // Check if the file is empty or contains whitespace only - if (!fileContent || fileContent.trim() === '') { - return null; - } - - // Try to parse the JSON content - try { - return JSON.parse(fileContent); - } catch (parseErr) { - console.error( - `Error parsing first broken link JSON: ${parseErr.message}` - ); - return null; - } - } catch (err) { - console.error(`Error reading first broken link: ${err.message}`); - return null; - } -} - -/** - * Initialize the broken links report files - * @returns {boolean} True if initialization was successful - */ -export function initializeReport() { - try { - // Create an empty array for the broken links report - fs.writeFileSync(BROKEN_LINKS_FILE, '[]', 'utf8'); - - // Reset the first broken link file by creating an empty file - // Using empty string as a clear indicator that no broken link has been recorded yet - fs.writeFileSync(FIRST_BROKEN_LINK_FILE, '', 'utf8'); - - console.debug('🔄 Initialized broken links reporting system'); - return true; - } catch (err) { - console.error(`Error initializing broken links report: ${err.message}`); - return false; - } -} diff --git a/cypress/support/run-e2e-specs.js b/cypress/support/run-e2e-specs.js index d39dfb4a2..71f1616fa 100644 --- a/cypress/support/run-e2e-specs.js +++ b/cypress/support/run-e2e-specs.js @@ -2,34 +2,10 @@ * InfluxData Documentation E2E Test Runner * * This script automates running Cypress end-to-end tests for the InfluxData documentation site. - * It handles starting a local Hugo server, mapping content files to their URLs, running Cypress tests, + * It handles starting a local Hugo server, mapping content files to their URLs, and running Cypress tests, * and reporting broken links. * - * Usage: node run-e2e-specs.js [file paths...] [--spec test // Display broken links report - const brokenLinksCount = displayBrokenLinksReport(); - - // Check if we might have special case failures - const hasSpecialCaseFailures = - results && - results.totalFailed > 0 && - brokenLinksCount === 0; - - if (hasSpecialCaseFailures) { - console.warn( - `ℹ️ Note: Tests failed (${results.totalFailed}) but no broken links were reported. This may be due to special case URLs (like Reddit) that return expected status codes.` - ); - } - - if ( - (results && results.totalFailed && results.totalFailed > 0 && !hasSpecialCaseFailures) || - brokenLinksCount > 0 - ) { - console.error( - `⚠️ Tests failed: ${results.totalFailed || 0} test(s) failed, ${brokenLinksCount || 0} broken links found` - ); - cypressFailed = true; - exitCode = 1; * - * Example: node run-e2e-specs.js content/influxdb/v2/write-data.md --spec cypress/e2e/content/article-links.cy.js + * Usage: node run-e2e-specs.js [file paths...] [--spec test specs...] 
*/ import { spawn } from 'child_process'; @@ -39,7 +15,6 @@ import path from 'path'; import cypress from 'cypress'; import net from 'net'; import { Buffer } from 'buffer'; -import { displayBrokenLinksReport, initializeReport } from './link-reporter.js'; import { HUGO_ENVIRONMENT, HUGO_PORT, @@ -119,7 +94,7 @@ async function main() { let exitCode = 0; let hugoStarted = false; -// (Lines 124-126 removed; no replacement needed) + // (Lines 124-126 removed; no replacement needed) // Add this signal handler to ensure cleanup on unexpected termination const cleanupAndExit = (code = 1) => { @@ -364,10 +339,6 @@ async function main() { // 4. Run Cypress tests let cypressFailed = false; try { - // Initialize/clear broken links report before running tests - console.log('Initializing broken links report...'); - initializeReport(); - console.log(`Running Cypress tests for ${urlList.length} URLs...`); // Add CI-specific configuration @@ -426,19 +397,13 @@ async function main() { clearInterval(hugoHealthCheckInterval); } - // Process broken links report - const brokenLinksCount = displayBrokenLinksReport(); - // Determine why tests failed const testFailureCount = results?.totalFailed || 0; - if (testFailureCount > 0 && brokenLinksCount === 0) { + if (testFailureCount > 0) { console.warn( `ℹ️ Note: ${testFailureCount} test(s) failed but no broken links were detected in the report.` ); - console.warn( - ' This usually indicates test errors unrelated to link validation.' - ); // Provide detailed failure analysis if (results) { @@ -531,14 +496,8 @@ async function main() { // but we'll still report other test failures cypressFailed = true; exitCode = 1; - } else if (brokenLinksCount > 0) { - console.error( - `⚠️ Tests failed: ${brokenLinksCount} broken link(s) detected` - ); - cypressFailed = true; - exitCode = 1; } else if (results) { - console.log('✅ Tests completed successfully'); + console.log('✅ e2e tests completed successfully'); } } catch (err) { console.error(`❌ Cypress execution error: ${err.message}`); @@ -609,9 +568,6 @@ async function main() { console.error(' • Check if test URLs are accessible manually'); console.error(' • Review Cypress screenshots/videos if available'); - // Still try to display broken links report if available - displayBrokenLinksReport(); - cypressFailed = true; exitCode = 1; } finally { diff --git a/lefthook.yml b/lefthook.yml index 68face524..67db3a771 100644 --- a/lefthook.yml +++ b/lefthook.yml @@ -111,16 +111,6 @@ pre-push: node cypress/support/run-e2e-specs.js --spec "cypress/e2e/content/article-links.cy.js" content/example.md exit $? - # Link validation runs in GitHub actions. - # You can still run it locally for development. - # e2e-links: - # tags: test,links - # glob: 'content/*.{md,html}' - # run: | - # echo "Running link checker for: {staged_files}" - # yarn test:links {staged_files} - # exit $? 
- # Manage Docker containers prune-legacy-containers: priority: 1 diff --git a/package.json b/package.json index 4dfb14b81..fc09f72a5 100644 --- a/package.json +++ b/package.json @@ -55,15 +55,6 @@ "test:codeblocks:v2": "docker compose run --rm --name v2-pytest v2-pytest", "test:codeblocks:stop-monitors": "./test/scripts/monitor-tests.sh stop cloud-dedicated-pytest && ./test/scripts/monitor-tests.sh stop clustered-pytest", "test:e2e": "node cypress/support/run-e2e-specs.js", - "test:links": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\"", - "test:links:v1": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/influxdb/{v1,enterprise_influxdb}/**/*.{md,html}", - "test:links:v2": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/influxdb/{cloud,v2}/**/*.{md,html}", - "test:links:v3": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/influxdb3/**/*.{md,html}", - "test:links:chronograf": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/chronograf/**/*.{md,html}", - "test:links:kapacitor": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/kapacitor/**/*.{md,html}", - "test:links:telegraf": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/telegraf/**/*.{md,html}", - "test:links:shared": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/shared/**/*.{md,html}", - "test:links:api-docs": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" /influxdb3/core/api/,/influxdb3/enterprise/api/,/influxdb3/cloud-dedicated/api/,/influxdb3/cloud-dedicated/api/v1/,/influxdb/cloud-dedicated/api/v1/,/influxdb/cloud-dedicated/api/management/,/influxdb3/cloud-dedicated/api/management/", "test:shortcode-examples": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/example.md", "audit:cli": "node ./helper-scripts/influxdb3-monolith/audit-cli-documentation.js both local", "audit:cli:3core": "node ./helper-scripts/influxdb3-monolith/audit-cli-documentation.js core local", From 9ea4acfb2b64bf8a08db5af64a6e281867446922 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 12 Aug 2025 18:12:45 -0500 Subject: [PATCH 084/122] fix(clustered): clarify compactor scaling guidance for CPU and memory Addresses customer confusion where scaling CPU alone doesn't improve compactor performance. Compactor concurrency scales based on memory allocation, not CPU count, so both resources should be scaled together. 
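For illustration, scaling both dimensions at once might look like the
following sketch (the CRD name, namespace, and field paths are hypothetical
placeholders, not the exact AppInstance schema; adapt them to your cluster's
actual configuration):

```bash
# Hypothetical sketch: bump Compactor CPU and memory in lockstep.
# Resource kind, name, and field paths below are placeholders.
kubectl --namespace influxdb patch appinstances.kubecfg.dev influxdb \
  --type merge \
  --patch '{
    "spec": {
      "package": {
        "spec": {
          "resources": {
            "compactor": {
              "requests": { "cpu": "8", "memory": "32Gi" },
              "limits": { "cpu": "8", "memory": "32Gi" }
            }
          }
        }
      }
    }
  }'
```

Scaling memory without CPU (or vice versa) leaves the other dimension as the
bottleneck, which is the source of the confusion this change documents.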
Closes influxdata/DAR#514 add related links --- content/influxdb3/clustered/admin/scale-cluster.md | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/content/influxdb3/clustered/admin/scale-cluster.md b/content/influxdb3/clustered/admin/scale-cluster.md index 0c8b2d9b2..bd8339718 100644 --- a/content/influxdb3/clustered/admin/scale-cluster.md +++ b/content/influxdb3/clustered/admin/scale-cluster.md @@ -8,9 +8,11 @@ menu: parent: Administer InfluxDB Clustered name: Scale your cluster weight: 207 -influxdb3/clustered/tags: [scale] +influxdb3/clustered/tags: [scale, performance, Kubernetes] related: - /influxdb3/clustered/reference/internals/storage-engine/ + - /influxdb3/clustered/write-data/best-practices/data-lifecycle/ + - /influxdb3/clustered/query-data/troubleshoot-and-optimize/optimize-queries/ - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits, Kubernetes resource requests and limits --- @@ -559,11 +561,14 @@ concurrency demands or reaches the hardware limits of your underlying nodes. ### Compactor -- **Recommended**: Maintain **1 Compactor pod** and use [vertical scaling](#vertical-scaling) (especially -increasing the available CPU) for the Compactor. +- **Recommended**: Maintain **1 Compactor pod** and use [vertical scaling](#vertical-scaling) for the Compactor. +Scale CPU and memory resources together, as compactor concurrency settings scale based on memory, not CPU count. - Because compaction is a compute-heavy process, horizontal scaling increases compaction throughput, but not as efficiently as vertical scaling. +> [!Important] +> When scaling the Compactor, scale CPU and memory resources together. + ### Garbage collector The [Garbage collector](/influxdb3/clustered/reference/internals/storage-engine/#garbage-collector) is a lightweight process that typically doesn't require From a21c06bb4f663fee53d74fe2999117160f7623f2 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 18 Aug 2025 15:16:38 -0500 Subject: [PATCH 085/122] fix(v2): OSS replication:- Fix (simplify) list formatting; remove nested lists.- Convert numbered list to numbered headers to replace nested lists- Add additional headers to show alternatives- Specify Enterprise v1- Update callouts --- .../write-data/replication/replicate-data.md | 2 +- .../write-data/replication/replicate-data.md | 302 ++++++++++-------- 2 files changed, 174 insertions(+), 130 deletions(-) diff --git a/content/influxdb/cloud/write-data/replication/replicate-data.md b/content/influxdb/cloud/write-data/replication/replicate-data.md index 5389bd4d4..1d62daf3e 100644 --- a/content/influxdb/cloud/write-data/replication/replicate-data.md +++ b/content/influxdb/cloud/write-data/replication/replicate-data.md @@ -16,4 +16,4 @@ source: /shared/influxdb-v2/write-data/replication/replicate-data.md --- +// SOURCE content/shared/influxdb-v2/write-data/replication/replicate-data.md --> diff --git a/content/shared/influxdb-v2/write-data/replication/replicate-data.md b/content/shared/influxdb-v2/write-data/replication/replicate-data.md index 8c05a2fe0..90b23280e 100644 --- a/content/shared/influxdb-v2/write-data/replication/replicate-data.md +++ b/content/shared/influxdb-v2/write-data/replication/replicate-data.md @@ -1,9 +1,9 @@ Use InfluxDB replication streams (InfluxDB Edge Data Replication) to replicate the incoming data of select buckets to one or more buckets on a remote -InfluxDB OSS, InfluxDB Cloud, or InfluxDB Enterprise instance. 
+InfluxDB OSS, InfluxDB Cloud, or InfluxDB Enterprise v1 instance. -Replicate data from InfluxDB OSS to InfluxDB Cloud, InfluxDB OSS, or InfluxDB Enterprise. +Replicate data from InfluxDB OSS to InfluxDB Cloud, InfluxDB OSS, or InfluxDB Enterprise v1. - [Configure a replication stream](#configure-a-replication-stream) - [Replicate downsampled or processed data](#replicate-downsampled-or-processed-data) @@ -17,10 +17,9 @@ Use the [`influx` CLI](/influxdb/version/tools/influx-cli/) or the [InfluxDB {{< current-version >}} API](/influxdb/version/reference/api/) to configure a replication stream. -{{% note %}} -To replicate data to InfluxDB OSS or InfluxDB Enterprise, adjust the -remote connection values accordingly. -{{% /note %}} +> [!Note] +> To replicate data to InfluxDB OSS or InfluxDB Enterprise v1, adjust the +> remote connection values accordingly. {{< tabs-wrapper >}} {{% tabs %}} @@ -30,156 +29,202 @@ remote connection values accordingly. {{% tab-content %}} +### Step 1: Create or find a remote connection -1. In your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS instance, use - the `influx remote create` command to create a remote connection to replicate data to. +- [Create a remote connection](#create-a-remote-connection-cli) +- [Use an existing remote connection](#use-an-existing-remote-connection-cli) - **Provide the following:** +#### Create a remote connection (CLI) - - Remote connection name - {{% show-in "v2" %}}- Remote InfluxDB instance URL{{% /show-in %}} - {{% show-in "v2" %}}- Remote InfluxDB API token _(API token must have write access to the target bucket)_{{% /show-in %}} - {{% show-in "v2" %}}- Remote InfluxDB organization ID{{% /show-in %}} - {{% show-in "cloud,cloud-serverless" %}}- [InfluxDB Cloud region URL](/influxdb/cloud/reference/regions/){{% /show-in %}} - {{% show-in "cloud,cloud-serverless" %}}- InfluxDB Cloud API token _(API token must have write access to the target bucket)_{{% /show-in %}} - {{% show-in "cloud,cloud-serverless" %}}- InfluxDB Cloud organization ID{{% /show-in %}} +In your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS instance, use +the `influx remote create` command and provide the following arguments for the remote instance: - ```sh - influx remote create \ - --name example-remote-name \ - --remote-url https://cloud2.influxdata.com \ - --remote-api-token mYsuP3r5Ecr37t0k3n \ - --remote-org-id 00xoXXoxXX00 - ``` +{{% show-in "v2" %}} +- Remote connection name +- Remote InfluxDB instance URL +- Remote InfluxDB API token _(API token must have write access to the target bucket)_ +- Remote InfluxDB organization ID +{{% /show-in %}} +{{% show-in "cloud,cloud-serverless" %}} +- Remote connection name +- [InfluxDB Cloud region URL](/influxdb/cloud/reference/regions/) +- InfluxDB Cloud API token _(API token must have write access to the target bucket)_ +- InfluxDB Cloud organization ID +{{% /show-in %}} - If you already have remote InfluxDB connections configured, you can use an existing connection. To view existing connections, run `influx remote list`. + ```sh + influx remote create \ + --name example-remote-name \ + --remote-url https://cloud2.influxdata.com \ + --remote-api-token mYsuP3r5Ecr37t0k3n \ + --remote-org-id 00xoXXoxXX00 + ``` -2. In your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS instance, use the - `influx replication create` command to create a replication stream. +#### Use an existing remote connection (CLI) + +Alternatively, you can use an existing connection that you have already configured. 
+To retrieve existing connections, run `influx remote list`. + +### Step 2: Create a replication stream (CLI) + +In your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS instance, use the +`influx replication create` command and provide the following arguments: - **Provide the following:** +{{% show-in "v2" %}} +- Replication stream name +- Remote connection ID (created in the previous step) +- Local bucket ID to replicate writes from +- Remote bucket name or ID to replicate writes to. If replicating to **InfluxDB Enterprise v1**, use the `db-name/rp-name` bucket name syntax.{{% /show-in %}} +{{% show-in "cloud,cloud-serverless" %}} +- Replication stream name +- Remote connection ID (created in the previous step) +- InfluxDB OSS bucket ID to replicate writes from +- InfluxDB Cloud bucket ID to replicate writes to +{{% /show-in %}} - - Replication stream name - {{% show-in "v2" %}}- Remote connection ID{{% /show-in %}} - {{% show-in "v2" %}}- Local bucket ID to replicate writes from{{% /show-in %}} - {{% show-in "v2" %}}- Remote bucket name or ID to replicate writes to. If replicating to **InfluxDB Enterprise**, use the `db-name/rp-name` bucket name syntax.{{% /show-in %}} - {{% show-in "cloud,cloud-serverless" %}}- Remote connection ID{{% /show-in %}} - {{% show-in "cloud,cloud-serverless" %}}- InfluxDB OSS bucket ID to replicate writes from{{% /show-in %}} - {{% show-in "cloud,cloud-serverless" %}}- InfluxDB Cloud bucket ID to replicate writes to{{% /show-in %}} +```sh +influx replication create \ + --name REPLICATION_STREAM_NAME \ + --remote-id REPLICATION_REMOTE_ID \ + --local-bucket-id INFLUX_BUCKET_ID \ + --remote-bucket REMOTE_INFLUX_BUCKET_NAME +``` - - ```sh - influx replication create \ - --name REPLICATION_STREAM_NAME \ - --remote-id REPLICATION_REMOTE_ID \ - --local-bucket-id INFLUX_BUCKET_ID \ - --remote-bucket REMOTE_INFLUX_BUCKET_NAME - ``` - -Once a replication stream is created, InfluxDB {{% show-in "v2" %}}OSS{{% /show-in %}} -will replicate all writes to the specified bucket to the {{% show-in "v2" %}}remote {{% /show-in %}} +After you create the replication stream, InfluxDB {{% show-in "v2" %}}OSS{{% /show-in %}} +replicates all writes to the specified local bucket to the {{% show-in "v2" %}}remote {{% /show-in %}} InfluxDB {{% show-in "cloud,cloud-serverless" %}}Cloud {{% /show-in %}}bucket. Use the `influx replication list` command to view information such as the current queue size, max queue size, and latest status code. - {{% /tab-content %}} {{% tab-content %}} -1. Send a `POST` request to your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS `/api/v2/remotes` endpoint to create a remote connection to replicate data to. 
+### Step 1: Create or find a remote connection (API) - {{< keep-url >}} - {{< api-endpoint endpoint="localhost:8086/api/v2/remotes" method="POST" api-ref="/influxdb/version/api/#operation/PostRemoteConnection" >}} +- [Create a remote connection](#create-a-remote-connection-api) +- [Use an existing remote connection](#use-an-existing-remote-connection-api) - Include the following in your request: +#### Create a remote connection (API) - - **Request method:** `POST` - - **Headers:** - - **Authorization:** `Token` scheme with your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS [API token](/influxdb/version/admin/tokens/) - - **Content-type:** `application/json` - - **Request body:** JSON object with the following fields: - {{< req type="key" >}} - - {{< req "\*" >}} **allowInsecureTLS:** All insecure TLS connections - - **description:** Remote description - - {{< req "\*" >}} **name:** Remote connection name - - {{< req "\*" >}} **orgID:** {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS organization ID - {{% show-in "v2" %}}- {{< req "\*" >}} **remoteAPIToken:** Remote InfluxDB API token _(API token must have write access to the target bucket)_{{% /show-in %}} - {{% show-in "v2" %}}- {{< req "\*" >}} **remoteOrgID:** Remote InfluxDB organization ID{{% /show-in %}} - {{% show-in "v2" %}}- {{< req "\*" >}} **remoteURL:** Remote InfluxDB instance URL{{% /show-in %}} - {{% show-in "cloud,cloud-serverless" %}}- {{< req "\*" >}} **remoteAPIToken:** InfluxDB Cloud API token _(API token must have write access to the target bucket)_{{% /show-in %}} - {{% show-in "cloud,cloud-serverless" %}}- {{< req "\*" >}} **remoteOrgID:** InfluxDB Cloud organization ID{{% /show-in %}} - {{% show-in "cloud,cloud-serverless" %}}- {{< req "\*" >}} **remoteURL:** [InfluxDB Cloud region URL](/influxdb/cloud/reference/regions/){{% /show-in %}} +To create a remote connection to replicate data to, +send a `POST` request to your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS `/api/v2/remotes` endpoint: - {{< keep-url >}} - ```sh - curl --request POST http://localhost:8086/api/v2/remotes \ - --header 'Authorization: Token INFLUX_OSS_TOKEN' \ - --data '{ - "allowInsecureTLS": false, - "description": "Example remote description", - "name": "Example remote name", - "orgID": "INFLUX_OSS_ORG_ID", - "remoteAPIToken": "REMOTE_INFLUX_TOKEN", - "remoteOrgID": "REMOTE_INFLUX_ORG_ID", - "remoteURL": "https://cloud2.influxdata.com" - }' - ``` +{{< keep-url >}} +{{< api-endpoint endpoint="localhost:8086/api/v2/remotes" method="POST" api-ref="/influxdb/version/api/#operation/PostRemoteConnection" >}} - If you already have remote InfluxDB connections configured, you can use an - existing connection. To view existing connections, use the `/api/v2/remotes` - endpoint with the `GET` request method. 
+Include the following parameters in your request: - {{< keep-url >}} - {{< api-endpoint endpoint="localhost:8086/api/v2/remotes" method="GET" api-ref="/influxdb/version/api/#operation/GetRemoteConnections" >}} +- **Request method:** `POST` +- **Headers:** + - **Authorization:** `Token` scheme with your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS [API token](/influxdb/version/admin/tokens/) + - **Content-type:** `application/json` +{{% show-in "v2" %}} +- **Request body:** JSON object with the following fields: + {{< req type="key" >}} + - {{< req "\*" >}} **allowInsecureTLS:** All insecure TLS connections + - **description:** Remote description + - {{< req "\*" >}} **name:** Remote connection name + - {{< req "\*" >}} **orgID:** {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS organization ID + - {{< req "\*" >}} **remoteAPIToken:** Remote InfluxDB API token _(API token must have write access to the target bucket)_ + - {{< req "\*" >}} **remoteOrgID:** Remote InfluxDB organization ID + - {{< req "\*" >}} **remoteURL:** Remote InfluxDB instance URL +{{% /show-in %}} +{{% show-in "cloud,cloud-serverless" %}} +- **Request body:** JSON object with the following fields: + {{< req type="key" >}} + - {{< req "\*" >}} **allowInsecureTLS:** All insecure TLS connections + - **description:** Remote description + - {{< req "\*" >}} **name:** Remote connection name + - {{< req "\*" >}} **orgID:** {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS organization ID + - {{< req "\*" >}} **remoteAPIToken:** InfluxDB Cloud API token _(API token must have write access to the target bucket)_ + - {{< req "\*" >}} **remoteOrgID:** InfluxDB Cloud organization ID + - {{< req "\*" >}} **remoteURL:** [InfluxDB Cloud region URL](/influxdb/cloud/reference/regions/) +{{% /show-in %}} - Include the following in your request: +{{< keep-url >}} +```sh +curl --request POST http://localhost:8086/api/v2/remotes \ + --header 'Authorization: Token INFLUX_OSS_TOKEN' \ + --data '{ + "allowInsecureTLS": false, + "description": "Example remote description", + "name": "Example remote name", + "orgID": "INFLUX_OSS_ORG_ID", + "remoteAPIToken": "REMOTE_INFLUX_TOKEN", + "remoteOrgID": "REMOTE_INFLUX_ORG_ID", + "remoteURL": "https://cloud2.influxdata.com" + }' +``` - - **Request method:** `GET` - - **Headers:** - - **Authorization:** `Token` scheme with your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS [API token](/influxdb/version/admin/tokens/) - - **Query parameters:** - - **orgID:** {{% show-in "v2" %}}Local{{% /show-in %}} InfluxDB OSS organization ID +#### Use an existing remote connection - {{< keep-url >}} - ```sh - curl --request GET \ - http://localhost:8086/api/v2/remotes?orgID=INFLUX_OSS_ORG_ID \ - --header 'Authorization: Token INFLUX_OSS_TOKEN' \ - ``` +Alternatively, you can use an +existing connection that you have already configured. +To retrieve existing connections, use the `/api/v2/remotes` +endpoint with the `GET` request method: -2. Send a `POST` request to your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS - `/api/v2/replications` endpoint to create a replication stream. 
+{{< keep-url >}} +{{< api-endpoint endpoint="localhost:8086/api/v2/remotes" method="GET" api-ref="/influxdb/version/api/#operation/GetRemoteConnections" >}} - {{< keep-url >}} - {{< api-endpoint endpoint="localhost:8086/api/v2/remotes" method="POST" api-ref="/influxdb/version/api/#operation/PostRemoteConnection" >}} - - Include the following in your request: +Include the following parameters in your request: - - **Request method:** `POST` - - **Headers:** - - **Authorization:** `Token` scheme with your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS [API token](/influxdb/version/admin/tokens/) - - **Content-type:** `application/json` - - **Request body:** JSON object with the following fields: - {{< req type="key" >}} - - **dropNonRetryableData:** Drop data when a non-retryable error is encountered. - - {{< req "\*" >}} **localBucketID:** {{% show-in "v2" %}}Local{{% /show-in %}} InfluxDB OSS bucket ID to replicate writes from. - - {{< req "\*" >}} **maxAgeSeconds:** Maximum age of data in seconds before it is dropped (default is `604800`, must be greater than or equal to `0`). - - {{< req "\*" >}} **maxQueueSizeBytes:** Maximum replication queue size in bytes (default is `67108860`, must be greater than or equal to `33554430`). - - {{< req "\*" >}} **name:** Replication stream name. - - {{< req "\*" >}} **orgID:** {{% show-in "v2" %}}Local{{% /show-in %}} InfluxDB OSS organization ID. - {{% show-in "v2" %}}- {{< req "\*" >}} **remoteBucketID:** Remote bucket ID to replicate writes to.{{% /show-in %}} - {{% show-in "v2" %}}- {{< req "\*" >}} **remoteBucketName:** Remote bucket name to replicate writes to. If replicating to **InfluxDB Enterprise**, use the `db-name/rp-name` bucket name syntax.{{% /show-in %}} - {{% show-in "cloud,cloud-serverless" %}}- {{< req "\*" >}} **remoteBucketID:** InfluxDB Cloud bucket ID to replicate writes to.{{% /show-in %}} - {{% show-in "cloud,cloud-serverless" %}}- {{< req "\*" >}} **remoteBucketName:** InfluxDB Cloud bucket name to replicate writes to.{{% /show-in %}} - - {{< req "\*" >}} **remoteID:** Remote connection ID +- **Headers:** + - **Authorization:** `Token` scheme with your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS [API token](/influxdb/version/admin/tokens/) +- **Query parameters:** + - **orgID:** {{% show-in "v2" %}}Local{{% /show-in %}} InfluxDB OSS organization ID - {{% note %}} -`remoteBucketID` and `remoteBucketName` are mutually exclusive. -{{% show-in "v2" %}}If replicating to **InfluxDB Enterprise**, use `remoteBucketName` with the `db-name/rp-name` bucket name syntax.{{% /show-in %}} - {{% /note %}} +{{< keep-url >}} +```sh +curl --request GET \ + http://localhost:8086/api/v2/remotes?orgID=INFLUX_OSS_ORG_ID \ + --header 'Authorization: Token INFLUX_OSS_TOKEN' \ +``` + +### Step 2: Create a replication stream (API) + +Send a `POST` request to your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS +`/api/v2/replications` endpoint to create a replication stream. 
+
+{{< keep-url >}}
+{{< api-endpoint endpoint="localhost:8086/api/v2/replications" method="POST" api-ref="/influxdb/version/api/#operation/PostReplication" >}}
+
+Include the following parameters in your request:
+
+- **Headers:**
+  - **Authorization:** `Token` scheme with your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS [API token](/influxdb/version/admin/tokens/)
+  - **Content-type:** `application/json`
+{{% show-in "v2" %}}
+- **Request body:** JSON object with the following fields:
+  {{< req type="key" >}}
+  - **dropNonRetryableData:** Drop data when a non-retryable error is encountered.
+  - {{< req "\*" >}} **localBucketID:** Local InfluxDB OSS bucket ID to replicate writes from.
+  - {{< req "\*" >}} **maxAgeSeconds:** Maximum age of data in seconds before it is dropped (default is `604800`, must be greater than or equal to `0`).
+  - {{< req "\*" >}} **maxQueueSizeBytes:** Maximum replication queue size in bytes (default is `67108860`, must be greater than or equal to `33554430`).
+  - {{< req "\*" >}} **name:** Replication stream name.
+  - {{< req "\*" >}} **orgID:** Local InfluxDB OSS organization ID.
+  - {{< req "\*" >}} **remoteBucketID:** Remote bucket ID to replicate writes to.
+  - {{< req "\*" >}} **remoteBucketName:** Remote bucket name to replicate writes to. If replicating to **InfluxDB Enterprise v1**, use the `db-name/rp-name` bucket name syntax.
+  - {{< req "\*" >}} **remoteID:** Remote connection ID.
+{{% /show-in %}}
+{{% show-in "cloud,cloud-serverless" %}}
+- **Request body:** JSON object with the following fields:
+  {{< req type="key" >}}
+  - **dropNonRetryableData:** Drop data when a non-retryable error is encountered
+  - {{< req "\*" >}} **localBucketID:** InfluxDB OSS bucket ID to replicate writes from
+  - {{< req "\*" >}} **maxAgeSeconds:** Maximum age of data in seconds before it is dropped (default is `604800`, must be greater than or equal to `0`)
+  - {{< req "\*" >}} **maxQueueSizeBytes:** Maximum replication queue size in bytes (default is `67108860`, must be greater than or equal to `33554430`)
+  - {{< req "\*" >}} **name:** Replication stream name
+  - {{< req "\*" >}} **orgID:** InfluxDB OSS organization ID
+  - {{< req "\*" >}} **remoteBucketID:** InfluxDB Cloud bucket ID to replicate writes to (mutually exclusive with `remoteBucketName`)
+  - {{< req "\*" >}} **remoteBucketName:** InfluxDB Cloud bucket name to replicate writes to (mutually exclusive with `remoteBucketID`)
+  - {{< req "\*" >}} **remoteID:** Remote connection ID
+{{% /show-in %}}
+
+> [!Note]
+> `remoteBucketID` and `remoteBucketName` are mutually exclusive.
+> {{% show-in "v2" %}}If replicating to **InfluxDB Enterprise v1**, use `remoteBucketName` with the `db-name/rp-name` bucket name syntax.{{% /show-in %}}
 
 {{< keep-url >}}
 ```sh
@@ -197,19 +242,18 @@ curl --request POST http://localhost:8086/api/v2/replications \
   }'
 ```
 
-Once a replication stream is created, InfluxDB {{% show-in "v2" %}}OSS{{% /show-in %}}
-will replicate all writes from the specified local bucket to the {{% show-in "v2" %}}remote {{% /show-in %}}
+After you create a replication stream, InfluxDB {{% show-in "v2" %}}OSS{{% /show-in %}}
+replicates all writes from the specified local bucket to the {{% show-in "v2" %}}remote {{% /show-in %}}
 InfluxDB {{% show-in "cloud,cloud-serverless" %}}Cloud {{% /show-in %}}bucket.
 
 To get information such as the current queue size, max queue size, and latest status
-code for each replication stream, send a `GET` request to your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS `/api/v2/replications` endpoint.
+code for each replication stream, send a `GET` request to your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS `/api/v2/replications` endpoint: {{< keep-url >}} {{< api-endpoint endpoint="localhost:8086/api/v2/replications" method="GET" api-ref="/influxdb/version/api/#operation/GetReplications" >}} -Include the following in your request: +Include the following parameters in your request: -- **Request method:** `GET` - **Headers:** - **Authorization:** `Token` scheme with your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS [API token](/influxdb/version/admin/tokens/) - **Query parameters:** From ddb9a5584db525c8f04fab9e7a8dc6d95336b52b Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 12 Aug 2025 18:12:45 -0500 Subject: [PATCH 086/122] fix(clustered): clarify compactor scaling guidance for CPU and memory Addresses customer confusion where scaling CPU alone doesn't improve compactor performance. Compactor concurrency scales based on memory allocation, not CPU count, so both resources should be scaled together. Closes influxdata/DAR#514 add related links fix(clustered): correct anchor link in scale-cluster documentation Fix broken internal anchor link from #rrecommended-scaling-strategies-per-component to #recommended-scaling-strategies-per-component (removed extra 'r'). This was used to test the improved link-checker anchor validation functionality. fix(clustered): correct anchor link in scale-cluster documentation Fixes broken anchor link #rrecommended-scaling-strategies-per-component to the correct #recommended-scaling-strategies-per-component --- TESTING.md | 25 +++++++++++++++++++ .../clustered/admin/scale-cluster.md | 11 +++++--- 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/TESTING.md b/TESTING.md index dba8f892e..09330c298 100644 --- a/TESTING.md +++ b/TESTING.md @@ -184,6 +184,31 @@ link-checker check public/path/to/file.html link-checker config ``` +### Link Resolution Behavior + +The link-checker automatically handles relative link resolution based on the input type: + +**Local Files → Local Resolution** +```bash +# When checking local files, relative links resolve to the local filesystem +link-checker check public/influxdb3/core/admin/scale-cluster/index.html +# Relative link /influxdb3/clustered/tags/kubernetes/ becomes: +# → /path/to/public/influxdb3/clustered/tags/kubernetes/index.html +``` + +**URLs → Production Resolution** +```bash +# When checking URLs, relative links resolve to the production site +link-checker check https://docs.influxdata.com/influxdb3/core/admin/scale-cluster/ +# Relative link /influxdb3/clustered/tags/kubernetes/ becomes: +# → https://docs.influxdata.com/influxdb3/clustered/tags/kubernetes/ +``` + +**Why This Matters** +- **Testing new content**: Tag pages generated locally will be found when testing local files +- **Production validation**: Production URLs validate against the live site +- **No false positives**: New content won't appear broken when testing locally before deployment + ### Content Mapping Workflows #### Scenario 1: Map and check InfluxDB 3 Core content diff --git a/content/influxdb3/clustered/admin/scale-cluster.md b/content/influxdb3/clustered/admin/scale-cluster.md index 0c8b2d9b2..bd8339718 100644 --- a/content/influxdb3/clustered/admin/scale-cluster.md +++ b/content/influxdb3/clustered/admin/scale-cluster.md @@ -8,9 +8,11 @@ menu: parent: Administer InfluxDB Clustered name: Scale your cluster weight: 207 -influxdb3/clustered/tags: [scale] +influxdb3/clustered/tags: [scale, performance, 
Kubernetes] related: - /influxdb3/clustered/reference/internals/storage-engine/ + - /influxdb3/clustered/write-data/best-practices/data-lifecycle/ + - /influxdb3/clustered/query-data/troubleshoot-and-optimize/optimize-queries/ - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits, Kubernetes resource requests and limits --- @@ -559,11 +561,14 @@ concurrency demands or reaches the hardware limits of your underlying nodes. ### Compactor -- **Recommended**: Maintain **1 Compactor pod** and use [vertical scaling](#vertical-scaling) (especially -increasing the available CPU) for the Compactor. +- **Recommended**: Maintain **1 Compactor pod** and use [vertical scaling](#vertical-scaling) for the Compactor. +Scale CPU and memory resources together, as compactor concurrency settings scale based on memory, not CPU count. - Because compaction is a compute-heavy process, horizontal scaling increases compaction throughput, but not as efficiently as vertical scaling. +> [!Important] +> When scaling the Compactor, scale CPU and memory resources together. + ### Garbage collector The [Garbage collector](/influxdb3/clustered/reference/internals/storage-engine/#garbage-collector) is a lightweight process that typically doesn't require From cba3b21f1c9dc005dac277a02dca126070be4912 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 18 Aug 2025 17:40:02 -0500 Subject: [PATCH 087/122] docs(testing): document link-checker binary release process Add comprehensive documentation for maintainers on how to: - Create releases in docs-tooling (automated) - Manually distribute binaries to docs-v2 (required for private repo) - Update workflow references when needed This addresses the missing process documentation for link-checker binary distribution between the two repositories. feat(ci): update link-checker to v1.2.2 and add manual sync workflow - Update pr-link-check.yml to use link-checker-v1.2.2 with latest fixes - Add sync-link-checker-binary.yml for manual binary distribution - Improvements in v1.2.2: base URL detection, anchor validation, JSON parsing The v1.2.2 release fixes the Hugo base URL detection issue and improves anchor link validation that was tested in this PR. 
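As a usage sketch, downloading and verifying the binary from the new release
might look like this (URLs and asset names assume the release layout described
above, and checksums.txt is assumed to be in `sha256sum` format):

```bash
# Download the binary and checksum manifest from the docs-v2 release
curl -L -o link-checker-linux-x86_64 \
  "https://github.com/influxdata/docs-v2/releases/download/link-checker-v1.2.2/link-checker-linux-x86_64"
curl -L -o checksums.txt \
  "https://github.com/influxdata/docs-v2/releases/download/link-checker-v1.2.2/checksums.txt"

# Verify the checksum before running the binary
grep 'link-checker-linux-x86_64' checksums.txt | sha256sum --check -

chmod +x link-checker-linux-x86_64
./link-checker-linux-x86_64 --version
```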
--- .github/workflows/pr-link-check.yml | 2 +- .../workflows/sync-link-checker-binary.yml | 68 +++++++++++++++++++ TESTING.md | 41 +++++++++++ 3 files changed, 110 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/sync-link-checker-binary.yml diff --git a/.github/workflows/pr-link-check.yml b/.github/workflows/pr-link-check.yml index b0764089a..5f5dacca8 100644 --- a/.github/workflows/pr-link-check.yml +++ b/.github/workflows/pr-link-check.yml @@ -95,7 +95,7 @@ jobs: curl -L -H "Accept: application/vnd.github+json" \ -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ -o link-checker-info.json \ - "https://api.github.com/repos/influxdata/docs-v2/releases/tags/link-checker-v1.0.0" + "https://api.github.com/repos/influxdata/docs-v2/releases/tags/link-checker-v1.2.2" # Extract download URL for linux binary DOWNLOAD_URL=$(jq -r '.assets[] | select(.name | test("link-checker.*linux")) | .url' link-checker-info.json) diff --git a/.github/workflows/sync-link-checker-binary.yml b/.github/workflows/sync-link-checker-binary.yml new file mode 100644 index 000000000..b0ac46c68 --- /dev/null +++ b/.github/workflows/sync-link-checker-binary.yml @@ -0,0 +1,68 @@ +name: Sync Link Checker Binary from docs-tooling + +on: + workflow_dispatch: + inputs: + version: + description: 'Link checker version to sync (e.g., v1.2.2)' + required: true + type: string + +jobs: + sync-binary: + name: Sync link-checker binary from docs-tooling + runs-on: ubuntu-latest + + steps: + - name: Download binary from docs-tooling release + run: | + echo "Downloading link-checker ${{ inputs.version }} from docs-tooling..." + + # Download binary from docs-tooling release + curl -L -H "Accept: application/octet-stream" \ + -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ + -o link-checker-linux-x86_64 \ + "https://github.com/influxdata/docs-tooling/releases/download/link-checker-${{ inputs.version }}/link-checker-linux-x86_64" + + # Download checksums + curl -L -H "Accept: application/octet-stream" \ + -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ + -o checksums.txt \ + "https://github.com/influxdata/docs-tooling/releases/download/link-checker-${{ inputs.version }}/checksums.txt" + + # Verify downloads + ls -la link-checker-linux-x86_64 checksums.txt + + - name: Create docs-v2 release + run: | + echo "Creating link-checker-${{ inputs.version }} release in docs-v2..." + + gh release create \ + --title "Link Checker Binary ${{ inputs.version }}" \ + --notes "Link validation tooling binary for docs-v2 GitHub Actions workflows. + + This binary is distributed from the docs-tooling repository release link-checker-${{ inputs.version }}. + + ### Usage in GitHub Actions + + The binary is automatically downloaded by docs-v2 workflows for link validation. + + ### Manual Usage + + \`\`\`bash + # Download and make executable + curl -L -o link-checker https://github.com/influxdata/docs-v2/releases/download/link-checker-${{ inputs.version }}/link-checker-linux-x86_64 + chmod +x link-checker + + # Verify installation + ./link-checker --version + \`\`\` + + ### Changes in ${{ inputs.version }} + + See the [docs-tooling release](https://github.com/influxdata/docs-tooling/releases/tag/link-checker-${{ inputs.version }}) for detailed changelog." 
\ + link-checker-${{ inputs.version }} \ + link-checker-linux-x86_64 \ + checksums.txt + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file diff --git a/TESTING.md b/TESTING.md index 09330c298..233bb3a36 100644 --- a/TESTING.md +++ b/TESTING.md @@ -171,6 +171,47 @@ cargo build --release cp target/release/link-checker /usr/local/bin/ ``` +#### Binary Release Process + +**For maintainers:** To create a new link-checker release in docs-v2: + +1. **Create release in docs-tooling** (builds and releases binary automatically): + ```bash + cd docs-tooling + git tag link-checker-v1.2.x + git push origin link-checker-v1.2.x + ``` + +2. **Manually distribute to docs-v2** (required due to private repository access): + ```bash + # Download binary from docs-tooling release + curl -L -H "Authorization: Bearer $(gh auth token)" \ + -o link-checker-linux-x86_64 \ + "https://github.com/influxdata/docs-tooling/releases/download/link-checker-v1.2.x/link-checker-linux-x86_64" + + curl -L -H "Authorization: Bearer $(gh auth token)" \ + -o checksums.txt \ + "https://github.com/influxdata/docs-tooling/releases/download/link-checker-v1.2.x/checksums.txt" + + # Create docs-v2 release + gh release create \ + --repo influxdata/docs-v2 \ + --title "Link Checker Binary v1.2.x" \ + --notes "Link validation tooling binary for docs-v2 GitHub Actions workflows." \ + link-checker-v1.2.x \ + link-checker-linux-x86_64 \ + checksums.txt + ``` + +3. **Update workflow reference** (if needed): + ```bash + # Update .github/workflows/pr-link-check.yml line 98 to use new version + sed -i 's/link-checker-v[0-9.]*/link-checker-v1.2.x/' .github/workflows/pr-link-check.yml + ``` + +> [!Note] +> The manual distribution is required because docs-tooling is a private repository and the default GitHub token doesn't have cross-repository access for private repos. + #### Core Commands ```bash From 0001c1cfc44b8475813c0770b8082f118242ac6c Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 18 Aug 2025 18:21:02 -0500 Subject: [PATCH 088/122] fix(v2): missing (API) in heading --- .../shared/influxdb-v2/write-data/replication/replicate-data.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/shared/influxdb-v2/write-data/replication/replicate-data.md b/content/shared/influxdb-v2/write-data/replication/replicate-data.md index 90b23280e..cabc178b6 100644 --- a/content/shared/influxdb-v2/write-data/replication/replicate-data.md +++ b/content/shared/influxdb-v2/write-data/replication/replicate-data.md @@ -159,7 +159,7 @@ curl --request POST http://localhost:8086/api/v2/remotes \ }' ``` -#### Use an existing remote connection +#### Use an existing remote connection (API) Alternatively, you can use an existing connection that you have already configured. From b510e6bac1e19c8954d6a401e54136af4fd5c666 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 19 Aug 2025 08:38:36 -0500 Subject: [PATCH 089/122] fix(v2): broken link fragment --- content/influxdb/v2/install/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb/v2/install/_index.md b/content/influxdb/v2/install/_index.md index 2a7b4bc3a..dab19f201 100644 --- a/content/influxdb/v2/install/_index.md +++ b/content/influxdb/v2/install/_index.md @@ -473,7 +473,7 @@ _If necessary, adjust the example file paths and utilities for your system._ https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_linux_arm64.tar.gz ``` -2. 
[Choose the InfluxData key-pair for your OS version](#choose-the-influxdata-key-pair-for-your-system). +2. [Choose the InfluxData key-pair for your OS version](#choose-the-influxdata-key-pair-for-your-os-version). 3. {{< req text="Recommended:" color="magenta" >}}: Verify the authenticity of the downloaded binary--for example, enter the following command in your terminal. From 116e4fe70a20e7b6267d8dd370378e1bc8d275fc Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 19 Aug 2025 08:43:45 -0500 Subject: [PATCH 090/122] config(link-checker): exclude StackExchange network URLs Add exclusion patterns for StackExchange sites to both production and default link-checker configurations: - *.stackexchange.com - stackoverflow.com - *.stackoverflow.com These sites often block automated requests/bots, causing false positive link validation failures in CI environments. --- .ci/link-checker/default.lycherc.toml | 5 +++++ .ci/link-checker/production.lycherc.toml | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/.ci/link-checker/default.lycherc.toml b/.ci/link-checker/default.lycherc.toml index 22f97a0f9..259efd76a 100644 --- a/.ci/link-checker/default.lycherc.toml +++ b/.ci/link-checker/default.lycherc.toml @@ -50,6 +50,11 @@ exclude = [ # detection) "^https?://github\\.com", + # StackExchange network URLs (often block automated requests) + "^https?://.*\\.stackexchange\\.com", + "^https?://stackoverflow\\.com", + "^https?://.*\\.stackoverflow\\.com", + # Common documentation placeholders "YOUR_.*", "REPLACE_.*", diff --git a/.ci/link-checker/production.lycherc.toml b/.ci/link-checker/production.lycherc.toml index 9b8be5aa3..f8410208c 100644 --- a/.ci/link-checker/production.lycherc.toml +++ b/.ci/link-checker/production.lycherc.toml @@ -58,6 +58,11 @@ exclude = [ "^https?://reddit\\.com", "^https?://.*\\.reddit\\.com", + # StackExchange network URLs (often block automated requests) + "^https?://.*\\.stackexchange\\.com", + "^https?://stackoverflow\\.com", + "^https?://.*\\.stackoverflow\\.com", + # InfluxData support URLs (certificate/SSL issues in CI) "^https?://support\\.influxdata\\.com", From aaf475beef090e37f5486d98058baa0815df738b Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 19 Aug 2025 08:45:40 -0500 Subject: [PATCH 091/122] fix(v2): replace broken link fragment with new URL --- content/influxdb/v2/install/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb/v2/install/_index.md b/content/influxdb/v2/install/_index.md index dab19f201..2896900cf 100644 --- a/content/influxdb/v2/install/_index.md +++ b/content/influxdb/v2/install/_index.md @@ -675,7 +675,7 @@ data isn't deleted if you delete the container._ flags for initial setup options and file system mounts. 
_If you don't specify InfluxDB initial setup options, you can -[set up manually](#set-up-influxdb) later using the UI or CLI in a running +[set up manually](/influxdb/v2/get-started/setup/) later using the UI or CLI in a running container._ {{% code-placeholders "ADMIN_(USERNAME|PASSWORD)|ORG_NAME|BUCKET_NAME" %}} From 683dfe233c9a3e7ee331d94e92abef82dbeeb421 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 19 Aug 2025 08:59:01 -0500 Subject: [PATCH 092/122] fix(v2): replace broken link fragment with example and page link --- content/influxdb/v2/install/_index.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/content/influxdb/v2/install/_index.md b/content/influxdb/v2/install/_index.md index 2896900cf..60b60d938 100644 --- a/content/influxdb/v2/install/_index.md +++ b/content/influxdb/v2/install/_index.md @@ -731,7 +731,8 @@ and _[Operator token](/influxdb/v2/admin/tokens/#operator-token)_, and logs to s You can view the Operator token in the `/etc/influxdb2/influx-configs` file and use it to authorize -[creating an All Access token](#optional-create-all-access-tokens). +[creating an All Access token](#examples). +For more information, see [API token types](/influxdb/v2/admin/tokens/#api-token-types). _To run the InfluxDB container in [detached mode](https://docs.docker.com/engine/reference/run/#detached-vs-foreground), @@ -761,6 +762,13 @@ docker exec -it ` +```bash +# Create an All Access token +docker exec -it influxdb2 influx auth create \ + --all-access \ + --token OPERATOR_TOKEN +``` + ```bash # List CLI configurations docker exec -it influxdb2 influx config ls From de021b48ebf0cd5e31e16a6e2581d10496209afb Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 19 Aug 2025 09:01:59 -0500 Subject: [PATCH 093/122] fix(telegraf): broken link fragment --- content/telegraf/v1/install.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/telegraf/v1/install.md b/content/telegraf/v1/install.md index 9aa40d9cd..a8ca6fb87 100644 --- a/content/telegraf/v1/install.md +++ b/content/telegraf/v1/install.md @@ -329,7 +329,7 @@ Replace the following: Choose from the following options to install Telegraf binary files for Linux ARM: - To install on Linux ARMv7(32-bit), see the [downloads page](https://www.influxdata.com/downloads/#telegraf). 
-- [Download and install on Linux ARMv8 (64-bit)](#download-and-install-on-linux-arm-64) +- [Download and install on Linux ARMv8 (64-bit)](#download-and-install-on-linux-armv8) ### Download and install on Linux ARMv8 From b90b20314872eaaa30e136a5180a5b2ca8250c4f Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 19 Aug 2025 09:02:50 -0500 Subject: [PATCH 094/122] fix(telegraf): broken link fragment --- content/telegraf/v1/install.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/telegraf/v1/install.md b/content/telegraf/v1/install.md index a8ca6fb87..ac64463fb 100644 --- a/content/telegraf/v1/install.md +++ b/content/telegraf/v1/install.md @@ -15,7 +15,7 @@ To install Telegraf, do the following: - [Review requirements](#requirements) - [Download and install Telegraf](#download-and-install-telegraf) -- [Custom compile Telegraf](#custom-compile) +- [Custom compile Telegraf](#custom-compile-telegraf) ## Requirements From 4f807c9eb6e981c7de5fa0c3845c7efe8fbf673a Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 19 Aug 2025 09:06:06 -0500 Subject: [PATCH 095/122] fix(v2): broken-link-fragment --- content/telegraf/v1/install.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/telegraf/v1/install.md b/content/telegraf/v1/install.md index ac64463fb..e09b2312b 100644 --- a/content/telegraf/v1/install.md +++ b/content/telegraf/v1/install.md @@ -121,7 +121,7 @@ InfluxData uses [GPG (GnuPG)](https://www.gnupg.org/software/) to sign released public key and encrypted private key (`.key` file) pairs that you can use to verify the integrity of packages and binaries from the InfluxData repository. -Before running the [install](#install) sample code, substitute the key-pair compatible with your OS version: +Before running the [install](#download-and-install-instructions) sample code, substitute the key-pair compatible with your OS version: For newer OS releases (for example, Ubuntu 20.04 LTS and newer, Debian Buster and newer) that support subkey verification: From 9d14efe92e90b990ba42426f6163ebbda86a3d6d Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 19 Aug 2025 09:07:37 -0500 Subject: [PATCH 096/122] fix(v2): broken-link-fragment --- content/telegraf/v1/install.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/telegraf/v1/install.md b/content/telegraf/v1/install.md index e09b2312b..1bea2c167 100644 --- a/content/telegraf/v1/install.md +++ b/content/telegraf/v1/install.md @@ -627,7 +627,7 @@ Use the Telegraf custom builder tool to compile Telegraf with only the plugins y ### Prerequisites - Follow the instructions to install [Go](https://go.dev/) for your system. -- [Create your Telegraf configuration file](#generate-a-custom-configuration-file) with the plugins you want to use. +- [Create your Telegraf configuration file](#generate-a-configuration-file) with the plugins you want to use. ### Build the custom builder tool From 8754468dbd6482ae541ed0b74179eb2ad8cc53e2 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 19 Aug 2025 09:08:40 -0500 Subject: [PATCH 097/122] fix(v2): broken-link-fragment --- content/telegraf/v1/install.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/telegraf/v1/install.md b/content/telegraf/v1/install.md index 1bea2c167..5ea2b1e1f 100644 --- a/content/telegraf/v1/install.md +++ b/content/telegraf/v1/install.md @@ -388,7 +388,7 @@ To install using Homebrew, do the following: 3. 
Choose one of the following methods to start Telegraf and begin collecting and
   processing metrics:
 
   - [Run Telegraf in your terminal](#run-telegraf-in-your-terminal)
-  - [Run Telegraf as a service](#run-telegraf-as-a-service)
+  - [Run Telegraf as a service](#run-telegraf-as-a-background-service)
 
 ### Run Telegraf in your terminal
 
From 7d95a3f95b9178edabaa8715f7c3ccbb0fc0c5ef Mon Sep 17 00:00:00 2001
From: Jason Stirnaman
Date: Mon, 4 Aug 2025 14:05:24 -0500
Subject: [PATCH 098/122] chore(qol): Copilot no longer uses instruction
 settings; it automatically detects instructions files and PRs.

---
 .vscode/settings.json | 11 -----------
 1 file changed, 11 deletions(-)

diff --git a/.vscode/settings.json b/.vscode/settings.json
index 2c18d3282..c827452b9 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -14,17 +14,6 @@
   },
   "vale.valeCLI.config": "${workspaceFolder}/.vale.ini",
   "vale.valeCLI.minAlertLevel": "warning",
-  "github.copilot.chat.codeGeneration.useInstructionFiles": true,
-  "github.copilot.chat.codeGeneration.instructions": [
-    {
-      "file": "${workspaceFolder}/.github/copilot-instructions.md",
-    }
-  ],
-  "github.copilot.chat.pullRequestDescriptionGeneration.instructions": [
-    {
-      "file": "${workspaceFolder}/.github/copilot-instructions.md",
-    }
-  ],
   "cSpell.words": [
     "influxctl"
   ]

From b2aab8ad43096b57a246acc5353ca4c472074cf3 Mon Sep 17 00:00:00 2001
From: Jason Stirnaman
Date: Mon, 4 Aug 2025 14:43:48 -0500
Subject: [PATCH 099/122] test: When using env_file, the variables are loaded
 directly into the container's environment, so you don't need to use the
 `${VARIABLE}` interpolation syntax. Removed the braces.

---
 compose.yaml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/compose.yaml b/compose.yaml
index cd466f6e3..ea11e03cb 100644
--- a/compose.yaml
+++ b/compose.yaml
@@ -349,7 +349,6 @@ services:
       - --data-dir=/var/lib/influxdb3/data
       - --plugin-dir=/var/lib/influxdb3/plugins
     environment:
-      - INFLUXDB3_ENTERPRISE_LICENSE_EMAIL=${INFLUXDB3_ENTERPRISE_LICENSE_EMAIL}
       - INFLUXDB3_AUTH_TOKEN=/run/secrets/influxdb3-enterprise-admin-token
     volumes:
       - type: bind

From c708bd865825c57ab2c3561ecc83c93dab8f8e7d Mon Sep 17 00:00:00 2001
From: Jason Stirnaman
Date: Mon, 4 Aug 2025 15:22:02 -0500
Subject: [PATCH 100/122] chore(qol): audit-cli-documentation.js should
 dynamically get top-level commands and recurse through --help.

Both products work together: running `node audit-cli-documentation.js both`
successfully audits both Core and Enterprise. Core templates use
Core-specific frontmatter; Enterprise templates use Enterprise-specific
frontmatter.

Fixes audit-cli-documentation.js so that it parses commands dynamically from
the CLI output. Some commands () only return top-level help output, which the
script had some difficulty with. That seems mostly resolved, but might recur.
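The discovery the script now automates is roughly what you'd do by hand — a
sketch of the manual equivalent, assuming the influxdb3-core compose service
is up (the indentation-based grep is a rough filter and can over-match, which
is why the script parses the Commands: section headers instead):

```bash
# Print top-level help, then fetch help for each discovered subcommand
docker compose exec -T influxdb3-core influxdb3 --help |
  grep -E '^ +[a-z][a-z0-9_-]*' | awk '{print $1}' | sort -u |
  while read -r cmd; do
    echo "===== influxdb3 $cmd --help ====="
    docker compose exec -T influxdb3-core influxdb3 "$cmd" --help
  done
```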
--- .../audit-cli-documentation.js | 446 ++++++++++++++---- 1 file changed, 343 insertions(+), 103 deletions(-) diff --git a/helper-scripts/influxdb3-monolith/audit-cli-documentation.js b/helper-scripts/influxdb3-monolith/audit-cli-documentation.js index 74e1af565..ec22a453f 100755 --- a/helper-scripts/influxdb3-monolith/audit-cli-documentation.js +++ b/helper-scripts/influxdb3-monolith/audit-cli-documentation.js @@ -51,39 +51,9 @@ class CLIDocAuditor { ); } - // Commands to extract help for - this.mainCommands = [ - 'create', - 'delete', - 'disable', - 'enable', - 'query', - 'show', - 'test', - 'update', - 'write', - ]; - this.subcommands = [ - 'create database', - 'create token admin', - 'create token', - 'create trigger', - 'create last_cache', - 'create distinct_cache', - 'create table', - 'show databases', - 'show tokens', - 'show system', - 'delete database', - 'delete table', - 'delete trigger', - 'update database', - 'test wal_plugin', - 'test schedule_plugin', - ]; - - // Map for command tracking during option parsing - this.commandOptionsMap = {}; + // Dynamic command discovery - populated by discoverCommands() + this.discoveredCommands = new Map(); // command -> { subcommands: [], options: [] } + this.commandOptionsMap = {}; // For backward compatibility } async fileExists(path) { @@ -154,6 +124,238 @@ class CLIDocAuditor { }); } + async ensureContainerRunning(product) { + const containerName = `influxdb3-${product}`; + + // Check if container exists and is running + const { code, stdout } = await this.runCommand('docker', [ + 'compose', + 'ps', + '--format', + 'json', + containerName, + ]); + + if (code !== 0) { + console.log(`❌ Failed to check container status for ${containerName}`); + return false; + } + + const containers = stdout.trim().split('\n').filter((line) => line); + const isRunning = containers.some((line) => { + try { + const container = JSON.parse(line); + return container.Name === containerName && container.State === 'running'; + } catch { + return false; + } + }); + + if (!isRunning) { + console.log(`🚀 Starting ${containerName}...`); + const startResult = await this.runCommand('docker', [ + 'compose', + 'up', + '-d', + containerName, + ]); + + if (startResult.code !== 0) { + console.log(`❌ Failed to start ${containerName}`); + console.log(startResult.stderr); + return false; + } + + // Wait for container to be ready + console.log(`⏳ Waiting for ${containerName} to be ready...`); + await new Promise((resolve) => setTimeout(resolve, 5000)); + } + + return true; + } + + async discoverCommands(product) { + const containerName = `influxdb3-${product}`; + + // Ensure container is running + if (!(await this.ensureContainerRunning(product))) { + throw new Error(`Failed to start container ${containerName}`); + } + + // Get main help to discover top-level commands + const mainHelp = await this.runCommand('docker', [ + 'compose', + 'exec', + '-T', + containerName, + 'influxdb3', + '--help', + ]); + + if (mainHelp.code !== 0) { + console.error(`Failed to get main help. 
Exit code: ${mainHelp.code}`); + console.error(`Stdout: ${mainHelp.stdout}`); + console.error(`Stderr: ${mainHelp.stderr}`); + throw new Error(`Failed to get main help: ${mainHelp.stderr}`); + } + + // Parse main commands from help output + const mainCommands = this.parseCommandsFromHelp(mainHelp.stdout); + + // Also add the root command first + this.discoveredCommands.set('influxdb3', { + subcommands: mainCommands, + options: this.parseOptionsFromHelp(mainHelp.stdout), + helpText: mainHelp.stdout, + }); + + // For backward compatibility + this.commandOptionsMap['influxdb3'] = this.parseOptionsFromHelp(mainHelp.stdout); + + // Discover subcommands and options for each main command + for (const command of mainCommands) { + await this.discoverSubcommands(containerName, command, [command]); + } + } + + parseCommandsFromHelp(helpText) { + const commands = []; + // Strip ANSI color codes first + // eslint-disable-next-line no-control-regex + const cleanHelpText = helpText.replace(/\x1b\[[0-9;]*m/g, ''); + const lines = cleanHelpText.split('\n'); + let inCommandsSection = false; + + for (const line of lines) { + const trimmed = line.trim(); + + // Look for any Commands section + if (trimmed.includes('Commands:') || trimmed === 'Resource Management:' || + trimmed === 'System Management:') { + inCommandsSection = true; + continue; + } + + // Stop at next section (but don't stop on management sections) + if (inCommandsSection && /^[A-Z][a-z]+:$/.test(trimmed) && + !trimmed.includes('Commands:') && + trimmed !== 'Resource Management:' && + trimmed !== 'System Management:') { + break; + } + + // Parse command lines (typically indented with command name) + if (inCommandsSection && /^\s+[a-z]/.test(line)) { + const match = line.match(/^\s+([a-z][a-z0-9_-]*)/); + if (match) { + commands.push(match[1]); + } + } + } + + return commands; + } + + parseOptionsFromHelp(helpText) { + const options = []; + const lines = helpText.split('\n'); + let inOptionsSection = false; + + for (const line of lines) { + const trimmed = line.trim(); + + // Look for Options: section + if (trimmed === 'Options:') { + inOptionsSection = true; + continue; + } + + // Stop at next section + if (inOptionsSection && /^[A-Z][a-z]+:$/.test(trimmed)) { + break; + } + + // Parse option lines + if (inOptionsSection && /^\s*-/.test(line)) { + const optionMatch = line.match(/--([a-z][a-z0-9-]*)/); + if (optionMatch) { + options.push(`--${optionMatch[1]}`); + } + } + } + + return options; + } + + async discoverSubcommands(containerName, commandPath, commandParts) { + // Get help for this command + + // First try with --help + let helpResult = await this.runCommand('docker', [ + 'compose', + 'exec', + '-T', + containerName, + 'influxdb3', + ...commandParts, + '--help', + ]); + + // If --help returns main help or fails, try without --help + if (helpResult.code !== 0 || helpResult.stdout.includes('InfluxDB 3 Core Server and Command Line Tools')) { + helpResult = await this.runCommand('docker', [ + 'compose', + 'exec', + '-T', + containerName, + 'influxdb3', + ...commandParts, + ]); + } + + if (helpResult.code !== 0) { + // Check if stderr contains useful help information + if (helpResult.stderr && helpResult.stderr.includes('Usage:') && helpResult.stderr.includes('Commands:')) { + // Use stderr as the help text since it contains the command usage info + helpResult = { code: 0, stdout: helpResult.stderr, stderr: '' }; + } else { + // Command might not exist or might not have subcommands + return; + } + } + + // If the result is still the main 
help, skip this command + if (helpResult.stdout.includes('InfluxDB 3 Core Server and Command Line Tools')) { + return; + } + + const helpText = helpResult.stdout; + const subcommands = this.parseCommandsFromHelp(helpText); + const options = this.parseOptionsFromHelp(helpText); + + // Store the command info + const fullCommand = `influxdb3 ${commandParts.join(' ')}`; + this.discoveredCommands.set(fullCommand, { + subcommands, + options, + helpText, + }); + + // For backward compatibility + this.commandOptionsMap[fullCommand] = options; + + // Recursively discover subcommands (but limit depth) + if (subcommands.length > 0 && commandParts.length < 3) { + for (const subcommand of subcommands) { + await this.discoverSubcommands( + containerName, + `${commandPath} ${subcommand}`, + [...commandParts, subcommand] + ); + } + } + } + async extractCurrentCLI(product, outputFile) { process.stdout.write( `Extracting current CLI help from influxdb3-${product}...` @@ -164,57 +366,30 @@ class CLIDocAuditor { if (this.version === 'local') { const containerName = `influxdb3-${product}`; - // Check if container is running - const { code, stdout } = await this.runCommand('docker', [ - 'ps', - '--format', - '{{.Names}}', - ]); - if (code !== 0 || !stdout.includes(containerName)) { + // Ensure container is running and discover commands + if (!(await this.ensureContainerRunning(product))) { console.log(` ${Colors.RED}✗${Colors.NC}`); - console.log(`Error: Container ${containerName} is not running.`); - console.log(`Start it with: docker compose up -d influxdb3-${product}`); return false; } - // Extract comprehensive help - let fileContent = ''; + // Discover all commands dynamically + await this.discoverCommands(product); - // Main help - const mainHelp = await this.runCommand('docker', [ - 'exec', - containerName, - 'influxdb3', - '--help', - ]); - fileContent += mainHelp.code === 0 ? mainHelp.stdout : mainHelp.stderr; - - // Extract all subcommand help - for (const cmd of this.mainCommands) { - fileContent += `\n\n===== influxdb3 ${cmd} --help =====\n`; - const cmdHelp = await this.runCommand('docker', [ - 'exec', - containerName, - 'influxdb3', - cmd, - '--help', - ]); - fileContent += cmdHelp.code === 0 ? cmdHelp.stdout : cmdHelp.stderr; + // Generate comprehensive help output + let fileContent = `===== influxdb3 --help =====\n`; + + // Add root command help first + const rootCommand = this.discoveredCommands.get('influxdb3'); + if (rootCommand) { + fileContent += rootCommand.helpText; } - // Extract detailed subcommand help - for (const subcmd of this.subcommands) { - fileContent += `\n\n===== influxdb3 ${subcmd} --help =====\n`; - const cmdParts = [ - 'exec', - containerName, - 'influxdb3', - ...subcmd.split(' '), - '--help', - ]; - const subcmdHelp = await this.runCommand('docker', cmdParts); - fileContent += - subcmdHelp.code === 0 ? subcmdHelp.stdout : subcmdHelp.stderr; + // Add all other discovered command help + for (const [command, info] of this.discoveredCommands) { + if (command !== 'influxdb3') { + fileContent += `\n\n===== ${command} --help =====\n`; + fileContent += info.helpText; + } } await fs.writeFile(outputFile, fileContent); @@ -233,7 +408,8 @@ class CLIDocAuditor { return false; } - // Extract help from specific version + // For version-specific images, we'll use a simpler approach + // since we can't easily discover commands without a running container let fileContent = ''; // Main help @@ -246,8 +422,12 @@ class CLIDocAuditor { ]); fileContent += mainHelp.code === 0 ? 
mainHelp.stdout : mainHelp.stderr; - // Extract subcommand help - for (const cmd of this.mainCommands) { + // Parse main commands and get their help + const mainCommands = this.parseCommandsFromHelp( + mainHelp.code === 0 ? mainHelp.stdout : mainHelp.stderr + ); + + for (const cmd of mainCommands) { fileContent += `\n\n===== influxdb3 ${cmd} --help =====\n`; const cmdHelp = await this.runCommand('docker', [ 'run', @@ -258,6 +438,25 @@ class CLIDocAuditor { '--help', ]); fileContent += cmdHelp.code === 0 ? cmdHelp.stdout : cmdHelp.stderr; + + // Try to get subcommands + const subcommands = this.parseCommandsFromHelp( + cmdHelp.code === 0 ? cmdHelp.stdout : cmdHelp.stderr + ); + + for (const subcmd of subcommands) { + fileContent += `\n\n===== influxdb3 ${cmd} ${subcmd} --help =====\n`; + const subcmdHelp = await this.runCommand('docker', [ + 'run', + '--rm', + image, + 'influxdb3', + cmd, + subcmd, + '--help', + ]); + fileContent += subcmdHelp.code === 0 ? subcmdHelp.stdout : subcmdHelp.stderr; + } } await fs.writeFile(outputFile, fileContent); @@ -284,8 +483,10 @@ class CLIDocAuditor { .trim(); output += `## ${currentCommand}\n\n`; inOptions = false; - // Initialize options list for this command - this.commandOptionsMap[currentCommand] = []; + // Initialize options list for this command if not exists + if (!this.commandOptionsMap[currentCommand]) { + this.commandOptionsMap[currentCommand] = []; + } } // Detect options sections else if (line.trim() === 'Options:') { @@ -343,7 +544,7 @@ class CLIDocAuditor { const lines = content.split('\n'); let inCommand = false; let helpText = []; - const commandHeader = `===== influxdb3 ${command} --help =====`; + const commandHeader = `===== influxdb3 ${command} --help`; for (let i = 0; i < lines.length; i++) { if (lines[i] === commandHeader) { @@ -361,7 +562,7 @@ class CLIDocAuditor { return helpText.join('\n').trim(); } - async generateDocumentationTemplate(command, helpText) { + async generateDocumentationTemplate(command, helpText, product) { // Parse the help text to extract description and options const lines = helpText.split('\n'); let description = ''; @@ -402,14 +603,18 @@ class CLIDocAuditor { } } + // Generate product-specific frontmatter + const productTag = product === 'enterprise' ? 'influxdb3/enterprise' : 'influxdb3/core'; + const menuRef = product === 'enterprise' ? 'influxdb3_enterprise_reference' : 'influxdb3_core_reference'; + // Generate markdown template let template = `--- title: influxdb3 ${command} description: > The \`influxdb3 ${command}\` command ${description.toLowerCase()}. 
-influxdb3/core/tags: [cli] +${productTag}/tags: [cli] menu: - influxdb3_core_reference: + ${menuRef}: parent: influxdb3 cli weight: 201 --- @@ -587,22 +792,8 @@ Replace the following: let missingCount = 0; const missingDocs = []; - // Map commands to expected documentation files - const commandToFile = { - 'create database': 'create/database.md', - 'create token': 'create/token/_index.md', - 'create token admin': 'create/token/admin.md', - 'create trigger': 'create/trigger.md', - 'create table': 'create/table.md', - 'create last_cache': 'create/last_cache.md', - 'create distinct_cache': 'create/distinct_cache.md', - 'show databases': 'show/databases.md', - 'show tokens': 'show/tokens.md', - 'delete database': 'delete/database.md', - 'delete table': 'delete/table.md', - query: 'query.md', - write: 'write.md', - }; + // Build command to file mapping dynamically from discovered commands + const commandToFile = this.buildCommandToFileMapping(); // Extract commands from CLI help const content = await fs.readFile(cliFile, 'utf8'); @@ -666,7 +857,8 @@ Replace the following: const helpText = await this.extractCommandHelp(content, command); const docTemplate = await this.generateDocumentationTemplate( command, - helpText + helpText, + product ); // Save patch file @@ -847,6 +1039,54 @@ Replace the following: } } + buildCommandToFileMapping() { + // Build a mapping from discovered commands to expected documentation files + const mapping = {}; + + // Common patterns for command to file mapping + const patterns = { + 'create database': 'create/database.md', + 'create token': 'create/token/_index.md', + 'create token admin': 'create/token/admin.md', + 'create trigger': 'create/trigger.md', + 'create table': 'create/table.md', + 'create last_cache': 'create/last_cache.md', + 'create distinct_cache': 'create/distinct_cache.md', + 'show databases': 'show/databases.md', + 'show tokens': 'show/tokens.md', + 'show system': 'show/system.md', + 'delete database': 'delete/database.md', + 'delete table': 'delete/table.md', + 'delete trigger': 'delete/trigger.md', + 'update database': 'update/database.md', + 'test wal_plugin': 'test/wal_plugin.md', + 'test schedule_plugin': 'test/schedule_plugin.md', + query: 'query.md', + write: 'write.md', + }; + + // Add discovered commands that match patterns + for (const [command, info] of this.discoveredCommands) { + const cleanCommand = command.replace('influxdb3 ', ''); + if (patterns[cleanCommand]) { + mapping[cleanCommand] = patterns[cleanCommand]; + } else if (cleanCommand !== '' && cleanCommand.includes(' ')) { + // Generate file path for subcommands + const parts = cleanCommand.split(' '); + if (parts.length === 2) { + mapping[cleanCommand] = `${parts[0]}/${parts[1]}.md`; + } else if (parts.length === 3) { + mapping[cleanCommand] = `${parts[0]}/${parts[1]}/${parts[2]}.md`; + } + } else if (cleanCommand !== '' && !cleanCommand.includes(' ')) { + // Single command + mapping[cleanCommand] = `${cleanCommand}.md`; + } + } + + return mapping; + } + async run() { console.log( `${Colors.BLUE}🔍 InfluxDB 3 CLI Documentation Audit${Colors.NC}` From 92210137b20a5f66afe45faa67c2f98ab73207f6 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 5 Aug 2025 13:48:07 -0500 Subject: [PATCH 101/122] chore(qol): Make agents more collaborative and not automatically agreeable. 
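This also replaces the yarn test:links examples with the link-checker binary.
For reference, the full corrected pipeline (a usage sketch, assuming the
binary from the docs-v2 release is on your PATH):

```bash
# Map content files to their public URLs, then check them, emitting JSON
link-checker map content/influxdb3/core/**/*.md \
| link-checker check \
  --config .ci/link-checker/production.lycherc.toml \
  --format json
```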
---
 .github/copilot-instructions.md | 33 +++++++++++++++------------------
 1 file changed, 15 insertions(+), 18 deletions(-)

diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md
index d0fc9113f..4a541203f 100644
--- a/.github/copilot-instructions.md
+++ b/.github/copilot-instructions.md
@@ -4,6 +4,10 @@ Always follow these instructions first and fallback to additional search and con
 
 ## Working Effectively
 
+### Collaboration approach
+
+Be a critical thinking partner, provide honest feedback, and identify potential issues.
+
 ### Bootstrap, Build, and Test the Repository
 
 Execute these commands in order to set up a complete working environment:
@@ -54,16 +58,18 @@
 yarn test:codeblocks:v2
 yarn test:codeblocks:telegraf
 ```
 
-#### Link Validation (takes 10-30 minutes, NEVER CANCEL - set timeout to 45+ minutes):
+#### Link Validation (takes 1-5 minutes):
+
+Runs automatically on pull requests.
+Requires the **link-checker** binary from the repo release artifacts.
 
 ```bash
-# Test all links (very long-running)
-yarn test:links
-
 # Test specific files/products (faster)
-yarn test:links content/influxdb3/core/**/*.md
-yarn test:links:v3
-yarn test:links:v2
+# JSON format is required for accurate reporting
+link-checker map content/influxdb3/core/**/*.md \
+| link-checker check \
+  --config .ci/link-checker/production.lycherc.toml \
+  --format json
 ```
 
 #### Style Linting (takes 30-60 seconds):
@@ -168,7 +174,8 @@ yarn test:links content/example.md
 - **Package Manager**: Yarn (1.22.22+) with Node.js (20.19.4+)
 - **Testing Framework**:
   - Pytest with pytest-codeblocks (for code examples)
-  - Cypress (for link validation and E2E tests)
+  - Cypress (for E2E tests)
+  - influxdata/docs-link-checker (for link validation)
   - Vale (for style and writing guidelines)
 - **Containerization**: Docker with Docker Compose
 - **Linting**: ESLint, Prettier, Vale
@@ -176,16 +183,6 @@ yarn test:links content/example.md
 
 ## Common Tasks and Build Times
 
-### Time Expectations (CRITICAL - NEVER CANCEL)
-
-- **Dependency installation**: 4 seconds
-- **Hugo static build**: 75 seconds (NEVER CANCEL - timeout: 180+ seconds)
-- **Hugo server startup**: 92 seconds (NEVER CANCEL - timeout: 150+ seconds)
-- **Code block tests**: 5-15 minutes per product (NEVER CANCEL - timeout: 30+ minutes)
-- **Link validation**: 10-30 minutes (NEVER CANCEL - timeout: 45+ minutes)
-- **Style linting**: 30-60 seconds
-- **Docker image build**: 30+ seconds (may fail due to network restrictions)
-
 ### Network Connectivity Issues
 
 In restricted environments, these commands may fail due to external dependency downloads:

From 3f4ad5fb376f50c806b583e61690992a3c350294 Mon Sep 17 00:00:00 2001
From: Jason Stirnaman
Date: Tue, 5 Aug 2025 16:20:03 -0500
Subject: [PATCH 102/122] chore(scripts): redo cli audit script: Moved to new
 tooling repo. Removed package scripts for now.
Script gets commands from source code and greps the docs for those commands:

- replaces the CLI audit script
- searches tagged repo branch source code for CLI commands
- searches docs content for the commands
- allows including and excluding "categories" of docs paths to search
---
 helper-scripts/influxdb3-monolith/README.md   |  373 -----
 .../influxdb3-monolith/apply-cli-patches.js   |  277 ----
 .../audit-cli-documentation.js                | 1214 -----------------
 .../influxdb3-monolith/setup-auth-tokens.sh   |  164 ---
 package.json                                  |    7 +-
 5 files changed, 1 insertion(+), 2034 deletions(-)
 delete mode 100644 helper-scripts/influxdb3-monolith/README.md
 delete mode 100755 helper-scripts/influxdb3-monolith/apply-cli-patches.js
 delete mode 100755 helper-scripts/influxdb3-monolith/audit-cli-documentation.js
 delete mode 100644 helper-scripts/influxdb3-monolith/setup-auth-tokens.sh

diff --git a/helper-scripts/influxdb3-monolith/README.md b/helper-scripts/influxdb3-monolith/README.md
deleted file mode 100644
index 34ce14c4c..000000000
--- a/helper-scripts/influxdb3-monolith/README.md
+++ /dev/null
@@ -1,373 +0,0 @@
-# InfluxDB 3 Monolith (Core and Enterprise) Helper Scripts
-
-This directory contains helper scripts specifically for InfluxDB 3 Core and Enterprise (monolith deployments), as opposed to distributed/clustered deployments.
-
-## Overview
-
-These scripts help with documentation workflows for InfluxDB 3 Core and Enterprise, including CLI change detection, authentication setup, API analysis, and release preparation.
-
-## Prerequisites
-
-- **Docker and Docker Compose**: For running InfluxDB 3 containers
-- **Node.js 16+**: For running JavaScript ESM scripts
-- **Active containers**: InfluxDB 3 Core and/or Enterprise containers running via `docker compose`
-- **Secret files**: Docker Compose secrets for auth tokens (`~/.env.influxdb3-core-admin-token` and `~/.env.influxdb3-enterprise-admin-token`)
-
-## Scripts
-
-### 🔐 Authentication & Setup
-
-#### `setup-auth-tokens.sh`
-Creates and configures authentication tokens for InfluxDB 3 containers.
-
-**Usage:**
-```bash
-./setup-auth-tokens.sh [core|enterprise|both]
-```
-
-**What it does:**
-- Checks existing tokens in secret files (`~/.env.influxdb3-core-admin-token` and `~/.env.influxdb3-enterprise-admin-token`)
-- Starts containers if not running
-- Creates admin tokens using `influxdb3 create token --admin`
-- Updates appropriate secret files with new tokens
-- Tests tokens to ensure they work
-
-**Example:**
-```bash
-# Set up both Core and Enterprise tokens
-./setup-auth-tokens.sh both
-
-# Set up only Enterprise
-./setup-auth-tokens.sh enterprise
-```
-
-### 🔍 CLI Documentation Audit
-
-#### `audit-cli-documentation.js`
-JavaScript ESM script that audits InfluxDB 3 CLI commands against existing documentation to identify missing or outdated content.
- -**Usage:** -```bash -node audit-cli-documentation.js [core|enterprise|both] [version|local] -``` - -**Features:** -- Compares actual CLI help output with documented commands -- Identifies missing documentation for new CLI options -- Finds documented options that no longer exist in the CLI -- Supports both released versions and local containers -- Generates detailed audit reports with recommendations -- Handles authentication automatically using Docker secrets - -**Examples:** -```bash -# Audit Core documentation against local container -node audit-cli-documentation.js core local - -# Audit Enterprise documentation against specific version -node audit-cli-documentation.js enterprise v3.2.0 - -# Audit both products against local containers -node audit-cli-documentation.js both local -``` - -**Output:** -- `../output/cli-audit/documentation-audit-{product}-{version}.md` - Detailed audit report -- `../output/cli-audit/parsed-cli-{product}-{version}.md` - Parsed CLI structure -- `../output/cli-audit/patches/{product}/` - Generated patches for missing documentation - -### 🛠️ CLI Documentation Updates - -#### `apply-cli-patches.js` -JavaScript ESM script that applies generated patches to update CLI documentation with missing options. - -**Usage:** -```bash -node apply-cli-patches.js [core|enterprise|both] [--dry-run] -``` - -**Features:** -- Applies patches generated by `audit-cli-documentation.js` -- Updates CLI reference documentation with missing options -- Supports dry-run mode to preview changes -- Maintains existing documentation structure and formatting -- Creates backups before applying changes - -**Examples:** -```bash -# Preview changes without applying (dry run) -node apply-cli-patches.js core --dry-run - -# Apply patches to Enterprise documentation -node apply-cli-patches.js enterprise - -# Apply patches to both products -node apply-cli-patches.js both -``` - -**Output:** -- Updates CLI reference documentation files in place -- Creates backup files with `.backup` extension -- Logs all changes made to the documentation - -## Quick Start Guide - -### 1. Initial Setup - -```bash -# Navigate to the monolith scripts directory -cd helper-scripts/influxdb3-monolith - -# Make scripts executable -chmod +x *.sh - -# Set up authentication for both products -./setup-auth-tokens.sh both - -# Restart containers to load new secrets -docker compose down && docker compose up -d influxdb3-core influxdb3-enterprise -``` - -### 2. CLI Documentation Audit - -```bash -# Start your containers -docker compose up -d influxdb3-core influxdb3-enterprise - -# Audit CLI documentation -node audit-cli-documentation.js core local -node audit-cli-documentation.js enterprise local - -# Review the output -ls ../output/cli-audit/ -``` - -### 3. Development Workflow - -```bash -# Audit documentation for both products -node audit-cli-documentation.js both local - -# Check the audit results -cat ../output/cli-audit/documentation-audit-core-local.md -cat ../output/cli-audit/documentation-audit-enterprise-local.md - -# Apply patches if needed (dry run first) -node apply-cli-patches.js both --dry-run -``` - -### 4. 
Release Documentation Updates - -For release documentation, use the audit and patch workflow: - -```bash -# Audit against released version -node audit-cli-documentation.js enterprise v3.2.0 - -# Review missing documentation -cat ../output/cli-audit/documentation-audit-enterprise-v3.2.0.md - -# Apply patches to update documentation -node apply-cli-patches.js enterprise - -# Verify changes look correct -git diff content/influxdb3/enterprise/reference/cli/ -``` - -## Container Integration - -The scripts work with your Docker Compose setup: - -**Expected container names:** -- `influxdb3-core` (port 8282) -- `influxdb3-enterprise` (port 8181) - -**Docker Compose secrets:** -- `influxdb3-core-admin-token` - Admin token for Core (stored in `~/.env.influxdb3-core-admin-token`) -- `influxdb3-enterprise-admin-token` - Admin token for Enterprise (stored in `~/.env.influxdb3-enterprise-admin-token`) -- `INFLUXDB3_LICENSE_EMAIL` - Enterprise license email (set in `.env.3ent` env_file) - -## Use Cases - -### 📋 Release Documentation - -1. **Pre-release audit:** - ```bash - node audit-cli-documentation.js core v3.2.0 - ``` - -2. **Review audit results and update documentation** -3. **Apply patches for missing content** -4. **Test documented commands work correctly** - -### 🔬 Development Testing - -1. **Audit local development:** - ```bash - node audit-cli-documentation.js enterprise local - ``` - -2. **Verify new features are documented** -3. **Test authentication setup** -4. **Apply patches to keep docs current** - -### 🚀 Release Preparation - -1. **Final audit before release:** - ```bash - node audit-cli-documentation.js both local - ``` - -2. **Apply all pending patches** -3. **Update examples and tutorials** -4. **Verify all CLI commands work as documented** - -## Output Structure - -``` -helper-scripts/ -├── output/ -│ └── cli-audit/ -│ ├── documentation-audit-core-local.md # CLI documentation audit report -│ ├── documentation-audit-enterprise-v3.2.0.md # CLI documentation audit report -│ ├── parsed-cli-core-local.md # Parsed CLI structure -│ ├── parsed-cli-enterprise-v3.2.0.md # Parsed CLI structure -│ └── patches/ -│ ├── core/ # Generated patches for Core -│ │ ├── influxdb3-cli-patch-001.md -│ │ └── influxdb3-cli-patch-002.md -│ └── enterprise/ # Generated patches for Enterprise -│ ├── influxdb3-cli-patch-001.md -│ └── influxdb3-cli-patch-002.md -└── influxdb3-monolith/ - ├── README.md # This file - ├── setup-auth-tokens.sh # Auth setup - ├── audit-cli-documentation.js # CLI documentation audit - └── apply-cli-patches.js # CLI documentation patches -``` - -## Error Handling - -### Common Issues - -**Container not running:** -```bash -# Check status -docker compose ps - -# Start specific service -docker compose up -d influxdb3-core -``` - -**Authentication failures:** -```bash -# Recreate tokens -./setup-auth-tokens.sh both - -# Test manually -docker exec influxdb3-core influxdb3 create token --admin -``` - -**Version not found:** -```bash -# Check available versions -docker pull influxdb:3-core:3.2.0 -docker pull influxdb:3-enterprise:3.2.0 -``` - -### Debug Mode - -Enable debug output for troubleshooting: -```bash -DEBUG=1 node audit-cli-documentation.js core local -``` - -## Integration with CI/CD - -### GitHub Actions Example - -```yaml -- name: Audit CLI Documentation - run: | - cd helper-scripts/influxdb3-monolith - node audit-cli-documentation.js core ${{ env.VERSION }} - -- name: Upload CLI Audit Results - uses: actions/upload-artifact@v3 - with: - name: cli-audit - path: 
helper-scripts/output/cli-audit/ -``` - -### CircleCI Example - -```yaml -- run: - name: CLI Documentation Audit - command: | - cd helper-scripts/influxdb3-monolith - node audit-cli-documentation.js enterprise v3.2.0 - -- store_artifacts: - path: helper-scripts/output/cli-audit/ -``` - -## Best Practices - -### 🔒 Security -- Secret files (`~/.env.influxdb3-*-admin-token`) are stored in your home directory and not in version control -- Rotate auth tokens regularly by re-running `setup-auth-tokens.sh` -- Use minimal token permissions when possible - -### 📚 Documentation -- Run audits early in release cycle -- Review all audit reports for missing content -- Apply patches to keep documentation current -- Test all documented commands work correctly - -### 🔄 Workflow -- Use `local` version for development testing -- Audit against released versions for release prep -- Generate patches before documentation updates -- Validate changes with stakeholders - -## Troubleshooting - -### Script Permissions -```bash -chmod +x *.sh -``` - -### Missing Dependencies -```bash -# Node.js dependencies -node --version # Should be 16 or higher - -# Docker Compose -docker compose version -``` - -### Container Health -```bash -# Check container logs -docker logs influxdb3-core -docker logs influxdb3-enterprise - -# Test basic connectivity -docker exec influxdb3-core influxdb3 --version -``` - -## Contributing - -When adding new scripts to this directory: - -1. **Follow naming conventions**: Use lowercase with hyphens -2. **Add usage documentation**: Include help text in scripts -3. **Handle errors gracefully**: Use proper exit codes -4. **Test with both products**: Ensure Core and Enterprise compatibility -5. **Update this README**: Document new functionality - -## Related Documentation - -- [InfluxDB 3 Core CLI Reference](/influxdb3/core/reference/cli/) -- [InfluxDB 3 Enterprise CLI Reference](/influxdb3/enterprise/reference/cli/) diff --git a/helper-scripts/influxdb3-monolith/apply-cli-patches.js b/helper-scripts/influxdb3-monolith/apply-cli-patches.js deleted file mode 100755 index 07c2f7d71..000000000 --- a/helper-scripts/influxdb3-monolith/apply-cli-patches.js +++ /dev/null @@ -1,277 +0,0 @@ -#!/usr/bin/env node - -/** - * Apply CLI documentation patches generated by audit-cli-documentation.js - * Usage: node apply-cli-patches.js [core|enterprise|both] [--dry-run] - */ - -import { promises as fs } from 'fs'; -import { join, dirname } from 'path'; -import { fileURLToPath } from 'url'; -import { process } from 'node:process'; - -const __filename = fileURLToPath(import.meta.url); -const __dirname = dirname(__filename); - -// Color codes -const Colors = { - RED: '\x1b[0;31m', - GREEN: '\x1b[0;32m', - YELLOW: '\x1b[1;33m', - BLUE: '\x1b[0;34m', - NC: '\x1b[0m', // No Color -}; - -async function fileExists(path) { - try { - await fs.access(path); - return true; - } catch { - return false; - } -} - -async function ensureDir(dir) { - await fs.mkdir(dir, { recursive: true }); -} - -async function extractFrontmatter(content) { - const lines = content.split('\n'); - if (lines[0] !== '---') return { frontmatter: null, content }; - - const frontmatterLines = []; - let i = 1; - while (i < lines.length && lines[i] !== '---') { - frontmatterLines.push(lines[i]); - i++; - } - - if (i >= lines.length) return { frontmatter: null, content }; - - const frontmatterText = frontmatterLines.join('\n'); - const remainingContent = lines.slice(i + 1).join('\n'); - - return { frontmatter: frontmatterText, content: remainingContent }; -} - 
-async function getActualDocumentationPath(docPath, projectRoot) { - // Check if the documentation file exists and has a source field - const fullPath = join(projectRoot, docPath); - - if (await fileExists(fullPath)) { - const content = await fs.readFile(fullPath, 'utf8'); - const { frontmatter } = await extractFrontmatter(content); - - if (frontmatter) { - // Look for source: field in frontmatter - const sourceMatch = frontmatter.match(/^source:\s*(.+)$/m); - if (sourceMatch) { - const sourcePath = sourceMatch[1].trim(); - return sourcePath; - } - } - } - - return docPath; -} - -async function applyPatches(product, dryRun = false) { - const patchDir = join( - dirname(__dirname), - 'output', - 'cli-audit', - 'patches', - product - ); - const projectRoot = join(__dirname, '..', '..'); - - console.log( - `${Colors.BLUE}📋 Applying CLI documentation patches for ${product}${Colors.NC}` - ); - if (dryRun) { - console.log( - `${Colors.YELLOW}🔍 DRY RUN - No files will be created${Colors.NC}` - ); - } - console.log(); - - // Check if patch directory exists - if (!(await fileExists(patchDir))) { - console.log(`${Colors.YELLOW}No patches found for ${product}.${Colors.NC}`); - console.log("Run 'yarn audit:cli' first to generate patches."); - return; - } - - // Read all patch files - const patchFiles = await fs.readdir(patchDir); - const mdFiles = patchFiles.filter((f) => f.endsWith('.md')); - - if (mdFiles.length === 0) { - console.log( - `${Colors.YELLOW}No patch files found in ${patchDir}${Colors.NC}` - ); - return; - } - - console.log(`Found ${mdFiles.length} patch file(s) to apply:\n`); - - // Map patch files to their destination - const baseCliPath = `content/influxdb3/${product}/reference/cli/influxdb3`; - const commandToFile = { - 'create-database.md': `${baseCliPath}/create/database.md`, - 'create-token.md': `${baseCliPath}/create/token/_index.md`, - 'create-token-admin.md': `${baseCliPath}/create/token/admin.md`, - 'create-trigger.md': `${baseCliPath}/create/trigger.md`, - 'create-table.md': `${baseCliPath}/create/table.md`, - 'create-last_cache.md': `${baseCliPath}/create/last_cache.md`, - 'create-distinct_cache.md': `${baseCliPath}/create/distinct_cache.md`, - 'show-databases.md': `${baseCliPath}/show/databases.md`, - 'show-tokens.md': `${baseCliPath}/show/tokens.md`, - 'delete-database.md': `${baseCliPath}/delete/database.md`, - 'delete-table.md': `${baseCliPath}/delete/table.md`, - 'query.md': `${baseCliPath}/query.md`, - 'write.md': `${baseCliPath}/write.md`, - }; - - let applied = 0; - let skipped = 0; - - for (const patchFile of mdFiles) { - const destinationPath = commandToFile[patchFile]; - - if (!destinationPath) { - console.log( - `${Colors.YELLOW}⚠️ Unknown patch file: ${patchFile}${Colors.NC}` - ); - continue; - } - - // Get the actual documentation path (handles source: frontmatter) - const actualPath = await getActualDocumentationPath( - destinationPath, - projectRoot - ); - const fullDestPath = join(projectRoot, actualPath); - const patchPath = join(patchDir, patchFile); - - // Check if destination already exists - if (await fileExists(fullDestPath)) { - console.log( - `${Colors.YELLOW}⏭️ Skipping${Colors.NC} ${patchFile} - destination already exists:` - ); - console.log(` ${actualPath}`); - skipped++; - continue; - } - - if (dryRun) { - console.log(`${Colors.BLUE}🔍 Would create${Colors.NC} ${actualPath}`); - console.log(` from patch: ${patchFile}`); - if (actualPath !== destinationPath) { - console.log(` (resolved from: ${destinationPath})`); - } - applied++; - } else { - try 
{ - // Ensure destination directory exists - await ensureDir(dirname(fullDestPath)); - - // Copy patch to destination - const content = await fs.readFile(patchPath, 'utf8'); - - // Update the menu configuration based on product - let updatedContent = content; - if (product === 'enterprise') { - updatedContent = content - .replace('influxdb3/core/tags:', 'influxdb3/enterprise/tags:') - .replace( - 'influxdb3_core_reference:', - 'influxdb3_enterprise_reference:' - ); - } - - await fs.writeFile(fullDestPath, updatedContent); - - console.log(`${Colors.GREEN}✅ Created${Colors.NC} ${actualPath}`); - console.log(` from patch: ${patchFile}`); - if (actualPath !== destinationPath) { - console.log(` (resolved from: ${destinationPath})`); - } - applied++; - } catch (error) { - console.log( - `${Colors.RED}❌ Error${Colors.NC} creating ${actualPath}:` - ); - console.log(` ${error.message}`); - } - } - } - - console.log(); - console.log(`${Colors.BLUE}Summary:${Colors.NC}`); - console.log(`- Patches ${dryRun ? 'would be' : ''} applied: ${applied}`); - console.log(`- Files skipped (already exist): ${skipped}`); - console.log(`- Total patch files: ${mdFiles.length}`); - - if (!dryRun && applied > 0) { - console.log(); - console.log( - `${Colors.GREEN}✨ Success!${Colors.NC} Created ${applied} new ` + - 'documentation file(s).' - ); - console.log(); - console.log('Next steps:'); - console.log('1. Review the generated files and customize the content'); - console.log('2. Add proper examples with placeholders'); - console.log('3. Update descriptions and add any missing options'); - console.log('4. Run tests: yarn test:links'); - } -} - -async function main() { - const args = process.argv.slice(2); - const product = - args.find((arg) => ['core', 'enterprise', 'both'].includes(arg)) || 'both'; - const dryRun = args.includes('--dry-run'); - - if (args.includes('--help') || args.includes('-h')) { - console.log( - 'Usage: node apply-cli-patches.js [core|enterprise|both] [--dry-run]' - ); - console.log(); - console.log('Options:'); - console.log( - ' --dry-run Show what would be done without creating files' - ); - console.log(); - console.log('Examples:'); - console.log( - ' node apply-cli-patches.js # Apply patches for both products' - ); - console.log( - ' node apply-cli-patches.js core --dry-run # Preview core patches' - ); - console.log( - ' node apply-cli-patches.js enterprise # Apply enterprise patches' - ); - process.exit(0); - } - - try { - if (product === 'both') { - await applyPatches('core', dryRun); - console.log(); - await applyPatches('enterprise', dryRun); - } else { - await applyPatches(product, dryRun); - } - } catch (error) { - console.error(`${Colors.RED}Error:${Colors.NC}`, error.message); - process.exit(1); - } -} - -// Run if called directly -if (import.meta.url === `file://${process.argv[1]}`) { - main(); -} diff --git a/helper-scripts/influxdb3-monolith/audit-cli-documentation.js b/helper-scripts/influxdb3-monolith/audit-cli-documentation.js deleted file mode 100755 index ec22a453f..000000000 --- a/helper-scripts/influxdb3-monolith/audit-cli-documentation.js +++ /dev/null @@ -1,1214 +0,0 @@ -#!/usr/bin/env node - -/** - * Audit CLI documentation against current CLI help output - * Usage: node audit-cli-documentation.js [core|enterprise|both] [version] - * Example: node audit-cli-documentation.js core 3.2.0 - */ - -import { spawn } from 'child_process'; -import { promises as fs } from 'fs'; -import { homedir } from 'os'; -import { join, dirname } from 'path'; -import { fileURLToPath } from 
'url'; -import { - validateVersionInputs, - getRepositoryRoot, -} from '../common/validate-tags.js'; - -const __filename = fileURLToPath(import.meta.url); -const __dirname = dirname(__filename); - -// Color codes -const Colors = { - RED: '\x1b[0;31m', - GREEN: '\x1b[0;32m', - YELLOW: '\x1b[1;33m', - BLUE: '\x1b[0;34m', - NC: '\x1b[0m', // No Color -}; - -class CLIDocAuditor { - constructor(product = 'both', version = 'local') { - this.product = product; - this.version = version; - this.outputDir = join(dirname(__dirname), 'output', 'cli-audit'); - - // Token paths - check environment variables first (Docker Compose), then fall back to local files - const coreTokenEnv = process.env.INFLUXDB3_CORE_TOKEN; - const enterpriseTokenEnv = process.env.INFLUXDB3_ENTERPRISE_TOKEN; - - if (coreTokenEnv && this.fileExists(coreTokenEnv)) { - // Running in Docker Compose with secrets - this.coreTokenFile = coreTokenEnv; - this.enterpriseTokenFile = enterpriseTokenEnv; - } else { - // Running locally - this.coreTokenFile = join(homedir(), '.env.influxdb3-core-admin-token'); - this.enterpriseTokenFile = join( - homedir(), - '.env.influxdb3-enterprise-admin-token' - ); - } - - // Dynamic command discovery - populated by discoverCommands() - this.discoveredCommands = new Map(); // command -> { subcommands: [], options: [] } - this.commandOptionsMap = {}; // For backward compatibility - } - - async fileExists(path) { - try { - await fs.access(path); - return true; - } catch { - return false; - } - } - - async ensureDir(dir) { - await fs.mkdir(dir, { recursive: true }); - } - - async loadTokens() { - let coreToken = null; - let enterpriseToken = null; - - try { - if (await this.fileExists(this.coreTokenFile)) { - const stat = await fs.stat(this.coreTokenFile); - if (stat.size > 0) { - coreToken = (await fs.readFile(this.coreTokenFile, 'utf8')).trim(); - } - } - } catch { - // Token file doesn't exist or can't be read - } - - try { - if (await this.fileExists(this.enterpriseTokenFile)) { - const stat = await fs.stat(this.enterpriseTokenFile); - if (stat.size > 0) { - enterpriseToken = ( - await fs.readFile(this.enterpriseTokenFile, 'utf8') - ).trim(); - } - } - } catch { - // Token file doesn't exist or can't be read - } - - return { coreToken, enterpriseToken }; - } - - runCommand(cmd, args = []) { - return new Promise((resolve) => { - const child = spawn(cmd, args, { encoding: 'utf8' }); - let stdout = ''; - let stderr = ''; - - child.stdout.on('data', (data) => { - stdout += data.toString(); - }); - - child.stderr.on('data', (data) => { - stderr += data.toString(); - }); - - child.on('close', (code) => { - resolve({ code, stdout, stderr }); - }); - - child.on('error', (err) => { - resolve({ code: 1, stdout: '', stderr: err.message }); - }); - }); - } - - async ensureContainerRunning(product) { - const containerName = `influxdb3-${product}`; - - // Check if container exists and is running - const { code, stdout } = await this.runCommand('docker', [ - 'compose', - 'ps', - '--format', - 'json', - containerName, - ]); - - if (code !== 0) { - console.log(`❌ Failed to check container status for ${containerName}`); - return false; - } - - const containers = stdout.trim().split('\n').filter((line) => line); - const isRunning = containers.some((line) => { - try { - const container = JSON.parse(line); - return container.Name === containerName && container.State === 'running'; - } catch { - return false; - } - }); - - if (!isRunning) { - console.log(`🚀 Starting ${containerName}...`); - const startResult = await 
this.runCommand('docker', [ - 'compose', - 'up', - '-d', - containerName, - ]); - - if (startResult.code !== 0) { - console.log(`❌ Failed to start ${containerName}`); - console.log(startResult.stderr); - return false; - } - - // Wait for container to be ready - console.log(`⏳ Waiting for ${containerName} to be ready...`); - await new Promise((resolve) => setTimeout(resolve, 5000)); - } - - return true; - } - - async discoverCommands(product) { - const containerName = `influxdb3-${product}`; - - // Ensure container is running - if (!(await this.ensureContainerRunning(product))) { - throw new Error(`Failed to start container ${containerName}`); - } - - // Get main help to discover top-level commands - const mainHelp = await this.runCommand('docker', [ - 'compose', - 'exec', - '-T', - containerName, - 'influxdb3', - '--help', - ]); - - if (mainHelp.code !== 0) { - console.error(`Failed to get main help. Exit code: ${mainHelp.code}`); - console.error(`Stdout: ${mainHelp.stdout}`); - console.error(`Stderr: ${mainHelp.stderr}`); - throw new Error(`Failed to get main help: ${mainHelp.stderr}`); - } - - // Parse main commands from help output - const mainCommands = this.parseCommandsFromHelp(mainHelp.stdout); - - // Also add the root command first - this.discoveredCommands.set('influxdb3', { - subcommands: mainCommands, - options: this.parseOptionsFromHelp(mainHelp.stdout), - helpText: mainHelp.stdout, - }); - - // For backward compatibility - this.commandOptionsMap['influxdb3'] = this.parseOptionsFromHelp(mainHelp.stdout); - - // Discover subcommands and options for each main command - for (const command of mainCommands) { - await this.discoverSubcommands(containerName, command, [command]); - } - } - - parseCommandsFromHelp(helpText) { - const commands = []; - // Strip ANSI color codes first - // eslint-disable-next-line no-control-regex - const cleanHelpText = helpText.replace(/\x1b\[[0-9;]*m/g, ''); - const lines = cleanHelpText.split('\n'); - let inCommandsSection = false; - - for (const line of lines) { - const trimmed = line.trim(); - - // Look for any Commands section - if (trimmed.includes('Commands:') || trimmed === 'Resource Management:' || - trimmed === 'System Management:') { - inCommandsSection = true; - continue; - } - - // Stop at next section (but don't stop on management sections) - if (inCommandsSection && /^[A-Z][a-z]+:$/.test(trimmed) && - !trimmed.includes('Commands:') && - trimmed !== 'Resource Management:' && - trimmed !== 'System Management:') { - break; - } - - // Parse command lines (typically indented with command name) - if (inCommandsSection && /^\s+[a-z]/.test(line)) { - const match = line.match(/^\s+([a-z][a-z0-9_-]*)/); - if (match) { - commands.push(match[1]); - } - } - } - - return commands; - } - - parseOptionsFromHelp(helpText) { - const options = []; - const lines = helpText.split('\n'); - let inOptionsSection = false; - - for (const line of lines) { - const trimmed = line.trim(); - - // Look for Options: section - if (trimmed === 'Options:') { - inOptionsSection = true; - continue; - } - - // Stop at next section - if (inOptionsSection && /^[A-Z][a-z]+:$/.test(trimmed)) { - break; - } - - // Parse option lines - if (inOptionsSection && /^\s*-/.test(line)) { - const optionMatch = line.match(/--([a-z][a-z0-9-]*)/); - if (optionMatch) { - options.push(`--${optionMatch[1]}`); - } - } - } - - return options; - } - - async discoverSubcommands(containerName, commandPath, commandParts) { - // Get help for this command - - // First try with --help - let helpResult = 
await this.runCommand('docker', [ - 'compose', - 'exec', - '-T', - containerName, - 'influxdb3', - ...commandParts, - '--help', - ]); - - // If --help returns main help or fails, try without --help - if (helpResult.code !== 0 || helpResult.stdout.includes('InfluxDB 3 Core Server and Command Line Tools')) { - helpResult = await this.runCommand('docker', [ - 'compose', - 'exec', - '-T', - containerName, - 'influxdb3', - ...commandParts, - ]); - } - - if (helpResult.code !== 0) { - // Check if stderr contains useful help information - if (helpResult.stderr && helpResult.stderr.includes('Usage:') && helpResult.stderr.includes('Commands:')) { - // Use stderr as the help text since it contains the command usage info - helpResult = { code: 0, stdout: helpResult.stderr, stderr: '' }; - } else { - // Command might not exist or might not have subcommands - return; - } - } - - // If the result is still the main help, skip this command - if (helpResult.stdout.includes('InfluxDB 3 Core Server and Command Line Tools')) { - return; - } - - const helpText = helpResult.stdout; - const subcommands = this.parseCommandsFromHelp(helpText); - const options = this.parseOptionsFromHelp(helpText); - - // Store the command info - const fullCommand = `influxdb3 ${commandParts.join(' ')}`; - this.discoveredCommands.set(fullCommand, { - subcommands, - options, - helpText, - }); - - // For backward compatibility - this.commandOptionsMap[fullCommand] = options; - - // Recursively discover subcommands (but limit depth) - if (subcommands.length > 0 && commandParts.length < 3) { - for (const subcommand of subcommands) { - await this.discoverSubcommands( - containerName, - `${commandPath} ${subcommand}`, - [...commandParts, subcommand] - ); - } - } - } - - async extractCurrentCLI(product, outputFile) { - process.stdout.write( - `Extracting current CLI help from influxdb3-${product}...` - ); - - await this.loadTokens(); - - if (this.version === 'local') { - const containerName = `influxdb3-${product}`; - - // Ensure container is running and discover commands - if (!(await this.ensureContainerRunning(product))) { - console.log(` ${Colors.RED}✗${Colors.NC}`); - return false; - } - - // Discover all commands dynamically - await this.discoverCommands(product); - - // Generate comprehensive help output - let fileContent = `===== influxdb3 --help =====\n`; - - // Add root command help first - const rootCommand = this.discoveredCommands.get('influxdb3'); - if (rootCommand) { - fileContent += rootCommand.helpText; - } - - // Add all other discovered command help - for (const [command, info] of this.discoveredCommands) { - if (command !== 'influxdb3') { - fileContent += `\n\n===== ${command} --help =====\n`; - fileContent += info.helpText; - } - } - - await fs.writeFile(outputFile, fileContent); - console.log(` ${Colors.GREEN}✓${Colors.NC}`); - } else { - // Use specific version image - const image = `influxdb:${this.version}-${product}`; - - process.stdout.write(`Extracting CLI help from ${image}...`); - - // Pull image if needed - const pullResult = await this.runCommand('docker', ['pull', image]); - if (pullResult.code !== 0) { - console.log(` ${Colors.RED}✗${Colors.NC}`); - console.log(`Error: Failed to pull image ${image}`); - return false; - } - - // For version-specific images, we'll use a simpler approach - // since we can't easily discover commands without a running container - let fileContent = ''; - - // Main help - const mainHelp = await this.runCommand('docker', [ - 'run', - '--rm', - image, - 'influxdb3', - '--help', - 
]); - fileContent += mainHelp.code === 0 ? mainHelp.stdout : mainHelp.stderr; - - // Parse main commands and get their help - const mainCommands = this.parseCommandsFromHelp( - mainHelp.code === 0 ? mainHelp.stdout : mainHelp.stderr - ); - - for (const cmd of mainCommands) { - fileContent += `\n\n===== influxdb3 ${cmd} --help =====\n`; - const cmdHelp = await this.runCommand('docker', [ - 'run', - '--rm', - image, - 'influxdb3', - cmd, - '--help', - ]); - fileContent += cmdHelp.code === 0 ? cmdHelp.stdout : cmdHelp.stderr; - - // Try to get subcommands - const subcommands = this.parseCommandsFromHelp( - cmdHelp.code === 0 ? cmdHelp.stdout : cmdHelp.stderr - ); - - for (const subcmd of subcommands) { - fileContent += `\n\n===== influxdb3 ${cmd} ${subcmd} --help =====\n`; - const subcmdHelp = await this.runCommand('docker', [ - 'run', - '--rm', - image, - 'influxdb3', - cmd, - subcmd, - '--help', - ]); - fileContent += subcmdHelp.code === 0 ? subcmdHelp.stdout : subcmdHelp.stderr; - } - } - - await fs.writeFile(outputFile, fileContent); - console.log(` ${Colors.GREEN}✓${Colors.NC}`); - } - - return true; - } - - async parseCLIHelp(helpFile, parsedFile) { - const content = await fs.readFile(helpFile, 'utf8'); - const lines = content.split('\n'); - - let output = '# CLI Commands and Options\n\n'; - let currentCommand = ''; - let inOptions = false; - - for (const line of lines) { - // Detect command headers - if (line.startsWith('===== influxdb3') && line.endsWith('--help =====')) { - currentCommand = line - .replace('===== ', '') - .replace(' --help =====', '') - .trim(); - output += `## ${currentCommand}\n\n`; - inOptions = false; - // Initialize options list for this command if not exists - if (!this.commandOptionsMap[currentCommand]) { - this.commandOptionsMap[currentCommand] = []; - } - } - // Detect options sections - else if (line.trim() === 'Options:') { - output += '### Options:\n\n'; - inOptions = true; - } - // Parse option lines - else if (inOptions && /^\s*-/.test(line)) { - // Extract option and description - const optionMatch = line.match(/--[a-z][a-z0-9-]*/); - const shortMatch = line.match(/\s-[a-zA-Z],/); - - if (optionMatch) { - const option = optionMatch[0]; - const shortOption = shortMatch - ? 
shortMatch[0].replace(/[,\s]/g, '') - : null; - - // Extract description by removing option parts - let description = line.replace(/^\s*-[^\s]*\s*/, ''); - description = description.replace(/^\s*--[^\s]*\s*/, '').trim(); - - if (shortOption) { - output += `- \`${shortOption}, ${option}\`: ${description}\n`; - } else { - output += `- \`${option}\`: ${description}\n`; - } - - // Store option with its command context - if (currentCommand && option) { - this.commandOptionsMap[currentCommand].push(option); - } - } - } - // Reset options flag for new sections - else if (/^[A-Z][a-z]+:$/.test(line.trim())) { - inOptions = false; - } - } - - await fs.writeFile(parsedFile, output); - } - - findDocsPath(product) { - if (product === 'core') { - return 'content/influxdb3/core/reference/cli/influxdb3'; - } else if (product === 'enterprise') { - return 'content/influxdb3/enterprise/reference/cli/influxdb3'; - } - return ''; - } - - async extractCommandHelp(content, command) { - // Find the section for this specific command in the CLI help - const lines = content.split('\n'); - let inCommand = false; - let helpText = []; - const commandHeader = `===== influxdb3 ${command} --help`; - - for (let i = 0; i < lines.length; i++) { - if (lines[i] === commandHeader) { - inCommand = true; - continue; - } - if (inCommand && lines[i].startsWith('===== influxdb3')) { - break; - } - if (inCommand) { - helpText.push(lines[i]); - } - } - - return helpText.join('\n').trim(); - } - - async generateDocumentationTemplate(command, helpText, product) { - // Parse the help text to extract description and options - const lines = helpText.split('\n'); - let description = ''; - let usage = ''; - let options = []; - let inOptions = false; - - for (let i = 0; i < lines.length; i++) { - const line = lines[i]; - - if (i === 0 && !line.startsWith('Usage:') && line.trim()) { - description = line.trim(); - } - if (line.startsWith('Usage:')) { - usage = line.replace('Usage:', '').trim(); - } - if (line.trim() === 'Options:') { - inOptions = true; - continue; - } - if (inOptions && /^\s*-/.test(line)) { - const optionMatch = line.match(/--([a-z][a-z0-9-]*)/); - const shortMatch = line.match(/\s-([a-zA-Z]),/); - if (optionMatch) { - const optionName = optionMatch[1]; - const shortOption = shortMatch ? shortMatch[1] : null; - let optionDesc = line - .replace(/^\s*-[^\s]*\s*/, '') - .replace(/^\s*--[^\s]*\s*/, '') - .trim(); - - options.push({ - name: optionName, - short: shortOption, - description: optionDesc, - }); - } - } - } - - // Generate product-specific frontmatter - const productTag = product === 'enterprise' ? 'influxdb3/enterprise' : 'influxdb3/core'; - const menuRef = product === 'enterprise' ? 'influxdb3_enterprise_reference' : 'influxdb3_core_reference'; - - // Generate markdown template - let template = `--- -title: influxdb3 ${command} -description: > - The \`influxdb3 ${command}\` command ${description.toLowerCase()}. -${productTag}/tags: [cli] -menu: - ${menuRef}: - parent: influxdb3 cli -weight: 201 ---- - -# influxdb3 ${command} - -${description} - -## Usage - -\`\`\`bash -${usage || `influxdb3 ${command} [OPTIONS]`} -\`\`\` - -`; - - if (options.length > 0) { - template += `## Options - -| Option | Description | -|--------|-------------| -`; - - for (const opt of options) { - const optionDisplay = opt.short - ? 
`\`-${opt.short}\`, \`--${opt.name}\`` - : `\`--${opt.name}\``; - template += `| ${optionDisplay} | ${opt.description} |\n`; - } - } - - template += ` -## Examples - -### Example 1: Basic usage - -{{% code-placeholders "PLACEHOLDER1|PLACEHOLDER2" %}} -\`\`\`bash -influxdb3 ${command} --example PLACEHOLDER1 -\`\`\` -{{% /code-placeholders %}} - -Replace the following: - -- {{% code-placeholder-key %}}\`PLACEHOLDER1\`{{% /code-placeholder-key %}}: Description of placeholder -`; - - return template; - } - - async extractFrontmatter(content) { - const lines = content.split('\n'); - if (lines[0] !== '---') return { frontmatter: null, content }; - - const frontmatterLines = []; - let i = 1; - while (i < lines.length && lines[i] !== '---') { - frontmatterLines.push(lines[i]); - i++; - } - - if (i >= lines.length) return { frontmatter: null, content }; - - const frontmatterText = frontmatterLines.join('\n'); - const remainingContent = lines.slice(i + 1).join('\n'); - - return { frontmatter: frontmatterText, content: remainingContent }; - } - - async getActualContentPath(filePath) { - // Get the actual content path, resolving source fields - try { - const content = await fs.readFile(filePath, 'utf8'); - const { frontmatter } = await this.extractFrontmatter(content); - - if (frontmatter) { - const sourceMatch = frontmatter.match(/^source:\s*(.+)$/m); - if (sourceMatch) { - let sourcePath = sourceMatch[1].trim(); - // Handle relative paths from project root - if (sourcePath.startsWith('/shared/')) { - sourcePath = `content${sourcePath}`; - } - return sourcePath; - } - } - return null; // No source field found - } catch { - return null; - } - } - - async parseDocumentedOptions(filePath) { - // Parse a documentation file to extract all documented options - try { - const content = await fs.readFile(filePath, 'utf8'); - const options = []; - - // Look for options in various patterns: - // 1. Markdown tables with option columns - // 2. Option lists with backticks - // 3. 
Code examples with --option flags - - // Pattern 1: Markdown tables (| Option | Description |) - const tableMatches = content.match(/\|\s*`?--[a-z][a-z0-9-]*`?\s*\|/gi); - if (tableMatches) { - for (const match of tableMatches) { - const option = match.match(/--[a-z][a-z0-9-]*/i); - if (option) { - options.push(option[0]); - } - } - } - - // Pattern 2: Backtick-enclosed options in text - const backtickMatches = content.match(/`--[a-z][a-z0-9-]*`/gi); - if (backtickMatches) { - for (const match of backtickMatches) { - const option = match.replace(/`/g, ''); - options.push(option); - } - } - - // Pattern 3: Options in code blocks - const codeBlockMatches = content.match(/```[\s\S]*?```/g); - if (codeBlockMatches) { - for (const block of codeBlockMatches) { - const blockOptions = block.match(/--[a-z][a-z0-9-]*/gi); - if (blockOptions) { - options.push(...blockOptions); - } - } - } - - // Pattern 4: Environment variable mappings (INFLUXDB3_* to --option) - const envMatches = content.match( - /\|\s*`INFLUXDB3_[^`]*`\s*\|\s*`--[a-z][a-z0-9-]*`\s*\|/gi - ); - if (envMatches) { - for (const match of envMatches) { - const option = match.match(/--[a-z][a-z0-9-]*/); - if (option) { - options.push(option[0]); - } - } - } - - // Remove duplicates and return sorted - return [...new Set(options)].sort(); - } catch { - return []; - } - } - - async auditDocs(product, cliFile, auditFile) { - const docsPath = this.findDocsPath(product); - const sharedPath = 'content/shared/influxdb3-cli'; - const patchDir = join(this.outputDir, 'patches', product); - await this.ensureDir(patchDir); - - let output = `# CLI Documentation Audit - ${product}\n`; - output += `Generated: ${new Date().toISOString()}\n\n`; - - // GitHub base URL for edit links - const githubBase = 'https://github.com/influxdata/docs-v2/edit/master'; - const githubNewBase = 'https://github.com/influxdata/docs-v2/new/master'; - - // VSCode links for local editing - const vscodeBase = 'vscode://file'; - const projectRoot = join(__dirname, '..', '..'); - - // Check for missing documentation - output += '## Missing Documentation\n\n'; - - let missingCount = 0; - const missingDocs = []; - - // Build command to file mapping dynamically from discovered commands - const commandToFile = this.buildCommandToFileMapping(); - - // Extract commands from CLI help - const content = await fs.readFile(cliFile, 'utf8'); - const lines = content.split('\n'); - - for (const line of lines) { - if (line.startsWith('===== influxdb3') && line.endsWith('--help =====')) { - const command = line - .replace('===== influxdb3 ', '') - .replace(' --help =====', ''); - - if (commandToFile[command]) { - const expectedFile = commandToFile[command]; - const productFile = join(docsPath, expectedFile); - const sharedFile = join(sharedPath, expectedFile); - - const productExists = await this.fileExists(productFile); - const sharedExists = await this.fileExists(sharedFile); - - let needsContent = false; - let targetPath = null; - let stubPath = null; - - if (!productExists && !sharedExists) { - // Completely missing - needsContent = true; - targetPath = productFile; - } else if (productExists) { - // Check if it has a source field pointing to missing content - const actualPath = await this.getActualContentPath(productFile); - if (actualPath && !(await this.fileExists(actualPath))) { - needsContent = true; - targetPath = actualPath; - stubPath = productFile; - } - } else if (sharedExists) { - // Shared file exists, check if it has content - const actualPath = await 
this.getActualContentPath(sharedFile); - if (actualPath && !(await this.fileExists(actualPath))) { - needsContent = true; - targetPath = actualPath; - stubPath = sharedFile; - } - } - - if (needsContent && targetPath) { - const githubNewUrl = `${githubNewBase}/${targetPath}`; - const localPath = join(projectRoot, targetPath); - - output += `- **Missing**: Documentation for \`influxdb3 ${command}\`\n`; - if (stubPath) { - output += ` - Stub exists at: \`${stubPath}\`\n`; - output += ` - Content needed at: \`${targetPath}\`\n`; - } else { - output += ` - Expected: \`${targetPath}\` or \`${sharedFile}\`\n`; - } - output += ` - [Create on GitHub](${githubNewUrl})\n`; - output += ` - Local: \`${localPath}\`\n`; - - // Generate documentation template - const helpText = await this.extractCommandHelp(content, command); - const docTemplate = await this.generateDocumentationTemplate( - command, - helpText, - product - ); - - // Save patch file - const patchFileName = `${command.replace(/ /g, '-')}.md`; - const patchFile = join(patchDir, patchFileName); - await fs.writeFile(patchFile, docTemplate); - - output += ` - **Template generated**: \`${patchFile}\`\n`; - - missingDocs.push({ command, file: targetPath, patchFile }); - missingCount++; - } - } - } - } - - if (missingCount === 0) { - output += 'No missing documentation files detected.\n'; - } else { - output += '\n### Quick Actions\n\n'; - output += - 'Copy and paste these commands to create missing documentation:\n\n'; - output += '```bash\n'; - for (const doc of missingDocs) { - const relativePatch = join( - 'helper-scripts/output/cli-audit/patches', - product, - `${doc.command.replace(/ /g, '-')}.md` - ); - output += `# Create ${doc.command} documentation\n`; - output += `mkdir -p $(dirname ${doc.file})\n`; - output += `cp ${relativePatch} ${doc.file}\n\n`; - } - output += '```\n'; - } - - output += '\n'; - - // Check for outdated options in existing docs - output += '## Existing Documentation Review\n\n'; - - // Parse CLI help first to populate commandOptionsMap - const parsedFile = join( - this.outputDir, - `parsed-cli-${product}-${this.version}.md` - ); - await this.parseCLIHelp(cliFile, parsedFile); - - // For each command, check if documentation exists and compare content - const existingDocs = []; - for (const [command, expectedFile] of Object.entries(commandToFile)) { - const productFile = join(docsPath, expectedFile); - const sharedFile = join(sharedPath, expectedFile); - - let docFile = null; - let actualContentFile = null; - - // Find the documentation file - if (await this.fileExists(productFile)) { - docFile = productFile; - // Check if it's a stub with source field - const actualPath = await this.getActualContentPath(productFile); - actualContentFile = actualPath - ? 
join(projectRoot, actualPath) - : join(projectRoot, productFile); - } else if (await this.fileExists(sharedFile)) { - docFile = sharedFile; - actualContentFile = join(projectRoot, sharedFile); - } - - if (docFile && (await this.fileExists(actualContentFile))) { - const githubEditUrl = `${githubBase}/${docFile}`; - const localPath = join(projectRoot, docFile); - const vscodeUrl = `${vscodeBase}/${localPath}`; - - // Get CLI options for this command - const cliOptions = this.commandOptionsMap[`influxdb3 ${command}`] || []; - - // Parse documentation content to find documented options - const documentedOptions = - await this.parseDocumentedOptions(actualContentFile); - - // Find missing options (in CLI but not in docs) - const missingOptions = cliOptions.filter( - (opt) => !documentedOptions.includes(opt) - ); - - // Find extra options (in docs but not in CLI) - const extraOptions = documentedOptions.filter( - (opt) => !cliOptions.includes(opt) - ); - - existingDocs.push({ - command, - file: docFile, - actualContentFile: actualContentFile.replace( - join(projectRoot, ''), - '' - ), - githubUrl: githubEditUrl, - localPath, - vscodeUrl, - cliOptions, - documentedOptions, - missingOptions, - extraOptions, - }); - } - } - - if (existingDocs.length > 0) { - output += 'Review these existing documentation files for accuracy:\n\n'; - - for (const doc of existingDocs) { - output += `### \`influxdb3 ${doc.command}\`\n`; - output += `- **File**: \`${doc.file}\`\n`; - if (doc.actualContentFile !== doc.file) { - output += `- **Content**: \`${doc.actualContentFile}\`\n`; - } - output += `- [Edit on GitHub](${doc.githubUrl})\n`; - output += `- [Open in VS Code](${doc.vscodeUrl})\n`; - output += `- **Local**: \`${doc.localPath}\`\n`; - - // Show option analysis - if (doc.missingOptions.length > 0) { - output += `- **⚠️ Missing from docs** (${doc.missingOptions.length} options):\n`; - for (const option of doc.missingOptions.sort()) { - output += ` - \`${option}\`\n`; - } - } - - if (doc.extraOptions.length > 0) { - output += `- **ℹ️ Documented but not in CLI** (${doc.extraOptions.length} options):\n`; - for (const option of doc.extraOptions.sort()) { - output += ` - \`${option}\`\n`; - } - } - - if (doc.missingOptions.length === 0 && doc.extraOptions.length === 0) { - output += `- **✅ Options match** (${doc.cliOptions.length} options)\n`; - } - - if (doc.cliOptions.length > 0) { - output += `- **All CLI Options** (${doc.cliOptions.length}):\n`; - const uniqueOptions = [...new Set(doc.cliOptions)].sort(); - for (const option of uniqueOptions) { - const status = doc.missingOptions.includes(option) ? '❌' : '✅'; - output += ` - ${status} \`${option}\`\n`; - } - } - output += '\n'; - } - } - - output += '\n## Summary\n'; - output += `- Missing documentation files: ${missingCount}\n`; - output += `- Existing documentation files: ${existingDocs.length}\n`; - output += `- Generated templates: ${missingCount}\n`; - output += '- Options are grouped by command for easier review\n\n'; - - output += '## Automation Suggestions\n\n'; - output += - '1. **Use generated templates**: Check the `patches` directory for pre-filled documentation templates\n'; - output += - '2. **Batch creation**: Use the shell commands above to quickly create all missing files\n'; - output += - '3. **CI Integration**: Add this audit to your CI pipeline to catch missing docs early\n'; - output += - '4. 
**Auto-PR**: Create a GitHub Action that runs this audit and opens PRs for missing docs\n\n'; - - await fs.writeFile(auditFile, output); - console.log(`📄 Audit complete: ${auditFile}`); - - if (missingCount > 0) { - console.log( - `📝 Generated ${missingCount} documentation templates in: ${patchDir}` - ); - } - } - - buildCommandToFileMapping() { - // Build a mapping from discovered commands to expected documentation files - const mapping = {}; - - // Common patterns for command to file mapping - const patterns = { - 'create database': 'create/database.md', - 'create token': 'create/token/_index.md', - 'create token admin': 'create/token/admin.md', - 'create trigger': 'create/trigger.md', - 'create table': 'create/table.md', - 'create last_cache': 'create/last_cache.md', - 'create distinct_cache': 'create/distinct_cache.md', - 'show databases': 'show/databases.md', - 'show tokens': 'show/tokens.md', - 'show system': 'show/system.md', - 'delete database': 'delete/database.md', - 'delete table': 'delete/table.md', - 'delete trigger': 'delete/trigger.md', - 'update database': 'update/database.md', - 'test wal_plugin': 'test/wal_plugin.md', - 'test schedule_plugin': 'test/schedule_plugin.md', - query: 'query.md', - write: 'write.md', - }; - - // Add discovered commands that match patterns - for (const [command, info] of this.discoveredCommands) { - const cleanCommand = command.replace('influxdb3 ', ''); - if (patterns[cleanCommand]) { - mapping[cleanCommand] = patterns[cleanCommand]; - } else if (cleanCommand !== '' && cleanCommand.includes(' ')) { - // Generate file path for subcommands - const parts = cleanCommand.split(' '); - if (parts.length === 2) { - mapping[cleanCommand] = `${parts[0]}/${parts[1]}.md`; - } else if (parts.length === 3) { - mapping[cleanCommand] = `${parts[0]}/${parts[1]}/${parts[2]}.md`; - } - } else if (cleanCommand !== '' && !cleanCommand.includes(' ')) { - // Single command - mapping[cleanCommand] = `${cleanCommand}.md`; - } - } - - return mapping; - } - - async run() { - console.log( - `${Colors.BLUE}🔍 InfluxDB 3 CLI Documentation Audit${Colors.NC}` - ); - console.log('======================================='); - console.log(`Product: ${this.product}`); - console.log(`Version: ${this.version}`); - console.log(); - - // Ensure output directory exists - await this.ensureDir(this.outputDir); - - if (this.product === 'core') { - const cliFile = join( - this.outputDir, - `current-cli-core-${this.version}.txt` - ); - const auditFile = join( - this.outputDir, - `documentation-audit-core-${this.version}.md` - ); - - if (await this.extractCurrentCLI('core', cliFile)) { - await this.auditDocs('core', cliFile, auditFile); - } - } else if (this.product === 'enterprise') { - const cliFile = join( - this.outputDir, - `current-cli-enterprise-${this.version}.txt` - ); - const auditFile = join( - this.outputDir, - `documentation-audit-enterprise-${this.version}.md` - ); - - if (await this.extractCurrentCLI('enterprise', cliFile)) { - await this.auditDocs('enterprise', cliFile, auditFile); - } - } else if (this.product === 'both') { - // Core - const cliFileCore = join( - this.outputDir, - `current-cli-core-${this.version}.txt` - ); - const auditFileCore = join( - this.outputDir, - `documentation-audit-core-${this.version}.md` - ); - - if (await this.extractCurrentCLI('core', cliFileCore)) { - await this.auditDocs('core', cliFileCore, auditFileCore); - } - - // Enterprise - const cliFileEnt = join( - this.outputDir, - `current-cli-enterprise-${this.version}.txt` - ); - const 
auditFileEnt = join(
-        this.outputDir,
-        `documentation-audit-enterprise-${this.version}.md`
-      );
-
-      if (await this.extractCurrentCLI('enterprise', cliFileEnt)) {
-        await this.auditDocs('enterprise', cliFileEnt, auditFileEnt);
-      }
-    } else {
-      console.error(`Error: Invalid product '${this.product}'`);
-      console.error(
-        'Usage: node audit-cli-documentation.js [core|enterprise|both] [version]'
-      );
-      process.exit(1);
-    }
-
-    console.log();
-    console.log(
-      `${Colors.GREEN}✅ CLI documentation audit complete!${Colors.NC}`
-    );
-    console.log();
-    console.log('Next steps:');
-    console.log(`1. Review the audit reports in: ${this.outputDir}`);
-    console.log('2. Update missing documentation files');
-    console.log('3. Verify options match current CLI behavior');
-    console.log('4. Update examples and usage patterns');
-  }
-}
-
-// Main execution
-async function main() {
-  const args = process.argv.slice(2);
-  const product = args[0] || 'both';
-  const version = args[1] || 'local';
-
-  // Validate product
-  if (!['core', 'enterprise', 'both'].includes(product)) {
-    console.error(`Error: Invalid product '${product}'`);
-    console.error(
-      'Usage: node audit-cli-documentation.js [core|enterprise|both] [version]'
-    );
-    console.error('Example: node audit-cli-documentation.js core 3.2.0');
-    process.exit(1);
-  }
-
-  // Validate version tag
-  try {
-    const repoRoot = await getRepositoryRoot();
-    await validateVersionInputs(version, null, repoRoot);
-  } catch (error) {
-    console.error(`Version validation failed: ${error.message}`);
-    process.exit(1);
-  }
-
-  const auditor = new CLIDocAuditor(product, version);
-  await auditor.run();
-}
-
-// Run if called directly
-if (import.meta.url === `file://${process.argv[1]}`) {
-  main().catch((err) => {
-    console.error('Error:', err);
-    process.exit(1);
-  });
-}
-
-export { CLIDocAuditor };
diff --git a/helper-scripts/influxdb3-monolith/setup-auth-tokens.sh b/helper-scripts/influxdb3-monolith/setup-auth-tokens.sh
deleted file mode 100644
index 6990d757f..000000000
--- a/helper-scripts/influxdb3-monolith/setup-auth-tokens.sh
+++ /dev/null
@@ -1,164 +0,0 @@
-#!/bin/bash
-# Set up authentication tokens for InfluxDB 3 Core and Enterprise containers
-# Usage: ./setup-auth-tokens.sh [core|enterprise|both]
-
-set -e
-
-# Color codes
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[1;33m'
-BLUE='\033[0;34m'
-NC='\033[0m'
-
-# Parse arguments
-TARGET=${1:-both}
-
-echo -e "${BLUE}🔐 InfluxDB 3 Authentication Setup${NC}"
-echo "=================================="
-echo ""
-
-# Check for and load existing secret files
-SECRET_CORE_FILE="$HOME/.env.influxdb3-core-admin-token"
-SECRET_ENT_FILE="$HOME/.env.influxdb3-enterprise-admin-token"
-
-if [ -f "$SECRET_CORE_FILE" ]; then
-    echo "✅ Found existing Core token secret file"
-else
-    echo "📝 Creating new Core token secret file: $SECRET_CORE_FILE"
-    touch "$SECRET_CORE_FILE"
-fi
-
-if [ -f "$SECRET_ENT_FILE" ]; then
-    echo "✅ Found existing Enterprise token secret file"
-else
-    echo "📝 Creating new Enterprise token secret file: $SECRET_ENT_FILE"
-    touch "$SECRET_ENT_FILE"
-fi
-
-echo ""
-
-# Function to setup auth for a product
-setup_auth() {
-    local product=$1
-    local container_name="influxdb3-${product}"
-    local port
-    local secret_file
-
-    case "$product" in
-        "core")
-            port="8282"
-            secret_file="$SECRET_CORE_FILE"
-            ;;
-        "enterprise")
-            port="8181"
-            secret_file="$SECRET_ENT_FILE"
-            ;;
-    esac
-
-    echo -e "${BLUE}Setting up $(echo ${product} | awk '{print toupper(substr($0,1,1)) tolower(substr($0,2))}') authentication...${NC}"
authentication...${NC}" - - # Check if token already exists in secret file - if [ -s "$secret_file" ]; then - local existing_token=$(cat "$secret_file") - echo "✅ Token already exists in secret file" - echo " Token: ${existing_token:0:20}..." - - # Test if the token works - echo -n "🧪 Testing existing token..." - if docker exec "${container_name}" influxdb3 show databases --token "${existing_token}" --host "http://localhost:${port}" > /dev/null 2>&1; then - echo -e " ${GREEN}✓ Working${NC}" - return 0 - else - echo -e " ${YELLOW}⚠ Not working, will create new token${NC}" - fi - fi - - # Check if container is running - if ! docker ps --format '{{.Names}}' | grep -q "^${container_name}$"; then - echo "🚀 Starting ${container_name} container..." - if ! docker compose up -d "${container_name}"; then - echo -e "${RED}❌ Failed to start container${NC}" - return 1 - fi - - echo -n "⏳ Waiting for container to be ready..." - sleep 5 - echo -e " ${GREEN}✓${NC}" - else - echo "✅ Container ${container_name} is running" - fi - - # Create admin token - echo "🔑 Creating admin token..." - - local token_output - if token_output=$(docker exec "${container_name}" influxdb3 create token --admin 2>&1); then - # Extract the token from the "Token: " line - local new_token=$(echo "$token_output" | grep "^Token: " | sed 's/^Token: //' | tr -d '\r\n') - - echo -e "✅ ${GREEN}Token created successfully!${NC}" - echo " Token: ${new_token:0:20}..." - - # Update secret file - echo "${new_token}" > "$secret_file" - - echo "📝 Updated secret file: $secret_file" - - # Test the new token - echo -n "🧪 Testing new token..." - if docker exec "${container_name}" influxdb3 show databases --token "${new_token}" --host "http://localhost:${port}" > /dev/null 2>&1; then - echo -e " ${GREEN}✓ Working${NC}" - else - echo -e " ${YELLOW}⚠ Test failed, but token was created${NC}" - fi - - else - echo -e "${RED}❌ Failed to create token${NC}" - echo "Error output: $token_output" - return 1 - fi - - echo "" -} - -# Main execution -case "$TARGET" in - "core") - setup_auth "core" - ;; - "enterprise") - setup_auth "enterprise" - ;; - "both") - setup_auth "core" - setup_auth "enterprise" - ;; - *) - echo "Usage: $0 [core|enterprise|both]" - exit 1 - ;; -esac - -echo -e "${GREEN}🎉 Authentication setup complete!${NC}" -echo "" -echo "📋 Next steps:" -echo "1. Restart containers to load new secrets:" -echo " docker compose down && docker compose up -d influxdb3-core influxdb3-enterprise" -echo "2. Test CLI commands with authentication:" -echo " ./detect-cli-changes.sh core 3.1.0 local" -echo " ./detect-cli-changes.sh enterprise 3.1.0 local" -echo "" -echo "📄 Your secret files now contain:" - -# Show Core tokens -if [ -f "$SECRET_CORE_FILE" ] && [ -s "$SECRET_CORE_FILE" ]; then - token_preview=$(head -c 20 "$SECRET_CORE_FILE") - echo " $SECRET_CORE_FILE: ${token_preview}..." -fi - -# Show Enterprise tokens -if [ -f "$SECRET_ENT_FILE" ] && [ -s "$SECRET_ENT_FILE" ]; then - token_preview=$(head -c 20 "$SECRET_ENT_FILE") - echo " $SECRET_ENT_FILE: ${token_preview}..." 
-fi
\ No newline at end of file
diff --git a/package.json b/package.json
index fc09f72a5..e57beed19 100644
--- a/package.json
+++ b/package.json
@@ -55,12 +55,7 @@
     "test:codeblocks:v2": "docker compose run --rm --name v2-pytest v2-pytest",
     "test:codeblocks:stop-monitors": "./test/scripts/monitor-tests.sh stop cloud-dedicated-pytest && ./test/scripts/monitor-tests.sh stop clustered-pytest",
     "test:e2e": "node cypress/support/run-e2e-specs.js",
-    "test:shortcode-examples": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/example.md",
-    "audit:cli": "node ./helper-scripts/influxdb3-monolith/audit-cli-documentation.js both local",
-    "audit:cli:3core": "node ./helper-scripts/influxdb3-monolith/audit-cli-documentation.js core local",
-    "audit:cli:3ent": "node ./helper-scripts/influxdb3-monolith/audit-cli-documentation.js enterprise local",
-    "audit:cli:apply": "node ./helper-scripts/influxdb3-monolith/apply-cli-patches.js both",
-    "audit:cli:apply:dry": "node ./helper-scripts/influxdb3-monolith/apply-cli-patches.js both --dry-run"
+    "test:shortcode-examples": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/example.md"
   },
   "type": "module",
   "browserslist": [

From 9a0d4035d89b844b9537e2f7ad70486398ba6719 Mon Sep 17 00:00:00 2001
From: Jason Stirnaman
Date: Tue, 19 Aug 2025 10:57:24 -0500
Subject: [PATCH 103/122] config(link-checker): exclude Docker Hub URLs

Add exclusion pattern for hub.docker.com to both production and default
link-checker configurations. Docker Hub often implements rate limiting
and bot detection that causes false positive link validation failures
in CI environments.
---
 .ci/link-checker/default.lycherc.toml    | 3 +++
 .ci/link-checker/production.lycherc.toml | 3 +++
 2 files changed, 6 insertions(+)

diff --git a/.ci/link-checker/default.lycherc.toml b/.ci/link-checker/default.lycherc.toml
index 259efd76a..f769afc36 100644
--- a/.ci/link-checker/default.lycherc.toml
+++ b/.ci/link-checker/default.lycherc.toml
@@ -55,6 +55,9 @@ exclude = [
   "^https?://stackoverflow\\.com",
   "^https?://.*\\.stackoverflow\\.com",
 
+  # Docker Hub URLs (rate limiting and bot detection)
+  "^https?://hub\\.docker\\.com",
+
   # Common documentation placeholders
   "YOUR_.*",
   "REPLACE_.*",
diff --git a/.ci/link-checker/production.lycherc.toml b/.ci/link-checker/production.lycherc.toml
index f8410208c..37f692e47 100644
--- a/.ci/link-checker/production.lycherc.toml
+++ b/.ci/link-checker/production.lycherc.toml
@@ -63,6 +63,9 @@ exclude = [
   "^https?://stackoverflow\\.com",
   "^https?://.*\\.stackoverflow\\.com",
 
+  # Docker Hub URLs (rate limiting and bot detection)
+  "^https?://hub\\.docker\\.com",
+
   # InfluxData support URLs (certificate/SSL issues in CI)
   "^https?://support\\.influxdata\\.com",
 

From fe455525d45452066fd0fa9e6a6262155746a089 Mon Sep 17 00:00:00 2001
From: Jason Stirnaman
Date: Tue, 19 Aug 2025 10:57:58 -0500
Subject: [PATCH 104/122] fix(v2): broken link fragment

---
 content/influxdb/v2/install/_index.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/content/influxdb/v2/install/_index.md b/content/influxdb/v2/install/_index.md
index 60b60d938..d9398a993 100644
--- a/content/influxdb/v2/install/_index.md
+++ b/content/influxdb/v2/install/_index.md
@@ -112,7 +112,7 @@ _If `gpg` isn't available on your system, see
 
 The following steps guide you through using GPG to verify InfluxDB binary releases:
 
-1. [Choose the InfluxData key-pair for your OS version](#choose-the-influxdata-key-pair-for-your-system).
+1. [Choose the InfluxData key-pair for your OS version](#choose-the-influxdata-key-pair-for-your-os-version).
 2. Download and import the InfluxData public key.
 
    `gpg --import` outputs to stderr.

From c1de7e71be8710c617a73c0a8bc72720d16864ec Mon Sep 17 00:00:00 2001
From: David Rusnak
Date: Tue, 19 Aug 2025 15:24:43 -0400
Subject: [PATCH 105/122] docs: add artifacts and release notes for clustered release 20250814

---
 .../reference/release-notes/clustered.md      |   25 +
 .../20250814-1819052/app-instance-schema.json | 3255 +++++++++++++++++
 .../20250814-1819052/example-customer.yml     |  342 ++
 3 files changed, 3622 insertions(+)
 create mode 100644 static/downloads/clustered-release-artifacts/20250814-1819052/app-instance-schema.json
 create mode 100644 static/downloads/clustered-release-artifacts/20250814-1819052/example-customer.yml

diff --git a/content/influxdb3/clustered/reference/release-notes/clustered.md b/content/influxdb3/clustered/reference/release-notes/clustered.md
index a82fa5c9b..81167f85f 100644
--- a/content/influxdb3/clustered/reference/release-notes/clustered.md
+++ b/content/influxdb3/clustered/reference/release-notes/clustered.md
@@ -61,6 +61,31 @@ directory. This new directory contains artifacts associated with the specified r
 
 ---
 
+## 20250814-1819052 {date="2025-08-14"}
+
+### Quickstart
+
+```yaml
+spec:
+  package:
+    image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20250814-1819052
+```
+
+### Bug Fixes
+
+- Fix incorrect service address for tokens in Clustered auth sidecar. If you were overriding the `AUTHZ_TOKEN_SVC_ADDRESS` environment variable in your `AppInstance`, you can now remove that override.
+- Remove default `fallbackScrapeProtocol` environment variable for prometheus-operator.
+- Update Grafana to `12.1.1` to address CVE-2025-6023 and CVE-2025-6197.
+
+### Changes
+
+#### Database Engine
+
+- Update DataFusion to `48`.
+- Tweak compaction to reduce write amplification and querier cache churn in some circumstances.
+
+---
+
 ## 20250721-1796368 {date="2025-07-21"}
 
 ### Quickstart
diff --git a/static/downloads/clustered-release-artifacts/20250814-1819052/app-instance-schema.json b/static/downloads/clustered-release-artifacts/20250814-1819052/app-instance-schema.json
new file mode 100644
index 000000000..51eb13f3b
--- /dev/null
+++ b/static/downloads/clustered-release-artifacts/20250814-1819052/app-instance-schema.json
@@ -0,0 +1,3255 @@
+{
+  "additionalProperties": false,
+  "properties": {
+    "apiVersion": {
+      "type": "string"
+    },
+    "kind": {
+      "type": "string"
+    },
+    "metadata": {
+      "type": "object"
+    },
+    "spec": {
+      "additionalProperties": false,
+      "properties": {
+        "imagePullSecrets": {
+          "items": {
+            "properties": {
+              "name": {
+                "type": "string"
+              }
+            },
+            "required": [
+              "name"
+            ],
+            "type": "object"
+          },
+          "type": "array"
+        },
+        "package": {
+          "properties": {
+            "apiVersion": {
+              "type": "string"
+            },
+            "image": {
+              "type": "string"
+            },
+            "spec": {
+              "additionalProperties": false,
+              "properties": {
+                "admin": {
+                  "additionalProperties": false,
+                  "description": "OAuth configuration for restricting access to Clustered",
+                  "properties": {
+                    "dsn": {
+                      "additionalProperties": false,
+                      "description": "The dsn for the postgres compatible database",
+                      "examples": [
+                        "value: ...",
+                        "valueFrom: ..."
+ ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "identityProvider": { + "description": "The identity provider to be used e.g. \"keycloak\", \"auth0\", \"azure\"", + "type": "string" + }, + "internalSigningKey": { + "description": "Internal JWT secrets", + "properties": { + "id": { + "additionalProperties": false, + "description": "random ID that uniquely identifies this keypair. Generally a UUID.", + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "privateKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "publicKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." 
+ ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "required": [ + "id", + "privateKey", + "publicKey" + ], + "type": "object" + }, + "jwksEndpoint": { + "description": "The JWKS endpoint given by your identity provider. This should look like \"https://{identityProviderDomain}/.well-known/jwks.json\"", + "type": "string" + }, + "users": { + "description": "The list of users to grant access to Clustered via influxctl", + "item": { + "properties": { + "email": { + "description": "The email of the user within your identity provider.", + "type": "string" + }, + "firstName": { + "description": "The first name of the user that will be used in Clustered.", + "type": "string" + }, + "id": { + "description": "The identifier of the user within your identity provider.", + "type": "string" + }, + "lastName": { + "description": "The last name of the user that will be used in Clustered.", + "type": "string" + }, + "userGroups": { + "description": "Optional list of user groups to assign to the user, rather than the default groups. The following groups are currently supported: Admin, Auditor, Member", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "firstName", + "lastName", + "email", + "id" + ], + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "catalog": { + "additionalProperties": false, + "description": "Configuration for the postgres-compatible database that is used as a catalog/metadata store", + "properties": { + "dsn": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." 
+ ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "type": "object" + }, + "components": { + "additionalProperties": false, + "properties": { + "catalog": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "common": { + "additionalProperties": false, + "description": "Common configuration to all components. They will be overridden by component-specific configuration.\nAny value defined in the component-specific settings will be merged with values defined in the common settings.\n", + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. 
In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "compactor": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. 
In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "garbage-collector": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. 
In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "granite": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. 
In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "ingester": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. 
In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "querier": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. 
In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "router": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. 
In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "egress": { + "additionalProperties": false, + "description": "Configuration for how external resources are accessed from Clustered components", + "properties": { + "customCertificates": { + "additionalProperties": false, + "description": "Custom certificate or CA Bundle. Used to verify outbound connections performed by influxdb, such as OIDC servers,\npostgres databases, or object store API endpoints.\n\nEquivalent to the SSL_CERT_FILE environment variable used by OpenSSL.\n", + "examples": [ + "valueFrom: ..." 
+ ], + "oneOf": [ + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "valueFrom": { + "additionalProperties": false, + "description": "Allows sourcing the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or its key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "type": "object" + }, + "featureFlags": { + "description": "An array of feature flag names. Feature flags (aka feature gates) control features that\nhave not yet been released. They can be experimental to varying degrees (alpha, beta, rc).\n", + "properties": { + "clusteredAuth": { + "description": "Use the authorization service optimized for Clustered deployments.\n\nThis authorization service communicates directly with the locally deployed\ngranite service, which allows it to become ready to validate access tokens\npromptly on pod start up. It also offers more control over the invalidation\nschedule for cached tokens, and may slightly reduce query latency.\n", + "type": "string" + }, + "enableDefaultResourceLimits": { + "description": "Enable Default Resource Limits for Containers\n\nWhen enabled, all containers will have `requests.cpu`, `requests.memory`,\n`limits.cpu`, and `limits.memory` defined. This is particularly useful\nfor namespaces that include a ResourceQuota. When enabling this feature\nflag, make sure to specify the resource limits and requests for the IOx\ncomponents as the defaults may not be properly sized for your cluster.\n", + "type": "string" + }, + "grafana": { + "description": "An experimental, minimal installation of a Grafana Deployment to use alongside Clustered.\n\nOnly enable this flag if you do not have your own metric visualisation setup and wish\nto experiment with Clustered. 
It is tested with Grafana v12.1.1.\n", + "type": "string" + }, + "localTracing": { + "description": "Experimental installation of Jaeger for tracing capabilities with InfluxDB 3.\n\nOnly enable this flag when instructed to do so by the support team.\n", + "type": "string" + }, + "noGrpcProbes": { + "description": "Remove gRPC liveness/readiness probes for debug service", + "type": "string" + }, + "noMinReadySeconds": { + "description": "Experimental flag for Kubernetes clusters that are lower than v1.25.\n\nNo longer uses minReadySeconds for workloads; this will cause downtime.\n", + "type": "string" + }, + "noPrometheus": { + "description": "Disable the installation of the default bare-bones Prometheus StatefulSet alongside Clustered.\n\nThis feature flag is useful when you already have a monitoring setup and wish to utilise it.\n\nNOTE: In future releases, the `debug-service` will have a partial, minor dependency on a Prometheus instance being available.\nIf you do not wish for this service to utilise your own installation of Prometheus, disabling it here may cause issues.\n", + "type": "string" + }, + "serviceMonitor": { + "description": "Deprecated. Use observability.serviceMonitor instead.\n\nCreate a ServiceMonitor resource for InfluxDB3.\n", + "type": "string" + }, + "useLicensedBinaries": { + "description": "This flag is deprecated and no longer has any effect. Licensed binaries are now always used.\n", + "type": "string" + } + }, + "type": "array" + }, + "hostingEnvironment": { + "additionalProperties": false, + "description": "Environment or cloud-specific configuration elements which are utilised by InfluxDB Clustered.", + "properties": { + "aws": { + "additionalProperties": false, + "description": "Configuration for hosting on AWS.", + "properties": { + "eksRoleArn": { + "default": "", + "description": "IAM role ARN to apply to the IOx ServiceAccount, used with EKS IRSA.", + "type": "string" + } + }, + "type": "object" + }, + "gke": { + "additionalProperties": false, + "description": "Configuration for hosting on Google Kubernetes Engine (GKE).", + "properties": { + "workloadIdentity": { + "additionalProperties": false, + "description": "Authentication via GKE workload identity. This will annotate the relevant Kubernetes ServiceAccount objects.\nSee https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity for further details.\n", + "properties": { + "serviceAccountEmail": { + "description": "Google IAM Service Account email; this should be in the format \"NAME@PROJECT_ID.iam.gserviceaccount.com\".", + "type": "string" + } + }, + "required": [ + "serviceAccountEmail" + ], + "type": "object" + } + }, + "type": "object" + }, + "openshift": { + "additionalProperties": false, + "description": "Configuration for hosting on Red Hat OpenShift.", + "properties": { }, + "type": "object" + } + }, + "type": "object" + }, + "images": { + "description": "Manipulate how images are retrieved for Clustered. 
This is typically useful for air-gapped environments when you need to use an internal registry.", + "properties": { + "overrides": { + "description": "Override specific images using the contained predicate fields.\n\nThis takes precedence over the registryOverride field.\n", + "item": { + "description": "Remaps an image matching naming predicates\n", + "properties": { + "name": { + "description": "Naming predicate: the part of the image name that comes after the registry name, e.g.\nIf the image name is \"oci.influxdata.com/foo/bar:1234\", the name field matches \"foo/bar\"\n", + "type": "string" + }, + "newFQIN": { + "description": "Rewrite expression: when a naming predicate matches this image, rewrite the image reference\nusing this Fully Qualified Image Name. i.e. this replaces the whole registry/imagename:tag@digest\nparts of the input image reference.\n", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "registryOverride": { + "default": "", + "description": "Place a new registry prefix in front of all Clustered component images.\n\nThis is used when you wish to maintain the original registry path for images and simply relocate them underneath\nyour own registry.\n\nExample:\nregistryOverride: 'newReg' means 'myregistry/test' becomes 'newReg/myregistry/test'\n", + "type": "string" + } + }, + "type": "object" + }, + "ingesterStorage": { + "additionalProperties": false, + "description": "Storage configuration for the Clustered ingesters.", + "properties": { + "storage": { + "description": "A higher value provides more disk space for the Write-Ahead Log (WAL) to each ingester, allowing for a greater set of leading edge data to be maintained in-memory.\nThis also reduces the frequency of WAL rotations, leading to better query performance and less burden on the compactor.\n\nNote that at 90% capacity, an ingester will stop accepting writes in order to persist its active WAL into the configured object store as parquet files.\n", + "type": "string" + }, + "storageClassName": { + "default": "", + "type": "string" + } + }, + "required": [ + "storage" + ], + "type": "object" + }, + "ingress": { + "additionalProperties": false, + "description": "Configuration for how Clustered components are accessed.", + "properties": { + "grpc": { + "additionalProperties": false, + "description": "Configuration for components which utilise gRPC", + "properties": { + "className": { + "default": "", + "type": "string" + } + }, + "type": "object" + }, + "hosts": { + "description": "A number of hosts/domains to use as entrypoints within the Ingress resources.", + "type": "array" + }, + "http": { + "additionalProperties": false, + "description": "Configuration for components which utilise HTTP", + "properties": { + "className": { + "default": "", + "type": "string" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "description": "Template to apply across configured Ingress-type resources.\nThis allows you to specify a range of third party annotations onto the created Ingress objects and/or\nalter the kind of Ingress you would like to use, e.g. 'Route'.\n", + "oneOf": [ + { + "properties": { + "apiVersion": { + "const": "networking.istio.io/v1beta1" + }, + "kind": { + "const": "Gateway" + }, + "selector": { + "default": { }, + "description": "This selector determines which Istio ingress gateway pods will be chosen\nto handle traffic for the created Gateway resources. 
A blank selector means that all\ngateway pods in the cluster will handle traffic.\n\nFor more details, see https://istio.io/latest/docs/reference/config/networking/gateway/#Gateway\n", + "type": "object" + } + }, + "required": [ + "apiVersion", + "kind" + ] + }, + { + "properties": { + "apiVersion": { + "enum": [ + "networking.k8s.io/v1", + "route.openshift.io/v1" + ], + "type": "string" + }, + "kind": { + "enum": [ + "Ingress", + "Route" + ], + "type": "string" + } + } + } + ], + "properties": { + "apiVersion": { + "default": "networking.k8s.io/v1", + "enum": [ + "networking.k8s.io/v1", + "route.openshift.io/v1", + "networking.istio.io/v1beta1" + ], + "type": "string" + }, + "kind": { + "default": "Ingress", + "enum": [ + "Ingress", + "Route", + "Gateway" + ], + "type": "string" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations to place onto the objects which enable ingress.", + "type": "object" + } + }, + "type": "object" + }, + "selector": { + "description": "Selector to specify which gateway deployment utilises the configured ingress configuration.\n\nNote that this is only for Istio Gateway; see https://istio.io/latest/docs/reference/config/networking/gateway/#Gateway for further details\n", + "type": "object" + } + }, + "type": "object" + }, + "tlsSecretName": { + "default": "", + "description": "Kubernetes Secret name which contains TLS certificates.\n\nIf you are using cert-manager, this is the name of the Secret to create containing certificates.\nNote that cert-manager is externally managed and is not a part of a Clustered configuration.\n", + "type": "string" + } + }, + "type": "object" + }, + "monitoringStorage": { + "additionalProperties": false, + "description": "Storage configuration for the Prometheus instance shipped alongside Clustered for basic monitoring purposes.", + "properties": { + "storage": { + "description": "The amount of storage to provision for the attached volume, e.g. \"10Gi\".", + "type": "string" + }, + "storageClassName": { + "default": "", + "type": "string" + } + }, + "required": [ + "storage" + ], + "type": "object" + }, + "objectStore": { + "additionalProperties": false, + "description": "Configuration for the backing object store of IOx.", + "oneOf": [ + { + "required": [ + "bucket", + "region" + ] + }, + { + "required": [ + "s3", + "bucket" + ] + }, + { + "required": [ + "azure", + "bucket" + ] + }, + { + "required": [ + "google", + "bucket" + ] + } + ], + "properties": { + "accessKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows sourcing the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "allowHttp": { + "default": "false", + "type": "string" + }, + "azure": { + "additionalProperties": false, + "description": "Configuration for Azure Blob Storage.", + "properties": { + "accessKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "account": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." 
+ ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows sourcing the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or its key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or its key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "required": [ + "accessKey", + "account" + ], + "type": "object" + }, + "bucket": { + "type": "string" + }, + "endpoint": { + "default": "", + "type": "string" + }, + "google": { + "additionalProperties": false, + "description": "Configuration for Google Cloud Storage.", + "properties": { + "serviceAccountSecret": { + "additionalProperties": false, + "description": "Authentication via Google IAM Service Account credentials file using a Kubernetes Secret name and key.\nSee https://cloud.google.com/kubernetes-engine/docs/tutorials/authenticating-to-cloud-platform for further details.\n\nIf you wish to use GKE IAM annotations, refer to the hostingEnvironment section of the schema.\n", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "region": { + "default": "", + "description": "The region in which the bucket resides. This may not be required depending on your object store provider.", + "type": "string" + }, + "s3": { + "additionalProperties": false, + "description": "Configuration for AWS S3 (compatible) object stores.", + "properties": { + "accessKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." 
+ ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows sourcing the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or its key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or its key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "allowHttp": { + "description": "Allow the S3 client to accept insecure HTTP, as well as HTTPS connections to object store.", + "type": "string" + }, + "endpoint": { + "default": "", + "description": "URL of the S3 (compatible) object store endpoint.", + "type": "string" + }, + "region": { + "description": "AWS region for the bucket, such as us-east-1.", + "type": "string" + }, + "secretKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows sourcing the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "required": [ + "region" + ], + "type": "object" + }, + "secretKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "type": "object" + }, + "observability": { + "additionalProperties": false, + "default": { }, + "description": "Configuration for gaining operational insight into Clustered components", + "properties": { + "retention": { + "default": "12h", + "description": "The retention period for prometheus", + "type": "string" + }, + "serviceMonitor": { + "additionalProperties": false, + "description": "Configure a ServiceMonitor resource to easily expose InfluxDB metrics via the Prometheus Operator.\nSee the Prometheus Operator documentation for usage:\nhttps://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md\n", + "properties": { + "fallbackScrapeProtocol": { + "default": null, + "description": "Specifies which protocol to use when scraping endpoints that return a blank or invalid Content-Type header.\n\nRequired for Prometheus v3.0.0+ only, which enforces Content-Type validation (unlike v2).\n\nFor most standard Prometheus metrics endpoints, including InfluxDB, use \"PrometheusText0.0.4\".\n", + "type": "string" + }, + "interval": { + "default": "30s", + "description": "A duration string that controls the length of time between scrape attempts, ex: '15s', or '1m'", + "type": "string" + }, + "scrapeTimeout": { + "default": null, + "description": "A duration string that controls the scrape timeout duration, ex: '10s'", + "type": "string" + } + }, + "required": [ ], + "type": "object" + } + }, + "type": "object" + }, + "resources": { + "additionalProperties": false, + "properties": { + "catalog": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "4", + "type": "string" + }, + "memory": { + "default": "16Gi", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "compactor": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "8", + "type": "string" + }, + "memory": { + 
"default": "32Gi", + "type": "string" + }, + "replicas": { + "default": 1, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "garbage-collector": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + }, + "replicas": { + "const": 1, + "description": "Replica configuration for the Garbage Collector.\nNOTE: This component does not support horizontal scaling at this time.\nRefer to https://docs.influxdata.com/influxdb/clustered/reference/internals/storage-engine/#garbage-collector-scaling-strategies\nfor more details.\n", + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "granite": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": "500M", + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "0.5", + "type": "string" + }, + "memory": { + "default": "500M", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "ingester": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "6", + "type": "string" + }, + "memory": { + "default": "24Gi", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "prometheus": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + 
}, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "500m", + "type": "string" + }, + "memory": { + "default": "512Mi", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "querier": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "8", + "type": "string" + }, + "memory": { + "default": "32Gi", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "router": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "1", + "type": "string" + }, + "memory": { + "default": "2Gi", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "required": [ + "catalog", + "objectStore", + "ingesterStorage", + "monitoringStorage" + ], + "type": "object" + } + }, + "required": [ + "image", + "apiVersion" + ], + "type": "object" + }, + "pause": { + "default": false, + "type": "boolean" + } + }, + "type": "object" + }, + "status": { + "additionalProperties": true, + "type": "object" + } + }, + "type": "object" +} + diff --git a/static/downloads/clustered-release-artifacts/20250814-1819052/example-customer.yml b/static/downloads/clustered-release-artifacts/20250814-1819052/example-customer.yml new file mode 100644 index 000000000..2a0aeb736 --- /dev/null +++ b/static/downloads/clustered-release-artifacts/20250814-1819052/example-customer.yml @@ -0,0 +1,342 @@ +# yaml-language-server: $schema=app-instance-schema.json +apiVersion: kubecfg.dev/v1alpha1 +kind: AppInstance +metadata: + name: influxdb + namespace: influxdb +spec: + # One or more secrets that are used to pull the images from an authenticated registry. + # This will either be the secret provided to you, if using our registry, or a secret for your own registry + # if self-hosting the images. + imagePullSecrets: + - name: + package: + # The version of the clustered package that will be used. + # This determines the version of all of the individual components. 
+    # When a new version of the product is released, this version should be updated and any
+    # new config options should be updated below.
+    image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20250814-1819052
+    apiVersion: influxdata.com/v1alpha1
+    spec:
+      # # Provides a way to pass down hosting environment specific configuration, such as a role ARN when using EKS IRSA.
+      # # This section contains three mutually-exclusive "blocks". Uncomment the block named after the hosting environment
+      # # you run: "aws", "openshift" or "gke".
+      # hostingEnvironment:
+      #   # # Uncomment this block if you're running in EKS.
+      #   # aws:
+      #   #   eksRoleArn: 'arn:aws:iam::111111111111:role/your-influxdb-clustered-role'
+      #   #
+      #   # # Uncomment this block if you're running inside OpenShift.
+      #   # # Note: there are currently no OpenShift-specific parameters. You have to pass an empty object
+      #   # # as a marker that you're choosing OpenShift as hosting environment.
+      #   # openshift: {}
+      #   #
+      #   # # Uncomment this block if you're running in GKE:
+      #   # gke:
+      #   #   # Authenticate to Google Cloud services via workload identity, this
+      #   #   # annotates the 'iox' ServiceAccount with the role name you specify.
+      #   #   # NOTE: This setting just enables GKE specific authentication mechanism,
+      #   #   # You still need to enable `spec.objectStore.google` below if you want to use GCS.
+      #   #   workloadIdentity:
+      #   #     # Google Service Account name to use for the workload identity.
+      #   #     serviceAccountEmail: <service-account>@<project-name>.iam.gserviceaccount.com
+      catalog:
+        # A postgresql style DSN that points at a postgresql compatible database.
+        # eg: postgres://[user[:password]@][netloc][:port][/dbname][?param1=value1&...]
+        dsn:
+          valueFrom:
+            secretKeyRef:
+              name:
+              key:
+
+      # images:
+      #   # This can be used to override a specific image name with its FQIN
+      #   # (Fully Qualified Image Name) for testing. eg.
+      #   overrides:
+      #     - name: influxdb2-artifacts/iox/iox
+      #       newFQIN: mycompany/test-iox-build:aninformativetag
+      #
+      #   # Set this variable to the prefix of your internal registry. This will be prefixed to all expected images.
+      #   # eg. us-docker.pkg.dev/iox:latest => registry.mycompany.io/us-docker.pkg.dev/iox:latest
+      #   registryOverride:
+
+      objectStore:
+        # Bucket that the parquet files will be stored in
+        bucket:
+
+        # Uncomment one of the following (s3, azure)
+        # to enable the configuration of your object store
+        s3:
+          # URL for S3 Compatible object store
+          endpoint:
+
+          # Set to true to allow communication over HTTP (instead of HTTPS)
+          allowHttp: "false"
+
+          # S3 Access Key
+          # This can also be provided as a valueFrom: secretKeyRef:
+          accessKey:
+            value:
+
+          # S3 Secret Key
+          # This can also be provided as a valueFrom: secretKeyRef:
+          secretKey:
+            value:
+
+          # This value is required for AWS S3, it may or may not be required for other providers.
+          region:
+
+        # azure:
+          # Azure Blob Storage Access Key
+          # This can also be provided as a valueFrom: secretKeyRef:
+          # accessKey:
+          #   value:
+
+          # Azure Blob Storage Account
+          # This can also be provided as a valueFrom: secretKeyRef:
+          # account:
+          #   value:
+
+        # There are two main ways you can access Google Cloud Storage:
+        #
+        # a) GKE Workload Identity: configure workload identity in the top level `hostingEnvironment.gke` section.
+        # b) Explicit service account secret (JSON) file: use the `serviceAccountSecret` field here
+        #
+        # If you pick (a) you may not need to uncomment anything else in this section,
+        # but you still need to tell InfluxDB that you intend to use Google Cloud Storage,
+        # so you need to specify an empty object. Uncomment the following line:
+        #
+        # google: {}
+        #
+        #
+        # If you pick (b), uncomment the following block:
+        #
+        # google:
+        #   # If you're authenticating to Google Cloud service using a Service Account credentials file, as opposed
+        #   # to using workload identity (see above), you need to provide a reference to a k8s secret containing the credentials file.
+        #   serviceAccountSecret:
+        #     # Kubernetes Secret name containing the credentials for a Google IAM Service Account.
+        #     name:
+        #     # The key within the Secret containing the credentials.
+        #     key:
+
+      # Parameters to tune observability configuration, such as Prometheus ServiceMonitors.
+      observability: {}
+      # retention: 12h
+      # serviceMonitor:
+      #   interval: 10s
+      #   scrapeTimeout: 30s
+
+      # Ingester pods have a volume attached.
+      ingesterStorage:
+        # (Optional) Set the storage class. This will differ based on the K8s environment and desired storage characteristics.
+        # If not set, the default storage class will be used.
+        # storageClassName:
+        # Set the storage size (minimum 2Gi recommended)
+        storage:
+
+      # Monitoring pods have a volume attached.
+      monitoringStorage:
+        # (Optional) Set the storage class. This will differ based on the K8s environment and desired storage characteristics.
+        # If not set, the default storage class will be used.
+        # storageClassName:
+        # Set the storage size (minimum 10Gi recommended)
+        storage:
+
+      # Uncomment the following block if using our provided Ingress.
+      #
+      # We currently only support the NGINX ingress controller: https://github.com/kubernetes/ingress-nginx
+      #
+      # ingress:
+      #   hosts:
+      #     # This is the host on which you will access InfluxDB 3.0, for both reads and writes
+      #     -
+
+      #   # (Optional)
+      #   # The name of the Kubernetes Secret containing a TLS certificate, this should exist in the same namespace as the Clustered installation.
+      #   # If you are using cert-manager, enter a name for the Secret it should create.
+      #   tlsSecretName:
+
+      #   http:
+      #     # Usually you have only one ingress controller installed in a given cluster.
+      #     # In case you have more than one, you have to specify the "class name" of the ingress controller you want to use
+      #     className: nginx
+
+      #   grpc:
+      #     # Usually you have only one ingress controller installed in a given cluster.
+      #     # In case you have more than one, you have to specify the "class name" of the ingress controller you want to use
+      #     className: nginx
+      #
+      # Enables specifying which 'type' of Ingress to use, alongside whether to place additional annotations
+      # onto those objects, this is useful for third party software in your environment, such as cert-manager.
+      # template:
+      #   apiVersion: 'route.openshift.io/v1'
+      #   kind: 'Route'
+      #   metadata:
+      #     annotations:
+      #       'example-annotation': 'annotation-value'
+
+      # Enables specifying customizations for the various components in InfluxDB 3.0.
+      # components:
+      #   # router:
+      #   #   template:
+      #   #     containers:
+      #   #       iox:
+      #   #         env:
+      #   #           INFLUXDB_IOX_MAX_HTTP_REQUESTS: "5000"
+      #   #     nodeSelector:
+      #   #       disktype: ssd
+      #   #     tolerations:
+      #   #       - effect: NoSchedule
+      #   #         key: example
+      #   #         operator: Exists
+      #   # Common customizations for all components go in a pseudo-component called "common"
+      #   # common:
+      #   #   template:
+      #   #     # Metadata contains custom annotations (and labels) to be added to a component. E.g.:
+      #   #     metadata:
+      #   #       annotations:
+      #   #         telegraf.influxdata.com/class: "foo"
+
+      # Example of setting nodeAffinity for the querier component to ensure it runs on nodes with specific labels
+      # components:
+      #   # querier:
+      #   #   template:
+      #   #     affinity:
+      #   #       nodeAffinity:
+      #   #         requiredDuringSchedulingIgnoredDuringExecution:
+      #   #           # Node must have these labels to be considered for scheduling
+      #   #           nodeSelectorTerms:
+      #   #             - matchExpressions:
+      #   #                 - key: required
+      #   #                   operator: In
+      #   #                   values:
+      #   #                     - ssd
+      #   #         preferredDuringSchedulingIgnoredDuringExecution:
+      #   #           # Scheduler will prefer nodes with these labels but they're not required
+      #   #           - weight: 1
+      #   #             preference:
+      #   #               matchExpressions:
+      #   #                 - key: preferred
+      #   #                   operator: In
+      #   #                   values:
+      #   #                     - postgres
+
+      # Example of setting podAntiAffinity for the querier component to keep its pods from being scheduled onto the same node
+      # components:
+      #   # querier:
+      #   #   template:
+      #   #     affinity:
+      #   #       podAntiAffinity:
+      #   #         requiredDuringSchedulingIgnoredDuringExecution:
+      #   #           # Ensures that the pod will not be scheduled on a node if another pod matching the labelSelector is already running there
+      #   #           - labelSelector:
+      #   #               matchExpressions:
+      #   #                 - key: app
+      #   #                   operator: In
+      #   #                   values:
+      #   #                     - querier
+      #   #             topologyKey: "kubernetes.io/hostname"
+      #   #         preferredDuringSchedulingIgnoredDuringExecution:
+      #   #           # Scheduler will prefer not to schedule pods together but may do so if necessary
+      #   #           - weight: 1
+      #   #             podAffinityTerm:
+      #   #               labelSelector:
+      #   #                 matchExpressions:
+      #   #                   - key: app
+      #   #                     operator: In
+      #   #                     values:
+      #   #                       - querier
+      #   #               topologyKey: "kubernetes.io/hostname"
+
+      # Uncomment the following block to tune the various pods for their cpu/memory/replicas based on workload needs.
+      # Only uncomment the specific resources you want to change; anything left commented will use the package default.
+      # (You can read more about k8s resources and limits in https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits)
+      #
+      # resources:
+      #   # The ingester handles data being written
+      #   ingester:
+      #     requests:
+      #       cpu:
+      #       memory:
+      #       replicas: # The default for ingesters is 3 to increase availability
+      #
+      #     # optionally you can specify the resource limits which improves isolation.
+      #     # (see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits)
+      #     # limits:
+      #     #   cpu:
+      #     #   memory:
+
+      #   # The compactor reorganizes old data to improve query and storage efficiency.
+      #   compactor:
+      #     requests:
+      #       cpu:
+      #       memory:
+      #       replicas: # the default is 1
+
+      #   # The querier handles querying data.
+      #   querier:
+      #     requests:
+      #       cpu:
+      #       memory:
+      #       replicas: # the default is 3
+
+      #   # The router performs some API routing.
+      #   router:
+      #     requests:
+      #       cpu:
+      #       memory:
+      #       replicas: # the default is 3
+
+      admin:
+        # The list of users to grant access to Clustered via influxctl
+        users:
+          # First name of user
+          - firstName:
+            # Last name of user
+            lastName:
+            # Email of user
+            email:
+            # The ID that the configured Identity Provider uses for the user in oauth flows
+            id:
+            # Optional list of user groups to assign to the user, rather than the default groups. The following groups are currently supported: Admin, Auditor, Member
+            userGroups:
+              -
+
+        # The dsn for the postgres compatible database (note this is the same as defined above)
+        dsn:
+          valueFrom:
+            secretKeyRef:
+              name:
+              key:
+        # The identity provider to be used e.g. "keycloak", "auth0", "azure", etc
+        # Note for Azure Active Directory it must be exactly "azure"
+        identityProvider:
+        # The JWKS endpoint provided by the Identity Provider
+        jwksEndpoint:
+
+      # # This (optional) section controls how InfluxDB issues outbound requests to other services
+      # egress:
+      #   # If you're using a custom CA you will need to specify the full custom CA bundle here.
+      #   #
+      #   # NOTE: the custom CA is currently only honoured for outbound requests used to obtain
+      #   # the JWT public keys from your identity provider (see `jwksEndpoint`).
+      #   customCertificates:
+      #     valueFrom:
+      #       configMapKeyRef:
+      #         key: ca.pem
+      #         name: custom-ca
+
+      # We also include the ability to enable some features that are not yet ready for general availability
+      # or for which we don't yet have a proper place to turn on an optional feature in the configuration file.
+      # To turn these on, include the name of the feature flag in the `featureFlags` array.
+      #
+      # featureFlags:
+      #   # Uncomment to install a Grafana deployment.
+      #   # Depends on one of the prometheus features being deployed.
+      #   # - grafana
+
+      #   # The following 2 flags should be uncommented for k8s API 1.21 support.
+      #   # Note that this is an experimental configuration.
+      #   # - noMinReadySeconds
+      #   # - noGrpcProbes

From 52ea0bf2cc27877173fcb6a3b6ec56975b677ef2 Mon Sep 17 00:00:00 2001
From: Jason Stirnaman
Date: Wed, 20 Aug 2025 07:52:27 -0500
Subject: [PATCH 106/122] Update _index.md

Previous Kapacitor version in menu

---
 content/kapacitor/v1/_index.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/content/kapacitor/v1/_index.md b/content/kapacitor/v1/_index.md
index bd71793fd..5f9ecda8b 100644
--- a/content/kapacitor/v1/_index.md
+++ b/content/kapacitor/v1/_index.md
@@ -5,7 +5,7 @@ description: >
   create alerts, run ETL jobs and detect anomalies.
 menu:
   kapacitor_v1:
-    name: Kapacitor v1.7
+    name: Kapacitor v1.8
     weight: 1
 ---

From 73bb35d4de129338efb38bacc140588304ec35d1 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 20 Aug 2025 14:42:57 +0000
Subject: [PATCH 107/122] chore(deps): bump mermaid from 11.9.0 to 11.10.0

Bumps [mermaid](https://github.com/mermaid-js/mermaid) from 11.9.0 to 11.10.0.
- [Release notes](https://github.com/mermaid-js/mermaid/releases)
- [Commits](https://github.com/mermaid-js/mermaid/compare/mermaid@11.9.0...mermaid@11.10.0)

---
updated-dependencies:
- dependency-name: mermaid
  dependency-version: 11.10.0
  dependency-type: direct:production
...
Signed-off-by: dependabot[bot]
---
 package.json | 2 +-
 yarn.lock | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/package.json b/package.json
index e57beed19..10896b237 100644
--- a/package.json
+++ b/package.json
@@ -36,7 +36,7 @@
     "js-yaml": "^4.1.0",
     "lefthook": "^1.10.10",
     "markdown-link": "^0.1.1",
-    "mermaid": "^11.4.1",
+    "mermaid": "^11.10.0",
     "vanillajs-datepicker": "^1.3.4"
   },
   "scripts": {
diff --git a/yarn.lock b/yarn.lock
index 00856c0cd..a17503f82 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -3667,10 +3667,10 @@ merge2@^1.3.0:
   resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae"
   integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==

-mermaid@^11.4.1:
-  version "11.9.0"
-  resolved "https://registry.yarnpkg.com/mermaid/-/mermaid-11.9.0.tgz#fdc055d0f2a7f2afc13a78cb3e3c9b1374614e2e"
-  integrity sha512-YdPXn9slEwO0omQfQIsW6vS84weVQftIyyTGAZCwM//MGhPzL1+l6vO6bkf0wnP4tHigH1alZ5Ooy3HXI2gOag==
+mermaid@^11.10.0:
+  version "11.10.0"
+  resolved "https://registry.yarnpkg.com/mermaid/-/mermaid-11.10.0.tgz#4949f98d08cfdc4cda429372ed2f843a64c99946"
+  integrity sha512-oQsFzPBy9xlpnGxUqLbVY8pvknLlsNIJ0NWwi8SUJjhbP1IT0E0o1lfhU4iYV3ubpy+xkzkaOyDUQMn06vQElQ==
   dependencies:
     "@braintree/sanitize-url" "^7.0.4"
     "@iconify/utils" "^2.1.33"

From 8151caa4f3d1e8ac8743765cc6c0355767bce02c Mon Sep 17 00:00:00 2001
From: David Rusnak
Date: Wed, 20 Aug 2025 12:30:40 -0400
Subject: [PATCH 108/122] docs: add date to release notes

---
 .../influxdb3/clustered/reference/release-notes/clustered.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/content/influxdb3/clustered/reference/release-notes/clustered.md b/content/influxdb3/clustered/reference/release-notes/clustered.md
index 81167f85f..0973ace20 100644
--- a/content/influxdb3/clustered/reference/release-notes/clustered.md
+++ b/content/influxdb3/clustered/reference/release-notes/clustered.md
@@ -61,7 +61,7 @@ directory. This new directory contains artifacts associated with the specified r
 
 ---
 
-## 20250814-1819052
+## 20250814-1819052 {date="2025-08-14"}
 
 ### Quickstart

From 29ff1ba739f73a1d4b9975e413c33ff39adeaec2 Mon Sep 17 00:00:00 2001
From: Jason Stirnaman
Date: Wed, 20 Aug 2025 16:16:48 -0500
Subject: [PATCH 109/122] chore(tools): remove minor version from product name
 in sidebarDisplay

"Kapacitor v1" instead of "Kapacitor v1.8".

---
 content/chronograf/v1/_index.md | 2 +-
 content/kapacitor/v1/_index.md | 2 +-
 content/telegraf/v1/_index.md | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/content/chronograf/v1/_index.md b/content/chronograf/v1/_index.md
index ca0c40807..244caf8a3 100644
--- a/content/chronograf/v1/_index.md
+++ b/content/chronograf/v1/_index.md
@@ -6,7 +6,7 @@ description: >
   monitoring data and easily create alerting and automation rules.
 menu:
   chronograf_v1:
-    name: Chronograf v1.10
+    name: Chronograf v1
     weight: 1
 ---
 
diff --git a/content/kapacitor/v1/_index.md b/content/kapacitor/v1/_index.md
index 5f9ecda8b..b4eef1de8 100644
--- a/content/kapacitor/v1/_index.md
+++ b/content/kapacitor/v1/_index.md
@@ -5,7 +5,7 @@ description: >
   create alerts, run ETL jobs and detect anomalies.
 menu:
   kapacitor_v1:
-    name: Kapacitor v1.8
+    name: Kapacitor v1
     weight: 1
 ---
 
diff --git a/content/telegraf/v1/_index.md b/content/telegraf/v1/_index.md
index 2ef2e0fb2..e089b9dfc 100644
--- a/content/telegraf/v1/_index.md
+++ b/content/telegraf/v1/_index.md
@@ -5,7 +5,7 @@ description: >
   time series platform, used to collect and report metrics. Telegraf supports four categories of plugins -- input, output, aggregator, and processor.
 menu:
   telegraf_v1:
-    name: Telegraf v1.35
+    name: Telegraf v1
     weight: 1
 related:
   - /resources/videos/intro-to-telegraf/

From 2a56bec0c9fa5c60bc62a1f1285a80cf8a300f30 Mon Sep 17 00:00:00 2001
From: Phil Bracikowski <13472206+philjb@users.noreply.github.com>
Date: Wed, 20 Aug 2025 14:53:15 -0700
Subject: [PATCH 110/122] Adjust wording for the two types of index: inmem and
 on-disk tsi1

---
 content/influxdb/v1/administration/upgrading.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/content/influxdb/v1/administration/upgrading.md b/content/influxdb/v1/administration/upgrading.md
index f67dd08e4..3fffdadaf 100644
--- a/content/influxdb/v1/administration/upgrading.md
+++ b/content/influxdb/v1/administration/upgrading.md
@@ -9,7 +9,7 @@ menu:
 
 ---
 
-We recommend enabling Time Series Index (TSI) (step 3 of Upgrade to InfluxDB 1.11.x). [Switch between TSM and TSI](#switch-index-types) as needed. To learn more about TSI, see:
+We recommend enabling Time Series Index (TSI) (step 3 of Upgrade to InfluxDB 1.11.x). [Switch between TSI and inmem index types](#switch-index-types) as needed. To learn more about TSI, see:
 
 - [Time Series Index (TSI) overview](/influxdb/v1/concepts/time-series-index/)
 - [Time Series Index (TSI) details](/influxdb/v1/concepts/tsi-details/)

From 127b15b6e2a7f126fde0ce5184b946663e394cb4 Mon Sep 17 00:00:00 2001
From: Jason Stirnaman
Date: Wed, 20 Aug 2025 17:25:15 -0500
Subject: [PATCH 111/122] fix(v1): improve TSI recommendation clarity and fix
 grammar typos

- Make TSI recommendation more actionable by explaining benefits (removes
  RAM limits, better performance for high-cardinality data)
- Fix "from to" grammar typos in index switching instructions
- Add specific scenarios for when to switch between TSI and inmem index types
- Remove "above" directional language per style guidelines

Source: Verified against InfluxDB v1 documentation via MCP docs verification
- TSI details: https://docs.influxdata.com/influxdb/v1/concepts/tsi-details/
- TSI overview: https://docs.influxdata.com/influxdb/v1/concepts/time-series-index/

---
 content/influxdb/v1/administration/upgrading.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/content/influxdb/v1/administration/upgrading.md b/content/influxdb/v1/administration/upgrading.md
index 3fffdadaf..4b9fa82b0 100644
--- a/content/influxdb/v1/administration/upgrading.md
+++ b/content/influxdb/v1/administration/upgrading.md
@@ -9,7 +9,7 @@ menu:
 
 ---
 
-We recommend enabling Time Series Index (TSI) (step 3 of Upgrade to InfluxDB 1.11.x). [Switch between TSI and inmem index types](#switch-index-types) as needed. To learn more about TSI, see:
+We recommend enabling Time Series Index (TSI) (step 3 of Upgrade to InfluxDB 1.11.x) because it removes RAM-based limits on series cardinality and provides better performance for high-cardinality datasets compared to the default in-memory index. [Switch between TSI and inmem index types](#switch-index-types) as needed. 
To learn more about TSI, see: - [Time Series Index (TSI) overview](/influxdb/v1/concepts/time-series-index/) - [Time Series Index (TSI) details](/influxdb/v1/concepts/tsi-details/) @@ -53,8 +53,8 @@ Run the `buildtsi` command using the user account that you are going to run the Switch index types at any time by doing one of the following: -- To switch from to `inmem` to `tsi1`, complete steps 3 and 4 above in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x). -- To switch from to `tsi1` to `inmem`, change `tsi1` to `inmem` by completing steps 3a-3c and 4 above in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x). +- To switch from `inmem` to `tsi1` (for example, when experiencing high memory usage or out-of-memory errors with high-cardinality data), complete steps 3 and 4 in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x). +- To switch from `tsi1` to `inmem` (for example, for small datasets where memory is not a constraint), change `tsi1` to `inmem` by completing steps 3a-3c and 4 in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x). ## Downgrade InfluxDB From e42df8a4adb24665e24d073362fb29a2647de66e Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Wed, 20 Aug 2025 17:48:58 -0500 Subject: [PATCH 112/122] docs(v1): restructure upgrade guide for better UX and progressive disclosure - Restructure content flow to follow progressive disclosure principles - Move index type decision to Important callout after basic upgrade steps - Improve headings with active voice ("Switch index types anytime") - Enhance callout formatting (Important, Tip, Warning callouts) - Consolidate Enterprise upgrade information into dedicated section - Improve information hierarchy and scanability Changes are primarily formatting and phrasing improvements to enhance developer experience and follow Google Developer Documentation best practices. --- .../influxdb/v1/administration/upgrading.md | 67 ++++++++++++------- 1 file changed, 43 insertions(+), 24 deletions(-) diff --git a/content/influxdb/v1/administration/upgrading.md b/content/influxdb/v1/administration/upgrading.md index 4b9fa82b0..33de0ea92 100644 --- a/content/influxdb/v1/administration/upgrading.md +++ b/content/influxdb/v1/administration/upgrading.md @@ -6,22 +6,12 @@ menu: name: Upgrade InfluxDB weight: 25 parent: Administration +related: + - /enterprise_influxdb/v1/guides/migration/ + - /enterprise_influxdb/v1/administration/upgrading/ --- - -We recommend enabling Time Series Index (TSI) (step 3 of Upgrade to InfluxDB 1.11.x) because it removes RAM-based limits on series cardinality and provides better performance for high-cardinality datasets compared to the default in-memory index. [Switch between TSI and inmem index types](#switch-index-types) as needed. To learn more about TSI, see: - -- [Time Series Index (TSI) overview](/influxdb/v1/concepts/time-series-index/) -- [Time Series Index (TSI) details](/influxdb/v1/concepts/tsi-details/) - -> **_Note:_** The default configuration continues to use TSM-based shards with in-memory indexes (as in earlier versions). - -{{% note %}} -### Upgrade to InfluxDB Enterprise - -To upgrade from InfluxDB OSS to InfluxDB Enterprise, [contact InfluxData Sales](https://www.influxdata.com/contact-sales/) -and see [Migrate to InfluxDB Enterprise](/enterprise_influxdb/v1/guides/migration/). -{{% /note %}} +Upgrade to the latest version of InfluxDB OSS v1. ## Upgrade to InfluxDB 1.11.x @@ -29,7 +19,26 @@ and see [Migrate to InfluxDB Enterprise](/enterprise_influxdb/v1/guides/migratio 2. 
Migrate configuration file customizations from your existing configuration file to the InfluxDB 1.11.x [configuration file](/influxdb/v1/administration/config/). Add or modify your environment variables as needed. -3. To enable TSI in InfluxDB 1.11.x, complete the following steps: +> [!Important] +> #### Choose your index type +> InfluxDB 1.11.x supports two index types: +> +> - **Time Series Index (TSI)** - Recommended for most users. Removes RAM-based limits on series cardinality and provides better performance for high-cardinality datasets. +> - **In-memory index (inmem)** - Default option that maintains compatibility with earlier versions but has RAM limitations. +> +> **When to use TSI:** +> - High-cardinality datasets (many unique tag combinations) +> - Experiencing high memory usage or out-of-memory errors +> - Large production deployments +> +> **When to use inmem:** +> - Small datasets where memory is not a constraint +> - Development or testing environments +> - Maintaining compatibility with existing tooling +> +> To learn more about TSI, see [Time Series Index overview](/influxdb/v1/concepts/time-series-index/) and [TSI details](/influxdb/v1/concepts/tsi-details/). + +3. **Optional:** To enable TSI in InfluxDB 1.11.x, complete the following steps: 1. If using the InfluxDB configuration file, find the `[data]` section, uncomment `index-version = "inmem"` and change the value to `tsi1`. @@ -43,26 +52,36 @@ and see [Migrate to InfluxDB Enterprise](/enterprise_influxdb/v1/guides/migratio ``` 4. Build TSI by running the [influx_inspect buildtsi](/influxdb/v1/tools/influx_inspect/#buildtsi) command. - {{% note %}} -Run the `buildtsi` command using the user account that you are going to run the database as, or ensure that the permissions match afterward. - {{% /note %}} + > [!Important] + > Run the `buildtsi` command using the user account that you are going to run the database as, or ensure that the permissions match afterward. 4. Restart the `influxdb` service. +> [!Tip] +> #### Switch index types anytime +> +> The default configuration continues to use TSM-based shards with in-memory indexes (as in earlier versions). You can [switch between TSI and inmem index types](#switch-index-types) at any time. + ## Switch index types -Switch index types at any time by doing one of the following: +You can switch between index types at any time after upgrading: -- To switch from `inmem` to `tsi1` (for example, when experiencing high memory usage or out-of-memory errors with high-cardinality data), complete steps 3 and 4 in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x). -- To switch from `tsi1` to `inmem` (for example, for small datasets where memory is not a constraint), change `tsi1` to `inmem` by completing steps 3a-3c and 4 in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x). +**Switch from inmem to TSI:** +- Complete steps 3 and 4 in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x) +- Recommended when experiencing high memory usage or out-of-memory errors with high-cardinality data + +**Switch from TSI to inmem:** +- Change `tsi1` to `inmem` by completing steps 3a-3c and 4 in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x) +- Suitable for small datasets where memory is not a constraint ## Downgrade InfluxDB To downgrade to an earlier version, complete the procedures above in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x), replacing the version numbers with the version that you want to downgrade to. 
After downloading the release, migrating your configuration settings, and enabling TSI or TSM, make sure to [rebuild your index](/influxdb/v1/administration/rebuild-tsi-index/). ->**Note:** Some versions of InfluxDB may have breaking changes that impact your ability to upgrade and downgrade. For example, you cannot downgrade from InfluxDB 1.3 or later to an earlier version. Please review the applicable version of release notes to check for compatibility issues between releases. +> [!Warning] +> Some versions of InfluxDB may have breaking changes that impact your ability to upgrade and downgrade. For example, you cannot downgrade from InfluxDB 1.3 or later to an earlier version. Please review the applicable version of release notes to check for compatibility issues between releases. -## Upgrade InfluxDB Enterprise clusters +## Upgrade to InfluxDB Enterprise -See [Upgrading InfluxDB Enterprise clusters](/enterprise_influxdb/v1/administration/upgrading/). +To upgrade from InfluxDB OSS to InfluxDB Enterprise, [contact InfluxData Sales](https://www.influxdata.com/contact-sales/). From a816f51c299a70c34e5cfb9a670456b395e250c3 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Thu, 21 Aug 2025 08:48:53 -0500 Subject: [PATCH 113/122] Apply suggestions from code review Co-authored-by: Scott Anderson --- content/chronograf/v1/_index.md | 2 +- content/kapacitor/v1/_index.md | 2 +- content/telegraf/v1/_index.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/content/chronograf/v1/_index.md b/content/chronograf/v1/_index.md index 244caf8a3..b447ab871 100644 --- a/content/chronograf/v1/_index.md +++ b/content/chronograf/v1/_index.md @@ -6,7 +6,7 @@ description: > monitoring data and easily create alerting and automation rules. menu: chronograf_v1: - name: Chronograf v1 + name: Chronograf weight: 1 --- diff --git a/content/kapacitor/v1/_index.md b/content/kapacitor/v1/_index.md index b4eef1de8..23bd229de 100644 --- a/content/kapacitor/v1/_index.md +++ b/content/kapacitor/v1/_index.md @@ -5,7 +5,7 @@ description: > create alerts, run ETL jobs and detect anomalies. menu: kapacitor_v1: - name: Kapacitor v1 + name: Kapacitor weight: 1 --- diff --git a/content/telegraf/v1/_index.md b/content/telegraf/v1/_index.md index e089b9dfc..0e9898b0b 100644 --- a/content/telegraf/v1/_index.md +++ b/content/telegraf/v1/_index.md @@ -5,7 +5,7 @@ description: > time series platform, used to collect and report metrics. Telegraf supports four categories of plugins -- input, output, aggregator, and processor. 
menu: telegraf_v1: - name: Telegraf v1 + name: Telegraf weight: 1 related: - /resources/videos/intro-to-telegraf/ From 0742ced3c96262a610764dd77e0f09f9002e5a66 Mon Sep 17 00:00:00 2001 From: Peter Barnett Date: Thu, 21 Aug 2025 11:34:55 -0400 Subject: [PATCH 114/122] chore: add more clarity to Explorer quick start --- content/influxdb3/explorer/_index.md | 3 +++ content/influxdb3/explorer/get-started.md | 1 + 2 files changed, 4 insertions(+) diff --git a/content/influxdb3/explorer/_index.md b/content/influxdb3/explorer/_index.md index a0827d43e..fd10bd1d8 100644 --- a/content/influxdb3/explorer/_index.md +++ b/content/influxdb3/explorer/_index.md @@ -42,7 +42,10 @@ docker run --detach \ --publish 8889:8888 \ influxdata/influxdb3-ui:{{% latest-patch %}} \ --mode=admin + +# Visit http://localhost:8888 to begin using Explorer ``` + Install and run InfluxDB 3 Explorer Get started with InfluxDB 3 Explorer diff --git a/content/influxdb3/explorer/get-started.md b/content/influxdb3/explorer/get-started.md index 863ecbfec..a91a4a122 100644 --- a/content/influxdb3/explorer/get-started.md +++ b/content/influxdb3/explorer/get-started.md @@ -35,6 +35,7 @@ InfluxDB 3 Explorer supports the following InfluxDB 3 products: - **Server URL**: The URL used to connect to your InfluxDB 3 server. - Select the protocol to use (http or https). - Provide the host and, if necessary, the port. + - _If connecting to a local, non-Docker instance, use host.docker.internal_ - **Token**: The authorization token to use to connect to your InfluxDB 3 server. We recommend using an InfluxDB 3 _admin_ token. From f2ebfde75b3a51e8cac6ebcb1f1765435e74ed2b Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Thu, 21 Aug 2025 09:58:49 -0600 Subject: [PATCH 115/122] hotfix: remove top messaging from explorer docs --- layouts/partials/article/special-state.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/layouts/partials/article/special-state.html b/layouts/partials/article/special-state.html index 6a89cd7d4..3661c392f 100644 --- a/layouts/partials/article/special-state.html +++ b/layouts/partials/article/special-state.html @@ -5,7 +5,7 @@ {{ $productKey := cond (eq $product "influxdb3") (print "influxdb3_" (replaceRE "-" "_" $version)) $product }} {{ $productData := index $.Site.Data.products $productKey }} {{ $displayName := $productData.name }} -{{ $earlyAccessList := slice "influxdb3/explorer" }} +{{ $earlyAccessList := slice "" }} {{ if in $earlyAccessList (print $product "/" $version )}}
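
The `host.docker.internal` guidance introduced in the Explorer quick-start changes above is easy to misread, so a concrete sketch helps: when Explorer runs in a container, `localhost` inside that container refers to the container itself, not to the machine where a locally installed InfluxDB 3 server is listening. The container name, image tag, and InfluxDB port below are illustrative assumptions, not values taken from these patches.

```sh
# Hypothetical layout: InfluxDB 3 runs directly on the host (here on :8181),
# while Explorer runs in Docker with its UI published on host port 8889.
docker run --detach \
  --name influxdb3-explorer \
  --publish 8889:8888 \
  influxdata/influxdb3-ui:1.0.0 \
  --mode=admin

# In the Explorer UI (reached from the host browser), set:
#   Server URL: http://host.docker.internal:8181
# On Linux, the alias is not defined by default; map it explicitly:
#   docker run --add-host=host.docker.internal:host-gateway ...
```

The distinction matters because the browser reaches Explorer through the published host port, while Explorer reaches InfluxDB from inside the container's own network namespace.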
From 0281e51c2c921e8f940caad43479b9334ce941a4 Mon Sep 17 00:00:00 2001 From: peterbarnett03 Date: Thu, 21 Aug 2025 13:18:39 -0400 Subject: [PATCH 116/122] Update content/influxdb3/explorer/_index.md Co-authored-by: Jason Stirnaman --- content/influxdb3/explorer/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb3/explorer/_index.md b/content/influxdb3/explorer/_index.md index fd10bd1d8..f08e8f18a 100644 --- a/content/influxdb3/explorer/_index.md +++ b/content/influxdb3/explorer/_index.md @@ -48,4 +48,4 @@ docker run --detach \ Install and run InfluxDB 3 Explorer -Get started with InfluxDB 3 Explorer +Get started using InfluxDB 3 Explorer From 1de1a6158934a0f6416c917df82dacef3f35913e Mon Sep 17 00:00:00 2001 From: peterbarnett03 Date: Thu, 21 Aug 2025 13:18:45 -0400 Subject: [PATCH 117/122] Update content/influxdb3/explorer/get-started.md Co-authored-by: Jason Stirnaman --- content/influxdb3/explorer/get-started.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb3/explorer/get-started.md b/content/influxdb3/explorer/get-started.md index a91a4a122..6cb6baf57 100644 --- a/content/influxdb3/explorer/get-started.md +++ b/content/influxdb3/explorer/get-started.md @@ -35,7 +35,7 @@ InfluxDB 3 Explorer supports the following InfluxDB 3 products: - **Server URL**: The URL used to connect to your InfluxDB 3 server. - Select the protocol to use (http or https). - Provide the host and, if necessary, the port. - - _If connecting to a local, non-Docker instance, use host.docker.internal_ + - _If connecting to a local, non-Docker instance, use `host.docker.internal`._ For more information about host.docker.internal, see the [Docker documentation](https://docs.docker.com/desktop/features/networking). - **Token**: The authorization token to use to connect to your InfluxDB 3 server. We recommend using an InfluxDB 3 _admin_ token. From e2823b768841fe9ab253bddee2506901dfe70538 Mon Sep 17 00:00:00 2001 From: peterbarnett03 Date: Thu, 21 Aug 2025 13:18:57 -0400 Subject: [PATCH 118/122] Update content/influxdb3/explorer/_index.md Co-authored-by: Jason Stirnaman --- content/influxdb3/explorer/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb3/explorer/_index.md b/content/influxdb3/explorer/_index.md index f08e8f18a..dd5965be8 100644 --- a/content/influxdb3/explorer/_index.md +++ b/content/influxdb3/explorer/_index.md @@ -47,5 +47,5 @@ docker run --detach \ ``` -Install and run InfluxDB 3 Explorer +For installation and configuration options, see [Install and run InfluxDB 3 Explorer](/influxdb3/explorer/install/). 
Get started using InfluxDB 3 Explorer From ca8777614724b03302d9d0c42185877ec1aea332 Mon Sep 17 00:00:00 2001 From: peterbarnett03 Date: Thu, 21 Aug 2025 13:19:03 -0400 Subject: [PATCH 119/122] Update content/influxdb3/explorer/_index.md Co-authored-by: Jason Stirnaman --- content/influxdb3/explorer/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb3/explorer/_index.md b/content/influxdb3/explorer/_index.md index dd5965be8..1fae003e1 100644 --- a/content/influxdb3/explorer/_index.md +++ b/content/influxdb3/explorer/_index.md @@ -43,7 +43,7 @@ docker run --detach \ influxdata/influxdb3-ui:{{% latest-patch %}} \ --mode=admin -# Visit http://localhost:8888 to begin using Explorer +# Visit http://localhost:8888 in your browser to begin using InfluxDB 3 Explorer ``` From 3aa4c0eae1c4ff0b7038bb58e1d1c2d6604babb9 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Thu, 21 Aug 2025 14:50:44 -0500 Subject: [PATCH 120/122] Update content/influxdb/v1/administration/upgrading.md --- content/influxdb/v1/administration/upgrading.md | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/content/influxdb/v1/administration/upgrading.md b/content/influxdb/v1/administration/upgrading.md index 33de0ea92..cff4beca5 100644 --- a/content/influxdb/v1/administration/upgrading.md +++ b/content/influxdb/v1/administration/upgrading.md @@ -27,14 +27,15 @@ Upgrade to the latest version of InfluxDB OSS v1. > - **In-memory index (inmem)** - Default option that maintains compatibility with earlier versions but has RAM limitations. > > **When to use TSI:** -> - High-cardinality datasets (many unique tag combinations) -> - Experiencing high memory usage or out-of-memory errors -> - Large production deployments -> +> - General purpose production instances. +> - Especially recommended for: +> - High-cardinality datasets (many unique tag combinations) +> - Experiencing high memory usage or out-of-memory errors +> - Large production deployments +> > **When to use inmem:** -> - Small datasets where memory is not a constraint -> - Development or testing environments -> - Maintaining compatibility with existing tooling +> - Small datasets when memory is not a constraint +> - Ephemeral deployments such as development or testing environments > > To learn more about TSI, see [Time Series Index overview](/influxdb/v1/concepts/time-series-index/) and [TSI details](/influxdb/v1/concepts/tsi-details/). From e7e59322acf12111ace325589c845c35d89a44f2 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Thu, 21 Aug 2025 14:57:22 -0500 Subject: [PATCH 121/122] Apply suggestions from code review Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- content/influxdb/v1/administration/upgrading.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb/v1/administration/upgrading.md b/content/influxdb/v1/administration/upgrading.md index cff4beca5..de12b4046 100644 --- a/content/influxdb/v1/administration/upgrading.md +++ b/content/influxdb/v1/administration/upgrading.md @@ -24,7 +24,7 @@ Upgrade to the latest version of InfluxDB OSS v1. > InfluxDB 1.11.x supports two index types: > > - **Time Series Index (TSI)** - Recommended for most users. Removes RAM-based limits on series cardinality and provides better performance for high-cardinality datasets. -> - **In-memory index (inmem)** - Default option that maintains compatibility with earlier versions but has RAM limitations. 
+> - **In-memory index (inmem)** - Default option that maintains compatibility with earlier versions but is limited by available system RAM (series cardinality is limited by available RAM). > > **When to use TSI:** > - General purpose production instances. From d49d69ba2612485984df21984f4c9ed6d1367a41 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Fri, 22 Aug 2025 11:08:48 -0600 Subject: [PATCH 122/122] fix: add note to distributed about duplicate points on flush (#6330) --- .../best-practices/optimize-writes.md | 16 +- .../best-practices/optimize-writes.md | 16 +- .../reference/syntax/line-protocol.md | 261 +----------------- .../best-practices/optimize-writes.md | 19 +- content/shared/v3-line-protocol.md | 18 +- 5 files changed, 66 insertions(+), 264 deletions(-) diff --git a/content/influxdb3/cloud-dedicated/write-data/best-practices/optimize-writes.md b/content/influxdb3/cloud-dedicated/write-data/best-practices/optimize-writes.md index 71917fe4c..8f0de2ca7 100644 --- a/content/influxdb3/cloud-dedicated/write-data/best-practices/optimize-writes.md +++ b/content/influxdb3/cloud-dedicated/write-data/best-practices/optimize-writes.md @@ -416,9 +416,23 @@ The following example creates sample data for two series (the combination of mea ### Avoid sending duplicate data -Use Telegraf and the [Dedup processor plugin](/telegraf/v1/plugins/#processor-dedup) to filter data whose field values are exact repetitions of previous values. +When writing duplicate points (points with the same timestamp and tag set), +InfluxDB deduplicates the data by creating a union of the duplicate points. Deduplicating your data can reduce your write payload size and resource usage. +> [!Important] +> #### Write ordering for duplicate points +> +> InfluxDB attempts to honor write ordering for duplicate points, with the most +> recently written point taking precedence. However, when data is flushed from +> the in-memory buffer to Parquet files—typically every 15 minutes, but +> sometimes sooner—this ordering is not guaranteed if duplicate points are flushed +> at the same time. As a result, the last written duplicate point may not always +> be retained in storage. + +Use Telegraf and the [Dedup processor plugin](/telegraf/v1/plugins/#processor-dedup) +to filter data whose field values are exact repetitions of previous values. + The following example shows how to use Telegraf to remove points that repeat field values, and then write the data to InfluxDB: 1. In your terminal, enter the following command to create the sample data file and calculate the number of seconds between the earliest timestamp and _now_. diff --git a/content/influxdb3/cloud-serverless/write-data/best-practices/optimize-writes.md b/content/influxdb3/cloud-serverless/write-data/best-practices/optimize-writes.md index bb4c9fe92..ed01029d2 100644 --- a/content/influxdb3/cloud-serverless/write-data/best-practices/optimize-writes.md +++ b/content/influxdb3/cloud-serverless/write-data/best-practices/optimize-writes.md @@ -430,9 +430,23 @@ The following example creates sample data for two series (the combination of mea ### Avoid sending duplicate data -Use Telegraf and the [Dedup processor plugin](/telegraf/v1/plugins/#processor-dedup) to filter data whose field values are exact repetitions of previous values. +When writing duplicate points (points with the same timestamp and tag set), +InfluxDB deduplicates the data by creating a union of the duplicate points. Deduplicating your data can reduce your write payload size and resource usage. 
+> [!Important] +> #### Write ordering for duplicate points +> +> InfluxDB attempts to honor write ordering for duplicate points, with the most +> recently written point taking precedence. However, when data is flushed from +> the in-memory buffer to Parquet files—typically every 15 minutes, but +> sometimes sooner—this ordering is not guaranteed if duplicate points are flushed +> at the same time. As a result, the last written duplicate point may not always +> be retained in storage. + +Use Telegraf and the [Dedup processor plugin](/telegraf/v1/plugins/#processor-dedup) +to filter data whose field values are exact repetitions of previous values. + The following example shows how to use Telegraf to remove points that repeat field values, and then write the data to InfluxDB: 1. In your terminal, enter the following command to create the sample data file and calculate the number of seconds between the earliest timestamp and _now_. diff --git a/content/influxdb3/clustered/reference/syntax/line-protocol.md b/content/influxdb3/clustered/reference/syntax/line-protocol.md index 7947d9e93..87ff87707 100644 --- a/content/influxdb3/clustered/reference/syntax/line-protocol.md +++ b/content/influxdb3/clustered/reference/syntax/line-protocol.md @@ -2,7 +2,7 @@ title: Line protocol reference description: > InfluxDB uses line protocol to write data points. - It is a text-based format that provides the measurement, tag set, field set, and timestamp of a data point. + It is a text-based format that provides the table, tag set, field set, and timestamp of a data point. menu: influxdb3_clustered: name: Line protocol @@ -11,261 +11,8 @@ weight: 102 influxdb3/clustered/tags: [write, line protocol, syntax] related: - /influxdb3/clustered/write-data/ +source: /shared/v3-line-protocol.md --- -InfluxDB uses line protocol to write data points. -It is a text-based format that provides the measurement, tag set, field set, and timestamp of a data point. - -- [Elements of line protocol](#elements-of-line-protocol) -- [Data types and format](#data-types-and-format) -- [Quotes](#quotes) -- [Special characters](#special-characters) -- [Comments](#comments) -- [Naming restrictions](#naming-restrictions) -- [Duplicate points](#duplicate-points) - -```js -// Syntax -[,=[,=]] =[,=] [] - -// Example -myMeasurement,tag1=value1,tag2=value2 fieldKey="fieldValue" 1556813561098000000 -``` - -Lines separated by the newline character `\n` represent a single point -in InfluxDB. Line protocol is whitespace sensitive. - -> [!Note] -> Line protocol does not support the newline character `\n` in tag or field values. - -## Elements of line protocol - -{{< influxdb/line-protocol commas=false whitespace=false >}} - -### Measurement -({{< req >}}) -The measurement name. -InfluxDB accepts one measurement per point. -_Measurement names are case-sensitive and subject to [naming restrictions](#naming-restrictions)._ - -_**Data type:** [String](#string)_ - - -### Tag set -_**Optional**_ – -All tag key-value pairs for the point. -Key-value relationships are denoted with the `=` operand. -Multiple tag key-value pairs are comma-delimited. -_Tag keys and tag values are case-sensitive. -Tag keys are subject to [naming restrictions](#naming-restrictions). -Tag values cannot be empty; instead, omit the tag from the tag set._ - -_**Key data type:** [String](#string)_ -_**Value data type:** [String](#string)_ - -### Field set -({{< req >}}) -All field key-value pairs for the point. -Points must have at least one field. 
-_Field keys and string values are case-sensitive. -Field keys are subject to [naming restrictions](#naming-restrictions)._ - -_**Key data type:** [String](#string)_ -_**Value data type:** [Float](#float) | [Integer](#integer) | [UInteger](#uinteger) | [String](#string) | [Boolean](#boolean)_ - -> [!Note] -> _Always double quote string field values. More on quotes [below](#quotes)._ -> -> ```sh -> measurementName fieldKey="field string value" 1556813561098000000 -> ``` - -### Timestamp -_**Optional**_ – -The [unix timestamp](/influxdb/v2/reference/glossary/#unix-timestamp) for the data point. -InfluxDB accepts one timestamp per point. -If no timestamp is provided, InfluxDB uses the system time (UTC) of its host machine. - -_**Data type:** [Unix timestamp](#unix-timestamp)_ - -> [!Note] -> #### Important notes about timestamps -> -> - To ensure a data point includes the time a metric is observed (not received by InfluxDB), -> include the timestamp. -> - If your timestamps are not in nanoseconds, specify the precision of your timestamps -> when [writing the data to InfluxDB](/influxdb/v2/write-data/#timestamp-precision). - -### Whitespace -Whitespace in line protocol determines how InfluxDB interprets the data point. -The **first unescaped space** delimits the measurement and the tag set from the field set. -The **second unescaped space** delimits the field set from the timestamp. - -{{< influxdb/line-protocol elements=false commas=false >}} - -## Data types and format - -### Float -IEEE-754 64-bit floating-point numbers. -Default numerical type. -_InfluxDB supports scientific notation in float field values._ - -##### Float field value examples -```js -myMeasurement fieldKey=1.0 -myMeasurement fieldKey=1 -myMeasurement fieldKey=-1.234456e+78 -``` - -### Integer -Signed 64-bit integers. -Trailing `i` on the number specifies an integer. - -| Minimum integer | Maximum integer | -| --------------- | --------------- | -| `-9223372036854775808i` | `9223372036854775807i` | - -##### Integer field value examples -```js -myMeasurement fieldKey=1i -myMeasurement fieldKey=12485903i -myMeasurement fieldKey=-12485903i -``` - -### UInteger -Unsigned 64-bit integers. -Trailing `u` on the number specifies an unsigned integer. - -| Minimum uinteger | Maximum uinteger | -| ---------------- | ---------------- | -| `0u` | `18446744073709551615u` | - -##### UInteger field value examples -```js -myMeasurement fieldKey=1u -myMeasurement fieldKey=12485903u -``` - -### String -Plain text string. -Length limit 64KB. - -##### String example -```sh -# String measurement name, field key, and field value -myMeasurement fieldKey="this is a string" -``` - -### Boolean -Stores `true` or `false` values. - -| Boolean value | Accepted syntax | -|:-------------:|:--------------- | -| True | `t`, `T`, `true`, `True`, `TRUE` | -| False | `f`, `F`, `false`, `False`, `FALSE` | - -##### Boolean field value examples -```js -myMeasurement fieldKey=true -myMeasurement fieldKey=false -myMeasurement fieldKey=t -myMeasurement fieldKey=f -myMeasurement fieldKey=TRUE -myMeasurement fieldKey=FALSE -``` - -> [!Note] -> Do not quote boolean field values. -> Quoted field values are interpreted as strings. - -### Unix timestamp -Unix timestamp in a [specified precision](/influxdb/v2/reference/glossary/#unix-timestamp). -Default precision is nanoseconds (`ns`). 
- -| Minimum timestamp | Maximum timestamp | -| ----------------- | ----------------- | -| `-9223372036854775806` | `9223372036854775806` | - -##### Unix timestamp example -```js -myMeasurementName fieldKey="fieldValue" 1556813561098000000 -``` - -## Quotes -Line protocol supports single and double quotes as described in the following table: - -| Element | Double quotes | Single quotes | -| :------ | :------------: |:-------------: | -| Measurement | _Limited_ * | _Limited_ * | -| Tag key | _Limited_ * | _Limited_ * | -| Tag value | _Limited_ * | _Limited_ * | -| Field key | _Limited_ * | _Limited_ * | -| Field value | **Strings only** | Never | -| Timestamp | Never | Never | - -\* _Line protocol accepts double and single quotes in -measurement names, tag keys, tag values, and field keys, but interprets them as -part of the name, key, or value._ - -## Special Characters -Line protocol supports special characters in [string elements](#string). -In the following contexts, it requires escaping certain characters with a backslash (`\`): - -| Element | Escape characters | -|:------- |:----------------- | -| Measurement | Comma, Space | -| Tag key | Comma, Equals Sign, Space | -| Tag value | Comma, Equals Sign, Space | -| Field key | Comma, Equals Sign, Space | -| Field value | Double quote, Backslash | - -You do not need to escape other special characters. - -##### Examples of special characters in line protocol -```sh -# Measurement name with spaces -my\ Measurement fieldKey="string value" - -# Double quotes in a string field value -myMeasurement fieldKey="\"string\" within a string" - -# Tag keys and values with spaces -myMeasurement,tag\ Key1=tag\ Value1,tag\ Key2=tag\ Value2 fieldKey=100 - -# Emojis -myMeasurement,tagKey=🍭 fieldKey="Launch 🚀" 1556813561098000000 -``` - -### Escaping backslashes -Line protocol supports both literal backslashes and backslashes as an escape character. -With two contiguous backslashes, the first is interpreted as an escape character. -For example: - -| Backslashes | Interpreted as | -|:-----------:|:-------------:| -| `\` | `\` | -| `\\` | `\` | -| `\\\` | `\\` | -| `\\\\` | `\\` | -| `\\\\\` | `\\\` | -| `\\\\\\` | `\\\` | - -## Comments -Line protocol interprets `#` at the beginning of a line as a comment character -and ignores all subsequent characters until the next newline `\n`. - -```sh -# This is a comment -myMeasurement fieldKey="string value" 1556813561098000000 -``` - -## Naming restrictions -Measurement names, tag keys, and field keys cannot begin with an underscore `_`. -The `_` namespace is reserved for InfluxDB system use. - -## Duplicate points -A point is uniquely identified by the measurement name, tag set, and timestamp. -If you submit line protocol with the same measurement, tag set, and timestamp, -but with a different field set, the field set becomes the union of the old -field set and the new field set, where any conflicts favor the new field set. - + diff --git a/content/influxdb3/clustered/write-data/best-practices/optimize-writes.md b/content/influxdb3/clustered/write-data/best-practices/optimize-writes.md index 9e9dff460..b19502238 100644 --- a/content/influxdb3/clustered/write-data/best-practices/optimize-writes.md +++ b/content/influxdb3/clustered/write-data/best-practices/optimize-writes.md @@ -14,7 +14,8 @@ related: - /influxdb3/clustered/write-data/use-telegraf/ --- -Use these tips to optimize performance and system overhead when writing data to InfluxDB. 
+Use these tips to optimize performance and system overhead when writing data to +{{% product-name %}}. - [Batch writes](#batch-writes) - [Sort tags by key](#sort-tags-by-key) @@ -422,9 +423,23 @@ The following example creates sample data for two series (the combination of mea ### Avoid sending duplicate data -Use Telegraf and the [Dedup processor plugin](/telegraf/v1/plugins/#processor-dedup) to filter data whose field values are exact repetitions of previous values. +When writing duplicate points (points with the same timestamp and tag set), +InfluxDB deduplicates the data by creating a union of the duplicate points. Deduplicating your data can reduce your write payload size and resource usage. +> [!Important] +> #### Write ordering for duplicate points +> +> InfluxDB attempts to honor write ordering for duplicate points, with the most +> recently written point taking precedence. However, when data is flushed from +> the in-memory buffer to Parquet files—typically every 15 minutes, but +> sometimes sooner—this ordering is not guaranteed if duplicate points are flushed +> at the same time. As a result, the last written duplicate point may not always +> be retained in storage. + +Use Telegraf and the [Dedup processor plugin](/telegraf/v1/plugins/#processor-dedup) +to filter data whose field values are exact repetitions of previous values. + The following example shows how to use Telegraf to remove points that repeat field values, and then write the data to InfluxDB: 1. In your terminal, enter the following command to create the sample data file and calculate the number of seconds between the earliest timestamp and _now_. diff --git a/content/shared/v3-line-protocol.md b/content/shared/v3-line-protocol.md index 0f197053d..323fb8d26 100644 --- a/content/shared/v3-line-protocol.md +++ b/content/shared/v3-line-protocol.md @@ -44,7 +44,7 @@ _**Data type:** [String](#string)_ ### Tag set -_**Optional**_ – +(_**Optional**_) All tag key-value pairs for the point. Key-value relationships are denoted with the `=` operand. Multiple tag key-value pairs are comma-delimited. @@ -75,8 +75,8 @@ _**Value data type:** [Float](#float) | [Integer](#integer) | [UInteger](#uinteg ### Timestamp -_**Optional**_ – -The [unix timestamp](/influxdb3/version/reference/glossary/#unix-timestamp) for the data point. +(_**Optional**_) +The [Unix timestamp](/influxdb3/version/reference/glossary/#unix-timestamp) for the data point. InfluxDB accepts one timestamp per point. If no timestamp is provided, InfluxDB uses the system time (UTC) of its host machine. @@ -282,3 +282,15 @@ A point is uniquely identified by the table name, tag set, and timestamp. If you submit line protocol with the same table, tag set, and timestamp, but with a different field set, the field set becomes the union of the old field set and the new field set, where any conflicts favor the new field set. + +{{% show-in "cloud-dedicated,clustered" %}} +> [!Important] +> #### Write ordering for duplicate points +> +> {{% product-name %}} attempts to honor write ordering for duplicate points, +> with the most recently written point taking precedence. However, when data is +> flushed from the in-memory buffer to Parquet files—typically every 15 minutes, +> but sometimes sooner—this ordering is not guaranteed if duplicate points are +> flushed at the same time. As a result, the last written duplicate point may +> not always be retained in storage. +{{% /show-in %}}
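
To make the duplicate-point semantics in the final patch concrete, here is a minimal line protocol sketch; the table, tag, and field names are invented for illustration:

```sh
# First write
home,room=kitchen temp=22.1,hum=36i 1724112000000000000

# Second write: same table, tag set, and timestamp, overlapping field set
home,room=kitchen temp=23.4,co=0i 1724112000000000000

# Stored result is the union of both field sets, with the conflicting
# field (temp) resolved in favor of the most recent write:
#   home,room=kitchen temp=23.4,hum=36i,co=0i 1724112000000000000
```

Subject to the write-ordering caveat added for Cloud Dedicated and Clustered: if both writes are flushed from the in-memory buffer to Parquet at the same time, which `temp` value survives is not guaranteed.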