From 68bcb0f3c3d9b47d32bcee743e81143d02012e26 Mon Sep 17 00:00:00 2001 From: thrasherht Date: Sat, 22 Mar 2025 00:32:32 -0400 Subject: [PATCH 001/179] Update _index.md Escape ${basearch} variable to prevent bash from interpreting it. --- content/influxdb/v2/install/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb/v2/install/_index.md b/content/influxdb/v2/install/_index.md index c27d355f4..29c6251ba 100644 --- a/content/influxdb/v2/install/_index.md +++ b/content/influxdb/v2/install/_index.md @@ -377,7 +377,7 @@ To install {{% product-name %}} on Linux, do one of the following: cat < Date: Wed, 30 Jul 2025 14:39:02 -0700 Subject: [PATCH 002/179] feat: Add complete GET /query endpoint --- .../v1-compatibility/swaggerV1Compat.yml | 135 ++++++++++++++++++ 1 file changed, 135 insertions(+) diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index 744692e6d..b43a670cf 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -136,6 +136,141 @@ paths: schema: $ref: '#/components/schemas/Error' /query: + get: + operationId: GetV1ExecuteQuery + tags: + - Query + summary: Query using the InfluxDB v1 HTTP API + parameters: + - $ref: '#/components/parameters/TraceSpan' + - $ref: '#/components/parameters/AuthUserV1' + - $ref: '#/components/parameters/AuthPassV1' + - in: header + name: Accept + schema: + type: string + description: Specifies how query results should be encoded in the response. **Note:** With `application/csv`, query results include epoch timestamps instead of RFC3339 timestamps. 
+ default: application/json + enum: + - application/json + - application/csv + - text/csv + - application/x-msgpack + - in: header + name: Accept-Encoding + description: The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand. + schema: + type: string + description: Specifies that the query response in the body should be encoded with gzip or not encoded with identity. + default: identity + enum: + - gzip + - identity + - in: query + name: chunked + description: | + If true, the response is divided into chunks of size `chunk_size`. + schema: + type: boolean + default: false + - in: query + name: chunk_size + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + schema: + type: integer + default: 10000 + - in: query + name: db + schema: + type: string + required: true + description: Bucket to query. + - in: query + name: pretty + description: | + If true, the JSON response is formatted in a human-readable format. + schema: + type: boolean + default: false + - in: query + name: q + description: Defines the influxql query to run. + required: true + schema: + type: string + - in: query + name: rp + schema: + type: string + description: Retention policy name. + - name: epoch + description: | + Formats timestamps as unix (epoch) timestamps with the specified precision + instead of RFC3339 timestamps with nanosecond precision. + in: query + schema: + type: string + enum: + - h + - m + - s + - ms + - u + - µ + - ns + responses: + '200': + description: Query results + headers: + Content-Encoding: + description: The Content-Encoding entity header is used to compress the media-type. When present, its value indicates which encodings were applied to the entity-body + schema: + type: string + description: Specifies that the response in the body is encoded with gzip or not encoded with identity. 
+ default: identity + enum: + - gzip + - identity + Trace-Id: + description: The Trace-Id header reports the request's trace ID, if one was generated. + schema: + type: string + description: Specifies the request's trace ID. + content: + application/csv: + schema: + $ref: '#/components/schemas/InfluxQLCSVResponse' + text/csv: + schema: + $ref: '#/components/schemas/InfluxQLCSVResponse' + application/json: + schema: + $ref: '#/components/schemas/InfluxQLResponse' + examples: + influxql-chunk_size_2: + value: | + {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:55Z",90,"1"],["2016-05-19T18:37:56Z",90,"1"]],"partial":true}],"partial":true}]} + {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:57Z",90,"1"],["2016-05-19T18:37:58Z",90,"1"]]}]}]} + application/x-msgpack: + schema: + type: string + format: binary + '429': + description: Token is temporarily over quota. The Retry-After header describes when to try the read again. + headers: + Retry-After: + description: A non-negative decimal integer indicating the seconds to delay after the response is received. 
+ schema: + type: integer + format: int32 + default: + description: Error processing query + content: + application/json: + schema: + $ref: '#/components/schemas/Error' post: operationId: PostQueryV1 tags: From 9117e7ab3515fdde1aa18cc0230d2a1669d38236 Mon Sep 17 00:00:00 2001 From: meelahme Date: Wed, 30 Jul 2025 14:48:52 -0700 Subject: [PATCH 003/179] feat: Add complete GET /query endpoint to cluster, dedicated, and serverless --- .../v1-compatibility/swaggerV1Compat.yml | 135 ++++++++++++++++++ .../v1-compatibility/swaggerV1Compat.yml | 135 ++++++++++++++++++ 2 files changed, 270 insertions(+) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index 55f91d971..ba5a4dff2 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -137,6 +137,141 @@ paths: schema: $ref: '#/components/schemas/Error' /query: + get: + operationId: GetV1ExecuteQuery + tags: + - Query + summary: Query using the InfluxDB v1 HTTP API + parameters: + - $ref: '#/components/parameters/TraceSpan' + - $ref: '#/components/parameters/AuthUserV1' + - $ref: '#/components/parameters/AuthPassV1' + - in: header + name: Accept + schema: + type: string + description: Specifies how query results should be encoded in the response. **Note:** With `application/csv`, query results include epoch timestamps instead of RFC3339 timestamps. + default: application/json + enum: + - application/json + - application/csv + - text/csv + - application/x-msgpack + - in: header + name: Accept-Encoding + description: The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand. + schema: + type: string + description: Specifies that the query response in the body should be encoded with gzip or not encoded with identity. 
+ default: identity + enum: + - gzip + - identity + - in: query + name: chunked + description: | + If true, the response is divided into chunks of size `chunk_size`. + schema: + type: boolean + default: false + - in: query + name: chunk_size + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + schema: + type: integer + default: 10000 + - in: query + name: db + schema: + type: string + required: true + description: Bucket to query. + - in: query + name: pretty + description: | + If true, the JSON response is formatted in a human-readable format. + schema: + type: boolean + default: false + - in: query + name: q + description: Defines the influxql query to run. + required: true + schema: + type: string + - in: query + name: rp + schema: + type: string + description: Retention policy name. + - name: epoch + description: | + Formats timestamps as unix (epoch) timestamps with the specified precision + instead of RFC3339 timestamps with nanosecond precision. + in: query + schema: + type: string + enum: + - h + - m + - s + - ms + - u + - µ + - ns + responses: + '200': + description: Query results + headers: + Content-Encoding: + description: The Content-Encoding entity header is used to compress the media-type. When present, its value indicates which encodings were applied to the entity-body + schema: + type: string + description: Specifies that the response in the body is encoded with gzip or not encoded with identity. + default: identity + enum: + - gzip + - identity + Trace-Id: + description: The Trace-Id header reports the request's trace ID, if one was generated. + schema: + type: string + description: Specifies the request's trace ID. 
+ content: + application/csv: + schema: + $ref: '#/components/schemas/InfluxQLCSVResponse' + text/csv: + schema: + $ref: '#/components/schemas/InfluxQLCSVResponse' + application/json: + schema: + $ref: '#/components/schemas/InfluxQLResponse' + examples: + influxql-chunk_size_2: + value: | + {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:55Z",90,"1"],["2016-05-19T18:37:56Z",90,"1"]],"partial":true}],"partial":true}]} + {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:57Z",90,"1"],["2016-05-19T18:37:58Z",90,"1"]]}]}]} + application/x-msgpack: + schema: + type: string + format: binary + '429': + description: Token is temporarily over quota. The Retry-After header describes when to try the read again. + headers: + Retry-After: + description: A non-negative decimal integer indicating the seconds to delay after the response is received. + schema: + type: integer + format: int32 + default: + description: Error processing query + content: + application/json: + schema: + $ref: '#/components/schemas/Error' post: operationId: PostQueryV1 tags: diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index 36c3e08b0..b822af222 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -136,6 +136,141 @@ paths: schema: $ref: '#/components/schemas/Error' /query: + get: + operationId: GetV1ExecuteQuery + tags: + - Query + summary: Query using the InfluxDB v1 HTTP API + parameters: + - $ref: '#/components/parameters/TraceSpan' + - $ref: '#/components/parameters/AuthUserV1' + - $ref: '#/components/parameters/AuthPassV1' + - in: header + name: Accept + schema: + type: string + description: Specifies how query results should be encoded in the response. 
**Note:** With `application/csv`, query results include epoch timestamps instead of RFC3339 timestamps. + default: application/json + enum: + - application/json + - application/csv + - text/csv + - application/x-msgpack + - in: header + name: Accept-Encoding + description: The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand. + schema: + type: string + description: Specifies that the query response in the body should be encoded with gzip or not encoded with identity. + default: identity + enum: + - gzip + - identity + - in: query + name: chunked + description: | + If true, the response is divided into chunks of size `chunk_size`. + schema: + type: boolean + default: false + - in: query + name: chunk_size + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + schema: + type: integer + default: 10000 + - in: query + name: db + schema: + type: string + required: true + description: Bucket to query. + - in: query + name: pretty + description: | + If true, the JSON response is formatted in a human-readable format. + schema: + type: boolean + default: false + - in: query + name: q + description: Defines the influxql query to run. + required: true + schema: + type: string + - in: query + name: rp + schema: + type: string + description: Retention policy name. + - name: epoch + description: | + Formats timestamps as unix (epoch) timestamps with the specified precision + instead of RFC3339 timestamps with nanosecond precision. + in: query + schema: + type: string + enum: + - h + - m + - s + - ms + - u + - µ + - ns + responses: + '200': + description: Query results + headers: + Content-Encoding: + description: The Content-Encoding entity header is used to compress the media-type. 
When present, its value indicates which encodings were applied to the entity-body + schema: + type: string + description: Specifies that the response in the body is encoded with gzip or not encoded with identity. + default: identity + enum: + - gzip + - identity + Trace-Id: + description: The Trace-Id header reports the request's trace ID, if one was generated. + schema: + type: string + description: Specifies the request's trace ID. + content: + application/csv: + schema: + $ref: '#/components/schemas/InfluxQLCSVResponse' + text/csv: + schema: + $ref: '#/components/schemas/InfluxQLCSVResponse' + application/json: + schema: + $ref: '#/components/schemas/InfluxQLResponse' + examples: + influxql-chunk_size_2: + value: | + {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:55Z",90,"1"],["2016-05-19T18:37:56Z",90,"1"]],"partial":true}],"partial":true}]} + {"results":[{"statement_id":0,"series":[{"name":"mymeas","columns":["time","myfield","mytag"],"values":[["2016-05-19T18:37:57Z",90,"1"],["2016-05-19T18:37:58Z",90,"1"]]}]}]} + application/x-msgpack: + schema: + type: string + format: binary + '429': + description: Token is temporarily over quota. The Retry-After header describes when to try the read again. + headers: + Retry-After: + description: A non-negative decimal integer indicating the seconds to delay after the response is received. 
+ schema: + type: integer + format: int32 + default: + description: Error processing query + content: + application/json: + schema: + $ref: '#/components/schemas/Error' post: operationId: PostQueryV1 tags: From dbcd8dde73519e23cac4604bc7d7352df7cf93eb Mon Sep 17 00:00:00 2001 From: meelahme Date: Wed, 30 Jul 2025 15:34:42 -0700 Subject: [PATCH 004/179] docs(api): enhance POST /query with JSON chunk parameter support --- .../v1-compatibility/swaggerV1Compat.yml | 32 +++++++++++++++++++ .../v1-compatibility/swaggerV1Compat.yml | 32 +++++++++++++++++++ .../v1-compatibility/swaggerV1Compat.yml | 32 +++++++++++++++++++ 3 files changed, 96 insertions(+) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index ba5a4dff2..45dcee9e6 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -283,6 +283,38 @@ paths: text/plain: schema: type: string + application/json: + schema: + type: object + properties: + db: + type: string + description: Bucket to query. + q: + description: Defines the influxql query to run. + type: string + chunked: + description: | + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: | + A unix timestamp precision. 
+ type: string + enum: + - h + - m + - s + - ms + - u + - µ + - ns parameters: - $ref: '#/components/parameters/TraceSpan' - $ref: '#/components/parameters/AuthUserV1' diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index b43a670cf..16ac580f3 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -282,6 +282,38 @@ paths: text/plain: schema: type: string + application/json: + schema: + type: object + properties: + db: + type: string + description: Bucket to query. + q: + description: Defines the influxql query to run. + type: string + chunked: + description: | + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: | + A unix timestamp precision. + type: string + enum: + - h + - m + - s + - ms + - u + - µ + - ns parameters: - $ref: '#/components/parameters/TraceSpan' - $ref: '#/components/parameters/AuthUserV1' diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index b822af222..08f4a617e 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -282,6 +282,38 @@ paths: text/plain: schema: type: string + application/json: + schema: + type: object + properties: + db: + type: string + description: Bucket to query. + q: + description: Defines the influxql query to run. + type: string + chunked: + description: | + If true, the response is divided into chunks of size `chunk_size`. 
+ type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: | + A unix timestamp precision. + type: string + enum: + - h + - m + - s + - ms + - u + - µ + - ns parameters: - $ref: '#/components/parameters/TraceSpan' - $ref: '#/components/parameters/AuthUserV1' From 67c2d19e727c601a6f8a8fa7f020bd0585bae097 Mon Sep 17 00:00:00 2001 From: meelahme Date: Thu, 31 Jul 2025 11:56:20 -0700 Subject: [PATCH 005/179] docs(api): adding api examples to DVC and LVC --- .../distinct-value-cache/create.md | 53 ++++++++++++++++++ .../distinct-value-cache/query.md | 32 +++++++++++ .../distinct-value-cache/show.md | 39 +++++++++++++ .../last-value-cache/create.md | 55 ++++++++++++++++++- .../last-value-cache/delete.md | 26 +++++++++ .../influxdb3-admin/last-value-cache/show.md | 38 +++++++++++++ 6 files changed, 242 insertions(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/create.md b/content/shared/influxdb3-admin/distinct-value-cache/create.md index c897c0dbf..a11489f63 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/create.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/create.md @@ -69,6 +69,59 @@ influxdb3 create distinct_cache \ {{% /show-in %}} +## Use the HTTP API + +You can also create a Distinct Value Cache using the [InfluxDB v3 HTTP API](/influxdb3/version/api/v3/). Send a `POST` request to the `/api/v3/configure/distinct_cache` endpoint. 
+ +{{% code-placeholders "(DATABASE|TABLE|DVC)_NAME|AUTH_TOKEN|COLUMNS|MAX_(CARDINALITY|AGE)" %}} + +```bash +curl -X POST "https://localhost:8181/api/v3/configure/distinct_cache" \ + -H "Authorization: Bearer AUTH_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "db": "DATABASE_NAME", + "table": "TABLE_NAME", + "name": "DVC_NAME", + "columns": ["COLUMNS"], + "max_cardinality": MAX_CARDINALITY, + "max_age": MAX_AGE + }' +``` + +{{% /code-placeholders %}} + +### Example + +```bash +curl -X POST "https://localhost:8181/api/v3/configure/distinct_cache" \ + -H "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" \ + -H "Content-Type: application/json" \ + -d '{ + "db": "example-db", + "table": "wind_data", + "name": "windDistinctCache", + "columns": ["country", "county", "city"], + "max_cardinality": 10000, + "max_age": 86400 + }' +``` + +**Response codes:** + +- `201` : Success. The distinct cache has been created. +- `204` : Not created. A distinct cache with this configuration already exists. +- `400` : Bad request. + + +> [!Note] +> #### API parameter differences +> +> - **Columns format**: The API uses a JSON array (`["country", "county", "city"]`) +> instead of the CLI's comma-delimited format (`country,county,city`). +> - **Maximum age format**: The API uses seconds (`86400`) instead of the CLI's +> [humantime format](https://docs.rs/humantime/latest/humantime/fn.parse_duration.html) (`24h`, `1 day`). 
+ Replace the following: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: diff --git a/content/shared/influxdb3-admin/distinct-value-cache/query.md b/content/shared/influxdb3-admin/distinct-value-cache/query.md index 55e0ce4d0..4fbd41e97 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/query.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/query.md @@ -31,3 +31,35 @@ FROM WHERE country = 'Spain' ``` + +## Use the HTTP API + +You can query cached data using the [InfluxDB v3 SQL query API](/influxdb3/version/api/v3/). Send a `POST` request to the `/api/v3/query_sql` endpoint. + +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TABLE_NAME|CACHE_NAME" %}} + +```bash +curl -X POST "https://localhost:8181/api/v3/query_sql" \ + -H "Authorization: Bearer AUTH_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "db": "DATABASE_NAME", + "q": "SELECT * FROM last_cache('\''TABLE_NAME'\'', '\''CACHE_NAME'\'')", + "format": "json" + }' +``` + +{{% /code-placeholders %}} + +## Example with WHERE clause + +```bash +curl -X POST "https://localhost:8181/api/v3/query_sql" \ + -H "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" \ + -H "Content-Type: application/json" \ + -d '{ + "db": "example-db", + "q": "SELECT room, temp FROM last_cache('\''home'\'', '\''homeCache'\'') WHERE room = '\''Kitchen'\''", + "format": "json" + }' +``` diff --git a/content/shared/influxdb3-admin/distinct-value-cache/show.md b/content/shared/influxdb3-admin/distinct-value-cache/show.md index 0de0e2ac0..cc778fcf2 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/show.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/show.md @@ -67,3 +67,42 @@ In the examples above, replace the following: - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{< product-name >}} {{% show-in "enterprise" %}}admin {{% /show-in %}} authentication token + +## Use the HTTP API + +You can query cache information using 
the [InfluxDB v3 SQL query API](/influxdb3/version/api/v3/). Send a `POST` request to the `/api/v3/query_sql` endpoint. + +### Query all caches + +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} + +```bash +curl -X POST "https://localhost:8181/api/v3/query_sql" \ + -H "Authorization: Bearer AUTH_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "db": "DATABASE_NAME", + "q": "SELECT * FROM system.last_caches", + "format": "json" + }' + ``` + +{{% /code-placeholders %}} + +## Query specific cache details + +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|CACHE_NAME" %}} + +```bash +curl -X POST "https://localhost:8181/api/v3/query_sql" \ + -H "Authorization: Bearer AUTH_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "db": "DATABASE_NAME", + "q": "SELECT * FROM system.last_caches WHERE name = '\''CACHE_NAME'\''", + "format": "json" + }' +``` + +{{% /code-placeholders %}} + diff --git a/content/shared/influxdb3-admin/last-value-cache/create.md b/content/shared/influxdb3-admin/last-value-cache/create.md index febc66f83..873e64214 100644 --- a/content/shared/influxdb3-admin/last-value-cache/create.md +++ b/content/shared/influxdb3-admin/last-value-cache/create.md @@ -80,6 +80,59 @@ influxdb3 create last_cache \ {{% /show-in %}} +## Use the HTTP API + +You can also create a Last Value Cache using the [InfluxDB v3 HTTP API](/influxdb3/version/api/v3/). Send a `POST` request to the `/api/v3/configure/last_cache` endpoint. 
+ +{{% code-placeholders "(DATABASE|TABLE|LVC)_NAME|AUTH_TOKEN|(KEY|VALUE)_COLUMNS|COUNT|TTL" %}} + +```bash +curl -X POST "https://localhost:8181/api/v3/configure/last_cache" \ + -H "Authorization: Bearer AUTH_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "db": "DATABASE_NAME", + "table": "TABLE_NAME", + "name": "LVC_NAME", + "key_columns": ["KEY_COLUMNS"], + "value_columns": ["VALUE_COLUMNS"], + "count": COUNT, + "ttl": TTL + }' + ``` + + {{% /code-placeholders %}} + + ### Example + +```bash + curl -X POST "https://localhost:8181/api/v3/configure/last_cache" \ + -H "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" \ + -H "Content-Type: application/json" \ + -d '{ + "db": "example-db", + "table": "home", + "name": "homeLastCache", + "key_columns": ["room", "wall"], + "value_columns": ["temp", "hum", "co"], + "count": 5, + "ttl": 14400 + }' +``` + +**Response codes:** + +- `201` : Success. Last cache created. +- `400` : Bad request. +- `401` : Unauthorized. +- `404` : Cache not found. +- `409` : Cache already exists. + +> [!Note] +> #### API parameter differences +> Column format: The API uses JSON arrays (["room", "wall"]) instead of the CLI's comma-delimited format (room,wall). +> TTL format: The API uses seconds (14400) instead of the CLI's humantime format (4h, 4 hours). + Replace the following: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: @@ -116,4 +169,4 @@ The cache imports the distinct values from the table and starts caching them. > > The LVC is stored in memory, so it's important to consider the size and persistence > of the cache. For more information, see -> [Important things to know about the Last Value Cache](/influxdb3/version/admin/last-value-cache/#important-things-to-know-about-the-last-value-cache). 
+> [Important things to know about the Last Value Cache](/influxdb3/version/admin/last-value-cache/#important-things-to-know-about-the-last-value-cache) diff --git a/content/shared/influxdb3-admin/last-value-cache/delete.md b/content/shared/influxdb3-admin/last-value-cache/delete.md index b06ba5eb9..3ea2261aa 100644 --- a/content/shared/influxdb3-admin/last-value-cache/delete.md +++ b/content/shared/influxdb3-admin/last-value-cache/delete.md @@ -23,6 +23,32 @@ influxdb3 delete last_cache \ ``` {{% /code-placeholders %}} +## Use the HTTP API + +You can also delete a Last Value Cache using the [InfluxDB v3 HTTP API](/influxdb3/version/api/v3/). Send a `DELETE` request to the `/api/v3/configure/last_cache` endpoint with query parameters. + +{{% code-placeholders "(DATABASE|TABLE|LVC)_NAME|AUTH_TOKEN" %}} + +```bash +curl -X DELETE "https://localhost:8181/api/v3/configure/last_cache?db=DATABASE_NAME&table=TABLE_NAME&name=LVC_NAME" \ + -H "Authorization: Bearer AUTH_TOKEN" +{{% /code-placeholders %}} +``` + +## Example + +```bash +curl -X DELETE "https://localhost:8181/api/v3/configure/last_cache?db=example-db&table=home&name=homeLastCache" \ + -H "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" +``` + +**Response codes:** + +- `200` : Success. The last cache has been deleted. +- `400` : Bad request. +- `401` : Unauthorized. +- `404` : Cache not found. 
+ Replace the following: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: diff --git a/content/shared/influxdb3-admin/last-value-cache/show.md b/content/shared/influxdb3-admin/last-value-cache/show.md index cf0aa7019..c9838a9f9 100644 --- a/content/shared/influxdb3-admin/last-value-cache/show.md +++ b/content/shared/influxdb3-admin/last-value-cache/show.md @@ -66,3 +66,41 @@ In the examples above, replace the following: - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{< product-name >}} {{% show-in "enterprise" %}}admin {{% /show-in %}} authentication token + +## Use the HTTP API + +You can query cache information using the [InfluxDB v3 SQL query API](/influxdb3/version/api/v3/). Send a `POST` request to the `/api/v3/query_sql` endpoint. + +### Query all caches + +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} + +```bash +curl -X POST "https://localhost:8181/api/v3/query_sql" \ + -H "Authorization: Bearer AUTH_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "db": "DATABASE_NAME", + "q": "SELECT * FROM system.last_caches", + "format": "json" + }' + ``` + +{{% /code-placeholders %}} + +## Query specific cache details + +{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|CACHE_NAME" %}} + +```bash +curl -X POST "https://localhost:8181/api/v3/query_sql" \ + -H "Authorization: Bearer AUTH_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "db": "DATABASE_NAME", + "q": "SELECT * FROM system.last_caches WHERE name = '\''CACHE_NAME'\''", + "format": "json" + }' +``` + +{{% /code-placeholders %}} \ No newline at end of file From 8a74c7da12f4c9427d2cd25ef46154620e6aefec Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Thu, 31 Jul 2025 12:02:28 -0700 Subject: [PATCH 006/179] Update content/shared/influxdb3-admin/distinct-value-cache/show.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- 
content/shared/influxdb3-admin/distinct-value-cache/show.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/show.md b/content/shared/influxdb3-admin/distinct-value-cache/show.md index cc778fcf2..f02229d14 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/show.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/show.md @@ -82,7 +82,7 @@ curl -X POST "https://localhost:8181/api/v3/query_sql" \ -H "Content-Type: application/json" \ -d '{ "db": "DATABASE_NAME", - "q": "SELECT * FROM system.last_caches", + "q": "SELECT * FROM system.distinct_caches", "format": "json" }' ``` From d2904df598feacecffc8f7b1547dbac9af149feb Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Thu, 31 Jul 2025 12:02:40 -0700 Subject: [PATCH 007/179] Update content/shared/influxdb3-admin/distinct-value-cache/show.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- content/shared/influxdb3-admin/distinct-value-cache/show.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/show.md b/content/shared/influxdb3-admin/distinct-value-cache/show.md index f02229d14..306ad3099 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/show.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/show.md @@ -99,7 +99,7 @@ curl -X POST "https://localhost:8181/api/v3/query_sql" \ -H "Content-Type: application/json" \ -d '{ "db": "DATABASE_NAME", - "q": "SELECT * FROM system.last_caches WHERE name = '\''CACHE_NAME'\''", + "q": "SELECT * FROM system.distinct_caches WHERE name = '\''CACHE_NAME'\''", "format": "json" }' ``` From faa973a86c29efc02542fab6cfb10a5c42ea8027 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Thu, 31 Jul 2025 12:02:52 -0700 Subject: [PATCH 008/179] Update 
content/shared/influxdb3-admin/last-value-cache/delete.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- content/shared/influxdb3-admin/last-value-cache/delete.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/content/shared/influxdb3-admin/last-value-cache/delete.md b/content/shared/influxdb3-admin/last-value-cache/delete.md index 3ea2261aa..dbd090fe1 100644 --- a/content/shared/influxdb3-admin/last-value-cache/delete.md +++ b/content/shared/influxdb3-admin/last-value-cache/delete.md @@ -32,8 +32,6 @@ You can also delete a Last Value Cache using the [InfluxDB v3 HTTP API](/influxd ```bash curl -X DELETE "https://localhost:8181/api/v3/configure/last_cache?db=DATABASE_NAME&table=TABLE_NAME&name=LVC_NAME" \ -H "Authorization: Bearer AUTH_TOKEN" -{{% /code-placeholders %}} -``` ## Example From 5419f10fa5f59884fa187352043a620f5cc34ed9 Mon Sep 17 00:00:00 2001 From: meelahme Date: Thu, 31 Jul 2025 12:47:15 -0700 Subject: [PATCH 009/179] fix(docs): close code-placeholders shortcode in LVC delete guide --- content/shared/influxdb3-admin/last-value-cache/create.md | 2 +- content/shared/influxdb3-admin/last-value-cache/delete.md | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/content/shared/influxdb3-admin/last-value-cache/create.md b/content/shared/influxdb3-admin/last-value-cache/create.md index 873e64214..db892cea6 100644 --- a/content/shared/influxdb3-admin/last-value-cache/create.md +++ b/content/shared/influxdb3-admin/last-value-cache/create.md @@ -169,4 +169,4 @@ The cache imports the distinct values from the table and starts caching them. > > The LVC is stored in memory, so it's important to consider the size and persistence > of the cache. 
For more information, see -> [Important things to know about the Last Value Cache](/influxdb3/version/admin/last-value-cache/#important-things-to-know-about-the-last-value-cache) +> [Important things to know about the Last Value Cache.](/influxdb3/version/admin/last-value-cache/#important-things-to-know-about-the-last-value-cache) diff --git a/content/shared/influxdb3-admin/last-value-cache/delete.md b/content/shared/influxdb3-admin/last-value-cache/delete.md index dbd090fe1..23e0765cd 100644 --- a/content/shared/influxdb3-admin/last-value-cache/delete.md +++ b/content/shared/influxdb3-admin/last-value-cache/delete.md @@ -28,10 +28,11 @@ influxdb3 delete last_cache \ You can also delete a Last Value Cache using the [InfluxDB v3 HTTP API](/influxdb3/version/api/v3/). Send a `DELETE` request to the `/api/v3/configure/last_cache` endpoint with query parameters. {{% code-placeholders "(DATABASE|TABLE|LVC)_NAME|AUTH_TOKEN" %}} - ```bash curl -X DELETE "https://localhost:8181/api/v3/configure/last_cache?db=DATABASE_NAME&table=TABLE_NAME&name=LVC_NAME" \ -H "Authorization: Bearer AUTH_TOKEN" +``` +{{% /code-placeholders %}} ## Example From 105a06b73bf3e22a6efa7081c9ad75c4d31650b5 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Thu, 31 Jul 2025 23:21:23 -0700 Subject: [PATCH 010/179] Update api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index 08f4a617e..dfba93fa6 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -196,7 +196,7 @@ paths: default: false - in: query name: q - 
description: Defines the influxql query to run. + description: Defines the InfluxQL query to run. required: true schema: type: string From ed7fa9016dcca75936cd1ab21c08bd7a9fd58167 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Thu, 31 Jul 2025 23:21:31 -0700 Subject: [PATCH 011/179] Update api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index dfba93fa6..751b96ea9 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -290,7 +290,7 @@ paths: type: string description: Bucket to query. q: - description: Defines the influxql query to run. + description: Defines the InfluxQL query to run. 
type: string chunked: description: | From aa766edd7037656b972f2202c42d67fbec30ff49 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Thu, 31 Jul 2025 23:21:38 -0700 Subject: [PATCH 012/179] Update api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../cloud-serverless/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index 16ac580f3..cdbcf785a 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -196,7 +196,7 @@ paths: default: false - in: query name: q - description: Defines the influxql query to run. + description: Defines the InfluxQL query to run. required: true schema: type: string From e9bbdf1823bad2af01ddbda7174c185fe5936f4f Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Thu, 31 Jul 2025 23:21:50 -0700 Subject: [PATCH 013/179] Update api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../cloud-serverless/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index cdbcf785a..858effb50 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -290,7 +290,7 @@ paths: type: string description: Bucket to query. q: - description: Defines the influxql query to run. 
+ description: Defines the InfluxQL query to run. type: string chunked: description: | From e7723172a5af617c589de83efed3719f59b15c9e Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Thu, 31 Jul 2025 23:21:58 -0700 Subject: [PATCH 014/179] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../cloud-dedicated/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index 45dcee9e6..fdf030435 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -197,7 +197,7 @@ paths: default: false - in: query name: q - description: Defines the influxql query to run. + description: Defines the InfluxQL query to run. required: true schema: type: string From 33ace566326678fdcd45ce262c05d4be44226b25 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Thu, 31 Jul 2025 23:22:06 -0700 Subject: [PATCH 015/179] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../cloud-dedicated/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index fdf030435..9c843ef9f 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -291,7 +291,7 @@ paths: type: string description: Bucket to query. 
q: - description: Defines the influxql query to run. + description: Defines the InfluxQL query to run. type: string chunked: description: | From 7140e46f45901b608d2eff3b368a907ac7959fd4 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:17:27 -0700 Subject: [PATCH 016/179] Update content/shared/influxdb3-admin/distinct-value-cache/query.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/distinct-value-cache/query.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/query.md b/content/shared/influxdb3-admin/distinct-value-cache/query.md index 4fbd41e97..d6656fa9b 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/query.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/query.md @@ -44,7 +44,7 @@ curl -X POST "https://localhost:8181/api/v3/query_sql" \ -H "Content-Type: application/json" \ -d '{ "db": "DATABASE_NAME", - "q": "SELECT * FROM last_cache('\''TABLE_NAME'\'', '\''CACHE_NAME'\'')", + "q": "SELECT * FROM distinct_cache('\''TABLE_NAME'\'', '\''CACHE_NAME'\'')", "format": "json" }' ``` From 0e58bb864ac9c4ca837724e0862b09850d087665 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:18:11 -0700 Subject: [PATCH 017/179] Update content/shared/influxdb3-admin/distinct-value-cache/query.md Co-authored-by: Jason Stirnaman --- .../shared/influxdb3-admin/distinct-value-cache/query.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/query.md b/content/shared/influxdb3-admin/distinct-value-cache/query.md index d6656fa9b..63349341b 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/query.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/query.md @@ -34,7 +34,11 @@ WHERE ## Use the HTTP API -You can query cached data using the [InfluxDB 
v3 SQL query API](/influxdb3/version/api/v3/). Send a `POST` request to the `/api/v3/query_sql` endpoint. +To use the HTTP API to query cached data, send a `GET` or `POST` request to the `/api/v3/query_sql` endpoint and include the [`distinct_cache()`](/influxdb3/version/reference/sql/functions/cache/#distinct_cache) function in your query. + +{{% api-endpoint method="GET" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/GetExecuteQuerySQL" %}} + +{{% api-endpoint method="POST" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/PostExecuteQuerySQL" %}} {{% code-placeholders "DATABASE_NAME|AUTH_TOKEN|TABLE_NAME|CACHE_NAME" %}} From f39af3f15a10bd1929593fc9289314eff78eb0d5 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:18:24 -0700 Subject: [PATCH 018/179] Update content/shared/influxdb3-admin/distinct-value-cache/show.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/distinct-value-cache/show.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/show.md b/content/shared/influxdb3-admin/distinct-value-cache/show.md index 306ad3099..2541d606a 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/show.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/show.md @@ -70,7 +70,11 @@ In the examples above, replace the following: ## Use the HTTP API -You can query cache information using the [InfluxDB v3 SQL query API](/influxdb3/version/api/v3/). Send a `POST` request to the `/api/v3/query_sql` endpoint. +To use the HTTP API to query and output cache information from the system table, send a `GET` or `POST` request to the `/api/v3/query_sql` endpoint. 
+ +{{% api-endpoint method="GET" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/GetExecuteQuerySQL" %}} + +{{% api-endpoint method="POST" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/PostExecuteQuerySQL" %}} ### Query all caches From 0f5056e239df597958a9540e1bb7aceec6a63518 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:18:32 -0700 Subject: [PATCH 019/179] Update content/shared/influxdb3-admin/last-value-cache/create.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/last-value-cache/create.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/last-value-cache/create.md b/content/shared/influxdb3-admin/last-value-cache/create.md index db892cea6..720963e79 100644 --- a/content/shared/influxdb3-admin/last-value-cache/create.md +++ b/content/shared/influxdb3-admin/last-value-cache/create.md @@ -82,7 +82,9 @@ influxdb3 create last_cache \ ## Use the HTTP API -You can also create a Last Value Cache using the [InfluxDB v3 HTTP API](/influxdb3/version/api/v3/). Send a `POST` request to the `/api/v3/configure/last_cache` endpoint. +To use the HTTP API to create a Last Value Cache, send a `POST` request to the `/api/v3/configure/last_cache` endpoint. 
+ +{{% api-endpoint method="POST" endpoint="/api/v3/configure/last_cache" api-ref="/influxdb3/version/api/v3/#operation/PostConfigureLastCache" %}} {{% code-placeholders "(DATABASE|TABLE|LVC)_NAME|AUTH_TOKEN|(KEY|VALUE)_COLUMNS|COUNT|TTL" %}} From 99715ce70dc08469570261257a19e097b66c20b3 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:18:46 -0700 Subject: [PATCH 020/179] Update content/shared/influxdb3-admin/last-value-cache/show.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/last-value-cache/show.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/content/shared/influxdb3-admin/last-value-cache/show.md b/content/shared/influxdb3-admin/last-value-cache/show.md index c9838a9f9..9fe027e86 100644 --- a/content/shared/influxdb3-admin/last-value-cache/show.md +++ b/content/shared/influxdb3-admin/last-value-cache/show.md @@ -77,9 +77,8 @@ You can query cache information using the [InfluxDB v3 SQL query API](/influxdb3 ```bash curl -X POST "https://localhost:8181/api/v3/query_sql" \ - -H "Authorization: Bearer AUTH_TOKEN" \ - -H "Content-Type: application/json" \ - -d '{ + --header "Authorization: Bearer AUTH_TOKEN" \ + --json '{ "db": "DATABASE_NAME", "q": "SELECT * FROM system.last_caches", "format": "json" From 9c9f2c69995e9c8db32779758e991803e47e866b Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:18:56 -0700 Subject: [PATCH 021/179] Update content/shared/influxdb3-admin/last-value-cache/show.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/last-value-cache/show.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/content/shared/influxdb3-admin/last-value-cache/show.md b/content/shared/influxdb3-admin/last-value-cache/show.md index 9fe027e86..27289a6db 100644 --- a/content/shared/influxdb3-admin/last-value-cache/show.md +++ 
b/content/shared/influxdb3-admin/last-value-cache/show.md @@ -93,9 +93,8 @@ curl -X POST "https://localhost:8181/api/v3/query_sql" \ ```bash curl -X POST "https://localhost:8181/api/v3/query_sql" \ - -H "Authorization: Bearer AUTH_TOKEN" \ - -H "Content-Type: application/json" \ - -d '{ + --header "Authorization: Bearer AUTH_TOKEN" \ + --json '{ "db": "DATABASE_NAME", "q": "SELECT * FROM system.last_caches WHERE name = '\''CACHE_NAME'\''", "format": "json" From c281a6e10191ab4d681edfe63e99c2192b3f5bf7 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:19:14 -0700 Subject: [PATCH 022/179] Update content/shared/influxdb3-admin/distinct-value-cache/create.md Co-authored-by: Jason Stirnaman --- .../shared/influxdb3-admin/distinct-value-cache/create.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/create.md b/content/shared/influxdb3-admin/distinct-value-cache/create.md index a11489f63..229fd665a 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/create.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/create.md @@ -95,9 +95,8 @@ curl -X POST "https://localhost:8181/api/v3/configure/distinct_cache" \ ```bash curl -X POST "https://localhost:8181/api/v3/configure/distinct_cache" \ - -H "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" \ - -H "Content-Type: application/json" \ - -d '{ + --header "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" \ + --json '{ "db": "example-db", "table": "wind_data", "name": "windDistinctCache", From a4023bae6dcad70a715e9376a32aee94793aeab8 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:19:25 -0700 Subject: [PATCH 023/179] Update content/shared/influxdb3-admin/distinct-value-cache/show.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/distinct-value-cache/show.md | 5 ++--- 1 file 
changed, 2 insertions(+), 3 deletions(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/show.md b/content/shared/influxdb3-admin/distinct-value-cache/show.md index 2541d606a..3326a8467 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/show.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/show.md @@ -99,9 +99,8 @@ curl -X POST "https://localhost:8181/api/v3/query_sql" \ ```bash curl -X POST "https://localhost:8181/api/v3/query_sql" \ - -H "Authorization: Bearer AUTH_TOKEN" \ - -H "Content-Type: application/json" \ - -d '{ + --header "Authorization: Bearer AUTH_TOKEN" \ + --json '{ "db": "DATABASE_NAME", "q": "SELECT * FROM system.distinct_caches WHERE name = '\''CACHE_NAME'\''", "format": "json" From aad8d899aea7d2e9061b4c0ebf79162cd3eabf1f Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:19:40 -0700 Subject: [PATCH 024/179] Update content/shared/influxdb3-admin/distinct-value-cache/query.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/distinct-value-cache/query.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/query.md b/content/shared/influxdb3-admin/distinct-value-cache/query.md index 63349341b..952ce9d39 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/query.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/query.md @@ -59,9 +59,8 @@ curl -X POST "https://localhost:8181/api/v3/query_sql" \ ```bash curl -X POST "https://localhost:8181/api/v3/query_sql" \ - -H "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" \ - -H "Content-Type: application/json" \ - -d '{ + --header "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" \ + --json '{ "db": "example-db", "q": "SELECT room, temp FROM last_cache('\''home'\'', '\''homeCache'\'') WHERE room = '\''Kitchen'\''", "format": "json" From 866ee9b11d26acfea5bcfe6e2ffdb31f72cfa8bc Mon Sep 
17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:21:05 -0700 Subject: [PATCH 025/179] Update content/shared/influxdb3-admin/distinct-value-cache/create.md Co-authored-by: Jason Stirnaman --- .../shared/influxdb3-admin/distinct-value-cache/create.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/create.md b/content/shared/influxdb3-admin/distinct-value-cache/create.md index 229fd665a..d0e89dc92 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/create.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/create.md @@ -77,9 +77,8 @@ You can also create a Distinct Value Cache using the [InfluxDB v3 HTTP API](/inf ```bash curl -X POST "https://localhost:8181/api/v3/configure/distinct_cache" \ - -H "Authorization: Bearer AUTH_TOKEN" \ - -H "Content-Type: application/json" \ - -d '{ + --header "Authorization: Bearer AUTH_TOKEN" \ + --json '{ "db": "DATABASE_NAME", "table": "TABLE_NAME", "name": "DVC_NAME", From 0384cad10091c6841438ee84b7e5eeff507ed006 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:24:38 -0700 Subject: [PATCH 026/179] Update content/shared/influxdb3-admin/distinct-value-cache/query.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/distinct-value-cache/query.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/query.md b/content/shared/influxdb3-admin/distinct-value-cache/query.md index 952ce9d39..9ec48d4ee 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/query.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/query.md @@ -44,9 +44,8 @@ To use the HTTP API to query cached data, send a `GET` or `POST` request to the ```bash curl -X POST "https://localhost:8181/api/v3/query_sql" \ - -H "Authorization: Bearer 
AUTH_TOKEN" \ - -H "Content-Type: application/json" \ - -d '{ + --header "Authorization: Bearer AUTH_TOKEN" \ + --json '{ "db": "DATABASE_NAME", "q": "SELECT * FROM distinct_cache('\''TABLE_NAME'\'', '\''CACHE_NAME'\'')", "format": "json" From 057de1230b225ef0fd5b287433ea26d21bc79320 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:24:48 -0700 Subject: [PATCH 027/179] Update content/shared/influxdb3-admin/distinct-value-cache/show.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/distinct-value-cache/show.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/show.md b/content/shared/influxdb3-admin/distinct-value-cache/show.md index 3326a8467..fd825711d 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/show.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/show.md @@ -82,9 +82,8 @@ To use the HTTP API to query and output cache information from the system table, ```bash curl -X POST "https://localhost:8181/api/v3/query_sql" \ - -H "Authorization: Bearer AUTH_TOKEN" \ - -H "Content-Type: application/json" \ - -d '{ + --header "Authorization: Bearer AUTH_TOKEN" \ + --json '{ "db": "DATABASE_NAME", "q": "SELECT * FROM system.distinct_caches", "format": "json" From 419fda92b5b0b95d39a97d23872b3ff14f938d28 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:25:19 -0700 Subject: [PATCH 028/179] Update content/shared/influxdb3-admin/last-value-cache/create.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/last-value-cache/create.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/content/shared/influxdb3-admin/last-value-cache/create.md b/content/shared/influxdb3-admin/last-value-cache/create.md index 720963e79..209aefcf7 100644 --- 
a/content/shared/influxdb3-admin/last-value-cache/create.md +++ b/content/shared/influxdb3-admin/last-value-cache/create.md @@ -90,9 +90,8 @@ To use the HTTP API to create a Last Value Cache, send a `POST` request to the ` ```bash curl -X POST "https://localhost:8181/api/v3/configure/last_cache" \ - -H "Authorization: Bearer AUTH_TOKEN" \ - -H "Content-Type: application/json" \ - -d '{ + --header "Authorization: Bearer AUTH_TOKEN" \ + --json '{ "db": "DATABASE_NAME", "table": "TABLE_NAME", "name": "LVC_NAME", From a5c19e60cde28af2aea0747b4f2cb50436da4c4e Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:26:14 -0700 Subject: [PATCH 029/179] Update content/shared/influxdb3-admin/last-value-cache/create.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/last-value-cache/create.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/content/shared/influxdb3-admin/last-value-cache/create.md b/content/shared/influxdb3-admin/last-value-cache/create.md index 209aefcf7..5e57de077 100644 --- a/content/shared/influxdb3-admin/last-value-cache/create.md +++ b/content/shared/influxdb3-admin/last-value-cache/create.md @@ -108,9 +108,8 @@ curl -X POST "https://localhost:8181/api/v3/configure/last_cache" \ ```bash curl -X POST "https://localhost:8181/api/v3/configure/last_cache" \ - -H "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" \ - -H "Content-Type: application/json" \ - -d '{ + --header "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" \ + --json '{ "db": "example-db", "table": "home", "name": "homeLastCache", From 5ea874998c24601145606e9353e387136b0bb0b5 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:26:27 -0700 Subject: [PATCH 030/179] Update content/shared/influxdb3-admin/last-value-cache/delete.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/last-value-cache/delete.md | 4 
+++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/last-value-cache/delete.md b/content/shared/influxdb3-admin/last-value-cache/delete.md index 23e0765cd..ba3d55b9b 100644 --- a/content/shared/influxdb3-admin/last-value-cache/delete.md +++ b/content/shared/influxdb3-admin/last-value-cache/delete.md @@ -25,7 +25,9 @@ influxdb3 delete last_cache \ ## Use the HTTP API -You can also delete a Last Value Cache using the [InfluxDB v3 HTTP API](/influxdb3/version/api/v3/). Send a `DELETE` request to the `/api/v3/configure/last_cache` endpoint with query parameters. +To use the HTTP API to delete a Last Value Cache, send a `DELETE` request to the `/api/v3/configure/last_cache` endpoint with query parameters. + +{{% api-endpoint method="DELETE" endpoint="/api/v3/configure/last_cache" api-ref="/influxdb3/core/api/v3/#operation/DeleteConfigureLastCache" %}} {{% code-placeholders "(DATABASE|TABLE|LVC)_NAME|AUTH_TOKEN" %}} ```bash From 5e465a412163c25843fd1314bdbb082874d985ea Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:26:40 -0700 Subject: [PATCH 031/179] Update content/shared/influxdb3-admin/last-value-cache/delete.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/last-value-cache/delete.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/last-value-cache/delete.md b/content/shared/influxdb3-admin/last-value-cache/delete.md index ba3d55b9b..4b7563084 100644 --- a/content/shared/influxdb3-admin/last-value-cache/delete.md +++ b/content/shared/influxdb3-admin/last-value-cache/delete.md @@ -32,7 +32,7 @@ To use the HTTP API to delete a Last Value Cache, send a `DELETE` request to the {{% code-placeholders "(DATABASE|TABLE|LVC)_NAME|AUTH_TOKEN" %}} ```bash curl -X DELETE "https://localhost:8181/api/v3/configure/last_cache?db=DATABASE_NAME&table=TABLE_NAME&name=LVC_NAME" \ - -H "Authorization: Bearer 
AUTH_TOKEN" + --header "Authorization: Bearer AUTH_TOKEN" ``` {{% /code-placeholders %}} From 61fe70ad8ed698e56ab6dc24da1209da527e6435 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:26:50 -0700 Subject: [PATCH 032/179] Update content/shared/influxdb3-admin/last-value-cache/delete.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/last-value-cache/delete.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/last-value-cache/delete.md b/content/shared/influxdb3-admin/last-value-cache/delete.md index 4b7563084..8f61adaa6 100644 --- a/content/shared/influxdb3-admin/last-value-cache/delete.md +++ b/content/shared/influxdb3-admin/last-value-cache/delete.md @@ -40,7 +40,7 @@ curl -X DELETE "https://localhost:8181/api/v3/configure/last_cache?db=DATABASE_N ```bash curl -X DELETE "https://localhost:8181/api/v3/configure/last_cache?db=example-db&table=home&name=homeLastCache" \ - -H "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" + --header "Authorization: Bearer 00xoXX0xXXx0000XxxxXx0Xx0xx0" ``` **Response codes:** From 0f46f108cacddcee695b31829dd19aa875cadcea Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:27:00 -0700 Subject: [PATCH 033/179] Update content/shared/influxdb3-admin/last-value-cache/show.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/last-value-cache/show.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/content/shared/influxdb3-admin/last-value-cache/show.md b/content/shared/influxdb3-admin/last-value-cache/show.md index 27289a6db..623e1c57f 100644 --- a/content/shared/influxdb3-admin/last-value-cache/show.md +++ b/content/shared/influxdb3-admin/last-value-cache/show.md @@ -69,9 +69,13 @@ In the examples above, replace the following: ## Use the HTTP API -You can query cache information using the [InfluxDB 
v3 SQL query API](/influxdb3/version/api/v3/). Send a `POST` request to the `/api/v3/query_sql` endpoint. +To use the HTTP API to query and output cache information from the system table, send a `GET` or `POST` request to the `/api/v3/query_sql` endpoint. -### Query all caches +{{% api-endpoint method="GET" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/GetExecuteQuerySQL" %}} + +{{% api-endpoint method="POST" endpoint="/api/v3/query_sql" api-ref="/influxdb3/version/api/v3/#operation/PostExecuteQuerySQL" %}} + +### Query all last value caches {{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} From 36085a80ea90b218210113a1016c16c356d9f1a1 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:42:13 -0700 Subject: [PATCH 034/179] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../cloud-dedicated/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index 9c843ef9f..024ecaaf6 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -289,7 +289,7 @@ paths: properties: db: type: string - description: Bucket to query. + description: Database to query from. q: description: Defines the InfluxQL query to run. 
type: string From 7cc0c38df6be582540f0da7cd656e6197df3ae40 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:42:28 -0700 Subject: [PATCH 035/179] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../cloud-dedicated/v1-compatibility/swaggerV1Compat.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index 024ecaaf6..7c661ce47 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -290,6 +290,10 @@ paths: db: type: string description: Database to query from. + rp: + description: | + The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention). + type: string q: description: Defines the InfluxQL query to run. 
type: string From 6ba77d4808f9a413b5f065abed19144ae4f78951 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:42:36 -0700 Subject: [PATCH 036/179] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../cloud-dedicated/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index 7c661ce47..c2fd7a753 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -138,7 +138,7 @@ paths: $ref: '#/components/schemas/Error' /query: get: - operationId: GetV1ExecuteQuery + operationId: GetQueryV1 tags: - Query summary: Query using the InfluxDB v1 HTTP API From 6efb013714df8e42bdd0d6707ffecb2a539ab5c4 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:43:13 -0700 Subject: [PATCH 037/179] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../cloud-dedicated/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index c2fd7a753..d8c5df5f9 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -187,7 +187,7 @@ paths: schema: type: string required: true - description: Bucket to query. + description: Database to query from. 
- in: query name: pretty description: | From 6a20bb037218f175a1858551903fd7c17365ff9e Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:43:22 -0700 Subject: [PATCH 038/179] Update api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../cloud-serverless/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index 858effb50..bf1029d56 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -186,7 +186,7 @@ paths: schema: type: string required: true - description: Bucket to query. + description: The database to query from. - in: query name: pretty description: | From 3b738ec6180849c506508d7ec6a6e48b4632cd78 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:43:29 -0700 Subject: [PATCH 039/179] Update api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index 751b96ea9..0c89e064e 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -186,7 +186,7 @@ paths: schema: type: string required: true - description: Bucket to query. + description: The database to query from. 
- in: query name: pretty description: | From 13087e49d0ab60d221a9db34498d84ef4df56dda Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 13:46:56 -0700 Subject: [PATCH 040/179] Update content/shared/influxdb3-admin/distinct-value-cache/create.md Co-authored-by: Jason Stirnaman --- content/shared/influxdb3-admin/distinct-value-cache/create.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/distinct-value-cache/create.md b/content/shared/influxdb3-admin/distinct-value-cache/create.md index d0e89dc92..560208c34 100644 --- a/content/shared/influxdb3-admin/distinct-value-cache/create.md +++ b/content/shared/influxdb3-admin/distinct-value-cache/create.md @@ -71,7 +71,9 @@ influxdb3 create distinct_cache \ ## Use the HTTP API -You can also create a Distinct Value Cache using the [InfluxDB v3 HTTP API](/influxdb3/version/api/v3/). Send a `POST` request to the `/api/v3/configure/distinct_cache` endpoint. +To use the HTTP API to create a Distinct Value Cache, send a `POST` request to the `/api/v3/configure/distinct_cache` endpoint. 
+ +{{% api-endpoint method="POST" endpoint="/api/v3/configure/distinct_cache" api-ref="/influxdb3/version/api/v3/#operation/PostConfigureDistinctCache" %}} {{% code-placeholders "(DATABASE|TABLE|DVC)_NAME|AUTH_TOKEN|COLUMNS|MAX_(CARDINALITY|AGE)" %}} From 76af7669ee3378c980903b6f80db29da0a2190b7 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Fri, 1 Aug 2025 16:45:45 -0700 Subject: [PATCH 041/179] Update api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../cloud-serverless/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index bf1029d56..43b84af62 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -288,7 +288,7 @@ paths: properties: db: type: string - description: Bucket to query. + description: The database to query from. q: description: Defines the InfluxQL query to run. 
type: string From 7436f0fbd862e9e70e1d2b040a3739fe8473ef4c Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Mon, 4 Aug 2025 14:37:18 -0700 Subject: [PATCH 042/179] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../cloud-dedicated/v1-compatibility/swaggerV1Compat.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index d8c5df5f9..e76225b56 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -294,6 +294,10 @@ paths: description: | The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention). type: string + rp: + description: | + The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention). + type: string q: description: Defines the InfluxQL query to run. 
type: string From c6a11dbb089ed40d4d0d4ccab01bd2062f7a5bf0 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Mon, 4 Aug 2025 14:37:47 -0700 Subject: [PATCH 043/179] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../cloud-dedicated/v1-compatibility/swaggerV1Compat.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index e76225b56..db77d64f1 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -298,6 +298,10 @@ paths: description: | The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention). type: string + rp: + description: | + The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention). + type: string q: description: Defines the InfluxQL query to run. 
type: string From 3b60c7f253a2765a718a7ba6f70b42e92cfbd7bc Mon Sep 17 00:00:00 2001 From: meelahme Date: Tue, 5 Aug 2025 13:21:16 -0700 Subject: [PATCH 044/179] docs: updating swaggerVwith rp, operationID, and bucket changed to database --- .../v1-compatibility/swaggerV1Compat.yml | 14 +++----------- .../v1-compatibility/swaggerV1Compat.yml | 8 ++++++-- .../clustered/v1-compatibility/swaggerV1Compat.yml | 8 ++++++-- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index db77d64f1..16491b315 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -66,7 +66,7 @@ paths: schema: type: string required: true - description: Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy. + description: Database to write to. If none exists, InfluxDB creates a database with a default 3-day retention policy. - in: query name: rp schema: @@ -187,7 +187,7 @@ paths: schema: type: string required: true - description: Database to query from. + description: The database to query from. - in: query name: pretty description: | @@ -294,14 +294,6 @@ paths: description: | The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention). type: string - rp: - description: | - The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention). - type: string - rp: - description: | - The retention policy to query data from. 
For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention). - type: string q: description: Defines the InfluxQL query to run. type: string @@ -363,7 +355,7 @@ paths: schema: type: string required: true - description: Bucket to query. + description: Database to query. - in: query name: rp schema: diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index 43b84af62..e599a77ce 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -65,7 +65,7 @@ paths: schema: type: string required: true - description: Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy. + description: Database to write to. If none exists, InfluxDB creates a database with a default 3-day retention policy. - in: query name: rp schema: @@ -137,7 +137,7 @@ paths: $ref: '#/components/schemas/Error' /query: get: - operationId: GetV1ExecuteQuery + operationId: GetQueryV1 tags: - Query summary: Query using the InfluxDB v1 HTTP API @@ -289,6 +289,10 @@ paths: db: type: string description: The database to query from. + rp: + description: | + The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-serverless/admin/databases/create/#influxql-dbrp-naming-convention). + type: string q: description: Defines the InfluxQL query to run. 
type: string diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index 0c89e064e..a189e53f9 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -65,7 +65,7 @@ paths: schema: type: string required: true - description: Bucket to write to. If none exists, InfluxDB creates a bucket with a default 3-day retention policy. + description: Database to write to. If none exists, InfluxDB creates a database with a default 3-day retention policy. - in: query name: rp schema: @@ -137,7 +137,7 @@ paths: $ref: '#/components/schemas/Error' /query: get: - operationId: GetV1ExecuteQuery + operationId: GetQueryV1 tags: - Query summary: Query using the InfluxDB v1 HTTP API @@ -289,6 +289,10 @@ paths: db: type: string description: Bucket to query. + rp: + description: | + The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-serverless/admin/databases/create/#influxql-dbrp-naming-convention). + type: string q: description: Defines the InfluxQL query to run. type: string From 3c2f475751be06c628863b785698a0fc32d03d3b Mon Sep 17 00:00:00 2001 From: meelahme Date: Tue, 5 Aug 2025 13:39:31 -0700 Subject: [PATCH 045/179] dox: fixing wrong indentation --- .../influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index a189e53f9..c8a797bb7 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -289,7 +289,7 @@ paths: db: type: string description: Bucket to query. - rp: + rp: description: | The retention policy to query data from. 
For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-serverless/admin/databases/create/#influxql-dbrp-naming-convention). type: string From 4a6cbb38dd712b53eec9938c378c373549e040ce Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Tue, 5 Aug 2025 13:43:53 -0700 Subject: [PATCH 046/179] Update api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index c8a797bb7..ffe39b4fc 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -288,7 +288,7 @@ paths: properties: db: type: string - description: Bucket to query. + description: Database to query. rp: description: | The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-serverless/admin/databases/create/#influxql-dbrp-naming-convention). 
From 7f17176865fc6f980bdce71d26aac8be2d877389 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Tue, 5 Aug 2025 13:44:08 -0700 Subject: [PATCH 047/179] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../cloud-dedicated/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index 16491b315..d3d067a62 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -289,7 +289,7 @@ paths: properties: db: type: string - description: Database to query from. + description: The database to query from. rp: description: | The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention). From 5ef4d2e1e4fe0e2199f49e0b058fc854ecb30894 Mon Sep 17 00:00:00 2001 From: meelahme Date: Tue, 5 Aug 2025 13:52:18 -0700 Subject: [PATCH 048/179] docs: updating rp for cluster and serverless --- .../influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index c8a797bb7..d6f473311 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -291,7 +291,7 @@ paths: description: Bucket to query. rp: description: | - The retention policy to query data from. 
For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-serverless/admin/databases/create/#influxql-dbrp-naming-convention). + The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/clustered/admin/databases/create/#influxql-dbrp-naming-convention). type: string q: description: Defines the InfluxQL query to run. From ae96f0154ede3ab81b76229286256f5e23b86941 Mon Sep 17 00:00:00 2001 From: meelahme Date: Tue, 5 Aug 2025 14:02:39 -0700 Subject: [PATCH 049/179] docs: updates to clous-serveless rp --- .../cloud-serverless/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index e599a77ce..c3fc9f285 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -291,7 +291,7 @@ paths: description: The database to query from. rp: description: | - The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-serverless/admin/databases/create/#influxql-dbrp-naming-convention). + The retention policy to query data from. type: string q: description: Defines the InfluxQL query to run. 
From a2157763c9d589c3c94fabe5c6548ae452ffe3f9 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Tue, 5 Aug 2025 14:38:10 -0700 Subject: [PATCH 050/179] Update api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../cloud-serverless/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index c3fc9f285..9b65dfd27 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -288,7 +288,7 @@ paths: properties: db: type: string - description: The database to query from. + description: Database to query. rp: description: | The retention policy to query data from. From 65e80cad92e53f3c9c999b60fc2925297ad8917a Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Tue, 5 Aug 2025 14:38:21 -0700 Subject: [PATCH 051/179] Update api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../cloud-serverless/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index 9b65dfd27..b1bcb5e09 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -290,7 +290,7 @@ paths: type: string description: Database to query. rp: - description: | + description: | The retention policy to query data from. 
type: string q: From 9ee11d3d41ba64421d97f548dc1043efa3294906 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Tue, 5 Aug 2025 14:38:30 -0700 Subject: [PATCH 052/179] Update api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index 9741acb5a..2b6305f4c 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -290,7 +290,7 @@ paths: type: string description: Database to query. rp: - description: | + description: | The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/clustered/admin/databases/create/#influxql-dbrp-naming-convention). type: string q: From 6fb6454aa26437349ab7153f643dee69301f643d Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Wed, 6 Aug 2025 09:54:12 -0700 Subject: [PATCH 053/179] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../v1-compatibility/swaggerV1Compat.yml | 21 ++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index d3d067a62..c429fc406 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -205,7 +205,26 @@ paths: name: rp schema: type: string - description: Retention policy name. 
+ description: | + The retention policy name for InfluxQL compatibility + + Optional parameter that, when combined with the db parameter, forms the complete database name to query. In InfluxDB Cloud Dedicated, databases can be named using the + database_name/retention_policy_name convention for InfluxQL compatibility. + + When a request specifies both `db` and `rp`, Cloud Dedicated combines them as `db/rp` to target the database--for example: + + - If `db=mydb` and `rp=autogen`, the query targets the database named `mydb/autogen` + - If only `db=mydb` is provided (no `rp`), the query targets the database named `mydb` + + Unlike InfluxDB v1 and Cloud Serverless, Cloud Dedicated does not use DBRP mappings or separate retention policy objects. This parameter exists solely for v1 API + compatibility and database naming conventions. + + _Note: The retention policy name does not control data retention in Cloud Dedicated. Data retention is determined by the database's **retention period** setting._ + + ### Related + + - [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention) + - [InfluxQL data retention policy mapping differences](/influxdb3/cloud-serverless/guides/prototype-evaluation/#influxql-data-retention-policy-mapping-differences) - name: epoch description: | Formats timestamps as unix (epoch) timestamps with the specified precision From ce5d9292c542d85cc1261595e35571fc234d8220 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Wed, 6 Aug 2025 09:54:39 -0700 Subject: [PATCH 054/179] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../v1-compatibility/swaggerV1Compat.yml | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml 
b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index c429fc406..f4e16bc45 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -66,7 +66,22 @@ paths: schema: type: string required: true - description: Database to write to. If none exists, InfluxDB creates a database with a default 3-day retention policy. + description: | + The database to write to. + + **Database targeting:** In Cloud Dedicated, databases can be named using the `database_name/retention_policy_name` convention for InfluxQL compatibility. Cloud Dedicated does not use DBRP mappings. The db and rp parameters are used to construct the target database name following this naming convention. + + **Auto-creation behavior:** Cloud Dedicated requires databases to be created before writing data. The v1 `/write` API does not automatically create databases. If the specified + database does not exist, the write request will fail. + + Authentication: Requires a valid API token with _write_ permissions for the target database. 
+ + ### Related + + - [Write data to InfluxDB Cloud Dedicated](/influxdb3/cloud-dedicated/write-data/) + - [Manage databases in InfluxDB Cloud Dedicated](/influxdb3/cloud-dedicated/admin/databases/) + - [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention) + - [InfluxQL data retention policy mapping differences](/influxdb3/cloud-serverless/guides/prototype-evaluation/#influxql-data-retention-policy-mapping-differences) - in: query name: rp schema: From 89db6cdd51ba67439d109e22276a011dde20c65c Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Wed, 6 Aug 2025 09:55:09 -0700 Subject: [PATCH 055/179] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../v1-compatibility/swaggerV1Compat.yml | 21 ++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index f4e16bc45..6a9750253 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -326,7 +326,26 @@ paths: description: The database to query from. rp: description: | - The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention). + The retention policy name for InfluxQL compatibility + + Optional parameter that, when combined with the db parameter, forms the complete database name to query. In InfluxDB Cloud Dedicated, databases can be named using the + database_name/retention_policy_name convention for InfluxQL compatibility. 
+ + When a request specifies both `db` and `rp`, Cloud Dedicated combines them as `db/rp` to target the database--for example: + + - If `db=mydb` and `rp=autogen`, the query targets the database named `mydb/autogen` + - If only `db=mydb` is provided (no `rp`), the query targets the database named `mydb` + + Unlike InfluxDB v1 and Cloud Serverless, Cloud Dedicated does not use DBRP mappings or separate retention policy objects. This parameter exists solely for v1 API + compatibility and database naming conventions. + + _Note: The retention policy name does not control data retention in Cloud Dedicated. Data retention is determined by the database's **retention period** setting._ + + ### Related + + - [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention) + - [Migrate data from InfluxDB 1.x to Cloud Dedicated](/influxdb3/cloud-dedicated/guides/migrate-data/migrate-1x-to-cloud-dedicated/) + - [InfluxQL data retention policy mapping differences](/influxdb3/cloud-serverless/guides/prototype-evaluation/#influxql-data-retention-policy-mapping-differences) type: string q: description: Defines the InfluxQL query to run. 
From bc890e4d6d71783e6e3c4d119ceffb15c7a9bcbf Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Wed, 6 Aug 2025 09:55:21 -0700 Subject: [PATCH 056/179] Update api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../v1-compatibility/swaggerV1Compat.yml | 27 ++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index b1bcb5e09..db48db47f 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -186,7 +186,32 @@ paths: schema: type: string required: true - description: The database to query from. + description: | + The database name for InfluxQL queries + + Required parameter that specifies the database to query via DBRP (Database Retention Policy) mapping. In Cloud Serverless, this parameter is used together with DBRP + mappings to identify which bucket to query. + + The `db` parameter (optionally combined with `rp`) must have an existing DBRP mapping that points to a bucket. Without a valid DBRP mapping, queries will fail with an + authorization error. + + **DBRP mapping requirements:** + - A DBRP mapping must exist before querying + - Mappings can be created automatically when writing data with the v1 API (if your token has permissions) + - Mappings can be created manually using the InfluxDB CLI or API + + ### Examples + - `db=mydb` - uses the default DBRP mapping for `mydb` + - `db=mydb` with `rp=weekly` - uses the DBRP mapping for `mydb/weekly` + + _Note: Unlike the v1 `/write` endpoint which can auto-create buckets and mappings, the `/query` endpoint requires pre-existing DBRP mappings. 
The actual data is stored in and + queried from the bucket that the DBRP mapping points to._ + + ### Related + + - [Use the InfluxDB v1 query API and InfluxQL in Cloud Serverless](/influxdb3/cloud-serverless/query-data/execute-queries/v1-http/) + - [Map v1 databases and retention policies to buckets in Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v1/#map-v1-databases-and-retention-policies-to-buckets) + - [Migrate from InfluxDB 1.x to Cloud Serverless](/influxdb3/cloud-serverless/guides/migrate-data/migrate-1x-to-serverless/) - in: query name: pretty description: | From a69fea8a007e2dae25af25593c365f3201e1d0ef Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Wed, 6 Aug 2025 09:55:33 -0700 Subject: [PATCH 057/179] Update api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../v1-compatibility/swaggerV1Compat.yml | 27 ++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index db48db47f..1e18adbf7 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -313,7 +313,32 @@ paths: properties: db: type: string - description: Database to query. + description: | + The database name for InfluxQL queries + + Required parameter that specifies the database to query via DBRP (Database Retention Policy) mapping. In Cloud Serverless, this parameter is used together with DBRP + mappings to identify which bucket to query. + + The `db` parameter (optionally combined with `rp`) must have an existing DBRP mapping that points to a bucket. Without a valid DBRP mapping, queries will fail with an + authorization error. 
+ + **DBRP mapping requirements:** + - A DBRP mapping must exist before querying + - Mappings can be created automatically when writing data with the v1 API (if your token has permissions) + - Mappings can be created manually using the InfluxDB CLI or API + + ### Examples + - `db=mydb` - uses the default DBRP mapping for `mydb` + - `db=mydb` with `rp=weekly` - uses the DBRP mapping for `mydb/weekly` + + _Note: Unlike the v1 `/write` endpoint which can auto-create buckets and mappings, the `/query` endpoint requires pre-existing DBRP mappings. The actual data is stored in and + queried from the bucket that the DBRP mapping points to._ + + ### Related + + - [Execute InfluxQL queries using the v1 API](/influxdb3/cloud-serverless/query-data/execute-queries/influxql/api/v1-http/) + - [Map v1 databases and retention policies to buckets in Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v1/#map-v1-databases-and-retention-policies-to-buckets) + - [Manage DBRP mappings in Cloud Serverless](/influxdb3/cloud-serverless/admin/dbrp/) rp: description: | The retention policy to query data from. 
From c6bd7bb726208d45c65893791997f4262065f9f5 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Wed, 6 Aug 2025 09:55:44 -0700 Subject: [PATCH 058/179] Update api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../v1-compatibility/swaggerV1Compat.yml | 22 ++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index 1e18adbf7..08a07ef8b 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -341,7 +341,27 @@ paths: - [Manage DBRP mappings in Cloud Serverless](/influxdb3/cloud-serverless/admin/dbrp/) rp: description: | - The retention policy to query data from. + The retention policy name for InfluxQL queries + + Optional parameter that specifies the retention policy to use when querying data with InfluxQL. In Cloud Serverless, this parameter works with DBRP (Database Retention + Policy) mappings to identify the target bucket. + + When provided together with the `db` parameter, Cloud Serverless uses the DBRP mapping to determine which bucket to query. The combination of `db` and `rp` must have an + existing DBRP mapping that points to a bucket. If no `rp` is specified, Cloud Serverless uses the default retention policy mapping for the database. + + Requirements: A DBRP mapping must exist for the db/rp combination before you can query data. DBRP mappings can be created: + - Automatically when writing data with the v1 API (if your token has sufficient permissions) + - Manually using the InfluxDB CLI or API + + Example: If `db=mydb` and `rp=weekly`, the query uses the DBRP mapping for `mydb/weekly` to determine which bucket to query. 
+ + _Note: The retention policy name is used only for DBRP mapping. Actual data retention is controlled by the target bucket's retention period setting, not by the retention policy name._ + + ### Related + + - [Execute InfluxQL queries using the v1 API](/influxdb3/cloud-serverless/query-data/execute-queries/influxql/api/v1-http/) + - [Map v1 databases and retention policies to buckets in Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v1/#map-v1-databases-and-retention-policies-to-buckets) + - [Manage DBRP mappings in Cloud Serverless](/influxdb3/cloud-serverless/admin/dbrp/) type: string q: description: Defines the InfluxQL query to run. From 4d77cde02ea4f0fa0f4ced8669d5d35ab7350266 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Wed, 6 Aug 2025 09:55:55 -0700 Subject: [PATCH 059/179] Update api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../v1-compatibility/swaggerV1Compat.yml | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml index 6a9750253..128021d19 100644 --- a/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-dedicated/v1-compatibility/swaggerV1Compat.yml @@ -323,7 +323,29 @@ paths: properties: db: type: string - description: The database to query from. + description: | + The database name for InfluxQL queries. + + Required parameter that specifies the database to query. + In InfluxDB Cloud Dedicated, this can be either: + - A simple database name (for example, `mydb`) + - The database portion of a `database_name/retention_policy_name` naming convention (used together with the `rp` parameter) + + When used alone, `db` specifies the complete database name to query. 
When used with the `rp` parameter, they combine to form the full database name as `db/rp`--for example, if `db=mydb` and `rp=autogen`, the query targets the database named `mydb/autogen`. + + Unlike InfluxDB Cloud Serverless, Cloud Dedicated does not use DBRP mappings. The database name directly corresponds to an existing database in your Cloud Dedicated cluster. + + Examples: + - `db=mydb` - queries the database named `mydb` + - `db=mydb` with `rp=autogen` - queries the database named `mydb/autogen` + + _Note: The specified database must exist in your Cloud Dedicated cluster. Queries will fail if the database does not exist._ + + ### Related + + - [InfluxQL DBRP naming convention](/influxdb3/cloud-dedicated/admin/databases/create/#influxql-dbrp-naming-convention) + - [Migrate data from InfluxDB 1.x to Cloud Dedicated](/influxdb3/cloud-dedicated/guides/migrate-data/migrate-1x-to-cloud-dedicated/) + - [InfluxQL data retention policy mapping differences between InfluxDB Cloud Dedicated and Cloud Serverless](/influxdb3/cloud-serverless/guides/prototype-evaluation/#influxql-data-retention-policy-mapping-differences) rp: description: | The retention policy name for InfluxQL compatibility From 33cae0b3e2154587d92959eebad9c362c82353ac Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Wed, 6 Aug 2025 09:56:05 -0700 Subject: [PATCH 060/179] Update api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../v1-compatibility/swaggerV1Compat.yml | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index 2b6305f4c..2fb6d81fc 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -291,7 +291,29 @@ paths: description: Database 
to query. rp: description: | - The retention policy to query data from. For more information, see [InfluxQL DBRP naming convention](/influxdb3/clustered/admin/databases/create/#influxql-dbrp-naming-convention). + The retention policy name for InfluxQL compatibility + + Optional parameter that, when combined with the `db` parameter, forms the complete database name to query. In InfluxDB Clustered, databases can be named using the + `database_name/retention_policy_name` convention for InfluxQL compatibility. + + When a request specifies both `db` and `rp`, InfluxDB Clustered combines them as `db/rp` to target the database--for example: + + - If `db=mydb` and `rp=autogen`, the query targets the database named `mydb/autogen` + - If only `db=mydb` is provided (no `rp`), the query targets the database named `mydb` + + Unlike InfluxDB v1 and Cloud Serverless, InfluxDB Clustered does not use DBRP mappings or separate retention policy objects. This parameter exists solely for v1 API + compatibility and database naming conventions. + + Note: The retention policy name does not control data retention in InfluxDB Clustered. Data retention is determined by the database's _retention period_ setting. + + ### Related + + - [Use the v1 query API and InfluxQL to query data in InfluxDB Clustered](/influxdb3/clustered/query-data/execute-queries/influxdb-v1-api/) + - [Use the InfluxDB v1 API with InfluxDB Clustered](/influxdb3/clustered/guides/api-compatibility/v1/) + - [Manage databases in InfluxDB Clustered](/influxdb3/clustered/admin/databases/) + - [InfluxQL DBRP naming convention in InfluxDB Clustered](/influxdb3/clustered/admin/databases/create/#influxql-dbrp-naming-convention) + - [Migrate data from InfluxDB v1 to InfluxDB Clustered](/influxdb3/clustered/guides/migrate-data/migrate-1x-to-clustered/) + type: string q: description: Defines the InfluxQL query to run. 
From 4f0a7181efa34a4705560c9114da09c91f1fbe26 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Wed, 6 Aug 2025 09:56:21 -0700 Subject: [PATCH 061/179] Update api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../v1-compatibility/swaggerV1Compat.yml | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml index 2fb6d81fc..a2f8693c9 100644 --- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml @@ -65,7 +65,23 @@ paths: schema: type: string required: true - description: Database to write to. If none exists, InfluxDB creates a database with a default 3-day retention policy. + description: | + The database to write to. + + **Database targeting:** In InfluxDB Clustered, databases can be named using the `database_name/retention_policy_name` convention for InfluxQL compatibility. InfluxDB Clustered does not use DBRP mappings. The db and rp parameters are used to construct the target database name following this naming convention. + + **Auto-creation behavior:** InfluxDB Clustered requires databases to be created before writing data. The v1 `/write` API does not automatically create databases. If the specified + database does not exist, the write request will fail. + + Authentication: Requires a valid API token with _write_ permissions for the target database. 
+ + ### Related + + - [Write data to InfluxDB Clustered](/influxdb3/clustered/write-data/) + - [Use the InfluxDB v1 API with InfluxDB Clustered](/influxdb3/clustered/guides/api-compatibility/v1/) + - [Manage databases in InfluxDB Clustered](/influxdb3/clustered/admin/databases/) + - [InfluxQL DBRP naming convention in InfluxDB Clustered](/influxdb3/clustered/admin/databases/create/#influxql-dbrp-naming-convention) + - [Migrate data from InfluxDB v1 to InfluxDB Clustered](/influxdb3/clustered/guides/migrate-data/migrate-1x-to-clustered/) - in: query name: rp schema: From beb7bb2261ea0ce49279747b802a450a466c4c15 Mon Sep 17 00:00:00 2001 From: Jameelah Mercer <36314199+MeelahMe@users.noreply.github.com> Date: Thu, 7 Aug 2025 13:13:56 -0700 Subject: [PATCH 062/179] Update api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml Co-authored-by: Jason Stirnaman --- .../v1-compatibility/swaggerV1Compat.yml | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml index 08a07ef8b..2ff111180 100644 --- a/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml +++ b/api-docs/influxdb3/cloud-serverless/v1-compatibility/swaggerV1Compat.yml @@ -229,7 +229,29 @@ paths: name: rp schema: type: string - description: Retention policy name. + description: | + The retention policy name for InfluxQL queries + + Optional parameter that specifies the retention policy to use when querying data with InfluxQL. In Cloud Serverless, this parameter works with DBRP (Database Retention + Policy) mappings to identify the target bucket. + + When provided together with the `db` parameter, Cloud Serverless uses the DBRP mapping to determine which bucket to query. The combination of `db` and `rp` must have an + existing DBRP mapping that points to a bucket. 
If no `rp` is specified, Cloud Serverless uses the default retention policy mapping for the database. + + Requirements: A DBRP mapping must exist for the db/rp combination before you can query data. DBRP mappings can be created: + - Automatically when writing data with the v1 API (if your token has sufficient permissions) + - Manually using the InfluxDB CLI or API + + Example: If `db=mydb` and `rp=weekly`, the query uses the DBRP mapping for `mydb/weekly` to determine which bucket to query. + + _Note: The retention policy name is used only for DBRP mapping. Actual data retention is controlled by the target bucket's retention period setting, not by the retention + policy name._ + + ### Related + + - [Use the InfluxDB v1 query API and InfluxQL in Cloud Serverless](/influxdb3/cloud-serverless/query-data/execute-queries/v1-http/) + - [Map v1 databases and retention policies to buckets in Cloud Serverless](/influxdb3/cloud-serverless/guides/api-compatibility/v1/#map-v1-databases-and-retention-policies-to-buckets) + - [Migrate from InfluxDB 1.x to Cloud Serverless](/influxdb3/cloud-serverless/guides/migrate-data/migrate-1x-to-serverless/) - name: epoch description: | Formats timestamps as unix (epoch) timestamps with the specified precision From 1846623a278cf59cdee3eefd54a465c3586e8335 Mon Sep 17 00:00:00 2001 From: Abhishek Saharn <102726227+asaharn@users.noreply.github.com> Date: Mon, 11 Aug 2025 20:19:24 +0530 Subject: [PATCH 063/179] Updated Description for MS Fabric --- data/telegraf_plugins.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/data/telegraf_plugins.yml b/data/telegraf_plugins.yml index 0f9ee3fe4..ac89b5be7 100644 --- a/data/telegraf_plugins.yml +++ b/data/telegraf_plugins.yml @@ -2940,8 +2940,8 @@ output: Explorer](https://docs.microsoft.com/en-us/azure/data-explorer), [Azure Synapse Data Explorer](https://docs.microsoft.com/en-us/azure/synapse-analytics/data-explorer/data-explorer-overview), - and [Real time analytics in - 
Fabric](https://learn.microsoft.com/en-us/fabric/real-time-analytics/overview) + and [Real-Time Intelligence in + Fabric](https://learn.microsoft.com/fabric/real-time-intelligence/overview) services. Azure Data Explorer is a distributed, columnar store, purpose built for From 0aa345572b4e91773475e2dab6e54fd6b7c9b398 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 13 Aug 2025 14:19:54 +0000 Subject: [PATCH 066/179] Update APT signing key location from /etc/apt/trusted.gpg.d to /etc/apt/keyrings Co-authored-by: jstirnaman <212227+jstirnaman@users.noreply.github.com> --- content/influxdb/v1/introduction/install.md | 8 ++++---- content/influxdb/v2/install/_index.md | 4 ++-- .../cloud-dedicated/reference/cli/influxctl/_index.md | 4 ++-- .../influxdb3/clustered/reference/cli/influxctl/_index.md | 4 ++-- content/telegraf/v1/install.md | 8 ++++---- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/content/influxdb/v1/introduction/install.md b/content/influxdb/v1/introduction/install.md index 690799f4e..7aa6a7320 100644 --- a/content/influxdb/v1/introduction/install.md +++ b/content/influxdb/v1/introduction/install.md @@ -75,8 +75,8 @@ For Ubuntu/Debian users, add the InfluxData repository with the following comman # Primary key fingerprint: 24C9 75CB A61A 024E E1B6 3178 7C3D 5715 9FC2 F927 # Subkey fingerprint: 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E wget -q https://repos.influxdata.com/influxdata-archive.key -gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null -echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list +gpg --show-keys --with-fingerprint --with-colons 
./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/keyrings/influxdata-archive.gpg > /dev/null +echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list ``` {{% /code-tab-content %}} @@ -86,8 +86,8 @@ echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repo # Primary key fingerprint: 24C9 75CB A61A 024E E1B6 3178 7C3D 5715 9FC2 F927 # Subkey fingerprint: 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E curl --silent --location -O https://repos.influxdata.com/influxdata-archive.key -gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null -echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list +gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/keyrings/influxdata-archive.gpg > /dev/null +echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list ``` {{% /code-tab-content %}} {{< /code-tabs-wrapper >}} diff --git a/content/influxdb/v2/install/_index.md b/content/influxdb/v2/install/_index.md index 0932a613f..2a7b4bc3a 100644 --- a/content/influxdb/v2/install/_index.md +++ b/content/influxdb/v2/install/_index.md @@ -354,8 +354,8 @@ To install {{% product-name %}} on Linux, do one of the following: | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' \ && cat 
influxdata-archive.key \ | gpg --dearmor \ - | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null \ - && echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' \ + | sudo tee /etc/apt/keyrings/influxdata-archive.gpg > /dev/null \ + && echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' \ | sudo tee /etc/apt/sources.list.d/influxdata.list # Install influxdb sudo apt-get update && sudo apt-get install influxdb2 diff --git a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/_index.md b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/_index.md index a8557c472..ff50c38df 100644 --- a/content/influxdb3/cloud-dedicated/reference/cli/influxctl/_index.md +++ b/content/influxdb3/cloud-dedicated/reference/cli/influxctl/_index.md @@ -176,8 +176,8 @@ To download the Linux `influxctl` package, do one of the following: # Primary key fingerprint: 24C9 75CB A61A 024E E1B6 3178 7C3D 5715 9FC2 F927 # Subkey fingerprint: 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E wget -q https://repos.influxdata.com/influxdata-archive.key -gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null -echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list +gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/keyrings/influxdata-archive.gpg > /dev/null +echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee 
/etc/apt/sources.list.d/influxdata.list sudo apt-get update && sudo apt-get install influxctl ``` diff --git a/content/influxdb3/clustered/reference/cli/influxctl/_index.md b/content/influxdb3/clustered/reference/cli/influxctl/_index.md index bb7b97175..953d016e9 100644 --- a/content/influxdb3/clustered/reference/cli/influxctl/_index.md +++ b/content/influxdb3/clustered/reference/cli/influxctl/_index.md @@ -166,8 +166,8 @@ To download the Linux `influxctl` package, do one of the following: # Primary key fingerprint: 24C9 75CB A61A 024E E1B6 3178 7C3D 5715 9FC2 F927 # Subkey fingerprint: 9D53 9D90 D332 8DC7 D6C8 D3B9 D8FF 8E1F 7DF8 B07E wget -q https://repos.influxdata.com/influxdata-archive.key -gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null -echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list +gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' && cat influxdata-archive.key | gpg --dearmor | sudo tee /etc/apt/keyrings/influxdata-archive.gpg > /dev/null +echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list ``` {{% /code-tab-content %}} diff --git a/content/telegraf/v1/install.md b/content/telegraf/v1/install.md index 21c0d2175..9aa40d9cd 100644 --- a/content/telegraf/v1/install.md +++ b/content/telegraf/v1/install.md @@ -180,8 +180,8 @@ gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive.key 2>&1 \ | grep -q '^fpr:\+24C975CBA61A024EE1B631787C3D57159FC2F927:$' \ && cat influxdata-archive.key \ | gpg --dearmor \ -| sudo tee 
/etc/apt/trusted.gpg.d/influxdata-archive.gpg > /dev/null \ -&& echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' \ +| sudo tee /etc/apt/keyrings/influxdata-archive.gpg > /dev/null \ +&& echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive.gpg] https://repos.influxdata.com/debian stable main' \ | sudo tee /etc/apt/sources.list.d/influxdata.list sudo apt-get update && sudo apt-get install telegraf ``` @@ -198,8 +198,8 @@ gpg --show-keys --with-fingerprint --with-colons ./influxdata-archive_compat.key | grep -q '^fpr:\+9D539D90D3328DC7D6C8D3B9D8FF8E1F7DF8B07E:$' \ && cat influxdata-archive_compat.key \ | gpg --dearmor \ -| sudo tee /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg > /dev/null -echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' \ +| sudo tee /etc/apt/keyrings/influxdata-archive_compat.gpg > /dev/null +echo 'deb [signed-by=/etc/apt/keyrings/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' \ | sudo tee /etc/apt/sources.list.d/influxdata.list sudo apt-get update && sudo apt-get install telegraf ``` From 97c02baf20b3371f22bceb4f192006ce936e9518 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 13 Aug 2025 14:40:54 +0000 Subject: [PATCH 067/179] Complete comprehensive GitHub Copilot instructions with validated build and test processes Co-authored-by: jstirnaman <212227+jstirnaman@users.noreply.github.com> --- .github/copilot-instructions.md | 443 ++++++++++++++++++++------------ 1 file changed, 273 insertions(+), 170 deletions(-) diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index ffa9b01d0..99059b56f 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -1,134 +1,283 @@ -# Instructions for InfluxData Documentation +# InfluxData Documentation Repository 
(docs-v2)

Always follow these instructions first and fall back to additional search and context gathering only when the information provided here is incomplete or found to be in error.

## Working Effectively

### Bootstrap, Build, and Test the Repository

Execute these commands in order to set up a complete working environment:

1. **Install Node.js dependencies** (takes ~4 seconds):

   ```bash
   # Skip Cypress binary download due to network restrictions in CI environments
   CYPRESS_INSTALL_BINARY=0 yarn install
   ```

2. **Build the static site** (takes ~75 seconds, NEVER CANCEL - set timeout to 180+ seconds):

   ```bash
   npx hugo --quiet
   ```

3. **Start the development server** (builds in ~92 seconds, NEVER CANCEL - set timeout to 150+ seconds):

   ```bash
   npx hugo server --bind 0.0.0.0 --port 1313
   ```

   - Access at: http://localhost:1313/
   - Serves 5,359+ pages and 441 static files
   - Auto-rebuilds on file changes

4. **Alternative Docker development setup** (use if local Hugo fails):
   ```bash
   docker compose up local-dev
   ```
   **Note**: May fail in restricted network environments due to Alpine package manager issues.

### Testing (CRITICAL: NEVER CANCEL long-running tests)

#### Code Block Testing (takes 5-15 minutes per product, NEVER CANCEL - set timeout to 30+ minutes):

```bash
# Build test environment first (takes ~30 seconds, may fail due to network restrictions)
docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest .
+ +# Test all products (takes 15-45 minutes total) +yarn test:codeblocks:all + +# Test specific products +yarn test:codeblocks:cloud +yarn test:codeblocks:v2 +yarn test:codeblocks:telegraf +``` + +#### Link Validation (takes 10-30 minutes, NEVER CANCEL - set timeout to 45+ minutes): + +```bash +# Test all links (very long-running) +yarn test:links + +# Test specific files/products (faster) +yarn test:links content/influxdb3/core/**/*.md +yarn test:links:v3 +yarn test:links:v2 +``` + +#### Style Linting (takes 30-60 seconds): + +```bash +# Basic Vale linting +docker compose run -T vale content/**/*.md + +# Product-specific linting with custom configurations +docker compose run -T vale --config=content/influxdb3/cloud-dedicated/.vale.ini --minAlertLevel=error content/influxdb3/cloud-dedicated/**/*.md +``` + +#### JavaScript and CSS Linting (takes 5-10 seconds): + +```bash +yarn eslint assets/js/**/*.js +yarn prettier --check "**/*.{css,js,ts,jsx,tsx}" +``` + +### Pre-commit Hooks (automatically run, can be skipped if needed): + +```bash +# Run all pre-commit checks manually +yarn lint + +# Skip pre-commit hooks if necessary (not recommended) +git commit -m "message" --no-verify +``` + +## Validation Scenarios + +Always test these scenarios after making changes to ensure full functionality: + +### 1. Documentation Rendering Test + +```bash +# Start Hugo server +npx hugo server --bind 0.0.0.0 --port 1313 + +# Verify key pages load correctly (200 status) +curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/influxdb3/core/ +curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/influxdb/v2/ +curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/telegraf/v1/ + +# Verify content contains expected elements +curl -s http://localhost:1313/influxdb3/core/ | grep -i "influxdb" +``` + +### 2. 
Build Output Validation + +```bash +# Verify build completes successfully +npx hugo --quiet + +# Check build output exists and has reasonable size (~529MB) +ls -la public/ +du -sh public/ + +# Verify key files exist +file public/index.html +file public/influxdb3/core/index.html +``` + +### 3. Shortcode and Formatting Test + +```bash +# Test shortcode examples page +yarn test:links content/example.md +``` + +## Repository Structure and Key Locations + +### Content Organization + +- **InfluxDB 3**: `/content/influxdb3/` (core, enterprise, cloud-dedicated, cloud-serverless, clustered, explorer) +- **InfluxDB v2**: `/content/influxdb/` (v2, cloud, enterprise_influxdb, v1) +- **Telegraf**: `/content/telegraf/v1/` +- **Other tools**: `/content/kapacitor/`, `/content/chronograf/`, `/content/flux/` +- **Shared content**: `/content/shared/` +- **Examples**: `/content/example.md` (comprehensive shortcode reference) + +### Configuration Files + +- **Hugo config**: `/config/_default/` +- **Package management**: `package.json`, `yarn.lock` +- **Docker**: `compose.yaml`, `Dockerfile.pytest` +- **Git hooks**: `lefthook.yml` +- **Testing**: `cypress.config.js`, `pytest.ini` (in test directories) +- **Linting**: `.vale.ini`, `.prettierrc.yaml`, `eslint.config.js` + +### Build and Development + +- **Hugo binary**: Available via `npx hugo` (version 0.148.2+) +- **Static assets**: `/assets/` (JavaScript, CSS, images) +- **Build output**: `/public/` (generated, ~529MB) +- **Layouts**: `/layouts/` (Hugo templates) +- **Data files**: `/data/` (YAML/JSON data for templates) + +## Technology Stack + +- **Static Site Generator**: Hugo (0.148.2+ extended) +- **Package Manager**: Yarn (1.22.22+) with Node.js (20.19.4+) +- **Testing Framework**: + - Pytest with pytest-codeblocks (for code examples) + - Cypress (for link validation and E2E tests) + - Vale (for style and writing guidelines) +- **Containerization**: Docker with Docker Compose +- **Linting**: ESLint, Prettier, Vale +- **Git 
Hooks**: Lefthook + +## Common Tasks and Build Times + +### Time Expectations (CRITICAL - NEVER CANCEL) + +- **Dependency installation**: 4 seconds +- **Hugo static build**: 75 seconds (NEVER CANCEL - timeout: 180+ seconds) +- **Hugo server startup**: 92 seconds (NEVER CANCEL - timeout: 150+ seconds) +- **Code block tests**: 5-15 minutes per product (NEVER CANCEL - timeout: 30+ minutes) +- **Link validation**: 10-30 minutes (NEVER CANCEL - timeout: 45+ minutes) +- **Style linting**: 30-60 seconds +- **Docker image build**: 30+ seconds (may fail due to network restrictions) + +### Network Connectivity Issues + +In restricted environments, these commands may fail due to external dependency downloads: + +- `docker build -t influxdata/docs-pytest:latest -f Dockerfile.pytest .` (InfluxData repositories, HashiCorp repos) +- `docker compose up local-dev` (Alpine package manager) +- Cypress binary installation (use `CYPRESS_INSTALL_BINARY=0`) + +Document these limitations but proceed with available functionality. + +### Validation Commands for CI + +Always run these before committing changes: + +```bash +# Format and lint code +yarn prettier --write "**/*.{css,js,ts,jsx,tsx}" +yarn eslint assets/js/**/*.js + +# Test Hugo build +npx hugo --quiet + +# Test development server startup +timeout 150 npx hugo server --bind 0.0.0.0 --port 1313 & +sleep 120 +curl -s -o /dev/null -w "%{http_code}" http://localhost:1313/ +pkill hugo +``` + +## Key Projects in This Codebase + +1. **InfluxDB 3 Documentation** (Core, Enterprise, Cloud variants) +2. **InfluxDB v2 Documentation** (OSS and Cloud) +3. **Telegraf Documentation** (agent and plugins) +4. **Supporting Tools Documentation** (Kapacitor, Chronograf, Flux) +5. **API Reference Documentation** (`/api-docs/`) +6. 
**Shared Documentation Components** (`/content/shared/`) + +## Important Locations for Frequent Tasks + +- **Shortcode reference**: `/content/example.md` +- **Contributing guide**: `CONTRIBUTING.md` +- **Testing guide**: `TESTING.md` +- **Product configurations**: `/data/products.yml` +- **Vale style rules**: `/.ci/vale/styles/` +- **GitHub workflows**: `/.github/workflows/` +- **Test scripts**: `/test/scripts/` +- **Hugo layouts**: `/layouts/` +- **CSS/JS assets**: `/assets/` + +## Content Guidelines and Style + +### Documentation Structure - **Product version data**: `/data/products.yml` -- **InfluxData products**: - - InfluxDB 3 Explorer - - Documentation source path: `/content/influxdb3/explorer` - - Published for the web: https://docs.influxdata.com/influxdb3/explorer/ - - InfluxDB 3 Core - - Documentation source path: `/content/influxdb3/core` - - Published for the web: https://docs.influxdata.com/influxdb3/core/ - - Code repositories: https://github.com/influxdata/influxdb, https://github.com/influxdata/influxdb3_core - - InfluxDB 3 Enterprise - - Documentation source path: `/content/influxdb3/enterprise` - - Published for the web: https://docs.influxdata.com/influxdb3/enterprise/ - - Code repositories: https://github.com/influxdata/influxdb, https://github.com/influxdata/influxdb3_enterprise - - InfluxDB Cloud Dedicated - - Documentation source path: `/content/influxdb3/cloud-dedicated` - - Published for the web: https://docs.influxdata.com/influxdb3/cloud-dedicated/ - - Code repository: https://github.com/influxdata/influxdb - - InfluxDB Cloud Serverless - - Documentation source path: `/content/influxdb3/cloud-serverless` - - Published for the web: https://docs.influxdata.com/influxdb3/cloud-serverless/ - - Code repository: https://github.com/influxdata/idpe - - InfluxDB Cloud v2 (TSM) - - Documentation source path: `/content/influxdb/cloud` - - Published for the web: https://docs.influxdata.com/influxdb/cloud/ - - Code repository: 
https://github.com/influxdata/idpe - - InfluxDB Clustered - - Documentation source path: `/content/influxdb3/clustered` - - Published for the web: https://docs.influxdata.com/influxdb3/clustered/ - - Code repository: https://github.com/influxdata/influxdb - - InfluxDB Enterprise v1 (1.x) - - Documentation source path: `/content/influxdb/enterprise_influxdb` - - Published for the web: https://docs.influxdata.com/enterprise_influxdb/v1/ - - Code repository: https://github.com/influxdata/influxdb - - InfluxDB OSS 1.x - - Documentation source path: `/content/influxdb/v1` - - Published for the web: https://docs.influxdata.com/influxdb/v1/ - - Code repository: https://github.com/influxdata/influxdb - - InfluxDB OSS 2.x - - Documentation source path: `/content/influxdb/v2` - - Published for the web: https://docs.influxdata.com/influxdb/v2/ - - Code repository: https://github.com/influxdata/influxdb - - Telegraf - - Documentation source path: `/content/telegraf/v1` - - Published for the web: https://docs.influxdata.com/telegraf/v1/ - - Code repository: https://github.com/influxdata/telegraf - - Kapacitor - - Documentation source path: `/content/kapacitor/v1` - - Published for the web: https://docs.influxdata.com/kapacitor/v1/ - - Code repository: https://github.com/influxdata/kapacitor - - Chronograf - - Documentation source path: `/content/chronograf/v1` - - Published for the web: https://docs.influxdata.com/chronograf/v1/ - - Code repository: https://github.com/influxdata/chronograf - - Flux - - Documentation source path: `/content/flux/v0` - - Published for the web: https://docs.influxdata.com/flux/v0/ - - Code repository: https://github.com/influxdata/flux -- **InfluxData-supported tools**: - - InfluxDB API client libraries - - Code repositories: https://github.com/InfluxCommunity - - InfluxDB 3 processing engine plugins - - Code repository: https://github.com/influxdata/influxdb3_plugins - **Query Languages**: SQL, InfluxQL, Flux (use appropriate language per product 
version) - **Documentation Site**: https://docs.influxdata.com -- **Repository**: https://github.com/influxdata/docs-v2 - **Framework**: Hugo static site generator -## Abbreviations and shortcuts - -- `gdd`: Google Developer Documentation style -- `3core`: InfluxDB 3 Core -- `3ent`: InfluxDB 3 Enterprise - -## Style guidelines +### Style Guidelines - Follow Google Developer Documentation style guidelines -- For API references, follow YouTube Data API style - Use semantic line feeds (one sentence per line) - Format code examples to fit within 80 characters -- Command line examples: - - Should be formatted as code blocks - - Should use long options (e.g., `--option` instead of `-o`) -- Use cURL for API examples - - Format to fit within 80 characters - - Should use `--data-urlencode` for query parameters - - Should use `--header` for headers -- Use only h2-h6 headings in content (h1 comes from frontmatter title properties) -- Use sentence case for headings -- Use GitHub callout syntax +- Use long options in command line examples (`--option` instead of `-o`) +- Use GitHub callout syntax for notes and warnings - Image naming: `project/version-context-description.png` -- Use appropriate product names and versions consistently -- Follow InfluxData vocabulary guidelines -## Markdown and shortcodes +### Markdown and Shortcodes -- Include proper frontmatter for Markdown pages in `content/**/*.md` (except for - shared content files in `content/shared/`): +Include proper frontmatter for all content pages: - ```yaml - title: # Page title (h1) - seotitle: # SEO title - list_title: # Title for article lists - description: # SEO description - menu: - product_version: - weight: # Page order (1-99, 101-199, etc.) 
- ``` -- Follow the shortcode examples in `content/example.md` and the documentation - for docs-v2 contributors in `CONTRIBUTING.md` -- Use provided shortcodes correctly: - - Notes/warnings: `{{% note %}}`, `{{% warn %}}` - - Product-specific: `{{% enterprise %}}`, `{{% cloud %}}` - - Tabbed content: `{{< tabs-wrapper >}}`, `{{% tabs %}}`, `{{% tab-content %}}` - - Tabbed content for code examples (without additional text): `{{< code-tabs-wrapper >}}`, `{{% code-tabs %}}`, `{{% code-tab-content %}}` - - Version links: `{{< latest >}}`, `{{< latest-patch >}}` - - API endpoints: `{{< api-endpoint >}}` - - Required elements: `{{< req >}}` - - Navigation: `{{< page-nav >}}` - - Diagrams: `{{< diagram >}}`, `{{< filesystem-diagram >}}` +```yaml +title: # Page title (h1) +seotitle: # SEO title +description: # SEO description +menu: + product_version: +weight: # Page order (1-99, 101-199, etc.) +``` -## Code examples and testing +Key shortcodes (see `/content/example.md` for full reference): -- Provide complete, working examples with proper testing annotations: +- Notes/warnings: `{{% note %}}`, `{{% warn %}}` +- Tabbed content: `{{< tabs-wrapper >}}`, `{{% tabs %}}`, `{{% tab-content %}}` +- Code examples: `{{< code-tabs-wrapper >}}`, `{{% code-tabs %}}`, `{{% code-tab-content %}}` +- Required elements: `{{< req >}}` +- API endpoints: `{{< api-endpoint >}}` + +### Code Examples and Testing + +Provide complete, working examples with pytest annotations: ```python print("Hello, world!") @@ -140,67 +289,21 @@ print("Hello, world!") Hello, world! ``` -- CLI command example: +## Troubleshooting Common Issues -```sh -influx query 'from(bucket:"example") |> range(start:-1h)' -``` +1. **"Pytest collected 0 items"**: Use `python` (not `py`) for code block language identifiers +2. **Hugo build errors**: Check `/config/_default/` for configuration issues +3. **Docker build failures**: Expected in restricted networks - document and continue with local Hugo +4. 
**Cypress installation failures**: Use `CYPRESS_INSTALL_BINARY=0 yarn install` +5. **Link validation slow**: Use file-specific testing: `yarn test:links content/specific-file.md` +6. **Vale linting errors**: Check `.ci/vale/styles/config/vocabularies` for accepted/rejected terms - - -``` -Table: keys: [_start, _stop, _field, _measurement] - _start:time _stop:time _field:string _measurement:string _time:time _value:float ------------------------------- ------------------------------ ---------------------- ---------------------- ------------------------------ ---------------------------- -``` - -- Include necessary environment variables -- Show proper credential handling for authenticated commands - -## API documentation - -- `/api-docs` contains OpenAPI spec files used for API reference documentation -- Follow OpenAPI specification patterns -- Match REST API examples to current implementation -- Include complete request/response examples -- Document required headers and authentication - -## Versioning and product differentiation - -- Clearly distinguish between different InfluxDB versions (1.x, 2.x, 3.x) -- Use correct terminology for each product variant -- Apply appropriate UI descriptions and screenshots -- Reference appropriate query language per version - -## Development tools - -- Vale.sh linter for style checking - - Configuration file: `.vale.ini` -- Docker for local development and testing -- pytest and pytest-codeblocks for validating code examples -- Use cypress for testing documentation UI and links -- Prettier for code formatting -- ESLint for JavaScript and TypeScript linting -- Lefthook (NPM package) for managing pre-commit hooks for quality assurance - -## Code style - -- Use modern JavaScript (ES6+) syntax - -## Related repositories - -- **Internal documentation assistance requests**: https://github.com/influxdata/DAR/issues Documentation - -## Additional instruction files +## Additional Instruction Files For specific workflows and content types, 
also refer to: -- **InfluxDB 3 code placeholders**: `.github/instructions/influxdb3-code-placeholders.instructions.md` - Guidelines for placeholder formatting, descriptions, and shortcode usage in InfluxDB 3 documentation -- **Contributing guidelines**: `.github/instructions/contributing.instructions.md` - Detailed style guidelines, shortcode usage, frontmatter requirements, and development workflows -- **Content-specific instructions**: Check `.github/instructions/` directory for specialized guidelines covering specific documentation patterns and requirements +- **InfluxDB 3 code placeholders**: `.github/instructions/influxdb3-code-placeholders.instructions.md` +- **Contributing guidelines**: `.github/instructions/contributing.instructions.md` +- **Content-specific instructions**: Check `.github/instructions/` directory -## Integration with specialized instructions - -When working on InfluxDB 3 documentation (Core/Enterprise), prioritize the placeholder guidelines from `influxdb3-code-placeholders.instructions.md`. - -For general documentation structure, shortcodes, and development workflows, follow the comprehensive guidelines in `contributing.instructions.md`. +Remember: This is a large documentation site with complex build processes. Patience with build times is essential, and NEVER CANCEL long-running operations. 
From 963d0a8d218cd2cb7a5cd90f89b87fb5d327df09 Mon Sep 17 00:00:00 2001 From: Peter Barnett Date: Thu, 14 Aug 2025 11:29:44 -0400 Subject: [PATCH 068/179] docs: add usage telemetry documentation --- content/influxdb3/core/reference/telemetry.md | 18 ++++ .../enterprise/reference/telemetry.md | 18 ++++ content/shared/influxdb3-admin/telemetry.md | 97 +++++++++++++++++++ 3 files changed, 133 insertions(+) create mode 100644 content/influxdb3/core/reference/telemetry.md create mode 100644 content/influxdb3/enterprise/reference/telemetry.md create mode 100644 content/shared/influxdb3-admin/telemetry.md diff --git a/content/influxdb3/core/reference/telemetry.md b/content/influxdb3/core/reference/telemetry.md new file mode 100644 index 000000000..fae9427e3 --- /dev/null +++ b/content/influxdb3/core/reference/telemetry.md @@ -0,0 +1,18 @@ +--- +title: Usage telemetry +seotitle: InfluxDB Core usage telemetry +description: > + InfluxDB Core can collect and send usage telemetry data to help improve the + product. +menu: + influxdb3_core: + parent: Reference +weight: 108 +influxdb3/core/tags: [telemetry, monitoring, metrics, observability] +source: /shared/influxdb3-admin/telemetry.md +--- + + \ No newline at end of file diff --git a/content/influxdb3/enterprise/reference/telemetry.md b/content/influxdb3/enterprise/reference/telemetry.md new file mode 100644 index 000000000..779896464 --- /dev/null +++ b/content/influxdb3/enterprise/reference/telemetry.md @@ -0,0 +1,18 @@ +--- +title: Usage telemetry +seotitle: InfluxDB Enterprise usage telemetry +description: > + InfluxDB Enterprise can collect and send usage telemetry data to help improve the + product. 
+menu: + influxdb3_enterprise: + parent: Reference +weight: 108 +influxdb3/enterprise/tags: [telemetry, monitoring, metrics, observability] +source: /shared/influxdb3-admin/telemetry.md +--- + + \ No newline at end of file diff --git a/content/shared/influxdb3-admin/telemetry.md b/content/shared/influxdb3-admin/telemetry.md new file mode 100644 index 000000000..90639f993 --- /dev/null +++ b/content/shared/influxdb3-admin/telemetry.md @@ -0,0 +1,97 @@ +InfluxDB 3 can collect and send usage telemetry data to help improve the product. This page describes what telemetry data is collected, when it's collected, how it's transmitted, and how to disable it. + +## What data is collected + +{{< product-name >}} collects the following telemetry data: + +### System metrics + +- **CPU utilization**: Process-specific CPU usage (min, max, average) +- **Memory usage**: Process memory consumption in MB (min, max, average) +- **Cores**: Number of CPU cores in use +- **OS**: Operating system information +- **Version**: {{< product-name >}} version +- **Uptime**: Server uptime in seconds + +### Write metrics + +- **Write requests**: Number of write operations (min, max, average, hourly sum) +- **Write lines**: Number of lines written (min, max, average, hourly sum) +- **Write bytes**: Amount of data written in MB (min, max, average, hourly sum) + +### Query metrics + +- **Query requests**: Number of query operations (min, max, average, hourly sum) + +### Storage metrics + +- **Parquet file count**: Number of Parquet files (when available) +- **Parquet file size**: Total size of Parquet files in MB (when available) +- **Parquet row count**: Total number of rows in Parquet files (when available) + +### Processing engine metrics + +- **WAL triggers**: Write-Ahead Log trigger counts (when available) +- **Schedule triggers**: Scheduled processing trigger counts (when available) +- **Request triggers**: Request-based processing trigger counts (when available) + +### Instance information + 
+- **Instance ID**: Unique identifier for the server instance +- **Cluster UUID**: Unique identifier for the cluster (same as catalog UUID) +- **Storage type**: Type of object storage being used +{{% show-in "core" %}} +- **Product type**: "Core" +{{% /show-in %}} +{{% show-in "enterprise" %}} +- **Product type**: "Enterprise" +{{% /show-in %}} + +## Collection frequency + +- **System metrics** (CPU, memory): Collected every 60 seconds +- **Write and query metrics**: Collected per operation, rolled up every 60 seconds +- **Storage and processing engine metrics**: Collected at snapshot time (when available) +- **Instance information**: Static data collected once + +Telemetry data is transmitted once per hour. + +## Disable telemetry + +Disables sending telemetry data to InfluxData. + +**Default:** `false` + +| influxdb3 flag | Environment variable | +| :------------- | :------------------- | +| `--disable-telemetry-upload` | `INFLUXDB3_TELEMETRY_DISABLE_UPLOAD` | + +#### Command line flag +```sh +influxdb3 serve --disable-telemetry-upload +``` + +#### Environment variable +```sh +export INFLUXDB3_TELEMETRY_DISABLE_UPLOAD=true +``` + +When telemetry is disabled, no usage data is collected or transmitted. + +## Data handling + +The telemetry data is used by InfluxData to: + +- Understand product usage patterns +- Improve product performance and reliability +- Prioritize feature development +- Identify and resolve issues + +No personally identifiable information (PII) is collected. 
+ +## Privacy and security + +- All telemetry data is transmitted securely via HTTPS +- No database contents, queries, or user data is collected +- Only operational metrics and system information is transmitted +- Data collection follows InfluxData's privacy policy \ No newline at end of file From 32cdfb533c85b6ea9575f2f140e23ed2ac4f74fe Mon Sep 17 00:00:00 2001 From: Peter Barnett Date: Thu, 14 Aug 2025 11:44:25 -0400 Subject: [PATCH 069/179] fix: small clarity changes and easier reading --- content/shared/influxdb3-admin/telemetry.md | 42 +++++++++------------ 1 file changed, 17 insertions(+), 25 deletions(-) diff --git a/content/shared/influxdb3-admin/telemetry.md b/content/shared/influxdb3-admin/telemetry.md index 90639f993..09a4bd10a 100644 --- a/content/shared/influxdb3-admin/telemetry.md +++ b/content/shared/influxdb3-admin/telemetry.md @@ -6,8 +6,8 @@ InfluxDB 3 can collect and send usage telemetry data to help improve the product ### System metrics -- **CPU utilization**: Process-specific CPU usage (min, max, average) -- **Memory usage**: Process memory consumption in MB (min, max, average) +- **CPU utilization**: Process-specific CPU usage +- **Memory usage**: Process memory consumption in MB - **Cores**: Number of CPU cores in use - **OS**: Operating system information - **Version**: {{< product-name >}} version @@ -15,30 +15,30 @@ InfluxDB 3 can collect and send usage telemetry data to help improve the product ### Write metrics -- **Write requests**: Number of write operations (min, max, average, hourly sum) -- **Write lines**: Number of lines written (min, max, average, hourly sum) -- **Write bytes**: Amount of data written in MB (min, max, average, hourly sum) +- **Write requests**: Number of write operations +- **Write lines**: Number of lines written +- **Write bytes**: Amount of data written in MB ### Query metrics -- **Query requests**: Number of query operations (min, max, average, hourly sum) +- **Query requests**: Number of query operations ### 
Storage metrics -- **Parquet file count**: Number of Parquet files (when available) -- **Parquet file size**: Total size of Parquet files in MB (when available) -- **Parquet row count**: Total number of rows in Parquet files (when available) +- **Parquet file count**: Number of Parquet files +- **Parquet file size**: Total size of Parquet files in MB +- **Parquet row count**: Total number of rows in Parquet files ### Processing engine metrics -- **WAL triggers**: Write-Ahead Log trigger counts (when available) -- **Schedule triggers**: Scheduled processing trigger counts (when available) -- **Request triggers**: Request-based processing trigger counts (when available) +- **WAL triggers**: Write-Ahead Log trigger counts +- **Schedule triggers**: Scheduled processing trigger counts +- **Request triggers**: Request-based processing trigger counts ### Instance information - **Instance ID**: Unique identifier for the server instance -- **Cluster UUID**: Unique identifier for the cluster (same as catalog UUID) +- **Cluster UUID**: Unique identifier for the cluster - **Storage type**: Type of object storage being used {{% show-in "core" %}} - **Product type**: "Core" @@ -80,18 +80,10 @@ When telemetry is disabled, no usage data is collected or transmitted. ## Data handling -The telemetry data is used by InfluxData to: - -- Understand product usage patterns -- Improve product performance and reliability -- Prioritize feature development -- Identify and resolve issues - -No personally identifiable information (PII) is collected. +The telemetry data is used by InfluxData to understand product usage patterns, improve product performance and reliability, prioritize feature development, and identify/resolve issues. No personally identifiable information (PII) is collected. 
## Privacy and security -- All telemetry data is transmitted securely via HTTPS -- No database contents, queries, or user data is collected -- Only operational metrics and system information is transmitted -- Data collection follows InfluxData's privacy policy \ No newline at end of file +All telemetry data is transmitted securely via HTTPS. No database contents, queries, or user data is collected; only operational metrics and system information is transmitted. + +All data collection follows InfluxData's privacy policy. \ No newline at end of file From 5492ba9eef7eef1bbee0912acbcbcc49d872bfa6 Mon Sep 17 00:00:00 2001 From: Peter Barnett Date: Thu, 14 Aug 2025 11:52:59 -0400 Subject: [PATCH 070/179] fix: remember the 3 --- content/influxdb3/core/reference/telemetry.md | 4 ++-- content/influxdb3/enterprise/reference/telemetry.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/content/influxdb3/core/reference/telemetry.md b/content/influxdb3/core/reference/telemetry.md index fae9427e3..14eb0692e 100644 --- a/content/influxdb3/core/reference/telemetry.md +++ b/content/influxdb3/core/reference/telemetry.md @@ -1,8 +1,8 @@ --- title: Usage telemetry -seotitle: InfluxDB Core usage telemetry +seotitle: InfluxDB 3 Core usage telemetry description: > - InfluxDB Core can collect and send usage telemetry data to help improve the + InfluxDB 3 Core can collect and send usage telemetry data to help improve the product. 
menu: influxdb3_core: diff --git a/content/influxdb3/enterprise/reference/telemetry.md b/content/influxdb3/enterprise/reference/telemetry.md index 779896464..3b3fef879 100644 --- a/content/influxdb3/enterprise/reference/telemetry.md +++ b/content/influxdb3/enterprise/reference/telemetry.md @@ -1,8 +1,8 @@ --- title: Usage telemetry -seotitle: InfluxDB Enterprise usage telemetry +seotitle: InfluxDB 3 Enterprise usage telemetry description: > - InfluxDB Enterprise can collect and send usage telemetry data to help improve the + InfluxDB 3 Enterprise can collect and send usage telemetry data to help improve the product. menu: influxdb3_enterprise: From f23026982b681658d658f8a50719a2a7f98e591d Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Thu, 14 Aug 2025 14:41:05 -0500 Subject: [PATCH 071/179] chore(monolith): Move telemetry reference to shared/influxdb3-reference. Revise description, disable, and product names. --- content/influxdb3/core/reference/telemetry.md | 9 +++++---- content/influxdb3/enterprise/reference/telemetry.md | 9 +++++---- .../telemetry.md | 8 ++++++-- 3 files changed, 16 insertions(+), 10 deletions(-) rename content/shared/{influxdb3-admin => influxdb3-reference}/telemetry.md (83%) diff --git a/content/influxdb3/core/reference/telemetry.md b/content/influxdb3/core/reference/telemetry.md index 14eb0692e..91002fb2c 100644 --- a/content/influxdb3/core/reference/telemetry.md +++ b/content/influxdb3/core/reference/telemetry.md @@ -2,17 +2,18 @@ title: Usage telemetry seotitle: InfluxDB 3 Core usage telemetry description: > - InfluxDB 3 Core can collect and send usage telemetry data to help improve the - product. + InfluxData collects telemetry data to help improve the {{< product-name >}}. + Learn what data {{< product-name >}} collects and sends to InfluxData, how it's used, and + how you can opt out. 
menu: influxdb3_core: parent: Reference weight: 108 influxdb3/core/tags: [telemetry, monitoring, metrics, observability] -source: /shared/influxdb3-admin/telemetry.md +source: /shared/influxdb3-reference/telemetry.md --- \ No newline at end of file diff --git a/content/influxdb3/enterprise/reference/telemetry.md b/content/influxdb3/enterprise/reference/telemetry.md index 3b3fef879..6ebb4ac6b 100644 --- a/content/influxdb3/enterprise/reference/telemetry.md +++ b/content/influxdb3/enterprise/reference/telemetry.md @@ -2,17 +2,18 @@ title: Usage telemetry seotitle: InfluxDB 3 Enterprise usage telemetry description: > - InfluxDB 3 Enterprise can collect and send usage telemetry data to help improve the - product. + InfluxData collects telemetry data to help improve the {{< product-name >}}. + Learn what data {{< product-name >}} collects and sends to InfluxData, how it's used, and + how you can opt out. menu: influxdb3_enterprise: parent: Reference weight: 108 influxdb3/enterprise/tags: [telemetry, monitoring, metrics, observability] -source: /shared/influxdb3-admin/telemetry.md +source: /shared/influxdb3-reference/telemetry.md --- \ No newline at end of file diff --git a/content/shared/influxdb3-admin/telemetry.md b/content/shared/influxdb3-reference/telemetry.md similarity index 83% rename from content/shared/influxdb3-admin/telemetry.md rename to content/shared/influxdb3-reference/telemetry.md index 09a4bd10a..4f8a5589f 100644 --- a/content/shared/influxdb3-admin/telemetry.md +++ b/content/shared/influxdb3-reference/telemetry.md @@ -1,4 +1,6 @@ -InfluxDB 3 can collect and send usage telemetry data to help improve the product. This page describes what telemetry data is collected, when it's collected, how it's transmitted, and how to disable it. +InfluxData collects information, or _telemetry data_, about the usage of {{% product-name %}} to help improve the product. 
+Learn what data {{% product-name %}} collects and sends to InfluxData, how it's used, and
+how you can opt out.
 
 ## What data is collected
 
@@ -58,7 +60,9 @@ Telemetry data is transmitted once per hour.
 
 ## Disable telemetry
 
-Disables sending telemetry data to InfluxData.
+To opt out of collecting and sending {{% product-name %}} telemetry data,
+include the `--disable-telemetry-upload` flag or set the `INFLUXDB3_TELEMETRY_DISABLE_UPLOAD` environment variable
+when starting {{% product-name %}}.
 
 **Default:** `false`
 

From 144de5785c78d95f4825ca9571b386dae6a3f2e8 Mon Sep 17 00:00:00 2001
From: meelahme
Date: Thu, 14 Aug 2025 14:58:03 -0700
Subject: [PATCH 072/179] minor updates to requestBody to fix Ci build error

---
 .../influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml
index a2f8693c9..7735c655d 100644
--- a/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml
+++ b/api-docs/influxdb3/clustered/v1-compatibility/swaggerV1Compat.yml
@@ -329,10 +329,10 @@ paths:
       - [Manage databases in InfluxDB Clustered](/influxdb3/clustered/admin/databases/)
       - [InfluxQL DBRP naming convention in InfluxDB Clustered](/influxdb3/clustered/admin/databases/create/#influxql-dbrp-naming-convention)
       - [Migrate data from InfluxDB v1 to InfluxDB Clustered](/influxdb3/clustered/guides/migrate-data/migrate-1x-to-clustered/)
-      ```
       type: string
     q:
-      description: Defines the InfluxQL query to run.
+      description: |
+        Defines the InfluxQL query to run.
type: string chunked: description: | From 34416a1d37734a188f7b8e46ed60757c43cedf30 Mon Sep 17 00:00:00 2001 From: Jakub Bednar Date: Fri, 15 Aug 2025 08:42:39 +0200 Subject: [PATCH 073/179] Release Chronograf v1.10.8 --- content/chronograf/v1/about_the_project/release-notes.md | 6 ++++++ data/products.yml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/content/chronograf/v1/about_the_project/release-notes.md b/content/chronograf/v1/about_the_project/release-notes.md index 5cc972a4f..eae49effd 100644 --- a/content/chronograf/v1/about_the_project/release-notes.md +++ b/content/chronograf/v1/about_the_project/release-notes.md @@ -10,6 +10,12 @@ aliases: - /chronograf/v1/about_the_project/release-notes-changelog/ --- +## v1.10.8 {date="2025-08-15"} + +### Bug Fixes + +- Fix missing retention policies on the Databases page. + ## v1.10.7 {date="2025-04-15"} ### Bug Fixes diff --git a/data/products.yml b/data/products.yml index 44b530c43..e707fd718 100644 --- a/data/products.yml +++ b/data/products.yml @@ -157,7 +157,7 @@ chronograf: versions: [v1] latest: v1.10 latest_patches: - v1: 1.10.7 + v1: 1.10.8 ai_sample_questions: - How do I configure Chronograf for InfluxDB v1? - How do I create a dashboard in Chronograf? From 3b5385812a55f7d1ab56bc89bd1860e6b2900c51 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Fri, 15 Aug 2025 10:14:29 -0500 Subject: [PATCH 074/179] Apply suggestions from code review --- .github/copilot-instructions.md | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index 99059b56f..d0fc9113f 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -217,12 +217,14 @@ pkill hugo ## Key Projects in This Codebase -1. **InfluxDB 3 Documentation** (Core, Enterprise, Cloud variants) -2. **InfluxDB v2 Documentation** (OSS and Cloud) -3. **Telegraf Documentation** (agent and plugins) -4. 
**Supporting Tools Documentation** (Kapacitor, Chronograf, Flux)
-5. **API Reference Documentation** (`/api-docs/`)
-6. **Shared Documentation Components** (`/content/shared/`)
+1. **InfluxDB 3 Documentation** (Core, Enterprise, Clustered, Cloud Dedicated, Cloud Serverless, and InfluxDB 3 plugins for Core and Enterprise)
+2. **InfluxDB 3 Explorer** (UI)
+3. **InfluxDB v2 Documentation** (OSS and Cloud)
+4. **InfluxDB v1 Documentation** (OSS and Enterprise)
+5. **Telegraf Documentation** (agent and plugins)
+6. **Supporting Tools Documentation** (Kapacitor, Chronograf, Flux)
+7. **API Reference Documentation** (`/api-docs/`)
+8. **Shared Documentation Components** (`/content/shared/`)
 
 ## Important Locations for Frequent Tasks
 
@@ -233,7 +235,7 @@ pkill hugo
 - **Vale style rules**: `/.ci/vale/styles/`
 - **GitHub workflows**: `/.github/workflows/`
 - **Test scripts**: `/test/scripts/`
-- **Hugo layouts**: `/layouts/`
+- **Hugo layouts and shortcodes**: `/layouts/`
 - **CSS/JS assets**: `/assets/`
 
 ## Content Guidelines and Style
@@ -269,7 +271,7 @@ weight: # Page order (1-99, 101-199, etc.)
Key shortcodes (see `/content/example.md` for full reference): -- Notes/warnings: `{{% note %}}`, `{{% warn %}}` +- Notes/warnings (GitHub syntax): `> [!Note]`, `> [!Warning]` - Tabbed content: `{{< tabs-wrapper >}}`, `{{% tabs %}}`, `{{% tab-content %}}` - Code examples: `{{< code-tabs-wrapper >}}`, `{{% code-tabs %}}`, `{{% code-tab-content %}}` - Required elements: `{{< req >}}` From 06fff5868f45b461a064d9032f3974ec3451f8ad Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Fri, 15 Aug 2025 15:27:14 -0500 Subject: [PATCH 075/179] feat(v1): update documentation for the upcoming InfluxDB v1.12 and Enterprise v1.12 release **Keep this commit for the upcoming v1.12 release** - Label upcoming features as v1.12.0+: - Update configuration documentation for data nodes - Update database management documentation - Update influx_inspect tool documentation - Update show-shards documentation - Revert product version references to 1.11.8 in products.yml --- .../v1/administration/configure/config-data-nodes.md | 4 ++-- .../v1/query_language/manage-database.md | 4 ++-- content/enterprise_influxdb/v1/tools/influx_inspect.md | 4 ++-- .../enterprise_influxdb/v1/tools/influxd-ctl/show-shards.md | 6 +++--- content/influxdb/v1/query_language/manage-database.md | 4 ++-- content/influxdb/v1/tools/influx_inspect.md | 4 ++-- data/products.yml | 6 +++--- 7 files changed, 16 insertions(+), 16 deletions(-) diff --git a/content/enterprise_influxdb/v1/administration/configure/config-data-nodes.md b/content/enterprise_influxdb/v1/administration/configure/config-data-nodes.md index ecddbd49c..6295ac3d5 100644 --- a/content/enterprise_influxdb/v1/administration/configure/config-data-nodes.md +++ b/content/enterprise_influxdb/v1/administration/configure/config-data-nodes.md @@ -326,7 +326,7 @@ Very useful for troubleshooting, but will log any sensitive data contained withi Environment variable: `INFLUXDB_DATA_QUERY_LOG_ENABLED` -#### query-log-path +#### query-log-path {metadata="v1.12.0+"} 
Default is `""`. @@ -352,7 +352,7 @@ The following is an example of a `logrotate` configuration: ``` Environment variable: `INFLUXDB_DATA_QUERY_LOG_PATH` - +--> #### wal-fsync-delay Default is `"0s"`. diff --git a/content/enterprise_influxdb/v1/query_language/manage-database.md b/content/enterprise_influxdb/v1/query_language/manage-database.md index c70c1cb52..4a1f09b7a 100644 --- a/content/enterprise_influxdb/v1/query_language/manage-database.md +++ b/content/enterprise_influxdb/v1/query_language/manage-database.md @@ -306,7 +306,7 @@ See [Shard group duration management](/enterprise_influxdb/v1/concepts/schema_and_data_layout/#shard-group-duration-management) for recommended configurations. -##### `PAST LIMIT` +##### `PAST LIMIT` {metadata="v1.12.0+"} The `PAST LIMIT` clause defines a time boundary before and relative to _now_ in which points written to the retention policy are accepted. If a point has a @@ -317,7 +317,7 @@ For example, if a write request tries to write data to a retention policy with a `PAST LIMIT 6h` and there are points in the request with timestamps older than 6 hours, those points are rejected. -##### `FUTURE LIMIT` +##### `FUTURE LIMIT` {metadata="v1.12.0+"} The `FUTURE LIMIT` clause defines a time boundary after and relative to _now_ in which points written to the retention policy are accepted. If a point has a diff --git a/content/enterprise_influxdb/v1/tools/influx_inspect.md b/content/enterprise_influxdb/v1/tools/influx_inspect.md index 08ba93390..8e0e6ee15 100644 --- a/content/enterprise_influxdb/v1/tools/influx_inspect.md +++ b/content/enterprise_influxdb/v1/tools/influx_inspect.md @@ -453,7 +453,7 @@ Default value is `$HOME/.influxdb/wal`. See the [file system layout](/enterprise_influxdb/v1/concepts/file-system-layout/#file-system-layout) for InfluxDB on your system. -##### [ `-tsmfile ` ] +##### [ `-tsmfile ` ] {metadata="v1.12.0+"} Path to a single tsm file to export. 
This requires both `-database` and `-retention` to be specified. @@ -472,7 +472,7 @@ influx_inspect export -compress influx_inspect export -database DATABASE_NAME -retention RETENTION_POLICY ``` -##### Export data from a single TSM file +##### Export data from a single TSM file {metadata="v1.12.0+"} ```bash influx_inspect export \ diff --git a/content/enterprise_influxdb/v1/tools/influxd-ctl/show-shards.md b/content/enterprise_influxdb/v1/tools/influxd-ctl/show-shards.md index cc3451615..b87f7bea2 100644 --- a/content/enterprise_influxdb/v1/tools/influxd-ctl/show-shards.md +++ b/content/enterprise_influxdb/v1/tools/influxd-ctl/show-shards.md @@ -44,6 +44,8 @@ ID Database Retention Policy Desired Replicas Shard Group Start {{% /expand %}} {{< /expand-wrapper >}} +#### Show inconsistent shards {metadata="v1.12.0+"} + You can also use the `-m` flag to output "inconsistent" shards which are shards that are either in metadata but not on disk or on disk but not in metadata. @@ -52,10 +54,8 @@ that are either in metadata but not on disk or on disk but not in metadata. | Flag | Description | | :--- | :-------------------------------- | | `-v` | Return detailed shard information | -| `-m` | Return inconsistent shards | +| `-m` | Return inconsistent shards | {{% caption %}} _Also see [`influxd-ctl` global flags](/enterprise_influxdb/v1/tools/influxd-ctl/#influxd-ctl-global-flags)._ {{% /caption %}} - -## Examples diff --git a/content/influxdb/v1/query_language/manage-database.md b/content/influxdb/v1/query_language/manage-database.md index bbda3c443..554b8b871 100644 --- a/content/influxdb/v1/query_language/manage-database.md +++ b/content/influxdb/v1/query_language/manage-database.md @@ -307,7 +307,7 @@ See [Shard group duration management](/influxdb/v1/concepts/schema_and_data_layout/#shard-group-duration-management) for recommended configurations. 
-##### `PAST LIMIT` +##### `PAST LIMIT` {metadata="v1.12.0+"} The `PAST LIMIT` clause defines a time boundary before and relative to _now_ in which points written to the retention policy are accepted. If a point has a @@ -318,7 +318,7 @@ For example, if a write request tries to write data to a retention policy with a `PAST LIMIT 6h` and there are points in the request with timestamps older than 6 hours, those points are rejected. -##### `FUTURE LIMIT` +##### `FUTURE LIMIT` {metadata="v1.12.0+"} The `FUTURE LIMIT` clause defines a time boundary after and relative to _now_ in which points written to the retention policy are accepted. If a point has a diff --git a/content/influxdb/v1/tools/influx_inspect.md b/content/influxdb/v1/tools/influx_inspect.md index 5c4bdb543..1bdbb5f18 100644 --- a/content/influxdb/v1/tools/influx_inspect.md +++ b/content/influxdb/v1/tools/influx_inspect.md @@ -449,7 +449,7 @@ Default value is `$HOME/.influxdb/wal`. See the [file system layout](/influxdb/v1/concepts/file-system-layout/#file-system-layout) for InfluxDB on your system. -##### [ `-tsmfile ` ] +##### [ `-tsmfile ` ] {metadata="v1.12.0+"} Path to a single tsm file to export. This requires both `-database` and `-retention` to be specified. 
@@ -468,7 +468,7 @@ influx_inspect export -compress influx_inspect export -database DATABASE_NAME -retention RETENTION_POLICY ``` -##### Export data from a single TSM file +##### Export data from a single TSM file {metadata="v1.12.0+"} ```bash influx_inspect export \ diff --git a/data/products.yml b/data/products.yml index e707fd718..62b027b66 100644 --- a/data/products.yml +++ b/data/products.yml @@ -100,7 +100,7 @@ influxdb: latest: v2.7 latest_patches: v2: 2.7.12 - v1: 1.12.1 + v1: 1.11.8 latest_cli: v2: 2.7.5 ai_sample_questions: @@ -183,9 +183,9 @@ enterprise_influxdb: menu_category: self-managed list_order: 5 versions: [v1] - latest: v1.12 + latest: v1.11 latest_patches: - v1: 1.12.1 + v1: 1.11.8 ai_sample_questions: - How can I configure my InfluxDB v1 Enterprise server? - How do I replicate data between InfluxDB v1 Enterprise and OSS? From 63bc1fcc8107c3b71f81defbf4f46da8ec3ab3c4 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Fri, 15 Aug 2025 15:31:17 -0500 Subject: [PATCH 076/179] fix(v1): v1.12 release notes and related changes are pre-release documentation\ **Revert this commit for the v1.12 release**\ - Remove links from release notes to upcoming 1.12.x features - Add callout to explain that v1.12 isn't yet available --- .../v1/about-the-project/release-notes.md | 33 +++++++++++++++++-- .../v1/about_the_project/release-notes.md | 25 +++++++++++++- 2 files changed, 55 insertions(+), 3 deletions(-) diff --git a/content/enterprise_influxdb/v1/about-the-project/release-notes.md b/content/enterprise_influxdb/v1/about-the-project/release-notes.md index aae7eecd2..b6db183f6 100644 --- a/content/enterprise_influxdb/v1/about-the-project/release-notes.md +++ b/content/enterprise_influxdb/v1/about-the-project/release-notes.md @@ -1,5 +1,5 @@ --- -title: InfluxDB Enterprise 1.11 release notes +title: InfluxDB Enterprise v1 release notes description: > Important changes and what's new in each version InfluxDB Enterprise. 
menu: @@ -7,9 +7,16 @@ menu: name: Release notes weight: 10 parent: About the project +alt_links: + v1: /influxdb/v1/about_the_project/release-notes/ --- -## v1.12.1 {date="2025-06-26"} +## v1.12.x {date="TBD"} + +> [!Important] +> #### Pre-release documentation +> +> This release is not yet available. [**v{{% latest-patch %}}**](#v1118) is the latest InfluxDB Enterprise v1 release. > [!Important] > #### Upgrade meta nodes first @@ -22,31 +29,53 @@ menu: - Add additional log output when using [`influx_inspect buildtsi`](/enterprise_influxdb/v1/tools/influx_inspect/#buildtsi) to rebuild the TSI index. + + +- Use [`influx_inspect export`](/enterprise_influxdb/v1/tools/influx_inspect/#export) with + `-tsmfile` option to + export a single TSM file. - Add `-m` flag to the [`influxd-ctl show-shards` command](/enterprise_influxdb/v1/tools/influxd-ctl/show-shards/) to output inconsistent shards. - Allow the specification of a write window for retention policies. - Add `fluxQueryRespBytes` metric to the `/debug/vars` metrics endpoint. - Log whenever meta gossip times exceed expiration. + + +- Add `query-log-path` configuration option to data nodes. +- Add `aggressive-points-per-block` configuration option to prevent TSM files from not getting fully compacted. - Log TLS configuration settings on startup. - Check for TLS certificate and private key permissions. - Add a warning if the TLS certificate is expired. - Add authentication to the Raft portal and add the following related _data_ node configuration options: + + + - `[meta].raft-portal-auth-required` + - `[meta].raft-dialer-auth-required` - Improve error handling. - InfluxQL updates: - Delete series by retention policy. + + + + - Allow retention policies to discard writes that fall within their range, but + outside of `FUTURE LIMIT` and `PAST LIMIT`. 
## Bug fixes diff --git a/content/influxdb/v1/about_the_project/release-notes.md b/content/influxdb/v1/about_the_project/release-notes.md index 3ce948f4b..fcf9dc9ec 100644 --- a/content/influxdb/v1/about_the_project/release-notes.md +++ b/content/influxdb/v1/about_the_project/release-notes.md @@ -10,27 +10,50 @@ aliases: - /influxdb/v1/about_the_project/releasenotes-changelog/ alt_links: v2: /influxdb/v2/reference/release-notes/influxdb/ + enterprise_v1: /enterprise_influxdb/v1/about-the-project/release-notes/ --- -## v1.12.1 {date="2025-06-26"} +## v1.12.x {date="TBD"} + +> [!Important] +> #### Pre-release documentation +> +> This release is not yet available. [**v{{% latest-patch %}}**](#v1118) is the latest InfluxDB v1 release. ## Features - Add additional log output when using [`influx_inspect buildtsi`](/influxdb/v1/tools/influx_inspect/#buildtsi) to rebuild the TSI index. + + +- Use [`influx_inspect export`](/influxdb/v1/tools/influx_inspect/#export) with + `-tsmfile` option to + export a single TSM file. + - Add `fluxQueryRespBytes` metric to the `/debug/vars` metrics endpoint. + + +- Add `aggressive-points-per-block` configuration option + to prevent TSM files from not getting fully compacted. - Improve error handling. - InfluxQL updates: - Delete series by retention policy. + + + - Allow retention policies to discard writes that fall within their range, but + outside of `FUTURE LIMIT` and `PAST LIMIT`. 
## Bug fixes From e10340b6ecbab0f5ee3e96adafd4d33cab558b3b Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Thu, 14 Aug 2025 23:14:51 -0500 Subject: [PATCH 077/179] chore(ci): Add config for new link validation tool (docs-tooling/link-checker) chore(ci): Replaced PR link validation workflow with new workflow from docs-tooling/link-checker/.github-workflows-link-check.yml chore: organize .gitignore test: add content to trigger link-checker workflow This small change tests the pr-link-check.yml workflow feat: update link-checker workflow and documentation - Add production config with corrected User-Agent placement - Remove old link validation actions (replaced by link-checker) fix: update link-checker workflow configuration - Update Node.js version to 20 for dependency compatibility feat: use pre-built link-checker binary from docs-tooling releases - Replace building from source with downloading from releases - Use GitHub API to get latest release and binary - Maintain same artifact structure for downstream job fix: improve change detection in pr-link-check workflow - Use GitHub API for reliable PR file detection - Add debug output to show all changed files - Fix conditional logic for when jobs should run docs: update TESTING.md with binary distribution and automated GitHub Actions integration - Document pre-built binary download as recommended installation method - Explain automated PR link checking workflow for docs-v2 - Replace manual GitHub Actions example with automated integration details - Remove exaggerated language and specify actual exclusion types fix(ci): download link-checker binary from docs-v2 releases - Change binary source from private docs-tooling to public docs-v2 releases - Fixes GitHub Actions permission issues accessing private repos - Binary is now stored as a release asset on docs-v2 itself test: add test file with valid links to verify workflow passes test: remove temporary test file for link checker workflow The test file was only needed 
to verify the workflow functionality and should not be part of the documentation. docs: update TESTING.md to document docs-v2 binary distribution - Change primary installation method to download from docs-v2 releases - Explain that binary distribution enables reliable GitHub Actions access - Update automated workflow description to reflect docs-v2 release usage - Maintain build-from-source as alternative option refactor(ci): combine workflow into single job for cleaner PR display - Merge detect-changes, build-site, and download-link-checker into single job - All setup steps now run conditionally within one job - Cleaner PR display shows only 'Check links in affected files' - Maintains all functionality with improved UX fix(ci): exclude problematic URLs from link checking - Add reddit.com exclusions (blocks bots) - Add support.influxdata.com exclusion (SSL certificate issues in CI) - Prevents false positive failures in automated link checking --- .ci/link-checker/default.lycherc.toml | 66 +++++ .ci/link-checker/production.lycherc.toml | 108 ++++++++ .../actions/report-broken-links/action.yml | 103 -------- .github/actions/validate-links/action.yml | 106 -------- .github/workflows/pr-link-check.yml | 241 ++++++++++++++++++ .github/workflows/pr-link-validation.yml | 148 ----------- .gitignore | 13 +- TESTING.md | 219 ++++++++++------ content/influxdb3/core/get-started/_index.md | 1 + 9 files changed, 572 insertions(+), 433 deletions(-) create mode 100644 .ci/link-checker/default.lycherc.toml create mode 100644 .ci/link-checker/production.lycherc.toml delete mode 100644 .github/actions/report-broken-links/action.yml delete mode 100644 .github/actions/validate-links/action.yml create mode 100644 .github/workflows/pr-link-check.yml delete mode 100644 .github/workflows/pr-link-validation.yml diff --git a/.ci/link-checker/default.lycherc.toml b/.ci/link-checker/default.lycherc.toml new file mode 100644 index 000000000..22f97a0f9 --- /dev/null +++ 
b/.ci/link-checker/default.lycherc.toml @@ -0,0 +1,66 @@ +# Lychee link checker configuration +# Generated by link-checker +[lychee] +# Performance settings + +# Maximum number of retries for failed checks + +max_retries = 3 + +# Timeout for each link check (in seconds) +timeout = 30 + +# Maximum number of concurrent checks +max_concurrency = 128 + +skip_code_blocks = false + +# HTTP settings +# Identify the tool to external services +user_agent = "Mozilla/5.0 (compatible; link-checker)" + +# Accept these HTTP status codes as valid +accept = [200, 201, 202, 203, 204, 206, 301, 302, 303, 304, +307, 308] + +# Skip these URL schemes +scheme = ["file", "mailto", "tel"] + +# Exclude patterns (regex supported) +exclude = [ + # Localhost URLs + "^https?://localhost", + "^https?://127\\.0\\.0\\.1", + + # Common CI/CD environments + "^https?://.*\\.local", + + # Example domains used in documentation + "^https?://example\\.(com|org|net)", + + # Placeholder URLs from code block filtering + "https://example.com/REMOVED_FROM_CODE_BLOCK", + "example.com/INLINE_CODE_URL", + + # URLs that require authentication + "^https?://.*\\.slack\\.com", + "^https?://.*\\.atlassian\\.net", + + # GitHub URLs (often fail due to rate limiting and bot + # detection) + "^https?://github\\.com", + + # Common documentation placeholders + "YOUR_.*", + "REPLACE_.*", + "<.*>", +] + +# Request headers +[headers] +# Add custom headers here if needed +# "Authorization" = "Bearer $GITHUB_TOKEN" + +# Cache settings +cache = true +max_cache_age = "1d" \ No newline at end of file diff --git a/.ci/link-checker/production.lycherc.toml b/.ci/link-checker/production.lycherc.toml new file mode 100644 index 000000000..9b8be5aa3 --- /dev/null +++ b/.ci/link-checker/production.lycherc.toml @@ -0,0 +1,108 @@ +# Production Link Checker Configuration for InfluxData docs-v2 +# Optimized for performance, reliability, and reduced false positives +[lychee] +# Performance settings + +# Maximum number of retries for failed 
checks + +max_retries = 3 + +# Timeout for each link check (in seconds) +timeout = 30 + +# Maximum number of concurrent checks +max_concurrency = 128 + +skip_code_blocks = false + +# HTTP settings +# Identify the tool to external services +"User-Agent" = "Mozilla/5.0 (compatible; influxdata-link-checker/1.0; +https://github.com/influxdata/docs-v2)" +accept = [200, 201, 202, 203, 204, 206, 301, 302, 303, 304, 307, 308] + +# Skip these URL schemes +scheme = ["mailto", "tel"] + +# Performance optimizations +cache = true +max_cache_age = "1h" + +# Retry configuration for reliability +include_verbatim = false + +# Exclusion patterns for docs-v2 (regex supported) +exclude = [ + # Localhost URLs + "^https?://localhost", + "^https?://127\\.0\\.0\\.1", + + # Common CI/CD environments + "^https?://.*\\.local", + + # Example domains used in documentation + "^https?://example\\.(com|org|net)", + + # Placeholder URLs from code block filtering + "https://example.com/REMOVED_FROM_CODE_BLOCK", + "example.com/INLINE_CODE_URL", + + # URLs that require authentication + "^https?://.*\\.slack\\.com", + "^https?://.*\\.atlassian\\.net", + + # GitHub URLs (often fail due to rate limiting and bot + # detection) + "^https?://github\\.com", + + # Social media URLs (often block bots) + "^https?://reddit\\.com", + "^https?://.*\\.reddit\\.com", + + # InfluxData support URLs (certificate/SSL issues in CI) + "^https?://support\\.influxdata\\.com", + + # Common documentation placeholders + "YOUR_.*", + "REPLACE_.*", + "<.*>", +] + +# Request headers +[headers] +# Add custom headers here if needed +# "Authorization" = "Bearer $GITHUB_TOKEN" +"Accept" = "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8" +"Accept-Language" = "en-US,en;q=0.5" +"Accept-Encoding" = "gzip, deflate" +"DNT" = "1" +"Connection" = "keep-alive" +"Upgrade-Insecure-Requests" = "1" + +[ci] +# CI-specific settings + +[ci.github_actions] +output_format = "json" +create_annotations = true +fail_fast = false 
+max_annotations = 50 # Limit to avoid overwhelming PR comments + +[ci.performance] +# Performance tuning for CI environment +parallel_requests = 32 +connection_timeout = 10 +read_timeout = 30 + +# Resource limits +max_memory_mb = 512 +max_execution_time_minutes = 10 + +[reporting] +# Report configuration +include_fragments = false +verbose = false +no_progress = true # Disable progress bar in CI + +# Summary settings +show_success_count = true +show_skipped_count = true \ No newline at end of file diff --git a/.github/actions/report-broken-links/action.yml b/.github/actions/report-broken-links/action.yml deleted file mode 100644 index 9e95e5605..000000000 --- a/.github/actions/report-broken-links/action.yml +++ /dev/null @@ -1,103 +0,0 @@ -name: 'Report Broken Links' -description: 'Downloads broken link reports, generates PR comment, and posts results' - -inputs: - github-token: - description: 'GitHub token for posting comments' - required: false - default: ${{ github.token }} - max-links-per-file: - description: 'Maximum links to show per file in comment' - required: false - default: '20' - include-success-message: - description: 'Include success message when no broken links found' - required: false - default: 'true' - -outputs: - has-broken-links: - description: 'Whether broken links were found (true/false)' - value: ${{ steps.generate-comment.outputs.has-broken-links }} - broken-link-count: - description: 'Number of broken links found' - value: ${{ steps.generate-comment.outputs.broken-link-count }} - -runs: - using: 'composite' - steps: - - name: Download broken link reports - uses: actions/download-artifact@v4 - with: - path: reports - continue-on-error: true - - - name: Generate PR comment - id: generate-comment - run: | - # Generate comment using our script - node .github/scripts/comment-generator.js \ - --max-links ${{ inputs.max-links-per-file }} \ - ${{ inputs.include-success-message == 'false' && '--no-success' || '' }} \ - --output-file comment.md \ - 
reports/ || echo "No reports found or errors occurred" - - # Check if comment file was created and has content - if [[ -f comment.md && -s comment.md ]]; then - echo "comment-generated=true" >> $GITHUB_OUTPUT - - # Count broken links by parsing the comment - broken_count=$(grep -o "Found [0-9]* broken link" comment.md | grep -o "[0-9]*" || echo "0") - echo "broken-link-count=$broken_count" >> $GITHUB_OUTPUT - - # Check if there are actually broken links (not just a success comment) - if [[ "$broken_count" -gt 0 ]]; then - echo "has-broken-links=true" >> $GITHUB_OUTPUT - else - echo "has-broken-links=false" >> $GITHUB_OUTPUT - fi - else - echo "has-broken-links=false" >> $GITHUB_OUTPUT - echo "broken-link-count=0" >> $GITHUB_OUTPUT - echo "comment-generated=false" >> $GITHUB_OUTPUT - fi - shell: bash - - - name: Post PR comment - if: steps.generate-comment.outputs.comment-generated == 'true' - uses: actions/github-script@v7 - with: - github-token: ${{ inputs.github-token }} - script: | - const fs = require('fs'); - - if (fs.existsSync('comment.md')) { - const comment = fs.readFileSync('comment.md', 'utf8'); - - if (comment.trim()) { - await github.rest.issues.createComment({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - body: comment - }); - } - } - - - name: Report validation results - run: | - has_broken_links="${{ steps.generate-comment.outputs.has-broken-links }}" - broken_count="${{ steps.generate-comment.outputs.broken-link-count }}" - - if [ "$has_broken_links" = "true" ]; then - echo "::error::❌ Link validation failed: Found $broken_count broken link(s)" - echo "Check the PR comment for detailed broken link information" - exit 1 - else - echo "::notice::✅ Link validation passed successfully" - echo "All links in the changed files are valid" - if [ "${{ steps.generate-comment.outputs.comment-generated }}" = "true" ]; then - echo "PR comment posted with validation summary and cache statistics" - fi - fi - 
shell: bash \ No newline at end of file diff --git a/.github/actions/validate-links/action.yml b/.github/actions/validate-links/action.yml deleted file mode 100644 index cf180556c..000000000 --- a/.github/actions/validate-links/action.yml +++ /dev/null @@ -1,106 +0,0 @@ -name: 'Validate Links' -description: 'Runs e2e browser-based link validation tests against Hugo site using Cypress' - -inputs: - files: - description: 'Space-separated list of files to validate' - required: true - product-name: - description: 'Product name for reporting (optional)' - required: false - default: '' - cache-enabled: - description: 'Enable link validation caching' - required: false - default: 'true' - cache-key: - description: 'Cache key prefix for this validation run' - required: false - default: 'link-validation' - timeout: - description: 'Test timeout in seconds' - required: false - default: '900' - -outputs: - failed: - description: 'Whether validation failed (true/false)' - value: ${{ steps.validate.outputs.failed }} - -runs: - using: 'composite' - steps: - - name: Restore link validation cache - if: inputs.cache-enabled == 'true' - uses: actions/cache@v4 - with: - path: .cache/link-validation - key: ${{ inputs.cache-key }}-${{ runner.os }}-${{ hashFiles('content/**/*.md', 'content/**/*.html') }} - restore-keys: | - ${{ inputs.cache-key }}-${{ runner.os }}- - ${{ inputs.cache-key }}- - - - name: Run link validation - shell: bash - run: | - # Set CI-specific environment variables - export CI=true - export GITHUB_ACTIONS=true - export NODE_OPTIONS="--max-old-space-size=4096" - - # Set test runner timeout for Hugo shutdown - export HUGO_SHUTDOWN_TIMEOUT=5000 - - # Add timeout to prevent hanging (timeout command syntax: timeout DURATION COMMAND) - timeout ${{ inputs.timeout }}s node cypress/support/run-e2e-specs.js ${{ inputs.files }} \ - --spec cypress/e2e/content/article-links.cy.js || { - exit_code=$? 
- - # Handle timeout specifically - if [ $exit_code -eq 124 ]; then - echo "::error::Link validation timed out after ${{ inputs.timeout }} seconds" - echo "::notice::This may indicate Hugo server startup issues or very slow link validation" - else - echo "::error::Link validation failed with exit code $exit_code" - fi - - # Check for specific error patterns and logs (but don't dump full content) - if [ -f /tmp/hugo_server.log ]; then - echo "Hugo server log available for debugging" - fi - - if [ -f hugo.log ]; then - echo "Additional Hugo log available for debugging" - fi - - if [ -f /tmp/broken_links_report.json ]; then - # Only show summary, not full report (full report is uploaded as artifact) - broken_count=$(grep -o '"url":' /tmp/broken_links_report.json | wc -l || echo "0") - echo "Broken links report contains $broken_count entries" - fi - - exit $exit_code - } - - # Report success if we get here - echo "::notice::✅ Link validation completed successfully" - echo "No broken links detected in the tested files" - - - name: Upload logs on failure - if: failure() - uses: actions/upload-artifact@v4 - with: - name: validation-logs-${{ inputs.product-name && inputs.product-name || 'default' }} - path: | - hugo.log - /tmp/hugo_server.log - if-no-files-found: ignore - - - - name: Upload broken links report - if: always() - uses: actions/upload-artifact@v4 - with: - name: broken-links-report${{ inputs.product-name && format('-{0}', inputs.product-name) || '' }} - path: /tmp/broken_links_report.json - if-no-files-found: ignore \ No newline at end of file diff --git a/.github/workflows/pr-link-check.yml b/.github/workflows/pr-link-check.yml new file mode 100644 index 000000000..b0764089a --- /dev/null +++ b/.github/workflows/pr-link-check.yml @@ -0,0 +1,241 @@ +name: Link Check PR Changes + +on: + pull_request: + paths: + - 'content/**/*.md' + - 'data/**/*.yml' + - 'layouts/**/*.html' + types: [opened, synchronize, reopened] + +jobs: + link-check: + name: Check links in 
affected files + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Detect content changes + id: detect + run: | + echo "🔍 Detecting changes between ${{ github.base_ref }} and ${{ github.sha }}" + + # For PRs, use the GitHub Files API to get changed files + if [[ "${{ github.event_name }}" == "pull_request" ]]; then + echo "Using GitHub API to detect PR changes..." + curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.number }}/files" \ + | jq -r '.[].filename' > all_changed_files.txt + else + echo "Using git diff to detect changes..." + git diff --name-only ${{ github.event.before }}..${{ github.sha }} > all_changed_files.txt + fi + + # Filter for content markdown files + CHANGED_FILES=$(grep '^content/.*\.md$' all_changed_files.txt || true) + + echo "📁 All changed files:" + cat all_changed_files.txt + echo "" + echo "📝 Content markdown files:" + echo "$CHANGED_FILES" + + if [[ -n "$CHANGED_FILES" ]]; then + echo "✅ Found $(echo "$CHANGED_FILES" | wc -l) changed content file(s)" + echo "has-changes=true" >> $GITHUB_OUTPUT + echo "changed-content<> $GITHUB_OUTPUT + echo "$CHANGED_FILES" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + + # Check if any shared content files were modified + SHARED_CHANGES=$(echo "$CHANGED_FILES" | grep '^content/shared/' || true) + if [[ -n "$SHARED_CHANGES" ]]; then + echo "has-shared-content=true" >> $GITHUB_OUTPUT + echo "🔄 Detected shared content changes: $SHARED_CHANGES" + else + echo "has-shared-content=false" >> $GITHUB_OUTPUT + fi + else + echo "❌ No content changes detected" + echo "has-changes=false" >> $GITHUB_OUTPUT + echo "has-shared-content=false" >> $GITHUB_OUTPUT + fi + + - name: Skip if no content changes + if: steps.detect.outputs.has-changes == 'false' + run: | + echo "No content changes detected in this PR - skipping link check" + echo "✅ **No 
content changes detected** - link check skipped" >> $GITHUB_STEP_SUMMARY + + - name: Setup Node.js + if: steps.detect.outputs.has-changes == 'true' + uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'yarn' + + - name: Install dependencies + if: steps.detect.outputs.has-changes == 'true' + run: yarn install --frozen-lockfile + + - name: Build Hugo site + if: steps.detect.outputs.has-changes == 'true' + run: npx hugo --minify + + - name: Download link-checker binary + if: steps.detect.outputs.has-changes == 'true' + run: | + echo "Downloading link-checker binary from docs-v2 releases..." + + # Download from docs-v2's own releases (always accessible) + curl -L -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ + -o link-checker-info.json \ + "https://api.github.com/repos/influxdata/docs-v2/releases/tags/link-checker-v1.0.0" + + # Extract download URL for linux binary + DOWNLOAD_URL=$(jq -r '.assets[] | select(.name | test("link-checker.*linux")) | .url' link-checker-info.json) + + if [[ "$DOWNLOAD_URL" == "null" || -z "$DOWNLOAD_URL" ]]; then + echo "❌ No linux binary found in release" + echo "Available assets:" + jq -r '.assets[].name' link-checker-info.json + exit 1 + fi + + echo "📥 Downloading: $DOWNLOAD_URL" + curl -L -H "Accept: application/octet-stream" \ + -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ + -o link-checker "$DOWNLOAD_URL" + + chmod +x link-checker + ./link-checker --version + + - name: Verify link checker config exists + if: steps.detect.outputs.has-changes == 'true' + run: | + if [[ ! 
-f .ci/link-checker/production.lycherc.toml ]]; then + echo "❌ Configuration file .ci/link-checker/production.lycherc.toml not found" + echo "Please copy production.lycherc.toml from docs-tooling/link-checker/" + exit 1 + fi + echo "✅ Using configuration: .ci/link-checker/production.lycherc.toml" + + - name: Map changed content to public files + if: steps.detect.outputs.has-changes == 'true' + id: mapping + run: | + echo "Mapping changed content files to public HTML files..." + + # Create temporary file with changed content files + echo "${{ steps.detect.outputs.changed-content }}" > changed-files.txt + + # Map content files to public files + PUBLIC_FILES=$(cat changed-files.txt | xargs -r ./link-checker map --existing-only) + + if [[ -n "$PUBLIC_FILES" ]]; then + echo "Found affected public files:" + echo "$PUBLIC_FILES" + echo "public-files<> $GITHUB_OUTPUT + echo "$PUBLIC_FILES" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + + # Count files for summary + FILE_COUNT=$(echo "$PUBLIC_FILES" | wc -l) + echo "file-count=$FILE_COUNT" >> $GITHUB_OUTPUT + else + echo "No public files found to check" + echo "public-files=" >> $GITHUB_OUTPUT + echo "file-count=0" >> $GITHUB_OUTPUT + fi + + - name: Run link checker + if: steps.detect.outputs.has-changes == 'true' && steps.mapping.outputs.public-files != '' + id: link-check + run: | + echo "Checking links in ${{ steps.mapping.outputs.file-count }} affected files..." + + # Create temporary file with public files list + echo "${{ steps.mapping.outputs.public-files }}" > public-files.txt + + # Run link checker with detailed JSON output + set +e # Don't fail immediately on error + + cat public-files.txt | xargs -r ./link-checker check \ + --config .ci/link-checker/production.lycherc.toml \ + --format json \ + --output link-check-results.json + + EXIT_CODE=$? 
+ + if [[ -f link-check-results.json ]]; then + # Parse results + BROKEN_COUNT=$(jq -r '.summary.broken_count // 0' link-check-results.json) + TOTAL_COUNT=$(jq -r '.summary.total_checked // 0' link-check-results.json) + SUCCESS_RATE=$(jq -r '.summary.success_rate // 0' link-check-results.json) + + echo "broken-count=$BROKEN_COUNT" >> $GITHUB_OUTPUT + echo "total-count=$TOTAL_COUNT" >> $GITHUB_OUTPUT + echo "success-rate=$SUCCESS_RATE" >> $GITHUB_OUTPUT + + if [[ $BROKEN_COUNT -gt 0 ]]; then + echo "❌ Found $BROKEN_COUNT broken links out of $TOTAL_COUNT total links" + echo "check-result=failed" >> $GITHUB_OUTPUT + else + echo "✅ All $TOTAL_COUNT links are valid" + echo "check-result=passed" >> $GITHUB_OUTPUT + fi + else + echo "❌ Link check failed to generate results" + echo "check-result=error" >> $GITHUB_OUTPUT + fi + + exit $EXIT_CODE + + - name: Process and report results + if: always() && steps.detect.outputs.has-changes == 'true' && steps.mapping.outputs.public-files != '' + run: | + if [[ -f link-check-results.json ]]; then + # Create detailed error annotations for broken links + if [[ "${{ steps.link-check.outputs.check-result }}" == "failed" ]]; then + echo "Creating error annotations for broken links..." + + jq -r '.broken_links[]? 
| + "::error file=\(.file // "unknown"),line=\(.line // 1)::Broken link: \(.url) - \(.error // "Unknown error")"' \ + link-check-results.json || true + fi + + # Generate summary comment + cat >> $GITHUB_STEP_SUMMARY << 'EOF' + ## Link Check Results + + **Files Checked:** ${{ steps.mapping.outputs.file-count }} + **Total Links:** ${{ steps.link-check.outputs.total-count }} + **Broken Links:** ${{ steps.link-check.outputs.broken-count }} + **Success Rate:** ${{ steps.link-check.outputs.success-rate }}% + + EOF + + if [[ "${{ steps.link-check.outputs.check-result }}" == "failed" ]]; then + echo "❌ **Link check failed** - see annotations above for details" >> $GITHUB_STEP_SUMMARY + else + echo "✅ **All links are valid**" >> $GITHUB_STEP_SUMMARY + fi + else + echo "⚠️ **Link check could not complete** - no results file generated" >> $GITHUB_STEP_SUMMARY + fi + + - name: Upload detailed results + if: always() && steps.detect.outputs.has-changes == 'true' && steps.mapping.outputs.public-files != '' + uses: actions/upload-artifact@v4 + with: + name: link-check-results + path: | + link-check-results.json + changed-files.txt + public-files.txt + retention-days: 30 \ No newline at end of file diff --git a/.github/workflows/pr-link-validation.yml b/.github/workflows/pr-link-validation.yml deleted file mode 100644 index 8d6a8a735..000000000 --- a/.github/workflows/pr-link-validation.yml +++ /dev/null @@ -1,148 +0,0 @@ -# PR Link Validation Workflow -# Provides basic and parallel workflows -# with smart strategy selection based on change volume -name: PR Link Validation - -on: - pull_request: - paths: - - 'content/**/*.md' - - 'content/**/*.html' - - 'api-docs/**/*.yml' - - 'assets/**/*.js' - - 'layouts/**/*.html' - -jobs: - # TEMPORARILY DISABLED - Remove this condition to re-enable link validation - disabled-check: - if: false # Set to true to re-enable the workflow - runs-on: ubuntu-latest - steps: - - run: echo "Link validation is temporarily disabled" - setup: - name: Setup 
and Strategy Detection - runs-on: ubuntu-latest - if: false # TEMPORARILY DISABLED - Remove this condition to re-enable - outputs: - strategy: ${{ steps.determine-strategy.outputs.strategy }} - has-changes: ${{ steps.determine-strategy.outputs.has-changes }} - matrix: ${{ steps.determine-strategy.outputs.matrix }} - all-files: ${{ steps.changed-files.outputs.all_changed_files }} - cache-hit-rate: ${{ steps.determine-strategy.outputs.cache-hit-rate }} - cache-hits: ${{ steps.determine-strategy.outputs.cache-hits }} - cache-misses: ${{ steps.determine-strategy.outputs.cache-misses }} - original-file-count: ${{ steps.determine-strategy.outputs.original-file-count }} - validation-file-count: ${{ steps.determine-strategy.outputs.validation-file-count }} - cache-message: ${{ steps.determine-strategy.outputs.message }} - steps: - - name: Checkout - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Setup docs environment - uses: ./.github/actions/setup-docs-env - - - name: Get changed files - id: changed-files - uses: tj-actions/changed-files@v41 - with: - files: | - content/**/*.md - content/**/*.html - api-docs/**/*.yml - - - name: Determine validation strategy - id: determine-strategy - run: | - if [[ "${{ steps.changed-files.outputs.any_changed }}" != "true" ]]; then - echo "No relevant files changed" - echo "strategy=none" >> $GITHUB_OUTPUT - echo "has-changes=false" >> $GITHUB_OUTPUT - echo "matrix={\"include\":[]}" >> $GITHUB_OUTPUT - echo "cache-hit-rate=100" >> $GITHUB_OUTPUT - echo "cache-hits=0" >> $GITHUB_OUTPUT - echo "cache-misses=0" >> $GITHUB_OUTPUT - exit 0 - fi - - # Use our matrix generator with cache awareness - files="${{ steps.changed-files.outputs.all_changed_files }}" - - echo "🔍 Analyzing ${files} for cache-aware validation..." 
- - # Generate matrix and capture outputs - result=$(node .github/scripts/matrix-generator.js \ - --min-files-parallel 10 \ - --max-concurrent 5 \ - --output-format github \ - $files) - - # Parse all outputs from matrix generator - while IFS='=' read -r key value; do - case "$key" in - strategy|has-changes|cache-hit-rate|cache-hits|cache-misses|original-file-count|validation-file-count|message) - echo "$key=$value" >> $GITHUB_OUTPUT - ;; - matrix) - echo "matrix=$value" >> $GITHUB_OUTPUT - ;; - esac - done <<< "$result" - - # Extract values for logging - strategy=$(echo "$result" | grep "^strategy=" | cut -d'=' -f2) - cache_hit_rate=$(echo "$result" | grep "^cache-hit-rate=" | cut -d'=' -f2) - cache_message=$(echo "$result" | grep "^message=" | cut -d'=' -f2-) - - echo "📊 Selected strategy: $strategy" - if [[ -n "$cache_hit_rate" ]]; then - echo "📈 Cache hit rate: ${cache_hit_rate}%" - fi - if [[ -n "$cache_message" ]]; then - echo "$cache_message" - fi - - validate: - name: ${{ matrix.name }} - needs: setup - if: false # TEMPORARILY DISABLED - Original condition: needs.setup.outputs.has-changes == 'true' - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.setup.outputs.matrix) }} - steps: - - name: Checkout - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Setup docs environment - uses: ./.github/actions/setup-docs-env - - - name: Validate links - uses: ./.github/actions/validate-links - with: - files: ${{ matrix.files || needs.setup.outputs.all-files }} - product-name: ${{ matrix.product }} - cache-enabled: ${{ matrix.cacheEnabled || 'true' }} - cache-key: link-validation-${{ hashFiles(matrix.files || needs.setup.outputs.all-files) }} - timeout: 900 - - report: - name: Report Results - needs: [setup, validate] - if: false # TEMPORARILY DISABLED - Original condition: always() && needs.setup.outputs.has-changes == 'true' - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Setup 
docs environment - uses: ./.github/actions/setup-docs-env - - - name: Report broken links - uses: ./.github/actions/report-broken-links - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - max-links-per-file: 20 \ No newline at end of file diff --git a/.gitignore b/.gitignore index 0d9d333c3..32765da72 100644 --- a/.gitignore +++ b/.gitignore @@ -3,11 +3,14 @@ public .*.swp node_modules +package-lock.json .config* **/.env* *.log /resources .hugo_build.lock + +# Content generation /content/influxdb*/**/api/**/*.html !api-docs/**/.config.yml /api-docs/redoc-static.html* @@ -16,18 +19,22 @@ node_modules !telegraf-build/templates !telegraf-build/scripts !telegraf-build/README.md + +# CI/CD tool files /cypress/downloads/* /cypress/screenshots/* /cypress/videos/* +.lycheecache test-results.xml /influxdb3cli-build-scripts/content +tmp + +# IDE files .vscode/* !.vscode/launch.json .idea **/config.toml -package-lock.json -tmp -# Context files for LLMs and AI tools +# User context files for AI assistant tools .context/* !.context/README.md diff --git a/TESTING.md b/TESTING.md index 44a5006ae..e0a2f6f78 100644 --- a/TESTING.md +++ b/TESTING.md @@ -121,96 +121,169 @@ Potential causes: # This is ignored ``` -## Link Validation Testing +## Link Validation with Link-Checker -Link validation uses Cypress for e2e browser-based testing against the Hugo site to ensure all internal and external links work correctly. +Link validation uses the `link-checker` tool to validate internal and external links in documentation files. 
### Basic Usage +#### Installation + +**Option 1: Download from docs-v2 releases (recommended)** + +The link-checker binary is distributed via docs-v2 releases for reliable access from GitHub Actions workflows: + ```bash -# Test specific files -yarn test:links content/influxdb3/core/**/*.md +# Download binary from docs-v2 releases +curl -L -o link-checker \ + https://github.com/influxdata/docs-v2/releases/download/link-checker-v1.0.0/link-checker-linux-x86_64 +chmod +x link-checker -# Test all links (may take a long time) -yarn test:links - -# Test by product (may take a long time) -yarn test:links:v3 -yarn test:links:v2 -yarn test:links:telegraf -yarn test:links:chronograf -yarn test:links:kapacitor +# Verify installation +./link-checker --version ``` -### How Link Validation Works +**Option 2: Build from source** -The tests: -1. Start a Hugo development server -2. Navigate to each page in a browser -3. Check all links for validity -4. Report broken or invalid links +```bash +# Clone and build link-checker +git clone https://github.com/influxdata/docs-tooling.git +cd docs-tooling/link-checker +cargo build --release + +# Copy binary to your PATH or use directly +cp target/release/link-checker /usr/local/bin/ +``` + +#### Core Commands + +```bash +# Map content files to public HTML files +link-checker map content/path/to/file.md + +# Check links in HTML files +link-checker check public/path/to/file.html + +# Generate configuration file +link-checker config +``` + +### Content Mapping Workflows + +#### Scenario 1: Map and check InfluxDB 3 Core content + +```bash +# Map Markdown files to HTML +link-checker map content/influxdb3/core/get-started/ + +# Check links in mapped HTML files +link-checker check public/influxdb3/core/get-started/ +``` + +#### Scenario 2: Map and check shared CLI content + +```bash +# Map shared content files +link-checker map content/shared/influxdb3-cli/ + +# Check the mapped output files +# (link-checker map outputs the HTML file paths) 
+link-checker map content/shared/influxdb3-cli/ | \ + xargs link-checker check +``` + +#### Scenario 3: Direct HTML checking + +```bash +# Check HTML files directly without mapping +link-checker check public/influxdb3/core/get-started/ +``` + +#### Combined workflow for changed files + +```bash +# Check only files changed in the last commit +git diff --name-only HEAD~1 HEAD | grep '\.md$' | \ + xargs link-checker map | \ + xargs link-checker check +``` + +### Configuration Options + +#### Local usage (default configuration) + +```bash +# Uses default settings or test.lycherc.toml if present +link-checker check public/influxdb3/core/get-started/ +``` + +#### Production usage (GitHub Actions) + +```bash +# Use production configuration with comprehensive exclusions +link-checker check \ + --config .ci/link-checker/production.lycherc.toml \ + public/influxdb3/core/get-started/ +``` ### GitHub Actions Integration -#### Composite Action +**Automated Integration (docs-v2)** -The `.github/actions/validate-links/` composite action provides reusable link validation: +The docs-v2 repository includes automated link checking for pull requests: + +- **Trigger**: Runs automatically on PRs that modify content files +- **Binary distribution**: Downloads latest pre-built binary from docs-v2 releases +- **Smart detection**: Only checks files affected by PR changes +- **Production config**: Uses optimized settings with exclusions for GitHub, social media, etc. +- **Results reporting**: Broken links reported as GitHub annotations with detailed summaries + +The workflow automatically: +1. Detects content changes in PRs using GitHub Files API +2. Downloads latest link-checker binary from docs-v2 releases +3. Builds Hugo site and maps changed content to public HTML files +4. Runs link checking with production configuration +5. 
Reports results with annotations and step summaries + +**Manual Integration (other repositories)** + +For other repositories, you can integrate link checking manually: ```yaml -- uses: ./.github/actions/validate-links - with: - files: "content/influxdb3/core/file.md content/influxdb/v2/file2.md" - product-name: "core" - cache-enabled: "true" - cache-key: "link-validation" +name: Link Check +on: + pull_request: + paths: + - 'content/**/*.md' + +jobs: + link-check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Download link-checker + run: | + curl -L -o link-checker \ + https://github.com/influxdata/docs-tooling/releases/latest/download/link-checker-linux-x86_64 + chmod +x link-checker + cp target/release/link-checker ../../link-checker + cd ../.. + + - name: Build Hugo site + run: | + npm install + npx hugo --minify + + - name: Check changed files + run: | + git diff --name-only origin/main HEAD | \ + grep '\.md$' | \ + xargs ./link-checker map | \ + xargs ./link-checker check \ + --config .ci/link-checker/production.lycherc.toml ``` -#### Matrix Generator - -The `.github/scripts/matrix-generator.js` script provides intelligent strategy selection: - -- **Sequential validation**: For small changes (< 10 files) or single-product changes -- **Parallel validation**: For large changes across multiple products (up to 5 concurrent jobs) - -Test locally: - -```bash -node .github/scripts/matrix-generator.js content/influxdb3/core/file1.md content/influxdb/v2/file2.md -``` - -Configuration options: -- `--max-concurrent `: Maximum parallel jobs (default: 5) -- `--force-sequential`: Force sequential execution -- `--min-files-parallel `: Minimum files for parallel (default: 10) - -### Caching for Link Validation - -Link validation supports caching to improve performance: - -- **Cache location**: `.cache/link-validation/` (local), GitHub Actions cache (CI) -- **Cache keys**: Based on content file hashes -- **TTL**: 30 days by default, configurable - 
-#### Cache Configuration Options - -```bash -# Use 7-day cache for more frequent validation -yarn test:links --cache-ttl=7 content/influxdb3/**/*.md - -# Use 1-day cache via environment variable -LINK_CACHE_TTL_DAYS=1 yarn test:links content/**/*.md - -# Clean up expired cache entries -node .github/scripts/incremental-validator.js --cleanup -``` - -#### How Caching Works - -- **Cache key**: Based on file path + content hash (file changes invalidate cache immediately) -- **External links**: Cached for the TTL period since URLs rarely change -- **Internal links**: Effectively cached until file content changes -- **Automatic cleanup**: Expired entries are removed on access and via `--cleanup` - ## Style Linting (Vale) Style linting uses [Vale](https://vale.sh/) to enforce documentation writing standards, branding guidelines, and vocabulary consistency. diff --git a/content/influxdb3/core/get-started/_index.md b/content/influxdb3/core/get-started/_index.md index 16398f32f..72cbc7746 100644 --- a/content/influxdb3/core/get-started/_index.md +++ b/content/influxdb3/core/get-started/_index.md @@ -18,6 +18,7 @@ prepend: | > [!Note] > InfluxDB 3 Core is purpose-built for real-time data monitoring and recent data. > InfluxDB 3 Enterprise builds on top of Core with support for historical data + > analysis and extended features. > querying, high availability, read replicas, and more. > Enterprise will soon unlock > enhanced security, row-level deletions, an administration UI, and more. 
From 0546d66ac04b0adc7dc0f86d9061fb332cdd93ff Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Sun, 17 Aug 2025 09:53:10 -0500 Subject: [PATCH 078/179] ci: link-checker known positive test with existing broken link, platform-specific instructions --- TESTING.md | 22 ++++++++++++++++--- .../influxql/functions/transformations.md | 4 ++-- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/TESTING.md b/TESTING.md index e0a2f6f78..dba8f892e 100644 --- a/TESTING.md +++ b/TESTING.md @@ -129,12 +129,27 @@ Link validation uses the `link-checker` tool to validate internal and external l #### Installation -**Option 1: Download from docs-v2 releases (recommended)** +**Option 1: Build from source (macOS/local development)** + +For local development on macOS, build the link-checker from source: + +```bash +# Clone and build link-checker +git clone https://github.com/influxdata/docs-tooling.git +cd docs-tooling/link-checker +cargo build --release + +# Copy binary to your PATH or use directly +cp target/release/link-checker /usr/local/bin/ +# OR use directly: ./target/release/link-checker +``` + +**Option 2: Download pre-built binary (GitHub Actions/Linux)** The link-checker binary is distributed via docs-v2 releases for reliable access from GitHub Actions workflows: ```bash -# Download binary from docs-v2 releases +# Download Linux binary from docs-v2 releases curl -L -o link-checker \ https://github.com/influxdata/docs-v2/releases/download/link-checker-v1.0.0/link-checker-linux-x86_64 chmod +x link-checker @@ -143,7 +158,8 @@ chmod +x link-checker ./link-checker --version ``` -**Option 2: Build from source** +> [!Note] +> Pre-built binaries are currently Linux x86_64 only. For macOS development, use Option 1 to build from source. 
```bash # Clone and build link-checker diff --git a/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md b/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md index 433562e74..d3ce61442 100644 --- a/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md +++ b/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md @@ -704,7 +704,7 @@ name: data ## ATAN2() -Returns the the arctangent of `y/x` in radians. +Returns the arctangent of `y/x` in radians. ### Basic syntax @@ -1609,7 +1609,7 @@ SELECT DERIVATIVE( ([ * | | // ]) [ , The advanced syntax requires a [`GROUP BY time()` clause](/influxdb/version/query-data/influxql/explore-data/group-by/#group-by-time-intervals) and a nested InfluxQL function. The query first calculates the results for the nested function at the specified `GROUP BY time()` interval and then applies the `DERIVATIVE()` function to those results. -The `unit` argument is an integer followed by a [duration](//influxdb/version/reference/glossary/#duration) and it is optional. +The `unit` argument is an integer followed by a [duration](///influxdb/version/reference/glossary/#duration) and it is optional. If the query does not specify the `unit` the `unit` defaults to the `GROUP BY time()` interval. Note that this behavior is different from the [basic syntax's](#basic-syntax-1) default behavior. 
From 98735f4bef2fe2605a5956b62026d23e51f64348 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 18 Aug 2025 10:21:10 -0500 Subject: [PATCH 079/179] test link-checker, positive test --- .../query-data/influxql/functions/transformations.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md b/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md index d3ce61442..7fbdd3a0a 100644 --- a/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md +++ b/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md @@ -1609,7 +1609,7 @@ SELECT DERIVATIVE( ([ * | | // ]) [ , The advanced syntax requires a [`GROUP BY time()` clause](/influxdb/version/query-data/influxql/explore-data/group-by/#group-by-time-intervals) and a nested InfluxQL function. The query first calculates the results for the nested function at the specified `GROUP BY time()` interval and then applies the `DERIVATIVE()` function to those results. -The `unit` argument is an integer followed by a [duration](///influxdb/version/reference/glossary/#duration) and it is optional. +The `unit` argument is an integer followed by a [duration](//influxdb/version/reference/glossary/#duration) and it is optional. If the query does not specify the `unit` the `unit` defaults to the `GROUP BY time()` interval. Note that this behavior is different from the [basic syntax's](#basic-syntax-1) default behavior. 
From f5df3cb6f06bc74305daf55e0f867650f2a758db Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 18 Aug 2025 10:28:09 -0500 Subject: [PATCH 080/179] fix(v2): broken link --- .../query-data/influxql/functions/transformations.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md b/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md index 7fbdd3a0a..48046fc66 100644 --- a/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md +++ b/content/shared/influxdb-v2/query-data/influxql/functions/transformations.md @@ -1609,7 +1609,7 @@ SELECT DERIVATIVE( ([ * | | // ]) [ , The advanced syntax requires a [`GROUP BY time()` clause](/influxdb/version/query-data/influxql/explore-data/group-by/#group-by-time-intervals) and a nested InfluxQL function. The query first calculates the results for the nested function at the specified `GROUP BY time()` interval and then applies the `DERIVATIVE()` function to those results. -The `unit` argument is an integer followed by a [duration](//influxdb/version/reference/glossary/#duration) and it is optional. +The `unit` argument is an integer followed by a [duration](/influxdb/version/reference/glossary/#duration) and it is optional. If the query does not specify the `unit` the `unit` defaults to the `GROUP BY time()` interval. Note that this behavior is different from the [basic syntax's](#basic-syntax-1) default behavior. 
From a8578bb0af0ff8b8d0fe961ca4e3bdb4cbc42ab1 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 18 Aug 2025 10:51:57 -0500 Subject: [PATCH 081/179] chore(ci): Removes old Cypress link checker test code --- cypress.config.js | 100 ------- cypress/e2e/content/article-links.cy.js | 370 ------------------------ cypress/e2e/content/example.cy.js | 0 cypress/support/link-cache.js | 215 -------------- cypress/support/link-reporter.js | 310 -------------------- cypress/support/run-e2e-specs.js | 54 +--- lefthook.yml | 10 - package.json | 9 - 8 files changed, 5 insertions(+), 1063 deletions(-) delete mode 100644 cypress/e2e/content/article-links.cy.js delete mode 100644 cypress/e2e/content/example.cy.js delete mode 100644 cypress/support/link-cache.js delete mode 100644 cypress/support/link-reporter.js diff --git a/cypress.config.js b/cypress.config.js index d7ffed8fc..5148f60ec 100644 --- a/cypress.config.js +++ b/cypress.config.js @@ -2,14 +2,6 @@ import { defineConfig } from 'cypress'; import { cwd as _cwd } from 'process'; import * as fs from 'fs'; import * as yaml from 'js-yaml'; -import { - BROKEN_LINKS_FILE, - FIRST_BROKEN_LINK_FILE, - initializeReport, - readBrokenLinksReport, - saveCacheStats, - saveValidationStrategy, -} from './cypress/support/link-reporter.js'; export default defineConfig({ e2e: { @@ -88,98 +80,6 @@ export default defineConfig({ } }, - // Broken links reporting tasks - initializeBrokenLinksReport() { - return initializeReport(); - }, - - // Special case domains are now handled directly in the test without additional reporting - // This task is kept for backward compatibility but doesn't do anything special - reportSpecialCaseLink(linkData) { - console.log( - `✅ Expected status code: ${linkData.url} (status: ${linkData.status}) is valid for this domain` - ); - return true; - }, - - reportBrokenLink(linkData) { - try { - // Validate link data - if (!linkData || !linkData.url || !linkData.page) { - console.error('Invalid link data 
provided'); - return false; - } - - // Read current report - const report = readBrokenLinksReport(); - - // Find or create entry for this page - let pageReport = report.find((r) => r.page === linkData.page); - if (!pageReport) { - pageReport = { page: linkData.page, links: [] }; - report.push(pageReport); - } - - // Check if link is already in the report to avoid duplicates - const isDuplicate = pageReport.links.some( - (link) => link.url === linkData.url && link.type === linkData.type - ); - - if (!isDuplicate) { - // Add the broken link to the page's report - pageReport.links.push({ - url: linkData.url, - status: linkData.status, - type: linkData.type, - linkText: linkData.linkText, - }); - - // Write updated report back to file - fs.writeFileSync( - BROKEN_LINKS_FILE, - JSON.stringify(report, null, 2) - ); - - // Store first broken link if not already recorded - const firstBrokenLinkExists = - fs.existsSync(FIRST_BROKEN_LINK_FILE) && - fs.readFileSync(FIRST_BROKEN_LINK_FILE, 'utf8').trim() !== ''; - - if (!firstBrokenLinkExists) { - // Store first broken link with complete information - const firstBrokenLink = { - url: linkData.url, - status: linkData.status, - type: linkData.type, - linkText: linkData.linkText, - page: linkData.page, - time: new Date().toISOString(), - }; - - fs.writeFileSync( - FIRST_BROKEN_LINK_FILE, - JSON.stringify(firstBrokenLink, null, 2) - ); - - console.error( - `🔴 FIRST BROKEN LINK: ${linkData.url} (${linkData.status}) - ${linkData.type} on page ${linkData.page}` - ); - } - - // Log the broken link immediately to console - console.error( - `❌ BROKEN LINK: ${linkData.url} (${linkData.status}) - ${linkData.type} on page ${linkData.page}` - ); - } - - return true; - } catch (error) { - console.error(`Error reporting broken link: ${error.message}`); - // Even if there's an error, we want to ensure the test knows there was a broken link - return true; - } - }, - // Cache and incremental validation tasks saveCacheStatistics(stats) { try { 
diff --git a/cypress/e2e/content/article-links.cy.js b/cypress/e2e/content/article-links.cy.js deleted file mode 100644 index 0ce8d4677..000000000 --- a/cypress/e2e/content/article-links.cy.js +++ /dev/null @@ -1,370 +0,0 @@ -/// - -describe('Article', () => { - let subjects = Cypress.env('test_subjects') - ? Cypress.env('test_subjects') - .split(',') - .filter((s) => s.trim() !== '') - : []; - - // Cache will be checked during test execution at the URL level - - // Always use HEAD for downloads to avoid timeouts - const useHeadForDownloads = true; - - // Set up initialization for tests - before(() => { - // Initialize the broken links report - cy.task('initializeBrokenLinksReport'); - - // Clean up expired cache entries - cy.task('cleanupCache').then((cleaned) => { - if (cleaned > 0) { - cy.log(`🧹 Cleaned up ${cleaned} expired cache entries`); - } - }); - }); - - // Display cache statistics after all tests complete - after(() => { - cy.task('getCacheStats').then((stats) => { - cy.log('📊 Link Validation Cache Statistics:'); - cy.log(` • Cache hits: ${stats.hits}`); - cy.log(` • Cache misses: ${stats.misses}`); - cy.log(` • New entries stored: ${stats.stores}`); - cy.log(` • Hit rate: ${stats.hitRate}`); - cy.log(` • Total validations: ${stats.total}`); - - if (stats.total > 0) { - const message = stats.hits > 0 - ? 
`✨ Cache optimization saved ${stats.hits} link validations` - : '🔄 No cache hits - all links were validated fresh'; - cy.log(message); - } - - // Save cache statistics for the reporter to display - cy.task('saveCacheStatsForReporter', { - hitRate: parseFloat(stats.hitRate.replace('%', '')), - cacheHits: stats.hits, - cacheMisses: stats.misses, - totalValidations: stats.total, - newEntriesStored: stats.stores, - cleanups: stats.cleanups - }); - }); - }); - - // Helper function to identify download links - function isDownloadLink(href) { - // Check for common download file extensions - const downloadExtensions = [ - '.pdf', - '.zip', - '.tar.gz', - '.tgz', - '.rar', - '.exe', - '.dmg', - '.pkg', - '.deb', - '.rpm', - '.xlsx', - '.csv', - '.doc', - '.docx', - '.ppt', - '.pptx', - ]; - - // Check for download domains or paths - const downloadDomains = ['dl.influxdata.com', 'downloads.influxdata.com']; - - // Check if URL contains a download extension - const hasDownloadExtension = downloadExtensions.some((ext) => - href.toLowerCase().endsWith(ext) - ); - - // Check if URL is from a download domain - const isFromDownloadDomain = downloadDomains.some((domain) => - href.toLowerCase().includes(domain) - ); - - // Return true if either condition is met - return hasDownloadExtension || isFromDownloadDomain; - } - - // Helper function for handling failed links - function handleFailedLink(url, status, type, redirectChain = '', linkText = '', pageUrl = '') { - // Report the broken link - cy.task('reportBrokenLink', { - url: url + redirectChain, - status, - type, - linkText, - page: pageUrl, - }); - - // Throw error for broken links - throw new Error( - `BROKEN ${type.toUpperCase()} LINK: ${url} (status: ${status})${redirectChain} on ${pageUrl}` - ); - } - - // Helper function to test a link with cache integration - function testLink(href, linkText = '', pageUrl) { - // Check cache first - return cy.task('isLinkCached', href).then((isCached) => { - if (isCached) { - cy.log(`✅ 
Cache hit: ${href}`); - return cy.task('getLinkCache', href).then((cachedResult) => { - if (cachedResult && cachedResult.result && cachedResult.result.status >= 400) { - // Cached result shows this link is broken - handleFailedLink(href, cachedResult.result.status, cachedResult.result.type || 'cached', '', linkText, pageUrl); - } - // For successful cached results, just return - no further action needed - }); - } else { - // Not cached, perform actual validation - return performLinkValidation(href, linkText, pageUrl); - } - }); - } - - // Helper function to perform actual link validation and cache the result - function performLinkValidation(href, linkText = '', pageUrl) { - // Common request options for both methods - const requestOptions = { - failOnStatusCode: true, - timeout: 15000, // Increased timeout for reliability - followRedirect: true, // Explicitly follow redirects - retryOnNetworkFailure: true, // Retry on network issues - retryOnStatusCodeFailure: true, // Retry on 5xx errors - }; - - - if (useHeadForDownloads && isDownloadLink(href)) { - cy.log(`** Testing download link with HEAD: ${href} **`); - return cy.request({ - method: 'HEAD', - url: href, - ...requestOptions, - }).then((response) => { - // Prepare result for caching - const result = { - status: response.status, - type: 'download', - timestamp: new Date().toISOString() - }; - - // Check final status after following any redirects - if (response.status >= 400) { - const redirectInfo = - response.redirects && response.redirects.length > 0 - ? 
` (redirected to: ${response.redirects.join(' -> ')})` - : ''; - - // Cache the failed result - cy.task('setLinkCache', { url: href, result }); - handleFailedLink(href, response.status, 'download', redirectInfo, linkText, pageUrl); - } else { - // Cache the successful result - cy.task('setLinkCache', { url: href, result }); - } - }); - } else { - cy.log(`** Testing link: ${href} **`); - return cy.request({ - url: href, - ...requestOptions, - }).then((response) => { - // Prepare result for caching - const result = { - status: response.status, - type: 'regular', - timestamp: new Date().toISOString() - }; - - if (response.status >= 400) { - const redirectInfo = - response.redirects && response.redirects.length > 0 - ? ` (redirected to: ${response.redirects.join(' -> ')})` - : ''; - - // Cache the failed result - cy.task('setLinkCache', { url: href, result }); - handleFailedLink(href, response.status, 'regular', redirectInfo, linkText, pageUrl); - } else { - // Cache the successful result - cy.task('setLinkCache', { url: href, result }); - } - }); - } - } - - // Test setup validation - it('Test Setup Validation', function () { - cy.log(`📋 Test Configuration:`); - cy.log(` • Test subjects: ${subjects.length}`); - cy.log(` • Cache: URL-level caching with 30-day TTL`); - cy.log(` • Link validation: Internal, anchor, and allowed external links`); - - cy.log('✅ Test setup validation completed'); - }); - - subjects.forEach((subject) => { - it(`${subject} has valid internal links`, function () { - - // Add error handling for page visit failures - cy.visit(`${subject}`, { timeout: 20000 }).then(() => { - cy.log(`✅ Successfully loaded page: ${subject}`); - }); - - // Test internal links - cy.get('article, .api-content').then(($article) => { - // Find links without failing the test if none are found - const $links = $article.find('a[href^="/"]'); - if ($links.length === 0) { - cy.log('No internal links found on this page'); - return; - } - - cy.log(`🔍 Testing ${$links.length} 
internal links on ${subject}`); - - // Now test each link - cy.wrap($links).each(($a) => { - const href = $a.attr('href'); - const linkText = $a.text().trim(); - - try { - testLink(href, linkText, subject); - } catch (error) { - cy.log(`❌ Error testing link ${href}: ${error.message}`); - throw error; // Re-throw to fail the test - } - }); - }); - }); - - it(`${subject} has valid anchor links`, function () { - - cy.visit(`${subject}`).then(() => { - cy.log(`✅ Successfully loaded page for anchor testing: ${subject}`); - }); - - // Define selectors for anchor links to ignore, such as behavior triggers - const ignoreLinks = ['.tabs a[href^="#"]', '.code-tabs a[href^="#"]']; - - const anchorSelector = - 'a[href^="#"]:not(' + ignoreLinks.join('):not(') + ')'; - - cy.get('article, .api-content').then(($article) => { - const $anchorLinks = $article.find(anchorSelector); - if ($anchorLinks.length === 0) { - cy.log('No anchor links found on this page'); - return; - } - - cy.log(`🔗 Testing ${$anchorLinks.length} anchor links on ${subject}`); - - cy.wrap($anchorLinks).each(($a) => { - const href = $a.prop('href'); - const linkText = $a.text().trim(); - - if (href && href.length > 1) { - // Get just the fragment part - const url = new URL(href); - const anchorId = url.hash.substring(1); // Remove the # character - - if (!anchorId) { - cy.log(`Skipping empty anchor in ${href}`); - return; - } - - // Use DOM to check if the element exists - cy.window().then((win) => { - const element = win.document.getElementById(anchorId); - if (!element) { - cy.task('reportBrokenLink', { - url: `#${anchorId}`, - status: 404, - type: 'anchor', - linkText, - page: subject, - }); - cy.log(`⚠️ Missing anchor target: #${anchorId}`); - } - }); - } - }); - }); - }); - - it(`${subject} has valid external links`, function () { - - // Check if we should skip external links entirely - if (Cypress.env('skipExternalLinks') === true) { - cy.log( - 'Skipping all external links as configured by 
skipExternalLinks' - ); - return; - } - - cy.visit(`${subject}`).then(() => { - cy.log( - `✅ Successfully loaded page for external link testing: ${subject}` - ); - }); - - // Define allowed external domains to test - const allowedExternalDomains = ['github.com', 'kapa.ai']; - - // Test external links - cy.get('article, .api-content').then(($article) => { - // Find links without failing the test if none are found - const $links = $article.find('a[href^="http"]'); - if ($links.length === 0) { - cy.log('No external links found on this page'); - return; - } - - cy.log(`🔍 Found ${$links.length} total external links on ${subject}`); - - // Filter links to only include allowed domains - const $allowedLinks = $links.filter((_, el) => { - const href = el.getAttribute('href'); - try { - const url = new URL(href); - return allowedExternalDomains.some( - (domain) => - url.hostname === domain || url.hostname.endsWith(`.${domain}`) - ); - } catch (urlError) { - cy.log(`⚠️ Invalid URL found: ${href}`); - return false; - } - }); - - if ($allowedLinks.length === 0) { - cy.log('No links to allowed external domains found on this page'); - cy.log(` • Allowed domains: ${allowedExternalDomains.join(', ')}`); - return; - } - - cy.log( - `🌐 Testing ${$allowedLinks.length} links to allowed external domains` - ); - cy.wrap($allowedLinks).each(($a) => { - const href = $a.attr('href'); - const linkText = $a.text().trim(); - - try { - testLink(href, linkText, subject); - } catch (error) { - cy.log(`❌ Error testing external link ${href}: ${error.message}`); - throw error; - } - }); - }); - }); - }); -}); diff --git a/cypress/e2e/content/example.cy.js b/cypress/e2e/content/example.cy.js deleted file mode 100644 index e69de29bb..000000000 diff --git a/cypress/support/link-cache.js b/cypress/support/link-cache.js deleted file mode 100644 index 1a54a6e41..000000000 --- a/cypress/support/link-cache.js +++ /dev/null @@ -1,215 +0,0 @@ -/** - * Link Cache Manager for Cypress Tests - * Manages caching 
of link validation results at the URL level - */ - -import fs from 'fs'; -import path from 'path'; -import crypto from 'crypto'; - -const CACHE_VERSION = 'v2'; -const CACHE_KEY_PREFIX = 'link-validation'; -const LOCAL_CACHE_DIR = path.join(process.cwd(), '.cache', 'link-validation'); - -/** - * Cache manager for individual link validation results - */ -export class LinkCacheManager { - constructor(options = {}) { - this.localCacheDir = options.localCacheDir || LOCAL_CACHE_DIR; - - // Configurable cache TTL - default 30 days - this.cacheTTLDays = - options.cacheTTLDays || parseInt(process.env.LINK_CACHE_TTL_DAYS) || 30; - this.maxAge = this.cacheTTLDays * 24 * 60 * 60 * 1000; - - this.ensureLocalCacheDir(); - - // Track cache statistics - this.stats = { - hits: 0, - misses: 0, - stores: 0, - cleanups: 0 - }; - } - - ensureLocalCacheDir() { - if (!fs.existsSync(this.localCacheDir)) { - fs.mkdirSync(this.localCacheDir, { recursive: true }); - } - } - - /** - * Generate cache key for a URL - * @param {string} url - The URL to cache - * @returns {string} Cache key - */ - generateCacheKey(url) { - const urlHash = crypto - .createHash('sha256') - .update(url) - .digest('hex') - .substring(0, 16); - return `${CACHE_KEY_PREFIX}-${CACHE_VERSION}-${urlHash}`; - } - - /** - * Get cache file path for a URL - * @param {string} url - The URL - * @returns {string} File path - */ - getCacheFilePath(url) { - const cacheKey = this.generateCacheKey(url); - return path.join(this.localCacheDir, `${cacheKey}.json`); - } - - /** - * Check if a URL's validation result is cached - * @param {string} url - The URL to check - * @returns {Object|null} Cached result or null - */ - get(url) { - const cacheFile = this.getCacheFilePath(url); - - if (!fs.existsSync(cacheFile)) { - this.stats.misses++; - return null; - } - - try { - const content = fs.readFileSync(cacheFile, 'utf8'); - const cached = JSON.parse(content); - - // TTL check - const age = Date.now() - new Date(cached.cachedAt).getTime(); 
- - if (age > this.maxAge) { - fs.unlinkSync(cacheFile); - this.stats.misses++; - this.stats.cleanups++; - return null; - } - - this.stats.hits++; - return cached; - } catch (error) { - // Clean up corrupted cache - try { - fs.unlinkSync(cacheFile); - this.stats.cleanups++; - } catch (cleanupError) { - // Ignoring cleanup errors as they are non-critical, but logging for visibility - console.warn(`Failed to clean up corrupted cache file: ${cleanupError.message}`); - } - this.stats.misses++; - return null; - } - } - - /** - * Store validation result for a URL - * @param {string} url - The URL - * @param {Object} result - Validation result - * @returns {boolean} True if successfully cached, false otherwise - */ - set(url, result) { - const cacheFile = this.getCacheFilePath(url); - - const cacheData = { - url, - result, - cachedAt: new Date().toISOString(), - ttl: new Date(Date.now() + this.maxAge).toISOString() - }; - - try { - fs.writeFileSync(cacheFile, JSON.stringify(cacheData, null, 2)); - this.stats.stores++; - return true; - } catch (error) { - console.warn(`Failed to cache validation result for ${url}: ${error.message}`); - return false; - } - } - - /** - * Check if a URL is cached and valid - * @param {string} url - The URL to check - * @returns {boolean} True if cached and valid - */ - isCached(url) { - return this.get(url) !== null; - } - - /** - * Get cache statistics - * @returns {Object} Cache statistics - */ - getStats() { - const total = this.stats.hits + this.stats.misses; - const hitRate = total > 0 ? 
(this.stats.hits / total * 100).toFixed(1) : 0; - - return { - ...this.stats, - total, - hitRate: `${hitRate}%` - }; - } - - /** - * Clean up expired cache entries - * @returns {number} Number of entries cleaned up - */ - cleanup() { - let cleaned = 0; - - try { - const files = fs.readdirSync(this.localCacheDir); - const cacheFiles = files.filter(file => - file.startsWith(CACHE_KEY_PREFIX) && file.endsWith('.json') - ); - - for (const file of cacheFiles) { - const filePath = path.join(this.localCacheDir, file); - - try { - const content = fs.readFileSync(filePath, 'utf8'); - const cached = JSON.parse(content); - - const age = Date.now() - new Date(cached.cachedAt).getTime(); - - if (age > this.maxAge) { - fs.unlinkSync(filePath); - cleaned++; - } - } catch (error) { - console.warn(`Failed to process cache file "${filePath}": ${error.message}`); - // Remove corrupted files - fs.unlinkSync(filePath); - cleaned++; - } - } - } catch (error) { - console.warn(`Cache cleanup failed: ${error.message}`); - } - - this.stats.cleanups += cleaned; - return cleaned; - } -} - -/** - * Cypress task helper to integrate cache with Cypress tasks - */ -export const createCypressCacheTasks = (options = {}) => { - const cache = new LinkCacheManager(options); - - return { - getLinkCache: (url) => cache.get(url), - setLinkCache: ({ url, result }) => cache.set(url, result), - isLinkCached: (url) => cache.isCached(url), - getCacheStats: () => cache.getStats(), - cleanupCache: () => cache.cleanup() - }; -}; \ No newline at end of file diff --git a/cypress/support/link-reporter.js b/cypress/support/link-reporter.js deleted file mode 100644 index fa514c7ef..000000000 --- a/cypress/support/link-reporter.js +++ /dev/null @@ -1,310 +0,0 @@ -/** - * Broken Links Reporter - * Handles collecting, storing, and reporting broken links found during tests - */ -import fs from 'fs'; - -export const BROKEN_LINKS_FILE = '/tmp/broken_links_report.json'; -export const FIRST_BROKEN_LINK_FILE = 
'/tmp/first_broken_link.json'; -const SOURCES_FILE = '/tmp/test_subjects_sources.json'; -const CACHE_STATS_FILE = '/tmp/cache_statistics.json'; -const VALIDATION_STRATEGY_FILE = '/tmp/validation_strategy.json'; - -/** - * Reads the broken links report from the file system - * @returns {Array} Parsed report data or empty array if file doesn't exist - */ -export function readBrokenLinksReport() { - if (!fs.existsSync(BROKEN_LINKS_FILE)) { - return []; - } - - try { - const fileContent = fs.readFileSync(BROKEN_LINKS_FILE, 'utf8'); - - // Check if the file is empty or contains only an empty array - if (!fileContent || fileContent.trim() === '' || fileContent === '[]') { - return []; - } - - // Try to parse the JSON content - try { - const parsedContent = JSON.parse(fileContent); - - // Ensure the parsed content is an array - if (!Array.isArray(parsedContent)) { - console.error('Broken links report is not an array'); - return []; - } - - return parsedContent; - } catch (parseErr) { - console.error( - `Error parsing broken links report JSON: ${parseErr.message}` - ); - return []; - } - } catch (err) { - console.error(`Error reading broken links report: ${err.message}`); - return []; - } -} - -/** - * Reads the sources mapping file - * @returns {Object} A mapping from URLs to their source files - */ -function readSourcesMapping() { - try { - if (fs.existsSync(SOURCES_FILE)) { - const sourcesData = JSON.parse(fs.readFileSync(SOURCES_FILE, 'utf8')); - return sourcesData.reduce((acc, item) => { - if (item.url && item.source) { - acc[item.url] = item.source; - } - return acc; - }, {}); - } - } catch (err) { - console.warn(`Warning: Could not read sources mapping: ${err.message}`); - } - return {}; -} - -/** - * Read cache statistics from file - * @returns {Object|null} Cache statistics or null if not found - */ -function readCacheStats() { - try { - if (fs.existsSync(CACHE_STATS_FILE)) { - const content = fs.readFileSync(CACHE_STATS_FILE, 'utf8'); - return 
JSON.parse(content); - } - } catch (err) { - console.warn(`Warning: Could not read cache stats: ${err.message}`); - } - return null; -} - -/** - * Read validation strategy from file - * @returns {Object|null} Validation strategy or null if not found - */ -function readValidationStrategy() { - try { - if (fs.existsSync(VALIDATION_STRATEGY_FILE)) { - const content = fs.readFileSync(VALIDATION_STRATEGY_FILE, 'utf8'); - return JSON.parse(content); - } - } catch (err) { - console.warn(`Warning: Could not read validation strategy: ${err.message}`); - } - return null; -} - -/** - * Save cache statistics for reporting - * @param {Object} stats - Cache statistics to save - */ -export function saveCacheStats(stats) { - try { - fs.writeFileSync(CACHE_STATS_FILE, JSON.stringify(stats, null, 2)); - } catch (err) { - console.warn(`Warning: Could not save cache stats: ${err.message}`); - } -} - -/** - * Save validation strategy for reporting - * @param {Object} strategy - Validation strategy to save - */ -export function saveValidationStrategy(strategy) { - try { - fs.writeFileSync( - VALIDATION_STRATEGY_FILE, - JSON.stringify(strategy, null, 2) - ); - } catch (err) { - console.warn(`Warning: Could not save validation strategy: ${err.message}`); - } -} - -/** - * Formats and displays the broken links report to the console - * @param {Array} brokenLinksReport - The report data to display - * @returns {number} The total number of broken links found - */ -export function displayBrokenLinksReport(brokenLinksReport = null) { - // If no report provided, read from file - if (!brokenLinksReport) { - brokenLinksReport = readBrokenLinksReport(); - } - - // Read cache statistics and validation strategy - const cacheStats = readCacheStats(); - const validationStrategy = readValidationStrategy(); - - // Display cache performance first - if (cacheStats) { - console.log('\n📊 Link Validation Cache Performance:'); - console.log('======================================='); - console.log(`Cache hit 
rate: ${cacheStats.hitRate}%`); - console.log(`Cache hits: ${cacheStats.cacheHits}`); - console.log(`Cache misses: ${cacheStats.cacheMisses}`); - console.log(`Total validations: ${cacheStats.totalValidations || cacheStats.cacheHits + cacheStats.cacheMisses}`); - console.log(`New entries stored: ${cacheStats.newEntriesStored || 0}`); - - if (cacheStats.cleanups > 0) { - console.log(`Expired entries cleaned: ${cacheStats.cleanups}`); - } - - if (cacheStats.totalValidations > 0) { - const message = cacheStats.cacheHits > 0 - ? `✨ Cache optimization saved ${cacheStats.cacheHits} link validations` - : '🔄 No cache hits - all links were validated fresh'; - console.log(message); - } - - if (validationStrategy) { - console.log(`Files analyzed: ${validationStrategy.total}`); - console.log( - `Links needing validation: ${validationStrategy.newLinks.length}` - ); - } - console.log(''); // Add spacing after cache stats - } - - // Check both the report and first broken link file to determine if we have broken links - const firstBrokenLink = readFirstBrokenLink(); - - // Only report "no broken links" if both checks pass - if ( - (!brokenLinksReport || brokenLinksReport.length === 0) && - !firstBrokenLink - ) { - console.log('\n✅ No broken links detected in the validation report'); - return 0; - } - - // Special case: check if the single broken link file could be missing from the report - if ( - firstBrokenLink && - (!brokenLinksReport || brokenLinksReport.length === 0) - ) { - console.error( - '\n⚠️ Warning: First broken link record exists but no links in the report.' 
- ); - console.error('This could indicate a reporting issue.'); - } - - // Load sources mapping - const sourcesMapping = readSourcesMapping(); - - // Print a prominent header - console.error('\n\n' + '='.repeat(80)); - console.error(' 🚨 BROKEN LINKS DETECTED 🚨 '); - console.error('='.repeat(80)); - - // Show first failing link if available - if (firstBrokenLink) { - console.error('\n🔴 FIRST FAILING LINK:'); - console.error(` URL: ${firstBrokenLink.url}`); - console.error(` Status: ${firstBrokenLink.status}`); - console.error(` Type: ${firstBrokenLink.type}`); - console.error(` Page: ${firstBrokenLink.page}`); - if (firstBrokenLink.linkText) { - console.error( - ` Link text: "${firstBrokenLink.linkText.substring(0, 50)}${firstBrokenLink.linkText.length > 50 ? '...' : ''}"` - ); - } - console.error('-'.repeat(40)); - } - - let totalBrokenLinks = 0; - - brokenLinksReport.forEach((report) => { - console.error(`\n📄 PAGE: ${report.page}`); - - // Add source information if available - const source = sourcesMapping[report.page]; - if (source) { - console.error(` PAGE CONTENT SOURCE: ${source}`); - } - - console.error('-'.repeat(40)); - - report.links.forEach((link) => { - console.error(`• ${link.url}`); - console.error(` - Status: ${link.status}`); - console.error(` - Type: ${link.type}`); - if (link.linkText) { - console.error( - ` - Link text: "${link.linkText.substring(0, 50)}${link.linkText.length > 50 ? '...' 
: ''}"` - ); - } - console.error(''); - totalBrokenLinks++; - }); - }); - - // Print a prominent summary footer - console.error('='.repeat(80)); - console.error(`📊 TOTAL BROKEN LINKS FOUND: ${totalBrokenLinks}`); - console.error('='.repeat(80) + '\n'); - - return totalBrokenLinks; -} - -/** - * Reads the first broken link info from the file system - * @returns {Object|null} First broken link data or null if not found - */ -export function readFirstBrokenLink() { - if (!fs.existsSync(FIRST_BROKEN_LINK_FILE)) { - return null; - } - - try { - const fileContent = fs.readFileSync(FIRST_BROKEN_LINK_FILE, 'utf8'); - - // Check if the file is empty or contains whitespace only - if (!fileContent || fileContent.trim() === '') { - return null; - } - - // Try to parse the JSON content - try { - return JSON.parse(fileContent); - } catch (parseErr) { - console.error( - `Error parsing first broken link JSON: ${parseErr.message}` - ); - return null; - } - } catch (err) { - console.error(`Error reading first broken link: ${err.message}`); - return null; - } -} - -/** - * Initialize the broken links report files - * @returns {boolean} True if initialization was successful - */ -export function initializeReport() { - try { - // Create an empty array for the broken links report - fs.writeFileSync(BROKEN_LINKS_FILE, '[]', 'utf8'); - - // Reset the first broken link file by creating an empty file - // Using empty string as a clear indicator that no broken link has been recorded yet - fs.writeFileSync(FIRST_BROKEN_LINK_FILE, '', 'utf8'); - - console.debug('🔄 Initialized broken links reporting system'); - return true; - } catch (err) { - console.error(`Error initializing broken links report: ${err.message}`); - return false; - } -} diff --git a/cypress/support/run-e2e-specs.js b/cypress/support/run-e2e-specs.js index d39dfb4a2..71f1616fa 100644 --- a/cypress/support/run-e2e-specs.js +++ b/cypress/support/run-e2e-specs.js @@ -2,34 +2,10 @@ * InfluxData Documentation E2E Test Runner * * 
This script automates running Cypress end-to-end tests for the InfluxData documentation site. - * It handles starting a local Hugo server, mapping content files to their URLs, running Cypress tests, + * It handles starting a local Hugo server, mapping content files to their URLs, and running Cypress tests, * and reporting broken links. * - * Usage: node run-e2e-specs.js [file paths...] [--spec test // Display broken links report - const brokenLinksCount = displayBrokenLinksReport(); - - // Check if we might have special case failures - const hasSpecialCaseFailures = - results && - results.totalFailed > 0 && - brokenLinksCount === 0; - - if (hasSpecialCaseFailures) { - console.warn( - `ℹ️ Note: Tests failed (${results.totalFailed}) but no broken links were reported. This may be due to special case URLs (like Reddit) that return expected status codes.` - ); - } - - if ( - (results && results.totalFailed && results.totalFailed > 0 && !hasSpecialCaseFailures) || - brokenLinksCount > 0 - ) { - console.error( - `⚠️ Tests failed: ${results.totalFailed || 0} test(s) failed, ${brokenLinksCount || 0} broken links found` - ); - cypressFailed = true; - exitCode = 1; * - * Example: node run-e2e-specs.js content/influxdb/v2/write-data.md --spec cypress/e2e/content/article-links.cy.js + * Usage: node run-e2e-specs.js [file paths...] [--spec test specs...] 
*/ import { spawn } from 'child_process'; @@ -39,7 +15,6 @@ import path from 'path'; import cypress from 'cypress'; import net from 'net'; import { Buffer } from 'buffer'; -import { displayBrokenLinksReport, initializeReport } from './link-reporter.js'; import { HUGO_ENVIRONMENT, HUGO_PORT, @@ -119,7 +94,7 @@ async function main() { let exitCode = 0; let hugoStarted = false; -// (Lines 124-126 removed; no replacement needed) + // (Lines 124-126 removed; no replacement needed) // Add this signal handler to ensure cleanup on unexpected termination const cleanupAndExit = (code = 1) => { @@ -364,10 +339,6 @@ async function main() { // 4. Run Cypress tests let cypressFailed = false; try { - // Initialize/clear broken links report before running tests - console.log('Initializing broken links report...'); - initializeReport(); - console.log(`Running Cypress tests for ${urlList.length} URLs...`); // Add CI-specific configuration @@ -426,19 +397,13 @@ async function main() { clearInterval(hugoHealthCheckInterval); } - // Process broken links report - const brokenLinksCount = displayBrokenLinksReport(); - // Determine why tests failed const testFailureCount = results?.totalFailed || 0; - if (testFailureCount > 0 && brokenLinksCount === 0) { + if (testFailureCount > 0) { console.warn( `ℹ️ Note: ${testFailureCount} test(s) failed but no broken links were detected in the report.` ); - console.warn( - ' This usually indicates test errors unrelated to link validation.' 
- ); // Provide detailed failure analysis if (results) { @@ -531,14 +496,8 @@ async function main() { // but we'll still report other test failures cypressFailed = true; exitCode = 1; - } else if (brokenLinksCount > 0) { - console.error( - `⚠️ Tests failed: ${brokenLinksCount} broken link(s) detected` - ); - cypressFailed = true; - exitCode = 1; } else if (results) { - console.log('✅ Tests completed successfully'); + console.log('✅ e2e tests completed successfully'); } } catch (err) { console.error(`❌ Cypress execution error: ${err.message}`); @@ -609,9 +568,6 @@ async function main() { console.error(' • Check if test URLs are accessible manually'); console.error(' • Review Cypress screenshots/videos if available'); - // Still try to display broken links report if available - displayBrokenLinksReport(); - cypressFailed = true; exitCode = 1; } finally { diff --git a/lefthook.yml b/lefthook.yml index 68face524..67db3a771 100644 --- a/lefthook.yml +++ b/lefthook.yml @@ -111,16 +111,6 @@ pre-push: node cypress/support/run-e2e-specs.js --spec "cypress/e2e/content/article-links.cy.js" content/example.md exit $? - # Link validation runs in GitHub actions. - # You can still run it locally for development. - # e2e-links: - # tags: test,links - # glob: 'content/*.{md,html}' - # run: | - # echo "Running link checker for: {staged_files}" - # yarn test:links {staged_files} - # exit $? 
- # Manage Docker containers prune-legacy-containers: priority: 1 diff --git a/package.json b/package.json index 4dfb14b81..fc09f72a5 100644 --- a/package.json +++ b/package.json @@ -55,15 +55,6 @@ "test:codeblocks:v2": "docker compose run --rm --name v2-pytest v2-pytest", "test:codeblocks:stop-monitors": "./test/scripts/monitor-tests.sh stop cloud-dedicated-pytest && ./test/scripts/monitor-tests.sh stop clustered-pytest", "test:e2e": "node cypress/support/run-e2e-specs.js", - "test:links": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\"", - "test:links:v1": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/influxdb/{v1,enterprise_influxdb}/**/*.{md,html}", - "test:links:v2": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/influxdb/{cloud,v2}/**/*.{md,html}", - "test:links:v3": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/influxdb3/**/*.{md,html}", - "test:links:chronograf": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/chronograf/**/*.{md,html}", - "test:links:kapacitor": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/kapacitor/**/*.{md,html}", - "test:links:telegraf": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/telegraf/**/*.{md,html}", - "test:links:shared": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/shared/**/*.{md,html}", - "test:links:api-docs": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" 
/influxdb3/core/api/,/influxdb3/enterprise/api/,/influxdb3/cloud-dedicated/api/,/influxdb3/cloud-dedicated/api/v1/,/influxdb/cloud-dedicated/api/v1/,/influxdb/cloud-dedicated/api/management/,/influxdb3/cloud-dedicated/api/management/", "test:shortcode-examples": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/example.md", "audit:cli": "node ./helper-scripts/influxdb3-monolith/audit-cli-documentation.js both local", "audit:cli:3core": "node ./helper-scripts/influxdb3-monolith/audit-cli-documentation.js core local", From 9ea4acfb2b64bf8a08db5af64a6e281867446922 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 12 Aug 2025 18:12:45 -0500 Subject: [PATCH 082/179] fix(clustered): clarify compactor scaling guidance for CPU and memory Addresses customer confusion where scaling CPU alone doesn't improve compactor performance. Compactor concurrency scales based on memory allocation, not CPU count, so both resources should be scaled together. 
Closes influxdata/DAR#514 add related links --- content/influxdb3/clustered/admin/scale-cluster.md | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/content/influxdb3/clustered/admin/scale-cluster.md b/content/influxdb3/clustered/admin/scale-cluster.md index 0c8b2d9b2..bd8339718 100644 --- a/content/influxdb3/clustered/admin/scale-cluster.md +++ b/content/influxdb3/clustered/admin/scale-cluster.md @@ -8,9 +8,11 @@ menu: parent: Administer InfluxDB Clustered name: Scale your cluster weight: 207 -influxdb3/clustered/tags: [scale] +influxdb3/clustered/tags: [scale, performance, Kubernetes] related: - /influxdb3/clustered/reference/internals/storage-engine/ + - /influxdb3/clustered/write-data/best-practices/data-lifecycle/ + - /influxdb3/clustered/query-data/troubleshoot-and-optimize/optimize-queries/ - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits, Kubernetes resource requests and limits --- @@ -559,11 +561,14 @@ concurrency demands or reaches the hardware limits of your underlying nodes. ### Compactor -- **Recommended**: Maintain **1 Compactor pod** and use [vertical scaling](#vertical-scaling) (especially -increasing the available CPU) for the Compactor. +- **Recommended**: Maintain **1 Compactor pod** and use [vertical scaling](#vertical-scaling) for the Compactor. +Scale CPU and memory resources together, as compactor concurrency settings scale based on memory, not CPU count. - Because compaction is a compute-heavy process, horizontal scaling increases compaction throughput, but not as efficiently as vertical scaling. +> [!Important] +> When scaling the Compactor, scale CPU and memory resources together. 
+ ### Garbage collector The [Garbage collector](/influxdb3/clustered/reference/internals/storage-engine/#garbage-collector) is a lightweight process that typically doesn't require From a21c06bb4f663fee53d74fe2999117160f7623f2 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 18 Aug 2025 15:16:38 -0500 Subject: [PATCH 083/179] fix(v2): OSS replication:- Fix (simplify) list formatting; remove nested lists.- Convert numbered list to numbered headers to replace nested lists- Add additional headers to show alternatives- Specify Enterprise v1- Update callouts --- .../write-data/replication/replicate-data.md | 2 +- .../write-data/replication/replicate-data.md | 302 ++++++++++-------- 2 files changed, 174 insertions(+), 130 deletions(-) diff --git a/content/influxdb/cloud/write-data/replication/replicate-data.md b/content/influxdb/cloud/write-data/replication/replicate-data.md index 5389bd4d4..1d62daf3e 100644 --- a/content/influxdb/cloud/write-data/replication/replicate-data.md +++ b/content/influxdb/cloud/write-data/replication/replicate-data.md @@ -16,4 +16,4 @@ source: /shared/influxdb-v2/write-data/replication/replicate-data.md --- +// SOURCE content/shared/influxdb-v2/write-data/replication/replicate-data.md --> diff --git a/content/shared/influxdb-v2/write-data/replication/replicate-data.md b/content/shared/influxdb-v2/write-data/replication/replicate-data.md index 8c05a2fe0..90b23280e 100644 --- a/content/shared/influxdb-v2/write-data/replication/replicate-data.md +++ b/content/shared/influxdb-v2/write-data/replication/replicate-data.md @@ -1,9 +1,9 @@ Use InfluxDB replication streams (InfluxDB Edge Data Replication) to replicate the incoming data of select buckets to one or more buckets on a remote -InfluxDB OSS, InfluxDB Cloud, or InfluxDB Enterprise instance. +InfluxDB OSS, InfluxDB Cloud, or InfluxDB Enterprise v1 instance. -Replicate data from InfluxDB OSS to InfluxDB Cloud, InfluxDB OSS, or InfluxDB Enterprise. 
+Replicate data from InfluxDB OSS to InfluxDB Cloud, InfluxDB OSS, or InfluxDB Enterprise v1. - [Configure a replication stream](#configure-a-replication-stream) - [Replicate downsampled or processed data](#replicate-downsampled-or-processed-data) @@ -17,10 +17,9 @@ Use the [`influx` CLI](/influxdb/version/tools/influx-cli/) or the [InfluxDB {{< current-version >}} API](/influxdb/version/reference/api/) to configure a replication stream. -{{% note %}} -To replicate data to InfluxDB OSS or InfluxDB Enterprise, adjust the -remote connection values accordingly. -{{% /note %}} +> [!Note] +> To replicate data to InfluxDB OSS or InfluxDB Enterprise v1, adjust the +> remote connection values accordingly. {{< tabs-wrapper >}} {{% tabs %}} @@ -30,156 +29,202 @@ remote connection values accordingly. {{% tab-content %}} +### Step 1: Create or find a remote connection -1. In your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS instance, use - the `influx remote create` command to create a remote connection to replicate data to. 
+- [Create a remote connection](#create-a-remote-connection-cli) +- [Use an existing remote connection](#use-an-existing-remote-connection-cli) - **Provide the following:** +#### Create a remote connection (CLI) - - Remote connection name - {{% show-in "v2" %}}- Remote InfluxDB instance URL{{% /show-in %}} - {{% show-in "v2" %}}- Remote InfluxDB API token _(API token must have write access to the target bucket)_{{% /show-in %}} - {{% show-in "v2" %}}- Remote InfluxDB organization ID{{% /show-in %}} - {{% show-in "cloud,cloud-serverless" %}}- [InfluxDB Cloud region URL](/influxdb/cloud/reference/regions/){{% /show-in %}} - {{% show-in "cloud,cloud-serverless" %}}- InfluxDB Cloud API token _(API token must have write access to the target bucket)_{{% /show-in %}} - {{% show-in "cloud,cloud-serverless" %}}- InfluxDB Cloud organization ID{{% /show-in %}} +In your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS instance, use +the `influx remote create` command and provide the following arguments for the remote instance: - ```sh - influx remote create \ - --name example-remote-name \ - --remote-url https://cloud2.influxdata.com \ - --remote-api-token mYsuP3r5Ecr37t0k3n \ - --remote-org-id 00xoXXoxXX00 - ``` +{{% show-in "v2" %}} +- Remote connection name +- Remote InfluxDB instance URL +- Remote InfluxDB API token _(API token must have write access to the target bucket)_ +- Remote InfluxDB organization ID +{{% /show-in %}} +{{% show-in "cloud,cloud-serverless" %}} +- Remote connection name +- [InfluxDB Cloud region URL](/influxdb/cloud/reference/regions/) +- InfluxDB Cloud API token _(API token must have write access to the target bucket)_ +- InfluxDB Cloud organization ID +{{% /show-in %}} - If you already have remote InfluxDB connections configured, you can use an existing connection. To view existing connections, run `influx remote list`. 
+ ```sh + influx remote create \ + --name example-remote-name \ + --remote-url https://cloud2.influxdata.com \ + --remote-api-token mYsuP3r5Ecr37t0k3n \ + --remote-org-id 00xoXXoxXX00 + ``` -2. In your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS instance, use the - `influx replication create` command to create a replication stream. +#### Use an existing remote connection (CLI) + +Alternatively, you can use an existing connection that you have already configured. +To retrieve existing connections, run `influx remote list`. + +### Step 2: Create a replication stream (CLI) + +In your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS instance, use the +`influx replication create` command and provide the following arguments: - **Provide the following:** +{{% show-in "v2" %}} +- Replication stream name +- Remote connection ID (created in the previous step) +- Local bucket ID to replicate writes from +- Remote bucket name or ID to replicate writes to. If replicating to **InfluxDB Enterprise v1**, use the `db-name/rp-name` bucket name syntax.{{% /show-in %}} +{{% show-in "cloud,cloud-serverless" %}} +- Replication stream name +- Remote connection ID (created in the previous step) +- InfluxDB OSS bucket ID to replicate writes from +- InfluxDB Cloud bucket ID to replicate writes to +{{% /show-in %}} - - Replication stream name - {{% show-in "v2" %}}- Remote connection ID{{% /show-in %}} - {{% show-in "v2" %}}- Local bucket ID to replicate writes from{{% /show-in %}} - {{% show-in "v2" %}}- Remote bucket name or ID to replicate writes to. 
If replicating to **InfluxDB Enterprise**, use the `db-name/rp-name` bucket name syntax.{{% /show-in %}} - {{% show-in "cloud,cloud-serverless" %}}- Remote connection ID{{% /show-in %}} - {{% show-in "cloud,cloud-serverless" %}}- InfluxDB OSS bucket ID to replicate writes from{{% /show-in %}} - {{% show-in "cloud,cloud-serverless" %}}- InfluxDB Cloud bucket ID to replicate writes to{{% /show-in %}} +```sh +influx replication create \ + --name REPLICATION_STREAM_NAME \ + --remote-id REPLICATION_REMOTE_ID \ + --local-bucket-id INFLUX_BUCKET_ID \ + --remote-bucket REMOTE_INFLUX_BUCKET_NAME +``` - - ```sh - influx replication create \ - --name REPLICATION_STREAM_NAME \ - --remote-id REPLICATION_REMOTE_ID \ - --local-bucket-id INFLUX_BUCKET_ID \ - --remote-bucket REMOTE_INFLUX_BUCKET_NAME - ``` - -Once a replication stream is created, InfluxDB {{% show-in "v2" %}}OSS{{% /show-in %}} -will replicate all writes to the specified bucket to the {{% show-in "v2" %}}remote {{% /show-in %}} +After you create the replication stream, InfluxDB {{% show-in "v2" %}}OSS{{% /show-in %}} +replicates all writes to the specified local bucket to the {{% show-in "v2" %}}remote {{% /show-in %}} InfluxDB {{% show-in "cloud,cloud-serverless" %}}Cloud {{% /show-in %}}bucket. Use the `influx replication list` command to view information such as the current queue size, max queue size, and latest status code. - {{% /tab-content %}} {{% tab-content %}} -1. Send a `POST` request to your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS `/api/v2/remotes` endpoint to create a remote connection to replicate data to. 
+### Step 1: Create or find a remote connection (API) - {{< keep-url >}} - {{< api-endpoint endpoint="localhost:8086/api/v2/remotes" method="POST" api-ref="/influxdb/version/api/#operation/PostRemoteConnection" >}} +- [Create a remote connection](#create-a-remote-connection-api) +- [Use an existing remote connection](#use-an-existing-remote-connection-api) - Include the following in your request: +#### Create a remote connection (API) - - **Request method:** `POST` - - **Headers:** - - **Authorization:** `Token` scheme with your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS [API token](/influxdb/version/admin/tokens/) - - **Content-type:** `application/json` - - **Request body:** JSON object with the following fields: - {{< req type="key" >}} - - {{< req "\*" >}} **allowInsecureTLS:** All insecure TLS connections - - **description:** Remote description - - {{< req "\*" >}} **name:** Remote connection name - - {{< req "\*" >}} **orgID:** {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS organization ID - {{% show-in "v2" %}}- {{< req "\*" >}} **remoteAPIToken:** Remote InfluxDB API token _(API token must have write access to the target bucket)_{{% /show-in %}} - {{% show-in "v2" %}}- {{< req "\*" >}} **remoteOrgID:** Remote InfluxDB organization ID{{% /show-in %}} - {{% show-in "v2" %}}- {{< req "\*" >}} **remoteURL:** Remote InfluxDB instance URL{{% /show-in %}} - {{% show-in "cloud,cloud-serverless" %}}- {{< req "\*" >}} **remoteAPIToken:** InfluxDB Cloud API token _(API token must have write access to the target bucket)_{{% /show-in %}} - {{% show-in "cloud,cloud-serverless" %}}- {{< req "\*" >}} **remoteOrgID:** InfluxDB Cloud organization ID{{% /show-in %}} - {{% show-in "cloud,cloud-serverless" %}}- {{< req "\*" >}} **remoteURL:** [InfluxDB Cloud region URL](/influxdb/cloud/reference/regions/){{% /show-in %}} +To create a remote connection to replicate data to, +send a `POST` request to your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS 
`/api/v2/remotes` endpoint: - {{< keep-url >}} - ```sh - curl --request POST http://localhost:8086/api/v2/remotes \ - --header 'Authorization: Token INFLUX_OSS_TOKEN' \ - --data '{ - "allowInsecureTLS": false, - "description": "Example remote description", - "name": "Example remote name", - "orgID": "INFLUX_OSS_ORG_ID", - "remoteAPIToken": "REMOTE_INFLUX_TOKEN", - "remoteOrgID": "REMOTE_INFLUX_ORG_ID", - "remoteURL": "https://cloud2.influxdata.com" - }' - ``` +{{< keep-url >}} +{{< api-endpoint endpoint="localhost:8086/api/v2/remotes" method="POST" api-ref="/influxdb/version/api/#operation/PostRemoteConnection" >}} - If you already have remote InfluxDB connections configured, you can use an - existing connection. To view existing connections, use the `/api/v2/remotes` - endpoint with the `GET` request method. +Include the following parameters in your request: - {{< keep-url >}} - {{< api-endpoint endpoint="localhost:8086/api/v2/remotes" method="GET" api-ref="/influxdb/version/api/#operation/GetRemoteConnections" >}} +- **Request method:** `POST` +- **Headers:** + - **Authorization:** `Token` scheme with your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS [API token](/influxdb/version/admin/tokens/) + - **Content-type:** `application/json` +{{% show-in "v2" %}} +- **Request body:** JSON object with the following fields: + {{< req type="key" >}} + - {{< req "\*" >}} **allowInsecureTLS:** All insecure TLS connections + - **description:** Remote description + - {{< req "\*" >}} **name:** Remote connection name + - {{< req "\*" >}} **orgID:** {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS organization ID + - {{< req "\*" >}} **remoteAPIToken:** Remote InfluxDB API token _(API token must have write access to the target bucket)_ + - {{< req "\*" >}} **remoteOrgID:** Remote InfluxDB organization ID + - {{< req "\*" >}} **remoteURL:** Remote InfluxDB instance URL +{{% /show-in %}} +{{% show-in "cloud,cloud-serverless" %}} +- **Request body:** JSON object 
with the following fields: + {{< req type="key" >}} + - {{< req "\*" >}} **allowInsecureTLS:** All insecure TLS connections + - **description:** Remote description + - {{< req "\*" >}} **name:** Remote connection name + - {{< req "\*" >}} **orgID:** {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS organization ID + - {{< req "\*" >}} **remoteAPIToken:** InfluxDB Cloud API token _(API token must have write access to the target bucket)_ + - {{< req "\*" >}} **remoteOrgID:** InfluxDB Cloud organization ID + - {{< req "\*" >}} **remoteURL:** [InfluxDB Cloud region URL](/influxdb/cloud/reference/regions/) +{{% /show-in %}} - Include the following in your request: +{{< keep-url >}} +```sh +curl --request POST http://localhost:8086/api/v2/remotes \ + --header 'Authorization: Token INFLUX_OSS_TOKEN' \ + --data '{ + "allowInsecureTLS": false, + "description": "Example remote description", + "name": "Example remote name", + "orgID": "INFLUX_OSS_ORG_ID", + "remoteAPIToken": "REMOTE_INFLUX_TOKEN", + "remoteOrgID": "REMOTE_INFLUX_ORG_ID", + "remoteURL": "https://cloud2.influxdata.com" + }' +``` - - **Request method:** `GET` - - **Headers:** - - **Authorization:** `Token` scheme with your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS [API token](/influxdb/version/admin/tokens/) - - **Query parameters:** - - **orgID:** {{% show-in "v2" %}}Local{{% /show-in %}} InfluxDB OSS organization ID +#### Use an existing remote connection - {{< keep-url >}} - ```sh - curl --request GET \ - http://localhost:8086/api/v2/remotes?orgID=INFLUX_OSS_ORG_ID \ - --header 'Authorization: Token INFLUX_OSS_TOKEN' \ - ``` +Alternatively, you can use an +existing connection that you have already configured. +To retrieve existing connections, use the `/api/v2/remotes` +endpoint with the `GET` request method: -2. Send a `POST` request to your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS - `/api/v2/replications` endpoint to create a replication stream. 
+{{< keep-url >}} +{{< api-endpoint endpoint="localhost:8086/api/v2/remotes" method="GET" api-ref="/influxdb/version/api/#operation/GetRemoteConnections" >}} - {{< keep-url >}} - {{< api-endpoint endpoint="localhost:8086/api/v2/remotes" method="POST" api-ref="/influxdb/version/api/#operation/PostRemoteConnection" >}} - - Include the following in your request: +Include the following parameters in your request: - - **Request method:** `POST` - - **Headers:** - - **Authorization:** `Token` scheme with your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS [API token](/influxdb/version/admin/tokens/) - - **Content-type:** `application/json` - - **Request body:** JSON object with the following fields: - {{< req type="key" >}} - - **dropNonRetryableData:** Drop data when a non-retryable error is encountered. - - {{< req "\*" >}} **localBucketID:** {{% show-in "v2" %}}Local{{% /show-in %}} InfluxDB OSS bucket ID to replicate writes from. - - {{< req "\*" >}} **maxAgeSeconds:** Maximum age of data in seconds before it is dropped (default is `604800`, must be greater than or equal to `0`). - - {{< req "\*" >}} **maxQueueSizeBytes:** Maximum replication queue size in bytes (default is `67108860`, must be greater than or equal to `33554430`). - - {{< req "\*" >}} **name:** Replication stream name. - - {{< req "\*" >}} **orgID:** {{% show-in "v2" %}}Local{{% /show-in %}} InfluxDB OSS organization ID. - {{% show-in "v2" %}}- {{< req "\*" >}} **remoteBucketID:** Remote bucket ID to replicate writes to.{{% /show-in %}} - {{% show-in "v2" %}}- {{< req "\*" >}} **remoteBucketName:** Remote bucket name to replicate writes to. 
If replicating to **InfluxDB Enterprise**, use the `db-name/rp-name` bucket name syntax.{{% /show-in %}} - {{% show-in "cloud,cloud-serverless" %}}- {{< req "\*" >}} **remoteBucketID:** InfluxDB Cloud bucket ID to replicate writes to.{{% /show-in %}} - {{% show-in "cloud,cloud-serverless" %}}- {{< req "\*" >}} **remoteBucketName:** InfluxDB Cloud bucket name to replicate writes to.{{% /show-in %}} - - {{< req "\*" >}} **remoteID:** Remote connection ID +- **Headers:** + - **Authorization:** `Token` scheme with your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS [API token](/influxdb/version/admin/tokens/) +- **Query parameters:** + - **orgID:** {{% show-in "v2" %}}Local{{% /show-in %}} InfluxDB OSS organization ID - {{% note %}} -`remoteBucketID` and `remoteBucketName` are mutually exclusive. -{{% show-in "v2" %}}If replicating to **InfluxDB Enterprise**, use `remoteBucketName` with the `db-name/rp-name` bucket name syntax.{{% /show-in %}} - {{% /note %}} +{{< keep-url >}} +```sh +curl --request GET \ + http://localhost:8086/api/v2/remotes?orgID=INFLUX_OSS_ORG_ID \ + --header 'Authorization: Token INFLUX_OSS_TOKEN' \ +``` + +### Step 2: Create a replication stream (API) + +Send a `POST` request to your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS +`/api/v2/replications` endpoint to create a replication stream. + +{{< keep-url >}} +{{< api-endpoint endpoint="localhost:8086/api/v2/remotes" method="POST" api-ref="/influxdb/version/api/#operation/PostRemoteConnection" >}} + +Include the following parameters in your request: + +- **Headers:** + - **Authorization:** `Token` scheme with your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS [API token](/influxdb/version/admin/tokens/) + - **Content-type:** `application/json` +{{% show-in "v2" %}} +- **Request body:** JSON object with the following fields: + {{< req type="key" >}} + - **dropNonRetryableData:** Drop data when a non-retryable error is encountered. 
+ - {{< req "\*" >}} **localBucketID:** Local InfluxDB OSS bucket ID to replicate writes from. + - {{< req "\*" >}} **maxAgeSeconds:** Maximum age of data in seconds before it is dropped (default is `604800`, must be greater than or equal to `0`). + - {{< req "\*" >}} **maxQueueSizeBytes:** Maximum replication queue size in bytes (default is `67108860`, must be greater than or equal to `33554430`). + - {{< req "\*" >}} **name:** Replication stream name. + - {{< req "\*" >}} **orgID:** Local InfluxDB OSS organization ID. + - {{< req "\*" >}} **remoteBucketID:** Remote bucket ID to replicate writes to. + - {{< req "\*" >}} **remoteBucketName:** Remote bucket name to replicate writes to. If replicating to **InfluxDB Enterprise v1**, use the `db-name/rp-name` bucket name syntax. +{{% /show-in %}} +{{% show-in "cloud,cloud-serverless" %}} +- **Request body:** JSON object with the following fields: + {{< req type="key" >}} + - **dropNonRetryableData:** Drop data when a non-retryable error is encountered + - {{< req "\*" >}} **localBucketID:** InfluxDB OSS bucket ID to replicate writes from + - {{< req "\*" >}} **maxAgeSeconds:** Maximum age of data in seconds before it is dropped (default is `604800`, must be greater than or equal to `0`) + - {{< req "\*" >}} **maxQueueSizeBytes:** Maximum replication queue size in bytes (default is `67108860`, must be greater than or equal to `33554430`) + - {{< req "\*" >}} **name:** Replication stream name + - {{< req "\*" >}} **orgID:** InfluxDB OSS organization ID + - {{< req "\*" >}} **remoteBucketID:** InfluxDB Cloud bucket ID to replicate writes to (mutually exclusive with `remoteBucketName`) + - {{< req "\*" >}} **remoteBucketName:** InfluxDB Cloud bucket name to replicate writes to (mutually exclusive with `remoteBucketID`) + - {{< req "\*" >}} **remoteID:** Remote connection ID +{{% /show-in %}} + +> [!Note] +> `remoteBucketID` and `remoteBucketName` are mutually exclusive. 
+> {{% show-in "v2" %}}If replicating to **InfluxDB Enterprise v1**, use `remoteBucketName` with the `db-name/rp-name` bucket name syntax.{{% /show-in %}} {{< keep-url >}} ```sh @@ -197,19 +242,18 @@ curl --request POST http://localhost:8086/api/v2/replications \ }' ``` -Once a replication stream is created, InfluxDB {{% show-in "v2" %}}OSS{{% /show-in %}} -will replicate all writes from the specified local bucket to the {{% show-in "v2" %}}remote {{% /show-in %}} +After you create a replication stream, InfluxDB {{% show-in "v2" %}}OSS{{% /show-in %}} +replicates all writes from the specified local bucket to the {{% show-in "v2" %}}remote {{% /show-in %}} InfluxDB {{% show-in "cloud,cloud-serverless" %}}Cloud {{% /show-in %}}bucket. To get information such as the current queue size, max queue size, and latest status -code for each replication stream, send a `GET` request to your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS `/api/v2/replications` endpoint. +code for each replication stream, send a `GET` request to your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS `/api/v2/replications` endpoint: {{< keep-url >}} {{< api-endpoint endpoint="localhost:8086/api/v2/replications" method="GET" api-ref="/influxdb/version/api/#operation/GetReplications" >}} -Include the following in your request: +Include the following parameters in your request: -- **Request method:** `GET` - **Headers:** - **Authorization:** `Token` scheme with your {{% show-in "v2" %}}local{{% /show-in %}} InfluxDB OSS [API token](/influxdb/version/admin/tokens/) - **Query parameters:** From ddb9a5584db525c8f04fab9e7a8dc6d95336b52b Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 12 Aug 2025 18:12:45 -0500 Subject: [PATCH 084/179] fix(clustered): clarify compactor scaling guidance for CPU and memory Addresses customer confusion where scaling CPU alone doesn't improve compactor performance. 
Compactor concurrency scales based on memory allocation, not CPU count, so both resources should be scaled together. Closes influxdata/DAR#514 add related links fix(clustered): correct anchor link in scale-cluster documentation Fix broken internal anchor link from #rrecommended-scaling-strategies-per-component to #recommended-scaling-strategies-per-component (removed extra 'r'). This was used to test the improved link-checker anchor validation functionality. fix(clustered): correct anchor link in scale-cluster documentation Fixes broken anchor link #rrecommended-scaling-strategies-per-component to the correct #recommended-scaling-strategies-per-component --- TESTING.md | 25 +++++++++++++++++++ .../clustered/admin/scale-cluster.md | 11 +++++--- 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/TESTING.md b/TESTING.md index dba8f892e..09330c298 100644 --- a/TESTING.md +++ b/TESTING.md @@ -184,6 +184,31 @@ link-checker check public/path/to/file.html link-checker config ``` +### Link Resolution Behavior + +The link-checker automatically handles relative link resolution based on the input type: + +**Local Files → Local Resolution** +```bash +# When checking local files, relative links resolve to the local filesystem +link-checker check public/influxdb3/core/admin/scale-cluster/index.html +# Relative link /influxdb3/clustered/tags/kubernetes/ becomes: +# → /path/to/public/influxdb3/clustered/tags/kubernetes/index.html +``` + +**URLs → Production Resolution** +```bash +# When checking URLs, relative links resolve to the production site +link-checker check https://docs.influxdata.com/influxdb3/core/admin/scale-cluster/ +# Relative link /influxdb3/clustered/tags/kubernetes/ becomes: +# → https://docs.influxdata.com/influxdb3/clustered/tags/kubernetes/ +``` + +**Why This Matters** +- **Testing new content**: Tag pages generated locally will be found when testing local files +- **Production validation**: Production URLs validate against the live site +- **No 
false positives**: New content won't appear broken when testing locally before deployment + ### Content Mapping Workflows #### Scenario 1: Map and check InfluxDB 3 Core content diff --git a/content/influxdb3/clustered/admin/scale-cluster.md b/content/influxdb3/clustered/admin/scale-cluster.md index 0c8b2d9b2..bd8339718 100644 --- a/content/influxdb3/clustered/admin/scale-cluster.md +++ b/content/influxdb3/clustered/admin/scale-cluster.md @@ -8,9 +8,11 @@ menu: parent: Administer InfluxDB Clustered name: Scale your cluster weight: 207 -influxdb3/clustered/tags: [scale] +influxdb3/clustered/tags: [scale, performance, Kubernetes] related: - /influxdb3/clustered/reference/internals/storage-engine/ + - /influxdb3/clustered/write-data/best-practices/data-lifecycle/ + - /influxdb3/clustered/query-data/troubleshoot-and-optimize/optimize-queries/ - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits, Kubernetes resource requests and limits --- @@ -559,11 +561,14 @@ concurrency demands or reaches the hardware limits of your underlying nodes. ### Compactor -- **Recommended**: Maintain **1 Compactor pod** and use [vertical scaling](#vertical-scaling) (especially -increasing the available CPU) for the Compactor. +- **Recommended**: Maintain **1 Compactor pod** and use [vertical scaling](#vertical-scaling) for the Compactor. +Scale CPU and memory resources together, as compactor concurrency settings scale based on memory, not CPU count. - Because compaction is a compute-heavy process, horizontal scaling increases compaction throughput, but not as efficiently as vertical scaling. +> [!Important] +> When scaling the Compactor, scale CPU and memory resources together. 
+ ### Garbage collector The [Garbage collector](/influxdb3/clustered/reference/internals/storage-engine/#garbage-collector) is a lightweight process that typically doesn't require From cba3b21f1c9dc005dac277a02dca126070be4912 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 18 Aug 2025 17:40:02 -0500 Subject: [PATCH 085/179] docs(testing): document link-checker binary release process Add comprehensive documentation for maintainers on how to: - Create releases in docs-tooling (automated) - Manually distribute binaries to docs-v2 (required for private repo) - Update workflow references when needed This addresses the missing process documentation for link-checker binary distribution between the two repositories. feat(ci): update link-checker to v1.2.2 and add manual sync workflow - Update pr-link-check.yml to use link-checker-v1.2.2 with latest fixes - Add sync-link-checker-binary.yml for manual binary distribution - Improvements in v1.2.2: base URL detection, anchor validation, JSON parsing The v1.2.2 release fixes the Hugo base URL detection issue and improves anchor link validation that was tested in this PR. 
--- .github/workflows/pr-link-check.yml | 2 +- .../workflows/sync-link-checker-binary.yml | 68 +++++++++++++++++++ TESTING.md | 41 +++++++++++ 3 files changed, 110 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/sync-link-checker-binary.yml diff --git a/.github/workflows/pr-link-check.yml b/.github/workflows/pr-link-check.yml index b0764089a..5f5dacca8 100644 --- a/.github/workflows/pr-link-check.yml +++ b/.github/workflows/pr-link-check.yml @@ -95,7 +95,7 @@ jobs: curl -L -H "Accept: application/vnd.github+json" \ -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ -o link-checker-info.json \ - "https://api.github.com/repos/influxdata/docs-v2/releases/tags/link-checker-v1.0.0" + "https://api.github.com/repos/influxdata/docs-v2/releases/tags/link-checker-v1.2.2" # Extract download URL for linux binary DOWNLOAD_URL=$(jq -r '.assets[] | select(.name | test("link-checker.*linux")) | .url' link-checker-info.json) diff --git a/.github/workflows/sync-link-checker-binary.yml b/.github/workflows/sync-link-checker-binary.yml new file mode 100644 index 000000000..b0ac46c68 --- /dev/null +++ b/.github/workflows/sync-link-checker-binary.yml @@ -0,0 +1,68 @@ +name: Sync Link Checker Binary from docs-tooling + +on: + workflow_dispatch: + inputs: + version: + description: 'Link checker version to sync (e.g., v1.2.2)' + required: true + type: string + +jobs: + sync-binary: + name: Sync link-checker binary from docs-tooling + runs-on: ubuntu-latest + + steps: + - name: Download binary from docs-tooling release + run: | + echo "Downloading link-checker ${{ inputs.version }} from docs-tooling..." 
+ + # Download binary from docs-tooling release + curl -L -H "Accept: application/octet-stream" \ + -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ + -o link-checker-linux-x86_64 \ + "https://github.com/influxdata/docs-tooling/releases/download/link-checker-${{ inputs.version }}/link-checker-linux-x86_64" + + # Download checksums + curl -L -H "Accept: application/octet-stream" \ + -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ + -o checksums.txt \ + "https://github.com/influxdata/docs-tooling/releases/download/link-checker-${{ inputs.version }}/checksums.txt" + + # Verify downloads + ls -la link-checker-linux-x86_64 checksums.txt + + - name: Create docs-v2 release + run: | + echo "Creating link-checker-${{ inputs.version }} release in docs-v2..." + + gh release create \ + --title "Link Checker Binary ${{ inputs.version }}" \ + --notes "Link validation tooling binary for docs-v2 GitHub Actions workflows. + + This binary is distributed from the docs-tooling repository release link-checker-${{ inputs.version }}. + + ### Usage in GitHub Actions + + The binary is automatically downloaded by docs-v2 workflows for link validation. + + ### Manual Usage + + \`\`\`bash + # Download and make executable + curl -L -o link-checker https://github.com/influxdata/docs-v2/releases/download/link-checker-${{ inputs.version }}/link-checker-linux-x86_64 + chmod +x link-checker + + # Verify installation + ./link-checker --version + \`\`\` + + ### Changes in ${{ inputs.version }} + + See the [docs-tooling release](https://github.com/influxdata/docs-tooling/releases/tag/link-checker-${{ inputs.version }}) for detailed changelog." 
\ + link-checker-${{ inputs.version }} \ + link-checker-linux-x86_64 \ + checksums.txt + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file diff --git a/TESTING.md b/TESTING.md index 09330c298..233bb3a36 100644 --- a/TESTING.md +++ b/TESTING.md @@ -171,6 +171,47 @@ cargo build --release cp target/release/link-checker /usr/local/bin/ ``` +#### Binary Release Process + +**For maintainers:** To create a new link-checker release in docs-v2: + +1. **Create release in docs-tooling** (builds and releases binary automatically): + ```bash + cd docs-tooling + git tag link-checker-v1.2.x + git push origin link-checker-v1.2.x + ``` + +2. **Manually distribute to docs-v2** (required due to private repository access): + ```bash + # Download binary from docs-tooling release + curl -L -H "Authorization: Bearer $(gh auth token)" \ + -o link-checker-linux-x86_64 \ + "https://github.com/influxdata/docs-tooling/releases/download/link-checker-v1.2.x/link-checker-linux-x86_64" + + curl -L -H "Authorization: Bearer $(gh auth token)" \ + -o checksums.txt \ + "https://github.com/influxdata/docs-tooling/releases/download/link-checker-v1.2.x/checksums.txt" + + # Create docs-v2 release + gh release create \ + --repo influxdata/docs-v2 \ + --title "Link Checker Binary v1.2.x" \ + --notes "Link validation tooling binary for docs-v2 GitHub Actions workflows." \ + link-checker-v1.2.x \ + link-checker-linux-x86_64 \ + checksums.txt + ``` + +3. **Update workflow reference** (if needed): + ```bash + # Update .github/workflows/pr-link-check.yml line 98 to use new version + sed -i 's/link-checker-v[0-9.]*/link-checker-v1.2.x/' .github/workflows/pr-link-check.yml + ``` + +> [!Note] +> The manual distribution is required because docs-tooling is a private repository and the default GitHub token doesn't have cross-repository access for private repos. 
+ #### Core Commands ```bash From 0001c1cfc44b8475813c0770b8082f118242ac6c Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 18 Aug 2025 18:21:02 -0500 Subject: [PATCH 086/179] fix(v2): missing (API) in heading --- .../shared/influxdb-v2/write-data/replication/replicate-data.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/shared/influxdb-v2/write-data/replication/replicate-data.md b/content/shared/influxdb-v2/write-data/replication/replicate-data.md index 90b23280e..cabc178b6 100644 --- a/content/shared/influxdb-v2/write-data/replication/replicate-data.md +++ b/content/shared/influxdb-v2/write-data/replication/replicate-data.md @@ -159,7 +159,7 @@ curl --request POST http://localhost:8086/api/v2/remotes \ }' ``` -#### Use an existing remote connection +#### Use an existing remote connection (API) Alternatively, you can use an existing connection that you have already configured. From b510e6bac1e19c8954d6a401e54136af4fd5c666 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 19 Aug 2025 08:38:36 -0500 Subject: [PATCH 087/179] fix(v2): broken link fragment --- content/influxdb/v2/install/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb/v2/install/_index.md b/content/influxdb/v2/install/_index.md index 2a7b4bc3a..dab19f201 100644 --- a/content/influxdb/v2/install/_index.md +++ b/content/influxdb/v2/install/_index.md @@ -473,7 +473,7 @@ _If necessary, adjust the example file paths and utilities for your system._ https://download.influxdata.com/influxdb/releases/v{{< latest-patch >}}/influxdb2-{{< latest-patch >}}_linux_arm64.tar.gz ``` -2. [Choose the InfluxData key-pair for your OS version](#choose-the-influxdata-key-pair-for-your-system). +2. [Choose the InfluxData key-pair for your OS version](#choose-the-influxdata-key-pair-for-your-os-version). 3. 
{{< req text="Recommended:" color="magenta" >}}: Verify the authenticity of the downloaded binary--for example, enter the following command in your terminal. From 116e4fe70a20e7b6267d8dd370378e1bc8d275fc Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 19 Aug 2025 08:43:45 -0500 Subject: [PATCH 088/179] config(link-checker): exclude StackExchange network URLs Add exclusion patterns for StackExchange sites to both production and default link-checker configurations: - *.stackexchange.com - stackoverflow.com - *.stackoverflow.com These sites often block automated requests/bots, causing false positive link validation failures in CI environments. --- .ci/link-checker/default.lycherc.toml | 5 +++++ .ci/link-checker/production.lycherc.toml | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/.ci/link-checker/default.lycherc.toml b/.ci/link-checker/default.lycherc.toml index 22f97a0f9..259efd76a 100644 --- a/.ci/link-checker/default.lycherc.toml +++ b/.ci/link-checker/default.lycherc.toml @@ -50,6 +50,11 @@ exclude = [ # detection) "^https?://github\\.com", + # StackExchange network URLs (often block automated requests) + "^https?://.*\\.stackexchange\\.com", + "^https?://stackoverflow\\.com", + "^https?://.*\\.stackoverflow\\.com", + # Common documentation placeholders "YOUR_.*", "REPLACE_.*", diff --git a/.ci/link-checker/production.lycherc.toml b/.ci/link-checker/production.lycherc.toml index 9b8be5aa3..f8410208c 100644 --- a/.ci/link-checker/production.lycherc.toml +++ b/.ci/link-checker/production.lycherc.toml @@ -58,6 +58,11 @@ exclude = [ "^https?://reddit\\.com", "^https?://.*\\.reddit\\.com", + # StackExchange network URLs (often block automated requests) + "^https?://.*\\.stackexchange\\.com", + "^https?://stackoverflow\\.com", + "^https?://.*\\.stackoverflow\\.com", + # InfluxData support URLs (certificate/SSL issues in CI) "^https?://support\\.influxdata\\.com", From aaf475beef090e37f5486d98058baa0815df738b Mon Sep 17 00:00:00 2001 From: Jason 
Stirnaman Date: Tue, 19 Aug 2025 08:45:40 -0500 Subject: [PATCH 089/179] fix(v2): replace broken link fragment with new URL --- content/influxdb/v2/install/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb/v2/install/_index.md b/content/influxdb/v2/install/_index.md index dab19f201..2896900cf 100644 --- a/content/influxdb/v2/install/_index.md +++ b/content/influxdb/v2/install/_index.md @@ -675,7 +675,7 @@ data isn't deleted if you delete the container._ flags for initial setup options and file system mounts. _If you don't specify InfluxDB initial setup options, you can -[set up manually](#set-up-influxdb) later using the UI or CLI in a running +[set up manually](/influxdb/v2/get-started/setup/) later using the UI or CLI in a running container._ {{% code-placeholders "ADMIN_(USERNAME|PASSWORD)|ORG_NAME|BUCKET_NAME" %}} From 683dfe233c9a3e7ee331d94e92abef82dbeeb421 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 19 Aug 2025 08:59:01 -0500 Subject: [PATCH 090/179] fix(v2): replace broken link fragment with example and page link --- content/influxdb/v2/install/_index.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/content/influxdb/v2/install/_index.md b/content/influxdb/v2/install/_index.md index 2896900cf..60b60d938 100644 --- a/content/influxdb/v2/install/_index.md +++ b/content/influxdb/v2/install/_index.md @@ -731,7 +731,8 @@ and _[Operator token](/influxdb/v2/admin/tokens/#operator-token)_, and logs to s You can view the Operator token in the `/etc/influxdb2/influx-configs` file and use it to authorize -[creating an All Access token](#optional-create-all-access-tokens). +[creating an All Access token](#examples). +For more information, see [API token types](/influxdb/v2/admin/tokens/#api-token-types). 
_To run the InfluxDB container in [detached mode](https://docs.docker.com/engine/reference/run/#detached-vs-foreground), @@ -761,6 +762,13 @@ docker exec -it ` +```bash +# Create an All Access token +docker exec -it influxdb2 influx auth create \ + --all-access \ + --token OPERATOR_TOKEN +``` + ```bash # List CLI configurations docker exec -it influxdb2 influx config ls From de021b48ebf0cd5e31e16a6e2581d10496209afb Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 19 Aug 2025 09:01:59 -0500 Subject: [PATCH 091/179] fix(telegraf): broken link fragment --- content/telegraf/v1/install.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/telegraf/v1/install.md b/content/telegraf/v1/install.md index 9aa40d9cd..a8ca6fb87 100644 --- a/content/telegraf/v1/install.md +++ b/content/telegraf/v1/install.md @@ -329,7 +329,7 @@ Replace the following: Choose from the following options to install Telegraf binary files for Linux ARM: - To install on Linux ARMv7(32-bit), see the [downloads page](https://www.influxdata.com/downloads/#telegraf). 
-- [Download and install on Linux ARMv8 (64-bit)](#download-and-install-on-linux-arm-64) +- [Download and install on Linux ARMv8 (64-bit)](#download-and-install-on-linux-armv8) ### Download and install on Linux ARMv8 From b90b20314872eaaa30e136a5180a5b2ca8250c4f Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 19 Aug 2025 09:02:50 -0500 Subject: [PATCH 092/179] fix(telegraf): broken link fragment --- content/telegraf/v1/install.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/telegraf/v1/install.md b/content/telegraf/v1/install.md index a8ca6fb87..ac64463fb 100644 --- a/content/telegraf/v1/install.md +++ b/content/telegraf/v1/install.md @@ -15,7 +15,7 @@ To install Telegraf, do the following: - [Review requirements](#requirements) - [Download and install Telegraf](#download-and-install-telegraf) -- [Custom compile Telegraf](#custom-compile) +- [Custom compile Telegraf](#custom-compile-telegraf) ## Requirements From 4f807c9eb6e981c7de5fa0c3845c7efe8fbf673a Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 19 Aug 2025 09:06:06 -0500 Subject: [PATCH 093/179] fix(v2): broken-link-fragment --- content/telegraf/v1/install.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/telegraf/v1/install.md b/content/telegraf/v1/install.md index ac64463fb..e09b2312b 100644 --- a/content/telegraf/v1/install.md +++ b/content/telegraf/v1/install.md @@ -121,7 +121,7 @@ InfluxData uses [GPG (GnuPG)](https://www.gnupg.org/software/) to sign released public key and encrypted private key (`.key` file) pairs that you can use to verify the integrity of packages and binaries from the InfluxData repository. 
-Before running the [install](#install) sample code, substitute the key-pair compatible with your OS version: +Before running the [install](#download-and-install-instructions) sample code, substitute the key-pair compatible with your OS version: For newer OS releases (for example, Ubuntu 20.04 LTS and newer, Debian Buster and newer) that support subkey verification: From 9d14efe92e90b990ba42426f6163ebbda86a3d6d Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 19 Aug 2025 09:07:37 -0500 Subject: [PATCH 094/179] fix(v2): broken-link-fragment --- content/telegraf/v1/install.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/telegraf/v1/install.md b/content/telegraf/v1/install.md index e09b2312b..1bea2c167 100644 --- a/content/telegraf/v1/install.md +++ b/content/telegraf/v1/install.md @@ -627,7 +627,7 @@ Use the Telegraf custom builder tool to compile Telegraf with only the plugins y ### Prerequisites - Follow the instructions to install [Go](https://go.dev/) for your system. -- [Create your Telegraf configuration file](#generate-a-custom-configuration-file) with the plugins you want to use. +- [Create your Telegraf configuration file](#generate-a-configuration-file) with the plugins you want to use. ### Build the custom builder tool From 8754468dbd6482ae541ed0b74179eb2ad8cc53e2 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 19 Aug 2025 09:08:40 -0500 Subject: [PATCH 095/179] fix(v2): broken-link-fragment --- content/telegraf/v1/install.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/telegraf/v1/install.md b/content/telegraf/v1/install.md index 1bea2c167..5ea2b1e1f 100644 --- a/content/telegraf/v1/install.md +++ b/content/telegraf/v1/install.md @@ -388,7 +388,7 @@ To install using Homebrew, do the following: 3. 
Choose one of the following methods to start Telegraf and begin collecting and processing metrics: - [Run Telegraf in your terminal](#run-telegraf-in-your-terminal) - - [Run Telegraf as a service](#run-telegraf-as-a-service) + - [Run Telegraf as a service](#run-telegraf-as-a-background-service) ### Run Telegraf in your terminal From 7d95a3f95b9178edabaa8715f7c3ccbb0fc0c5ef Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 4 Aug 2025 14:05:24 -0500 Subject: [PATCH 096/179] chore(qol): Copilot no longer uses instruction settings; it automatically detects instructions files and PRs. --- .vscode/settings.json | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index 2c18d3282..c827452b9 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -14,17 +14,6 @@ }, "vale.valeCLI.config": "${workspaceFolder}/.vale.ini", "vale.valeCLI.minAlertLevel": "warning", - "github.copilot.chat.codeGeneration.useInstructionFiles": true, - "github.copilot.chat.codeGeneration.instructions": [ - { - "file": "${workspaceFolder}/.github/copilot-instructions.md", - } - ], - "github.copilot.chat.pullRequestDescriptionGeneration.instructions": [ - { - "file": "${workspaceFolder}/.github/copilot-instructions.md", - } - ], "cSpell.words": [ "influxctl" ] From b2aab8ad43096b57a246acc5353ca4c472074cf3 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 4 Aug 2025 14:43:48 -0500 Subject: [PATCH 097/179] test: When using env_file, the variables are loaded directly into the container's environment, so you don't need to use the syntax. 
Removed braces --- compose.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/compose.yaml b/compose.yaml index cd466f6e3..ea11e03cb 100644 --- a/compose.yaml +++ b/compose.yaml @@ -349,7 +349,6 @@ services: - --data-dir=/var/lib/influxdb3/data - --plugin-dir=/var/lib/influxdb3/plugins environment: - - INFLUXDB3_ENTERPRISE_LICENSE_EMAIL=${INFLUXDB3_ENTERPRISE_LICENSE_EMAIL} - INFLUXDB3_AUTH_TOKEN=/run/secrets/influxdb3-enterprise-admin-token volumes: - type: bind From c708bd865825c57ab2c3561ecc83c93dab8f8e7d Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 4 Aug 2025 15:22:02 -0500 Subject: [PATCH 098/179] chore(qol): audit-cli-documentation.js should dynamically get top-level commands and recurse through --help.\Both products work together: Running node [audit-cli-documentation.js](http://_vscodecontentref_/1) both successfully audits both Core and Enterprise Core templates use Core-specific frontmatter Enterprise templates use Enterprise-specific frontmatter Fixes audit-cli-documentation.js so that it parses commands dynamically from the CLI output. Some commands () only return top-level help output, which the script had some difficulty with.That seems mostly resolved, but might rear again. 
--- .../audit-cli-documentation.js | 446 ++++++++++++++---- 1 file changed, 343 insertions(+), 103 deletions(-) diff --git a/helper-scripts/influxdb3-monolith/audit-cli-documentation.js b/helper-scripts/influxdb3-monolith/audit-cli-documentation.js index 74e1af565..ec22a453f 100755 --- a/helper-scripts/influxdb3-monolith/audit-cli-documentation.js +++ b/helper-scripts/influxdb3-monolith/audit-cli-documentation.js @@ -51,39 +51,9 @@ class CLIDocAuditor { ); } - // Commands to extract help for - this.mainCommands = [ - 'create', - 'delete', - 'disable', - 'enable', - 'query', - 'show', - 'test', - 'update', - 'write', - ]; - this.subcommands = [ - 'create database', - 'create token admin', - 'create token', - 'create trigger', - 'create last_cache', - 'create distinct_cache', - 'create table', - 'show databases', - 'show tokens', - 'show system', - 'delete database', - 'delete table', - 'delete trigger', - 'update database', - 'test wal_plugin', - 'test schedule_plugin', - ]; - - // Map for command tracking during option parsing - this.commandOptionsMap = {}; + // Dynamic command discovery - populated by discoverCommands() + this.discoveredCommands = new Map(); // command -> { subcommands: [], options: [] } + this.commandOptionsMap = {}; // For backward compatibility } async fileExists(path) { @@ -154,6 +124,238 @@ class CLIDocAuditor { }); } + async ensureContainerRunning(product) { + const containerName = `influxdb3-${product}`; + + // Check if container exists and is running + const { code, stdout } = await this.runCommand('docker', [ + 'compose', + 'ps', + '--format', + 'json', + containerName, + ]); + + if (code !== 0) { + console.log(`❌ Failed to check container status for ${containerName}`); + return false; + } + + const containers = stdout.trim().split('\n').filter((line) => line); + const isRunning = containers.some((line) => { + try { + const container = JSON.parse(line); + return container.Name === containerName && container.State === 'running'; + } catch 
{ + return false; + } + }); + + if (!isRunning) { + console.log(`🚀 Starting ${containerName}...`); + const startResult = await this.runCommand('docker', [ + 'compose', + 'up', + '-d', + containerName, + ]); + + if (startResult.code !== 0) { + console.log(`❌ Failed to start ${containerName}`); + console.log(startResult.stderr); + return false; + } + + // Wait for container to be ready + console.log(`⏳ Waiting for ${containerName} to be ready...`); + await new Promise((resolve) => setTimeout(resolve, 5000)); + } + + return true; + } + + async discoverCommands(product) { + const containerName = `influxdb3-${product}`; + + // Ensure container is running + if (!(await this.ensureContainerRunning(product))) { + throw new Error(`Failed to start container ${containerName}`); + } + + // Get main help to discover top-level commands + const mainHelp = await this.runCommand('docker', [ + 'compose', + 'exec', + '-T', + containerName, + 'influxdb3', + '--help', + ]); + + if (mainHelp.code !== 0) { + console.error(`Failed to get main help. 
Exit code: ${mainHelp.code}`); + console.error(`Stdout: ${mainHelp.stdout}`); + console.error(`Stderr: ${mainHelp.stderr}`); + throw new Error(`Failed to get main help: ${mainHelp.stderr}`); + } + + // Parse main commands from help output + const mainCommands = this.parseCommandsFromHelp(mainHelp.stdout); + + // Also add the root command first + this.discoveredCommands.set('influxdb3', { + subcommands: mainCommands, + options: this.parseOptionsFromHelp(mainHelp.stdout), + helpText: mainHelp.stdout, + }); + + // For backward compatibility + this.commandOptionsMap['influxdb3'] = this.parseOptionsFromHelp(mainHelp.stdout); + + // Discover subcommands and options for each main command + for (const command of mainCommands) { + await this.discoverSubcommands(containerName, command, [command]); + } + } + + parseCommandsFromHelp(helpText) { + const commands = []; + // Strip ANSI color codes first + // eslint-disable-next-line no-control-regex + const cleanHelpText = helpText.replace(/\x1b\[[0-9;]*m/g, ''); + const lines = cleanHelpText.split('\n'); + let inCommandsSection = false; + + for (const line of lines) { + const trimmed = line.trim(); + + // Look for any Commands section + if (trimmed.includes('Commands:') || trimmed === 'Resource Management:' || + trimmed === 'System Management:') { + inCommandsSection = true; + continue; + } + + // Stop at next section (but don't stop on management sections) + if (inCommandsSection && /^[A-Z][a-z]+:$/.test(trimmed) && + !trimmed.includes('Commands:') && + trimmed !== 'Resource Management:' && + trimmed !== 'System Management:') { + break; + } + + // Parse command lines (typically indented with command name) + if (inCommandsSection && /^\s+[a-z]/.test(line)) { + const match = line.match(/^\s+([a-z][a-z0-9_-]*)/); + if (match) { + commands.push(match[1]); + } + } + } + + return commands; + } + + parseOptionsFromHelp(helpText) { + const options = []; + const lines = helpText.split('\n'); + let inOptionsSection = false; + + for 
(const line of lines) { + const trimmed = line.trim(); + + // Look for Options: section + if (trimmed === 'Options:') { + inOptionsSection = true; + continue; + } + + // Stop at next section + if (inOptionsSection && /^[A-Z][a-z]+:$/.test(trimmed)) { + break; + } + + // Parse option lines + if (inOptionsSection && /^\s*-/.test(line)) { + const optionMatch = line.match(/--([a-z][a-z0-9-]*)/); + if (optionMatch) { + options.push(`--${optionMatch[1]}`); + } + } + } + + return options; + } + + async discoverSubcommands(containerName, commandPath, commandParts) { + // Get help for this command + + // First try with --help + let helpResult = await this.runCommand('docker', [ + 'compose', + 'exec', + '-T', + containerName, + 'influxdb3', + ...commandParts, + '--help', + ]); + + // If --help returns main help or fails, try without --help + if (helpResult.code !== 0 || helpResult.stdout.includes('InfluxDB 3 Core Server and Command Line Tools')) { + helpResult = await this.runCommand('docker', [ + 'compose', + 'exec', + '-T', + containerName, + 'influxdb3', + ...commandParts, + ]); + } + + if (helpResult.code !== 0) { + // Check if stderr contains useful help information + if (helpResult.stderr && helpResult.stderr.includes('Usage:') && helpResult.stderr.includes('Commands:')) { + // Use stderr as the help text since it contains the command usage info + helpResult = { code: 0, stdout: helpResult.stderr, stderr: '' }; + } else { + // Command might not exist or might not have subcommands + return; + } + } + + // If the result is still the main help, skip this command + if (helpResult.stdout.includes('InfluxDB 3 Core Server and Command Line Tools')) { + return; + } + + const helpText = helpResult.stdout; + const subcommands = this.parseCommandsFromHelp(helpText); + const options = this.parseOptionsFromHelp(helpText); + + // Store the command info + const fullCommand = `influxdb3 ${commandParts.join(' ')}`; + this.discoveredCommands.set(fullCommand, { + subcommands, + options, + 
helpText, + }); + + // For backward compatibility + this.commandOptionsMap[fullCommand] = options; + + // Recursively discover subcommands (but limit depth) + if (subcommands.length > 0 && commandParts.length < 3) { + for (const subcommand of subcommands) { + await this.discoverSubcommands( + containerName, + `${commandPath} ${subcommand}`, + [...commandParts, subcommand] + ); + } + } + } + async extractCurrentCLI(product, outputFile) { process.stdout.write( `Extracting current CLI help from influxdb3-${product}...` @@ -164,57 +366,30 @@ class CLIDocAuditor { if (this.version === 'local') { const containerName = `influxdb3-${product}`; - // Check if container is running - const { code, stdout } = await this.runCommand('docker', [ - 'ps', - '--format', - '{{.Names}}', - ]); - if (code !== 0 || !stdout.includes(containerName)) { + // Ensure container is running and discover commands + if (!(await this.ensureContainerRunning(product))) { console.log(` ${Colors.RED}✗${Colors.NC}`); - console.log(`Error: Container ${containerName} is not running.`); - console.log(`Start it with: docker compose up -d influxdb3-${product}`); return false; } - // Extract comprehensive help - let fileContent = ''; + // Discover all commands dynamically + await this.discoverCommands(product); - // Main help - const mainHelp = await this.runCommand('docker', [ - 'exec', - containerName, - 'influxdb3', - '--help', - ]); - fileContent += mainHelp.code === 0 ? mainHelp.stdout : mainHelp.stderr; - - // Extract all subcommand help - for (const cmd of this.mainCommands) { - fileContent += `\n\n===== influxdb3 ${cmd} --help =====\n`; - const cmdHelp = await this.runCommand('docker', [ - 'exec', - containerName, - 'influxdb3', - cmd, - '--help', - ]); - fileContent += cmdHelp.code === 0 ? 
cmdHelp.stdout : cmdHelp.stderr; + // Generate comprehensive help output + let fileContent = `===== influxdb3 --help =====\n`; + + // Add root command help first + const rootCommand = this.discoveredCommands.get('influxdb3'); + if (rootCommand) { + fileContent += rootCommand.helpText; } - // Extract detailed subcommand help - for (const subcmd of this.subcommands) { - fileContent += `\n\n===== influxdb3 ${subcmd} --help =====\n`; - const cmdParts = [ - 'exec', - containerName, - 'influxdb3', - ...subcmd.split(' '), - '--help', - ]; - const subcmdHelp = await this.runCommand('docker', cmdParts); - fileContent += - subcmdHelp.code === 0 ? subcmdHelp.stdout : subcmdHelp.stderr; + // Add all other discovered command help + for (const [command, info] of this.discoveredCommands) { + if (command !== 'influxdb3') { + fileContent += `\n\n===== ${command} --help =====\n`; + fileContent += info.helpText; + } } await fs.writeFile(outputFile, fileContent); @@ -233,7 +408,8 @@ class CLIDocAuditor { return false; } - // Extract help from specific version + // For version-specific images, we'll use a simpler approach + // since we can't easily discover commands without a running container let fileContent = ''; // Main help @@ -246,8 +422,12 @@ class CLIDocAuditor { ]); fileContent += mainHelp.code === 0 ? mainHelp.stdout : mainHelp.stderr; - // Extract subcommand help - for (const cmd of this.mainCommands) { + // Parse main commands and get their help + const mainCommands = this.parseCommandsFromHelp( + mainHelp.code === 0 ? mainHelp.stdout : mainHelp.stderr + ); + + for (const cmd of mainCommands) { fileContent += `\n\n===== influxdb3 ${cmd} --help =====\n`; const cmdHelp = await this.runCommand('docker', [ 'run', @@ -258,6 +438,25 @@ class CLIDocAuditor { '--help', ]); fileContent += cmdHelp.code === 0 ? cmdHelp.stdout : cmdHelp.stderr; + + // Try to get subcommands + const subcommands = this.parseCommandsFromHelp( + cmdHelp.code === 0 ? 
cmdHelp.stdout : cmdHelp.stderr + ); + + for (const subcmd of subcommands) { + fileContent += `\n\n===== influxdb3 ${cmd} ${subcmd} --help =====\n`; + const subcmdHelp = await this.runCommand('docker', [ + 'run', + '--rm', + image, + 'influxdb3', + cmd, + subcmd, + '--help', + ]); + fileContent += subcmdHelp.code === 0 ? subcmdHelp.stdout : subcmdHelp.stderr; + } } await fs.writeFile(outputFile, fileContent); @@ -284,8 +483,10 @@ class CLIDocAuditor { .trim(); output += `## ${currentCommand}\n\n`; inOptions = false; - // Initialize options list for this command - this.commandOptionsMap[currentCommand] = []; + // Initialize options list for this command if not exists + if (!this.commandOptionsMap[currentCommand]) { + this.commandOptionsMap[currentCommand] = []; + } } // Detect options sections else if (line.trim() === 'Options:') { @@ -343,7 +544,7 @@ class CLIDocAuditor { const lines = content.split('\n'); let inCommand = false; let helpText = []; - const commandHeader = `===== influxdb3 ${command} --help =====`; + const commandHeader = `===== influxdb3 ${command} --help`; for (let i = 0; i < lines.length; i++) { if (lines[i] === commandHeader) { @@ -361,7 +562,7 @@ class CLIDocAuditor { return helpText.join('\n').trim(); } - async generateDocumentationTemplate(command, helpText) { + async generateDocumentationTemplate(command, helpText, product) { // Parse the help text to extract description and options const lines = helpText.split('\n'); let description = ''; @@ -402,14 +603,18 @@ class CLIDocAuditor { } } + // Generate product-specific frontmatter + const productTag = product === 'enterprise' ? 'influxdb3/enterprise' : 'influxdb3/core'; + const menuRef = product === 'enterprise' ? 'influxdb3_enterprise_reference' : 'influxdb3_core_reference'; + // Generate markdown template let template = `--- title: influxdb3 ${command} description: > The \`influxdb3 ${command}\` command ${description.toLowerCase()}. 
-influxdb3/core/tags: [cli] +${productTag}/tags: [cli] menu: - influxdb3_core_reference: + ${menuRef}: parent: influxdb3 cli weight: 201 --- @@ -587,22 +792,8 @@ Replace the following: let missingCount = 0; const missingDocs = []; - // Map commands to expected documentation files - const commandToFile = { - 'create database': 'create/database.md', - 'create token': 'create/token/_index.md', - 'create token admin': 'create/token/admin.md', - 'create trigger': 'create/trigger.md', - 'create table': 'create/table.md', - 'create last_cache': 'create/last_cache.md', - 'create distinct_cache': 'create/distinct_cache.md', - 'show databases': 'show/databases.md', - 'show tokens': 'show/tokens.md', - 'delete database': 'delete/database.md', - 'delete table': 'delete/table.md', - query: 'query.md', - write: 'write.md', - }; + // Build command to file mapping dynamically from discovered commands + const commandToFile = this.buildCommandToFileMapping(); // Extract commands from CLI help const content = await fs.readFile(cliFile, 'utf8'); @@ -666,7 +857,8 @@ Replace the following: const helpText = await this.extractCommandHelp(content, command); const docTemplate = await this.generateDocumentationTemplate( command, - helpText + helpText, + product ); // Save patch file @@ -847,6 +1039,54 @@ Replace the following: } } + buildCommandToFileMapping() { + // Build a mapping from discovered commands to expected documentation files + const mapping = {}; + + // Common patterns for command to file mapping + const patterns = { + 'create database': 'create/database.md', + 'create token': 'create/token/_index.md', + 'create token admin': 'create/token/admin.md', + 'create trigger': 'create/trigger.md', + 'create table': 'create/table.md', + 'create last_cache': 'create/last_cache.md', + 'create distinct_cache': 'create/distinct_cache.md', + 'show databases': 'show/databases.md', + 'show tokens': 'show/tokens.md', + 'show system': 'show/system.md', + 'delete database': 'delete/database.md', 
+ 'delete table': 'delete/table.md', + 'delete trigger': 'delete/trigger.md', + 'update database': 'update/database.md', + 'test wal_plugin': 'test/wal_plugin.md', + 'test schedule_plugin': 'test/schedule_plugin.md', + query: 'query.md', + write: 'write.md', + }; + + // Add discovered commands that match patterns + for (const [command, info] of this.discoveredCommands) { + const cleanCommand = command.replace('influxdb3 ', ''); + if (patterns[cleanCommand]) { + mapping[cleanCommand] = patterns[cleanCommand]; + } else if (cleanCommand !== '' && cleanCommand.includes(' ')) { + // Generate file path for subcommands + const parts = cleanCommand.split(' '); + if (parts.length === 2) { + mapping[cleanCommand] = `${parts[0]}/${parts[1]}.md`; + } else if (parts.length === 3) { + mapping[cleanCommand] = `${parts[0]}/${parts[1]}/${parts[2]}.md`; + } + } else if (cleanCommand !== '' && !cleanCommand.includes(' ')) { + // Single command + mapping[cleanCommand] = `${cleanCommand}.md`; + } + } + + return mapping; + } + async run() { console.log( `${Colors.BLUE}🔍 InfluxDB 3 CLI Documentation Audit${Colors.NC}` From 92210137b20a5f66afe45faa67c2f98ab73207f6 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 5 Aug 2025 13:48:07 -0500 Subject: [PATCH 099/179] chore(qol): Make agents more collaborative and not automatically agreeable. --- .github/copilot-instructions.md | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index d0fc9113f..4a541203f 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -4,6 +4,10 @@ Always follow these instructions first and fallback to additional search and con ## Working Effectively +### Collaboration approach + +Be a critical thinking partner, provide honest feedback, and identify potential issues. 
+
 ### Bootstrap, Build, and Test the Repository
 
 Execute these commands in order to set up a complete working environment:
 
@@ -54,16 +58,18 @@ yarn test:codeblocks:v2
 yarn test:codeblocks:telegraf
 ```
 
-#### Link Validation (takes 10-30 minutes, NEVER CANCEL - set timeout to 45+ minutes):
+#### Link Validation (takes 1-5 minutes):
+
+Runs automatically on pull requests.
+Requires the **link-checker** binary from the repo release artifacts.
 
 ```bash
-# Test all links (very long-running)
-yarn test:links
-
 # Test specific files/products (faster)
-yarn test:links content/influxdb3/core/**/*.md
-yarn test:links:v3
-yarn test:links:v2
+# JSON format is required for accurate reporting
+link-checker map content/influxdb3/core/**/*.md \
+| link-checker check \
+  --config .ci/link-checker/production.lycherc.toml \
+  --format json
 ```
 
 #### Style Linting (takes 30-60 seconds):
@@ -168,7 +174,8 @@ yarn test:links content/example.md
 - **Package Manager**: Yarn (1.22.22+) with Node.js (20.19.4+)
 - **Testing Framework**:
   - Pytest with pytest-codeblocks (for code examples)
-  - Cypress (for link validation and E2E tests)
+  - Cypress (for E2E tests)
+  - influxdata/docs-link-checker (for link validation)
   - Vale (for style and writing guidelines)
 - **Containerization**: Docker with Docker Compose
 - **Linting**: ESLint, Prettier, Vale
@@ -176,16 +183,6 @@ yarn test:links content/example.md
 
 ## Common Tasks and Build Times
 
-### Time Expectations (CRITICAL - NEVER CANCEL)
-
-- **Dependency installation**: 4 seconds
-- **Hugo static build**: 75 seconds (NEVER CANCEL - timeout: 180+ seconds)
-- **Hugo server startup**: 92 seconds (NEVER CANCEL - timeout: 150+ seconds)
-- **Code block tests**: 5-15 minutes per product (NEVER CANCEL - timeout: 30+ minutes)
-- **Link validation**: 10-30 minutes (NEVER CANCEL - timeout: 45+ minutes)
-- **Style linting**: 30-60 seconds
-- **Docker image build**: 30+ seconds (may fail due to network restrictions)
-
 ### Network Connectivity Issues
 
 In restricted 
environments, these commands may fail due to external dependency downloads: From 3f4ad5fb376f50c806b583e61690992a3c350294 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 5 Aug 2025 16:20:03 -0500 Subject: [PATCH 100/179] chore(scripts): redo cli audit script: Moved to new tooling repo. Removed package scripts for now. Script Gets commands from source code and grep docs for commands- replaces the CLI audit script- searches tagged repo branch source code for CLI commands- searches docs content the commands- allows including and excluding "categories" of docs paths to search --- helper-scripts/influxdb3-monolith/README.md | 373 ----- .../influxdb3-monolith/apply-cli-patches.js | 277 ---- .../audit-cli-documentation.js | 1214 ----------------- .../influxdb3-monolith/setup-auth-tokens.sh | 164 --- package.json | 7 +- 5 files changed, 1 insertion(+), 2034 deletions(-) delete mode 100644 helper-scripts/influxdb3-monolith/README.md delete mode 100755 helper-scripts/influxdb3-monolith/apply-cli-patches.js delete mode 100755 helper-scripts/influxdb3-monolith/audit-cli-documentation.js delete mode 100644 helper-scripts/influxdb3-monolith/setup-auth-tokens.sh diff --git a/helper-scripts/influxdb3-monolith/README.md b/helper-scripts/influxdb3-monolith/README.md deleted file mode 100644 index 34ce14c4c..000000000 --- a/helper-scripts/influxdb3-monolith/README.md +++ /dev/null @@ -1,373 +0,0 @@ -# InfluxDB 3 Monolith (Core and Enterprise) Helper Scripts - -This directory contains helper scripts specifically for InfluxDB 3 Core and Enterprise (monolith deployments), as opposed to distributed/clustered deployments. - -## Overview - -These scripts help with documentation workflows for InfluxDB 3 Core and Enterprise, including CLI change detection, authentication setup, API analysis, and release preparation. 
- -## Prerequisites - -- **Docker and Docker Compose**: For running InfluxDB 3 containers -- **Node.js 16+**: For running JavaScript ESM scripts -- **Active containers**: InfluxDB 3 Core and/or Enterprise containers running via `docker compose` -- **Secret files**: Docker Compose secrets for auth tokens (`~/.env.influxdb3-core-admin-token` and `~/.env.influxdb3-enterprise-admin-token`) - -## Scripts - -### 🔐 Authentication & Setup - -#### `setup-auth-tokens.sh` -Creates and configures authentication tokens for InfluxDB 3 containers. - -**Usage:** -```bash -./setup-auth-tokens.sh [core|enterprise|both] -``` - -**What it does:** -- Checks existing tokens in secret files (`~/.env.influxdb3-core-admin-token` and `~/.env.influxdb3-enterprise-admin-token`) -- Starts containers if not running -- Creates admin tokens using `influxdb3 create token --admin` -- Updates appropriate secret files with new tokens -- Tests tokens to ensure they work - -**Example:** -```bash -# Set up both Core and Enterprise tokens -./setup-auth-tokens.sh both - -# Set up only Enterprise -./setup-auth-tokens.sh enterprise -``` - -### 🔍 CLI Documentation Audit - -#### `audit-cli-documentation.js` -JavaScript ESM script that audits InfluxDB 3 CLI commands against existing documentation to identify missing or outdated content. 
- -**Usage:** -```bash -node audit-cli-documentation.js [core|enterprise|both] [version|local] -``` - -**Features:** -- Compares actual CLI help output with documented commands -- Identifies missing documentation for new CLI options -- Finds documented options that no longer exist in the CLI -- Supports both released versions and local containers -- Generates detailed audit reports with recommendations -- Handles authentication automatically using Docker secrets - -**Examples:** -```bash -# Audit Core documentation against local container -node audit-cli-documentation.js core local - -# Audit Enterprise documentation against specific version -node audit-cli-documentation.js enterprise v3.2.0 - -# Audit both products against local containers -node audit-cli-documentation.js both local -``` - -**Output:** -- `../output/cli-audit/documentation-audit-{product}-{version}.md` - Detailed audit report -- `../output/cli-audit/parsed-cli-{product}-{version}.md` - Parsed CLI structure -- `../output/cli-audit/patches/{product}/` - Generated patches for missing documentation - -### 🛠️ CLI Documentation Updates - -#### `apply-cli-patches.js` -JavaScript ESM script that applies generated patches to update CLI documentation with missing options. 
- -**Usage:** -```bash -node apply-cli-patches.js [core|enterprise|both] [--dry-run] -``` - -**Features:** -- Applies patches generated by `audit-cli-documentation.js` -- Updates CLI reference documentation with missing options -- Supports dry-run mode to preview changes -- Maintains existing documentation structure and formatting -- Creates backups before applying changes - -**Examples:** -```bash -# Preview changes without applying (dry run) -node apply-cli-patches.js core --dry-run - -# Apply patches to Enterprise documentation -node apply-cli-patches.js enterprise - -# Apply patches to both products -node apply-cli-patches.js both -``` - -**Output:** -- Updates CLI reference documentation files in place -- Creates backup files with `.backup` extension -- Logs all changes made to the documentation - -## Quick Start Guide - -### 1. Initial Setup - -```bash -# Navigate to the monolith scripts directory -cd helper-scripts/influxdb3-monolith - -# Make scripts executable -chmod +x *.sh - -# Set up authentication for both products -./setup-auth-tokens.sh both - -# Restart containers to load new secrets -docker compose down && docker compose up -d influxdb3-core influxdb3-enterprise -``` - -### 2. CLI Documentation Audit - -```bash -# Start your containers -docker compose up -d influxdb3-core influxdb3-enterprise - -# Audit CLI documentation -node audit-cli-documentation.js core local -node audit-cli-documentation.js enterprise local - -# Review the output -ls ../output/cli-audit/ -``` - -### 3. Development Workflow - -```bash -# Audit documentation for both products -node audit-cli-documentation.js both local - -# Check the audit results -cat ../output/cli-audit/documentation-audit-core-local.md -cat ../output/cli-audit/documentation-audit-enterprise-local.md - -# Apply patches if needed (dry run first) -node apply-cli-patches.js both --dry-run -``` - -### 4. 
Release Documentation Updates - -For release documentation, use the audit and patch workflow: - -```bash -# Audit against released version -node audit-cli-documentation.js enterprise v3.2.0 - -# Review missing documentation -cat ../output/cli-audit/documentation-audit-enterprise-v3.2.0.md - -# Apply patches to update documentation -node apply-cli-patches.js enterprise - -# Verify changes look correct -git diff content/influxdb3/enterprise/reference/cli/ -``` - -## Container Integration - -The scripts work with your Docker Compose setup: - -**Expected container names:** -- `influxdb3-core` (port 8282) -- `influxdb3-enterprise` (port 8181) - -**Docker Compose secrets:** -- `influxdb3-core-admin-token` - Admin token for Core (stored in `~/.env.influxdb3-core-admin-token`) -- `influxdb3-enterprise-admin-token` - Admin token for Enterprise (stored in `~/.env.influxdb3-enterprise-admin-token`) -- `INFLUXDB3_LICENSE_EMAIL` - Enterprise license email (set in `.env.3ent` env_file) - -## Use Cases - -### 📋 Release Documentation - -1. **Pre-release audit:** - ```bash - node audit-cli-documentation.js core v3.2.0 - ``` - -2. **Review audit results and update documentation** -3. **Apply patches for missing content** -4. **Test documented commands work correctly** - -### 🔬 Development Testing - -1. **Audit local development:** - ```bash - node audit-cli-documentation.js enterprise local - ``` - -2. **Verify new features are documented** -3. **Test authentication setup** -4. **Apply patches to keep docs current** - -### 🚀 Release Preparation - -1. **Final audit before release:** - ```bash - node audit-cli-documentation.js both local - ``` - -2. **Apply all pending patches** -3. **Update examples and tutorials** -4. 
**Verify all CLI commands work as documented** - -## Output Structure - -``` -helper-scripts/ -├── output/ -│ └── cli-audit/ -│ ├── documentation-audit-core-local.md # CLI documentation audit report -│ ├── documentation-audit-enterprise-v3.2.0.md # CLI documentation audit report -│ ├── parsed-cli-core-local.md # Parsed CLI structure -│ ├── parsed-cli-enterprise-v3.2.0.md # Parsed CLI structure -│ └── patches/ -│ ├── core/ # Generated patches for Core -│ │ ├── influxdb3-cli-patch-001.md -│ │ └── influxdb3-cli-patch-002.md -│ └── enterprise/ # Generated patches for Enterprise -│ ├── influxdb3-cli-patch-001.md -│ └── influxdb3-cli-patch-002.md -└── influxdb3-monolith/ - ├── README.md # This file - ├── setup-auth-tokens.sh # Auth setup - ├── audit-cli-documentation.js # CLI documentation audit - └── apply-cli-patches.js # CLI documentation patches -``` - -## Error Handling - -### Common Issues - -**Container not running:** -```bash -# Check status -docker compose ps - -# Start specific service -docker compose up -d influxdb3-core -``` - -**Authentication failures:** -```bash -# Recreate tokens -./setup-auth-tokens.sh both - -# Test manually -docker exec influxdb3-core influxdb3 create token --admin -``` - -**Version not found:** -```bash -# Check available versions -docker pull influxdb:3-core:3.2.0 -docker pull influxdb:3-enterprise:3.2.0 -``` - -### Debug Mode - -Enable debug output for troubleshooting: -```bash -DEBUG=1 node audit-cli-documentation.js core local -``` - -## Integration with CI/CD - -### GitHub Actions Example - -```yaml -- name: Audit CLI Documentation - run: | - cd helper-scripts/influxdb3-monolith - node audit-cli-documentation.js core ${{ env.VERSION }} - -- name: Upload CLI Audit Results - uses: actions/upload-artifact@v3 - with: - name: cli-audit - path: helper-scripts/output/cli-audit/ -``` - -### CircleCI Example - -```yaml -- run: - name: CLI Documentation Audit - command: | - cd helper-scripts/influxdb3-monolith - node 
audit-cli-documentation.js enterprise v3.2.0 - -- store_artifacts: - path: helper-scripts/output/cli-audit/ -``` - -## Best Practices - -### 🔒 Security -- Secret files (`~/.env.influxdb3-*-admin-token`) are stored in your home directory and not in version control -- Rotate auth tokens regularly by re-running `setup-auth-tokens.sh` -- Use minimal token permissions when possible - -### 📚 Documentation -- Run audits early in release cycle -- Review all audit reports for missing content -- Apply patches to keep documentation current -- Test all documented commands work correctly - -### 🔄 Workflow -- Use `local` version for development testing -- Audit against released versions for release prep -- Generate patches before documentation updates -- Validate changes with stakeholders - -## Troubleshooting - -### Script Permissions -```bash -chmod +x *.sh -``` - -### Missing Dependencies -```bash -# Node.js dependencies -node --version # Should be 16 or higher - -# Docker Compose -docker compose version -``` - -### Container Health -```bash -# Check container logs -docker logs influxdb3-core -docker logs influxdb3-enterprise - -# Test basic connectivity -docker exec influxdb3-core influxdb3 --version -``` - -## Contributing - -When adding new scripts to this directory: - -1. **Follow naming conventions**: Use lowercase with hyphens -2. **Add usage documentation**: Include help text in scripts -3. **Handle errors gracefully**: Use proper exit codes -4. **Test with both products**: Ensure Core and Enterprise compatibility -5. 
**Update this README**: Document new functionality - -## Related Documentation - -- [InfluxDB 3 Core CLI Reference](/influxdb3/core/reference/cli/) -- [InfluxDB 3 Enterprise CLI Reference](/influxdb3/enterprise/reference/cli/) diff --git a/helper-scripts/influxdb3-monolith/apply-cli-patches.js b/helper-scripts/influxdb3-monolith/apply-cli-patches.js deleted file mode 100755 index 07c2f7d71..000000000 --- a/helper-scripts/influxdb3-monolith/apply-cli-patches.js +++ /dev/null @@ -1,277 +0,0 @@ -#!/usr/bin/env node - -/** - * Apply CLI documentation patches generated by audit-cli-documentation.js - * Usage: node apply-cli-patches.js [core|enterprise|both] [--dry-run] - */ - -import { promises as fs } from 'fs'; -import { join, dirname } from 'path'; -import { fileURLToPath } from 'url'; -import { process } from 'node:process'; - -const __filename = fileURLToPath(import.meta.url); -const __dirname = dirname(__filename); - -// Color codes -const Colors = { - RED: '\x1b[0;31m', - GREEN: '\x1b[0;32m', - YELLOW: '\x1b[1;33m', - BLUE: '\x1b[0;34m', - NC: '\x1b[0m', // No Color -}; - -async function fileExists(path) { - try { - await fs.access(path); - return true; - } catch { - return false; - } -} - -async function ensureDir(dir) { - await fs.mkdir(dir, { recursive: true }); -} - -async function extractFrontmatter(content) { - const lines = content.split('\n'); - if (lines[0] !== '---') return { frontmatter: null, content }; - - const frontmatterLines = []; - let i = 1; - while (i < lines.length && lines[i] !== '---') { - frontmatterLines.push(lines[i]); - i++; - } - - if (i >= lines.length) return { frontmatter: null, content }; - - const frontmatterText = frontmatterLines.join('\n'); - const remainingContent = lines.slice(i + 1).join('\n'); - - return { frontmatter: frontmatterText, content: remainingContent }; -} - -async function getActualDocumentationPath(docPath, projectRoot) { - // Check if the documentation file exists and has a source field - const fullPath = 
join(projectRoot, docPath); - - if (await fileExists(fullPath)) { - const content = await fs.readFile(fullPath, 'utf8'); - const { frontmatter } = await extractFrontmatter(content); - - if (frontmatter) { - // Look for source: field in frontmatter - const sourceMatch = frontmatter.match(/^source:\s*(.+)$/m); - if (sourceMatch) { - const sourcePath = sourceMatch[1].trim(); - return sourcePath; - } - } - } - - return docPath; -} - -async function applyPatches(product, dryRun = false) { - const patchDir = join( - dirname(__dirname), - 'output', - 'cli-audit', - 'patches', - product - ); - const projectRoot = join(__dirname, '..', '..'); - - console.log( - `${Colors.BLUE}📋 Applying CLI documentation patches for ${product}${Colors.NC}` - ); - if (dryRun) { - console.log( - `${Colors.YELLOW}🔍 DRY RUN - No files will be created${Colors.NC}` - ); - } - console.log(); - - // Check if patch directory exists - if (!(await fileExists(patchDir))) { - console.log(`${Colors.YELLOW}No patches found for ${product}.${Colors.NC}`); - console.log("Run 'yarn audit:cli' first to generate patches."); - return; - } - - // Read all patch files - const patchFiles = await fs.readdir(patchDir); - const mdFiles = patchFiles.filter((f) => f.endsWith('.md')); - - if (mdFiles.length === 0) { - console.log( - `${Colors.YELLOW}No patch files found in ${patchDir}${Colors.NC}` - ); - return; - } - - console.log(`Found ${mdFiles.length} patch file(s) to apply:\n`); - - // Map patch files to their destination - const baseCliPath = `content/influxdb3/${product}/reference/cli/influxdb3`; - const commandToFile = { - 'create-database.md': `${baseCliPath}/create/database.md`, - 'create-token.md': `${baseCliPath}/create/token/_index.md`, - 'create-token-admin.md': `${baseCliPath}/create/token/admin.md`, - 'create-trigger.md': `${baseCliPath}/create/trigger.md`, - 'create-table.md': `${baseCliPath}/create/table.md`, - 'create-last_cache.md': `${baseCliPath}/create/last_cache.md`, - 'create-distinct_cache.md': 
`${baseCliPath}/create/distinct_cache.md`, - 'show-databases.md': `${baseCliPath}/show/databases.md`, - 'show-tokens.md': `${baseCliPath}/show/tokens.md`, - 'delete-database.md': `${baseCliPath}/delete/database.md`, - 'delete-table.md': `${baseCliPath}/delete/table.md`, - 'query.md': `${baseCliPath}/query.md`, - 'write.md': `${baseCliPath}/write.md`, - }; - - let applied = 0; - let skipped = 0; - - for (const patchFile of mdFiles) { - const destinationPath = commandToFile[patchFile]; - - if (!destinationPath) { - console.log( - `${Colors.YELLOW}⚠️ Unknown patch file: ${patchFile}${Colors.NC}` - ); - continue; - } - - // Get the actual documentation path (handles source: frontmatter) - const actualPath = await getActualDocumentationPath( - destinationPath, - projectRoot - ); - const fullDestPath = join(projectRoot, actualPath); - const patchPath = join(patchDir, patchFile); - - // Check if destination already exists - if (await fileExists(fullDestPath)) { - console.log( - `${Colors.YELLOW}⏭️ Skipping${Colors.NC} ${patchFile} - destination already exists:` - ); - console.log(` ${actualPath}`); - skipped++; - continue; - } - - if (dryRun) { - console.log(`${Colors.BLUE}🔍 Would create${Colors.NC} ${actualPath}`); - console.log(` from patch: ${patchFile}`); - if (actualPath !== destinationPath) { - console.log(` (resolved from: ${destinationPath})`); - } - applied++; - } else { - try { - // Ensure destination directory exists - await ensureDir(dirname(fullDestPath)); - - // Copy patch to destination - const content = await fs.readFile(patchPath, 'utf8'); - - // Update the menu configuration based on product - let updatedContent = content; - if (product === 'enterprise') { - updatedContent = content - .replace('influxdb3/core/tags:', 'influxdb3/enterprise/tags:') - .replace( - 'influxdb3_core_reference:', - 'influxdb3_enterprise_reference:' - ); - } - - await fs.writeFile(fullDestPath, updatedContent); - - console.log(`${Colors.GREEN}✅ Created${Colors.NC} 
${actualPath}`); - console.log(` from patch: ${patchFile}`); - if (actualPath !== destinationPath) { - console.log(` (resolved from: ${destinationPath})`); - } - applied++; - } catch (error) { - console.log( - `${Colors.RED}❌ Error${Colors.NC} creating ${actualPath}:` - ); - console.log(` ${error.message}`); - } - } - } - - console.log(); - console.log(`${Colors.BLUE}Summary:${Colors.NC}`); - console.log(`- Patches ${dryRun ? 'would be' : ''} applied: ${applied}`); - console.log(`- Files skipped (already exist): ${skipped}`); - console.log(`- Total patch files: ${mdFiles.length}`); - - if (!dryRun && applied > 0) { - console.log(); - console.log( - `${Colors.GREEN}✨ Success!${Colors.NC} Created ${applied} new ` + - 'documentation file(s).' - ); - console.log(); - console.log('Next steps:'); - console.log('1. Review the generated files and customize the content'); - console.log('2. Add proper examples with placeholders'); - console.log('3. Update descriptions and add any missing options'); - console.log('4. 
Run tests: yarn test:links'); - } -} - -async function main() { - const args = process.argv.slice(2); - const product = - args.find((arg) => ['core', 'enterprise', 'both'].includes(arg)) || 'both'; - const dryRun = args.includes('--dry-run'); - - if (args.includes('--help') || args.includes('-h')) { - console.log( - 'Usage: node apply-cli-patches.js [core|enterprise|both] [--dry-run]' - ); - console.log(); - console.log('Options:'); - console.log( - ' --dry-run Show what would be done without creating files' - ); - console.log(); - console.log('Examples:'); - console.log( - ' node apply-cli-patches.js # Apply patches for both products' - ); - console.log( - ' node apply-cli-patches.js core --dry-run # Preview core patches' - ); - console.log( - ' node apply-cli-patches.js enterprise # Apply enterprise patches' - ); - process.exit(0); - } - - try { - if (product === 'both') { - await applyPatches('core', dryRun); - console.log(); - await applyPatches('enterprise', dryRun); - } else { - await applyPatches(product, dryRun); - } - } catch (error) { - console.error(`${Colors.RED}Error:${Colors.NC}`, error.message); - process.exit(1); - } -} - -// Run if called directly -if (import.meta.url === `file://${process.argv[1]}`) { - main(); -} diff --git a/helper-scripts/influxdb3-monolith/audit-cli-documentation.js b/helper-scripts/influxdb3-monolith/audit-cli-documentation.js deleted file mode 100755 index ec22a453f..000000000 --- a/helper-scripts/influxdb3-monolith/audit-cli-documentation.js +++ /dev/null @@ -1,1214 +0,0 @@ -#!/usr/bin/env node - -/** - * Audit CLI documentation against current CLI help output - * Usage: node audit-cli-documentation.js [core|enterprise|both] [version] - * Example: node audit-cli-documentation.js core 3.2.0 - */ - -import { spawn } from 'child_process'; -import { promises as fs } from 'fs'; -import { homedir } from 'os'; -import { join, dirname } from 'path'; -import { fileURLToPath } from 'url'; -import { - validateVersionInputs, - 
getRepositoryRoot, -} from '../common/validate-tags.js'; - -const __filename = fileURLToPath(import.meta.url); -const __dirname = dirname(__filename); - -// Color codes -const Colors = { - RED: '\x1b[0;31m', - GREEN: '\x1b[0;32m', - YELLOW: '\x1b[1;33m', - BLUE: '\x1b[0;34m', - NC: '\x1b[0m', // No Color -}; - -class CLIDocAuditor { - constructor(product = 'both', version = 'local') { - this.product = product; - this.version = version; - this.outputDir = join(dirname(__dirname), 'output', 'cli-audit'); - - // Token paths - check environment variables first (Docker Compose), then fall back to local files - const coreTokenEnv = process.env.INFLUXDB3_CORE_TOKEN; - const enterpriseTokenEnv = process.env.INFLUXDB3_ENTERPRISE_TOKEN; - - if (coreTokenEnv && this.fileExists(coreTokenEnv)) { - // Running in Docker Compose with secrets - this.coreTokenFile = coreTokenEnv; - this.enterpriseTokenFile = enterpriseTokenEnv; - } else { - // Running locally - this.coreTokenFile = join(homedir(), '.env.influxdb3-core-admin-token'); - this.enterpriseTokenFile = join( - homedir(), - '.env.influxdb3-enterprise-admin-token' - ); - } - - // Dynamic command discovery - populated by discoverCommands() - this.discoveredCommands = new Map(); // command -> { subcommands: [], options: [] } - this.commandOptionsMap = {}; // For backward compatibility - } - - async fileExists(path) { - try { - await fs.access(path); - return true; - } catch { - return false; - } - } - - async ensureDir(dir) { - await fs.mkdir(dir, { recursive: true }); - } - - async loadTokens() { - let coreToken = null; - let enterpriseToken = null; - - try { - if (await this.fileExists(this.coreTokenFile)) { - const stat = await fs.stat(this.coreTokenFile); - if (stat.size > 0) { - coreToken = (await fs.readFile(this.coreTokenFile, 'utf8')).trim(); - } - } - } catch { - // Token file doesn't exist or can't be read - } - - try { - if (await this.fileExists(this.enterpriseTokenFile)) { - const stat = await 
fs.stat(this.enterpriseTokenFile); - if (stat.size > 0) { - enterpriseToken = ( - await fs.readFile(this.enterpriseTokenFile, 'utf8') - ).trim(); - } - } - } catch { - // Token file doesn't exist or can't be read - } - - return { coreToken, enterpriseToken }; - } - - runCommand(cmd, args = []) { - return new Promise((resolve) => { - const child = spawn(cmd, args, { encoding: 'utf8' }); - let stdout = ''; - let stderr = ''; - - child.stdout.on('data', (data) => { - stdout += data.toString(); - }); - - child.stderr.on('data', (data) => { - stderr += data.toString(); - }); - - child.on('close', (code) => { - resolve({ code, stdout, stderr }); - }); - - child.on('error', (err) => { - resolve({ code: 1, stdout: '', stderr: err.message }); - }); - }); - } - - async ensureContainerRunning(product) { - const containerName = `influxdb3-${product}`; - - // Check if container exists and is running - const { code, stdout } = await this.runCommand('docker', [ - 'compose', - 'ps', - '--format', - 'json', - containerName, - ]); - - if (code !== 0) { - console.log(`❌ Failed to check container status for ${containerName}`); - return false; - } - - const containers = stdout.trim().split('\n').filter((line) => line); - const isRunning = containers.some((line) => { - try { - const container = JSON.parse(line); - return container.Name === containerName && container.State === 'running'; - } catch { - return false; - } - }); - - if (!isRunning) { - console.log(`🚀 Starting ${containerName}...`); - const startResult = await this.runCommand('docker', [ - 'compose', - 'up', - '-d', - containerName, - ]); - - if (startResult.code !== 0) { - console.log(`❌ Failed to start ${containerName}`); - console.log(startResult.stderr); - return false; - } - - // Wait for container to be ready - console.log(`⏳ Waiting for ${containerName} to be ready...`); - await new Promise((resolve) => setTimeout(resolve, 5000)); - } - - return true; - } - - async discoverCommands(product) { - const containerName = 
`influxdb3-${product}`; - - // Ensure container is running - if (!(await this.ensureContainerRunning(product))) { - throw new Error(`Failed to start container ${containerName}`); - } - - // Get main help to discover top-level commands - const mainHelp = await this.runCommand('docker', [ - 'compose', - 'exec', - '-T', - containerName, - 'influxdb3', - '--help', - ]); - - if (mainHelp.code !== 0) { - console.error(`Failed to get main help. Exit code: ${mainHelp.code}`); - console.error(`Stdout: ${mainHelp.stdout}`); - console.error(`Stderr: ${mainHelp.stderr}`); - throw new Error(`Failed to get main help: ${mainHelp.stderr}`); - } - - // Parse main commands from help output - const mainCommands = this.parseCommandsFromHelp(mainHelp.stdout); - - // Also add the root command first - this.discoveredCommands.set('influxdb3', { - subcommands: mainCommands, - options: this.parseOptionsFromHelp(mainHelp.stdout), - helpText: mainHelp.stdout, - }); - - // For backward compatibility - this.commandOptionsMap['influxdb3'] = this.parseOptionsFromHelp(mainHelp.stdout); - - // Discover subcommands and options for each main command - for (const command of mainCommands) { - await this.discoverSubcommands(containerName, command, [command]); - } - } - - parseCommandsFromHelp(helpText) { - const commands = []; - // Strip ANSI color codes first - // eslint-disable-next-line no-control-regex - const cleanHelpText = helpText.replace(/\x1b\[[0-9;]*m/g, ''); - const lines = cleanHelpText.split('\n'); - let inCommandsSection = false; - - for (const line of lines) { - const trimmed = line.trim(); - - // Look for any Commands section - if (trimmed.includes('Commands:') || trimmed === 'Resource Management:' || - trimmed === 'System Management:') { - inCommandsSection = true; - continue; - } - - // Stop at next section (but don't stop on management sections) - if (inCommandsSection && /^[A-Z][a-z]+:$/.test(trimmed) && - !trimmed.includes('Commands:') && - trimmed !== 'Resource Management:' && - 
trimmed !== 'System Management:') { - break; - } - - // Parse command lines (typically indented with command name) - if (inCommandsSection && /^\s+[a-z]/.test(line)) { - const match = line.match(/^\s+([a-z][a-z0-9_-]*)/); - if (match) { - commands.push(match[1]); - } - } - } - - return commands; - } - - parseOptionsFromHelp(helpText) { - const options = []; - const lines = helpText.split('\n'); - let inOptionsSection = false; - - for (const line of lines) { - const trimmed = line.trim(); - - // Look for Options: section - if (trimmed === 'Options:') { - inOptionsSection = true; - continue; - } - - // Stop at next section - if (inOptionsSection && /^[A-Z][a-z]+:$/.test(trimmed)) { - break; - } - - // Parse option lines - if (inOptionsSection && /^\s*-/.test(line)) { - const optionMatch = line.match(/--([a-z][a-z0-9-]*)/); - if (optionMatch) { - options.push(`--${optionMatch[1]}`); - } - } - } - - return options; - } - - async discoverSubcommands(containerName, commandPath, commandParts) { - // Get help for this command - - // First try with --help - let helpResult = await this.runCommand('docker', [ - 'compose', - 'exec', - '-T', - containerName, - 'influxdb3', - ...commandParts, - '--help', - ]); - - // If --help returns main help or fails, try without --help - if (helpResult.code !== 0 || helpResult.stdout.includes('InfluxDB 3 Core Server and Command Line Tools')) { - helpResult = await this.runCommand('docker', [ - 'compose', - 'exec', - '-T', - containerName, - 'influxdb3', - ...commandParts, - ]); - } - - if (helpResult.code !== 0) { - // Check if stderr contains useful help information - if (helpResult.stderr && helpResult.stderr.includes('Usage:') && helpResult.stderr.includes('Commands:')) { - // Use stderr as the help text since it contains the command usage info - helpResult = { code: 0, stdout: helpResult.stderr, stderr: '' }; - } else { - // Command might not exist or might not have subcommands - return; - } - } - - // If the result is still the main 
help, skip this command - if (helpResult.stdout.includes('InfluxDB 3 Core Server and Command Line Tools')) { - return; - } - - const helpText = helpResult.stdout; - const subcommands = this.parseCommandsFromHelp(helpText); - const options = this.parseOptionsFromHelp(helpText); - - // Store the command info - const fullCommand = `influxdb3 ${commandParts.join(' ')}`; - this.discoveredCommands.set(fullCommand, { - subcommands, - options, - helpText, - }); - - // For backward compatibility - this.commandOptionsMap[fullCommand] = options; - - // Recursively discover subcommands (but limit depth) - if (subcommands.length > 0 && commandParts.length < 3) { - for (const subcommand of subcommands) { - await this.discoverSubcommands( - containerName, - `${commandPath} ${subcommand}`, - [...commandParts, subcommand] - ); - } - } - } - - async extractCurrentCLI(product, outputFile) { - process.stdout.write( - `Extracting current CLI help from influxdb3-${product}...` - ); - - await this.loadTokens(); - - if (this.version === 'local') { - const containerName = `influxdb3-${product}`; - - // Ensure container is running and discover commands - if (!(await this.ensureContainerRunning(product))) { - console.log(` ${Colors.RED}✗${Colors.NC}`); - return false; - } - - // Discover all commands dynamically - await this.discoverCommands(product); - - // Generate comprehensive help output - let fileContent = `===== influxdb3 --help =====\n`; - - // Add root command help first - const rootCommand = this.discoveredCommands.get('influxdb3'); - if (rootCommand) { - fileContent += rootCommand.helpText; - } - - // Add all other discovered command help - for (const [command, info] of this.discoveredCommands) { - if (command !== 'influxdb3') { - fileContent += `\n\n===== ${command} --help =====\n`; - fileContent += info.helpText; - } - } - - await fs.writeFile(outputFile, fileContent); - console.log(` ${Colors.GREEN}✓${Colors.NC}`); - } else { - // Use specific version image - const image = 
`influxdb:${this.version}-${product}`; - - process.stdout.write(`Extracting CLI help from ${image}...`); - - // Pull image if needed - const pullResult = await this.runCommand('docker', ['pull', image]); - if (pullResult.code !== 0) { - console.log(` ${Colors.RED}✗${Colors.NC}`); - console.log(`Error: Failed to pull image ${image}`); - return false; - } - - // For version-specific images, we'll use a simpler approach - // since we can't easily discover commands without a running container - let fileContent = ''; - - // Main help - const mainHelp = await this.runCommand('docker', [ - 'run', - '--rm', - image, - 'influxdb3', - '--help', - ]); - fileContent += mainHelp.code === 0 ? mainHelp.stdout : mainHelp.stderr; - - // Parse main commands and get their help - const mainCommands = this.parseCommandsFromHelp( - mainHelp.code === 0 ? mainHelp.stdout : mainHelp.stderr - ); - - for (const cmd of mainCommands) { - fileContent += `\n\n===== influxdb3 ${cmd} --help =====\n`; - const cmdHelp = await this.runCommand('docker', [ - 'run', - '--rm', - image, - 'influxdb3', - cmd, - '--help', - ]); - fileContent += cmdHelp.code === 0 ? cmdHelp.stdout : cmdHelp.stderr; - - // Try to get subcommands - const subcommands = this.parseCommandsFromHelp( - cmdHelp.code === 0 ? cmdHelp.stdout : cmdHelp.stderr - ); - - for (const subcmd of subcommands) { - fileContent += `\n\n===== influxdb3 ${cmd} ${subcmd} --help =====\n`; - const subcmdHelp = await this.runCommand('docker', [ - 'run', - '--rm', - image, - 'influxdb3', - cmd, - subcmd, - '--help', - ]); - fileContent += subcmdHelp.code === 0 ? 
subcmdHelp.stdout : subcmdHelp.stderr; - } - } - - await fs.writeFile(outputFile, fileContent); - console.log(` ${Colors.GREEN}✓${Colors.NC}`); - } - - return true; - } - - async parseCLIHelp(helpFile, parsedFile) { - const content = await fs.readFile(helpFile, 'utf8'); - const lines = content.split('\n'); - - let output = '# CLI Commands and Options\n\n'; - let currentCommand = ''; - let inOptions = false; - - for (const line of lines) { - // Detect command headers - if (line.startsWith('===== influxdb3') && line.endsWith('--help =====')) { - currentCommand = line - .replace('===== ', '') - .replace(' --help =====', '') - .trim(); - output += `## ${currentCommand}\n\n`; - inOptions = false; - // Initialize options list for this command if not exists - if (!this.commandOptionsMap[currentCommand]) { - this.commandOptionsMap[currentCommand] = []; - } - } - // Detect options sections - else if (line.trim() === 'Options:') { - output += '### Options:\n\n'; - inOptions = true; - } - // Parse option lines - else if (inOptions && /^\s*-/.test(line)) { - // Extract option and description - const optionMatch = line.match(/--[a-z][a-z0-9-]*/); - const shortMatch = line.match(/\s-[a-zA-Z],/); - - if (optionMatch) { - const option = optionMatch[0]; - const shortOption = shortMatch - ? 
shortMatch[0].replace(/[,\s]/g, '') - : null; - - // Extract description by removing option parts - let description = line.replace(/^\s*-[^\s]*\s*/, ''); - description = description.replace(/^\s*--[^\s]*\s*/, '').trim(); - - if (shortOption) { - output += `- \`${shortOption}, ${option}\`: ${description}\n`; - } else { - output += `- \`${option}\`: ${description}\n`; - } - - // Store option with its command context - if (currentCommand && option) { - this.commandOptionsMap[currentCommand].push(option); - } - } - } - // Reset options flag for new sections - else if (/^[A-Z][a-z]+:$/.test(line.trim())) { - inOptions = false; - } - } - - await fs.writeFile(parsedFile, output); - } - - findDocsPath(product) { - if (product === 'core') { - return 'content/influxdb3/core/reference/cli/influxdb3'; - } else if (product === 'enterprise') { - return 'content/influxdb3/enterprise/reference/cli/influxdb3'; - } - return ''; - } - - async extractCommandHelp(content, command) { - // Find the section for this specific command in the CLI help - const lines = content.split('\n'); - let inCommand = false; - let helpText = []; - const commandHeader = `===== influxdb3 ${command} --help`; - - for (let i = 0; i < lines.length; i++) { - if (lines[i] === commandHeader) { - inCommand = true; - continue; - } - if (inCommand && lines[i].startsWith('===== influxdb3')) { - break; - } - if (inCommand) { - helpText.push(lines[i]); - } - } - - return helpText.join('\n').trim(); - } - - async generateDocumentationTemplate(command, helpText, product) { - // Parse the help text to extract description and options - const lines = helpText.split('\n'); - let description = ''; - let usage = ''; - let options = []; - let inOptions = false; - - for (let i = 0; i < lines.length; i++) { - const line = lines[i]; - - if (i === 0 && !line.startsWith('Usage:') && line.trim()) { - description = line.trim(); - } - if (line.startsWith('Usage:')) { - usage = line.replace('Usage:', '').trim(); - } - if (line.trim() 
=== 'Options:') { - inOptions = true; - continue; - } - if (inOptions && /^\s*-/.test(line)) { - const optionMatch = line.match(/--([a-z][a-z0-9-]*)/); - const shortMatch = line.match(/\s-([a-zA-Z]),/); - if (optionMatch) { - const optionName = optionMatch[1]; - const shortOption = shortMatch ? shortMatch[1] : null; - let optionDesc = line - .replace(/^\s*-[^\s]*\s*/, '') - .replace(/^\s*--[^\s]*\s*/, '') - .trim(); - - options.push({ - name: optionName, - short: shortOption, - description: optionDesc, - }); - } - } - } - - // Generate product-specific frontmatter - const productTag = product === 'enterprise' ? 'influxdb3/enterprise' : 'influxdb3/core'; - const menuRef = product === 'enterprise' ? 'influxdb3_enterprise_reference' : 'influxdb3_core_reference'; - - // Generate markdown template - let template = `--- -title: influxdb3 ${command} -description: > - The \`influxdb3 ${command}\` command ${description.toLowerCase()}. -${productTag}/tags: [cli] -menu: - ${menuRef}: - parent: influxdb3 cli -weight: 201 ---- - -# influxdb3 ${command} - -${description} - -## Usage - -\`\`\`bash -${usage || `influxdb3 ${command} [OPTIONS]`} -\`\`\` - -`; - - if (options.length > 0) { - template += `## Options - -| Option | Description | -|--------|-------------| -`; - - for (const opt of options) { - const optionDisplay = opt.short - ? 
`\`-${opt.short}\`, \`--${opt.name}\`` - : `\`--${opt.name}\``; - template += `| ${optionDisplay} | ${opt.description} |\n`; - } - } - - template += ` -## Examples - -### Example 1: Basic usage - -{{% code-placeholders "PLACEHOLDER1|PLACEHOLDER2" %}} -\`\`\`bash -influxdb3 ${command} --example PLACEHOLDER1 -\`\`\` -{{% /code-placeholders %}} - -Replace the following: - -- {{% code-placeholder-key %}}\`PLACEHOLDER1\`{{% /code-placeholder-key %}}: Description of placeholder -`; - - return template; - } - - async extractFrontmatter(content) { - const lines = content.split('\n'); - if (lines[0] !== '---') return { frontmatter: null, content }; - - const frontmatterLines = []; - let i = 1; - while (i < lines.length && lines[i] !== '---') { - frontmatterLines.push(lines[i]); - i++; - } - - if (i >= lines.length) return { frontmatter: null, content }; - - const frontmatterText = frontmatterLines.join('\n'); - const remainingContent = lines.slice(i + 1).join('\n'); - - return { frontmatter: frontmatterText, content: remainingContent }; - } - - async getActualContentPath(filePath) { - // Get the actual content path, resolving source fields - try { - const content = await fs.readFile(filePath, 'utf8'); - const { frontmatter } = await this.extractFrontmatter(content); - - if (frontmatter) { - const sourceMatch = frontmatter.match(/^source:\s*(.+)$/m); - if (sourceMatch) { - let sourcePath = sourceMatch[1].trim(); - // Handle relative paths from project root - if (sourcePath.startsWith('/shared/')) { - sourcePath = `content${sourcePath}`; - } - return sourcePath; - } - } - return null; // No source field found - } catch { - return null; - } - } - - async parseDocumentedOptions(filePath) { - // Parse a documentation file to extract all documented options - try { - const content = await fs.readFile(filePath, 'utf8'); - const options = []; - - // Look for options in various patterns: - // 1. Markdown tables with option columns - // 2. Option lists with backticks - // 3. 
Code examples with --option flags - - // Pattern 1: Markdown tables (| Option | Description |) - const tableMatches = content.match(/\|\s*`?--[a-z][a-z0-9-]*`?\s*\|/gi); - if (tableMatches) { - for (const match of tableMatches) { - const option = match.match(/--[a-z][a-z0-9-]*/i); - if (option) { - options.push(option[0]); - } - } - } - - // Pattern 2: Backtick-enclosed options in text - const backtickMatches = content.match(/`--[a-z][a-z0-9-]*`/gi); - if (backtickMatches) { - for (const match of backtickMatches) { - const option = match.replace(/`/g, ''); - options.push(option); - } - } - - // Pattern 3: Options in code blocks - const codeBlockMatches = content.match(/```[\s\S]*?```/g); - if (codeBlockMatches) { - for (const block of codeBlockMatches) { - const blockOptions = block.match(/--[a-z][a-z0-9-]*/gi); - if (blockOptions) { - options.push(...blockOptions); - } - } - } - - // Pattern 4: Environment variable mappings (INFLUXDB3_* to --option) - const envMatches = content.match( - /\|\s*`INFLUXDB3_[^`]*`\s*\|\s*`--[a-z][a-z0-9-]*`\s*\|/gi - ); - if (envMatches) { - for (const match of envMatches) { - const option = match.match(/--[a-z][a-z0-9-]*/); - if (option) { - options.push(option[0]); - } - } - } - - // Remove duplicates and return sorted - return [...new Set(options)].sort(); - } catch { - return []; - } - } - - async auditDocs(product, cliFile, auditFile) { - const docsPath = this.findDocsPath(product); - const sharedPath = 'content/shared/influxdb3-cli'; - const patchDir = join(this.outputDir, 'patches', product); - await this.ensureDir(patchDir); - - let output = `# CLI Documentation Audit - ${product}\n`; - output += `Generated: ${new Date().toISOString()}\n\n`; - - // GitHub base URL for edit links - const githubBase = 'https://github.com/influxdata/docs-v2/edit/master'; - const githubNewBase = 'https://github.com/influxdata/docs-v2/new/master'; - - // VSCode links for local editing - const vscodeBase = 'vscode://file'; - const projectRoot = 
join(__dirname, '..', '..'); - - // Check for missing documentation - output += '## Missing Documentation\n\n'; - - let missingCount = 0; - const missingDocs = []; - - // Build command to file mapping dynamically from discovered commands - const commandToFile = this.buildCommandToFileMapping(); - - // Extract commands from CLI help - const content = await fs.readFile(cliFile, 'utf8'); - const lines = content.split('\n'); - - for (const line of lines) { - if (line.startsWith('===== influxdb3') && line.endsWith('--help =====')) { - const command = line - .replace('===== influxdb3 ', '') - .replace(' --help =====', ''); - - if (commandToFile[command]) { - const expectedFile = commandToFile[command]; - const productFile = join(docsPath, expectedFile); - const sharedFile = join(sharedPath, expectedFile); - - const productExists = await this.fileExists(productFile); - const sharedExists = await this.fileExists(sharedFile); - - let needsContent = false; - let targetPath = null; - let stubPath = null; - - if (!productExists && !sharedExists) { - // Completely missing - needsContent = true; - targetPath = productFile; - } else if (productExists) { - // Check if it has a source field pointing to missing content - const actualPath = await this.getActualContentPath(productFile); - if (actualPath && !(await this.fileExists(actualPath))) { - needsContent = true; - targetPath = actualPath; - stubPath = productFile; - } - } else if (sharedExists) { - // Shared file exists, check if it has content - const actualPath = await this.getActualContentPath(sharedFile); - if (actualPath && !(await this.fileExists(actualPath))) { - needsContent = true; - targetPath = actualPath; - stubPath = sharedFile; - } - } - - if (needsContent && targetPath) { - const githubNewUrl = `${githubNewBase}/${targetPath}`; - const localPath = join(projectRoot, targetPath); - - output += `- **Missing**: Documentation for \`influxdb3 ${command}\`\n`; - if (stubPath) { - output += ` - Stub exists at: 
\`${stubPath}\`\n`; - output += ` - Content needed at: \`${targetPath}\`\n`; - } else { - output += ` - Expected: \`${targetPath}\` or \`${sharedFile}\`\n`; - } - output += ` - [Create on GitHub](${githubNewUrl})\n`; - output += ` - Local: \`${localPath}\`\n`; - - // Generate documentation template - const helpText = await this.extractCommandHelp(content, command); - const docTemplate = await this.generateDocumentationTemplate( - command, - helpText, - product - ); - - // Save patch file - const patchFileName = `${command.replace(/ /g, '-')}.md`; - const patchFile = join(patchDir, patchFileName); - await fs.writeFile(patchFile, docTemplate); - - output += ` - **Template generated**: \`${patchFile}\`\n`; - - missingDocs.push({ command, file: targetPath, patchFile }); - missingCount++; - } - } - } - } - - if (missingCount === 0) { - output += 'No missing documentation files detected.\n'; - } else { - output += '\n### Quick Actions\n\n'; - output += - 'Copy and paste these commands to create missing documentation:\n\n'; - output += '```bash\n'; - for (const doc of missingDocs) { - const relativePatch = join( - 'helper-scripts/output/cli-audit/patches', - product, - `${doc.command.replace(/ /g, '-')}.md` - ); - output += `# Create ${doc.command} documentation\n`; - output += `mkdir -p $(dirname ${doc.file})\n`; - output += `cp ${relativePatch} ${doc.file}\n\n`; - } - output += '```\n'; - } - - output += '\n'; - - // Check for outdated options in existing docs - output += '## Existing Documentation Review\n\n'; - - // Parse CLI help first to populate commandOptionsMap - const parsedFile = join( - this.outputDir, - `parsed-cli-${product}-${this.version}.md` - ); - await this.parseCLIHelp(cliFile, parsedFile); - - // For each command, check if documentation exists and compare content - const existingDocs = []; - for (const [command, expectedFile] of Object.entries(commandToFile)) { - const productFile = join(docsPath, expectedFile); - const sharedFile = join(sharedPath, 
expectedFile); - - let docFile = null; - let actualContentFile = null; - - // Find the documentation file - if (await this.fileExists(productFile)) { - docFile = productFile; - // Check if it's a stub with source field - const actualPath = await this.getActualContentPath(productFile); - actualContentFile = actualPath - ? join(projectRoot, actualPath) - : join(projectRoot, productFile); - } else if (await this.fileExists(sharedFile)) { - docFile = sharedFile; - actualContentFile = join(projectRoot, sharedFile); - } - - if (docFile && (await this.fileExists(actualContentFile))) { - const githubEditUrl = `${githubBase}/${docFile}`; - const localPath = join(projectRoot, docFile); - const vscodeUrl = `${vscodeBase}/${localPath}`; - - // Get CLI options for this command - const cliOptions = this.commandOptionsMap[`influxdb3 ${command}`] || []; - - // Parse documentation content to find documented options - const documentedOptions = - await this.parseDocumentedOptions(actualContentFile); - - // Find missing options (in CLI but not in docs) - const missingOptions = cliOptions.filter( - (opt) => !documentedOptions.includes(opt) - ); - - // Find extra options (in docs but not in CLI) - const extraOptions = documentedOptions.filter( - (opt) => !cliOptions.includes(opt) - ); - - existingDocs.push({ - command, - file: docFile, - actualContentFile: actualContentFile.replace( - join(projectRoot, ''), - '' - ), - githubUrl: githubEditUrl, - localPath, - vscodeUrl, - cliOptions, - documentedOptions, - missingOptions, - extraOptions, - }); - } - } - - if (existingDocs.length > 0) { - output += 'Review these existing documentation files for accuracy:\n\n'; - - for (const doc of existingDocs) { - output += `### \`influxdb3 ${doc.command}\`\n`; - output += `- **File**: \`${doc.file}\`\n`; - if (doc.actualContentFile !== doc.file) { - output += `- **Content**: \`${doc.actualContentFile}\`\n`; - } - output += `- [Edit on GitHub](${doc.githubUrl})\n`; - output += `- [Open in VS 
Code](${doc.vscodeUrl})\n`; - output += `- **Local**: \`${doc.localPath}\`\n`; - - // Show option analysis - if (doc.missingOptions.length > 0) { - output += `- **⚠️ Missing from docs** (${doc.missingOptions.length} options):\n`; - for (const option of doc.missingOptions.sort()) { - output += ` - \`${option}\`\n`; - } - } - - if (doc.extraOptions.length > 0) { - output += `- **ℹ️ Documented but not in CLI** (${doc.extraOptions.length} options):\n`; - for (const option of doc.extraOptions.sort()) { - output += ` - \`${option}\`\n`; - } - } - - if (doc.missingOptions.length === 0 && doc.extraOptions.length === 0) { - output += `- **✅ Options match** (${doc.cliOptions.length} options)\n`; - } - - if (doc.cliOptions.length > 0) { - output += `- **All CLI Options** (${doc.cliOptions.length}):\n`; - const uniqueOptions = [...new Set(doc.cliOptions)].sort(); - for (const option of uniqueOptions) { - const status = doc.missingOptions.includes(option) ? '❌' : '✅'; - output += ` - ${status} \`${option}\`\n`; - } - } - output += '\n'; - } - } - - output += '\n## Summary\n'; - output += `- Missing documentation files: ${missingCount}\n`; - output += `- Existing documentation files: ${existingDocs.length}\n`; - output += `- Generated templates: ${missingCount}\n`; - output += '- Options are grouped by command for easier review\n\n'; - - output += '## Automation Suggestions\n\n'; - output += - '1. **Use generated templates**: Check the `patches` directory for pre-filled documentation templates\n'; - output += - '2. **Batch creation**: Use the shell commands above to quickly create all missing files\n'; - output += - '3. **CI Integration**: Add this audit to your CI pipeline to catch missing docs early\n'; - output += - '4. 
**Auto-PR**: Create a GitHub Action that runs this audit and opens PRs for missing docs\n\n'; - - await fs.writeFile(auditFile, output); - console.log(`📄 Audit complete: ${auditFile}`); - - if (missingCount > 0) { - console.log( - `📝 Generated ${missingCount} documentation templates in: ${patchDir}` - ); - } - } - - buildCommandToFileMapping() { - // Build a mapping from discovered commands to expected documentation files - const mapping = {}; - - // Common patterns for command to file mapping - const patterns = { - 'create database': 'create/database.md', - 'create token': 'create/token/_index.md', - 'create token admin': 'create/token/admin.md', - 'create trigger': 'create/trigger.md', - 'create table': 'create/table.md', - 'create last_cache': 'create/last_cache.md', - 'create distinct_cache': 'create/distinct_cache.md', - 'show databases': 'show/databases.md', - 'show tokens': 'show/tokens.md', - 'show system': 'show/system.md', - 'delete database': 'delete/database.md', - 'delete table': 'delete/table.md', - 'delete trigger': 'delete/trigger.md', - 'update database': 'update/database.md', - 'test wal_plugin': 'test/wal_plugin.md', - 'test schedule_plugin': 'test/schedule_plugin.md', - query: 'query.md', - write: 'write.md', - }; - - // Add discovered commands that match patterns - for (const [command, info] of this.discoveredCommands) { - const cleanCommand = command.replace('influxdb3 ', ''); - if (patterns[cleanCommand]) { - mapping[cleanCommand] = patterns[cleanCommand]; - } else if (cleanCommand !== '' && cleanCommand.includes(' ')) { - // Generate file path for subcommands - const parts = cleanCommand.split(' '); - if (parts.length === 2) { - mapping[cleanCommand] = `${parts[0]}/${parts[1]}.md`; - } else if (parts.length === 3) { - mapping[cleanCommand] = `${parts[0]}/${parts[1]}/${parts[2]}.md`; - } - } else if (cleanCommand !== '' && !cleanCommand.includes(' ')) { - // Single command - mapping[cleanCommand] = `${cleanCommand}.md`; - } - } - - return 
mapping; - } - - async run() { - console.log( - `${Colors.BLUE}🔍 InfluxDB 3 CLI Documentation Audit${Colors.NC}` - ); - console.log('======================================='); - console.log(`Product: ${this.product}`); - console.log(`Version: ${this.version}`); - console.log(); - - // Ensure output directory exists - await this.ensureDir(this.outputDir); - - if (this.product === 'core') { - const cliFile = join( - this.outputDir, - `current-cli-core-${this.version}.txt` - ); - const auditFile = join( - this.outputDir, - `documentation-audit-core-${this.version}.md` - ); - - if (await this.extractCurrentCLI('core', cliFile)) { - await this.auditDocs('core', cliFile, auditFile); - } - } else if (this.product === 'enterprise') { - const cliFile = join( - this.outputDir, - `current-cli-enterprise-${this.version}.txt` - ); - const auditFile = join( - this.outputDir, - `documentation-audit-enterprise-${this.version}.md` - ); - - if (await this.extractCurrentCLI('enterprise', cliFile)) { - await this.auditDocs('enterprise', cliFile, auditFile); - } - } else if (this.product === 'both') { - // Core - const cliFileCore = join( - this.outputDir, - `current-cli-core-${this.version}.txt` - ); - const auditFileCore = join( - this.outputDir, - `documentation-audit-core-${this.version}.md` - ); - - if (await this.extractCurrentCLI('core', cliFileCore)) { - await this.auditDocs('core', cliFileCore, auditFileCore); - } - - // Enterprise - const cliFileEnt = join( - this.outputDir, - `current-cli-enterprise-${this.version}.txt` - ); - const auditFileEnt = join( - this.outputDir, - `documentation-audit-enterprise-${this.version}.md` - ); - - if (await this.extractCurrentCLI('enterprise', cliFileEnt)) { - await this.auditDocs('enterprise', cliFileEnt, auditFileEnt); - } - } else { - console.error(`Error: Invalid product '${this.product}'`); - console.error( - 'Usage: node audit-cli-documentation.js [core|enterprise|both] [version]' - ); - process.exit(1); - } - - console.log(); - 
console.log( - `${Colors.GREEN}✅ CLI documentation audit complete!${Colors.NC}` - ); - console.log(); - console.log('Next steps:'); - console.log(`1. Review the audit reports in: ${this.outputDir}`); - console.log('2. Update missing documentation files'); - console.log('3. Verify options match current CLI behavior'); - console.log('4. Update examples and usage patterns'); - } -} - -// Main execution -async function main() { - const args = process.argv.slice(2); - const product = args[0] || 'both'; - const version = args[1] || 'local'; - - // Validate product - if (!['core', 'enterprise', 'both'].includes(product)) { - console.error(`Error: Invalid product '${product}'`); - console.error( - 'Usage: node audit-cli-documentation.js [core|enterprise|both] [version]' - ); - console.error('Example: node audit-cli-documentation.js core 3.2.0'); - process.exit(1); - } - - // Validate version tag - try { - const repoRoot = await getRepositoryRoot(); - await validateVersionInputs(version, null, repoRoot); - } catch (error) { - console.error(`Version validation failed: ${error.message}`); - process.exit(1); - } - - const auditor = new CLIDocAuditor(product, version); - await auditor.run(); -} - -// Run if called directly -if (import.meta.url === `file://${process.argv[1]}`) { - main().catch((err) => { - console.error('Error:', err); - process.exit(1); - }); -} - -export { CLIDocAuditor }; diff --git a/helper-scripts/influxdb3-monolith/setup-auth-tokens.sh b/helper-scripts/influxdb3-monolith/setup-auth-tokens.sh deleted file mode 100644 index 6990d757f..000000000 --- a/helper-scripts/influxdb3-monolith/setup-auth-tokens.sh +++ /dev/null @@ -1,164 +0,0 @@ -#!/bin/bash -# Set up authentication tokens for InfluxDB 3 Core and Enterprise containers -# Usage: ./setup-auth-tokens.sh [core|enterprise|both] - -set -e - -# Color codes -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' - -# Parse arguments -TARGET=${1:-both} - -echo -e "${BLUE}🔐 
InfluxDB 3 Authentication Setup${NC}" -echo "==================================" -echo "" - -# Check for and load existing secret files -SECRET_CORE_FILE="$HOME/.env.influxdb3-core-admin-token" -SECRET_ENT_FILE="$HOME/.env.influxdb3-enterprise-admin-token" - -if [ -f "$SECRET_CORE_FILE" ]; then - echo "✅ Found existing Core token secret file" -else - echo "📝 Creating new Core token secret file: $SECRET_CORE_FILE" - touch "$SECRET_CORE_FILE" -fi - -if [ -f "$SECRET_ENT_FILE" ]; then - echo "✅ Found existing Enterprise token secret file" -else - echo "📝 Creating new Enterprise token secret file: $SECRET_ENT_FILE" - touch "$SECRET_ENT_FILE" -fi - -echo "" - -# Function to setup auth for a product -setup_auth() { - local product=$1 - local container_name="influxdb3-${product}" - local port - local secret_file - - case "$product" in - "core") - port="8282" - secret_file="$SECRET_CORE_FILE" - ;; - "enterprise") - port="8181" - secret_file="$SECRET_ENT_FILE" - ;; - esac - - echo -e "${BLUE}Setting up $(echo ${product} | awk '{print toupper(substr($0,1,1)) tolower(substr($0,2))}') authentication...${NC}" - - # Check if token already exists in secret file - if [ -s "$secret_file" ]; then - local existing_token=$(cat "$secret_file") - echo "✅ Token already exists in secret file" - echo " Token: ${existing_token:0:20}..." - - # Test if the token works - echo -n "🧪 Testing existing token..." - if docker exec "${container_name}" influxdb3 show databases --token "${existing_token}" --host "http://localhost:${port}" > /dev/null 2>&1; then - echo -e " ${GREEN}✓ Working${NC}" - return 0 - else - echo -e " ${YELLOW}⚠ Not working, will create new token${NC}" - fi - fi - - # Check if container is running - if ! docker ps --format '{{.Names}}' | grep -q "^${container_name}$"; then - echo "🚀 Starting ${container_name} container..." - if ! 
docker compose up -d "${container_name}"; then - echo -e "${RED}❌ Failed to start container${NC}" - return 1 - fi - - echo -n "⏳ Waiting for container to be ready..." - sleep 5 - echo -e " ${GREEN}✓${NC}" - else - echo "✅ Container ${container_name} is running" - fi - - # Create admin token - echo "🔑 Creating admin token..." - - local token_output - if token_output=$(docker exec "${container_name}" influxdb3 create token --admin 2>&1); then - # Extract the token from the "Token: " line - local new_token=$(echo "$token_output" | grep "^Token: " | sed 's/^Token: //' | tr -d '\r\n') - - echo -e "✅ ${GREEN}Token created successfully!${NC}" - echo " Token: ${new_token:0:20}..." - - # Update secret file - echo "${new_token}" > "$secret_file" - - echo "📝 Updated secret file: $secret_file" - - # Test the new token - echo -n "🧪 Testing new token..." - if docker exec "${container_name}" influxdb3 show databases --token "${new_token}" --host "http://localhost:${port}" > /dev/null 2>&1; then - echo -e " ${GREEN}✓ Working${NC}" - else - echo -e " ${YELLOW}⚠ Test failed, but token was created${NC}" - fi - - else - echo -e "${RED}❌ Failed to create token${NC}" - echo "Error output: $token_output" - return 1 - fi - - echo "" -} - -# Main execution -case "$TARGET" in - "core") - setup_auth "core" - ;; - "enterprise") - setup_auth "enterprise" - ;; - "both") - setup_auth "core" - setup_auth "enterprise" - ;; - *) - echo "Usage: $0 [core|enterprise|both]" - exit 1 - ;; -esac - -echo -e "${GREEN}🎉 Authentication setup complete!${NC}" -echo "" -echo "📋 Next steps:" -echo "1. Restart containers to load new secrets:" -echo " docker compose down && docker compose up -d influxdb3-core influxdb3-enterprise" -echo "2. 
Test CLI commands with authentication:" -echo " ./detect-cli-changes.sh core 3.1.0 local" -echo " ./detect-cli-changes.sh enterprise 3.1.0 local" -echo "" -echo "📄 Your secret files now contain:" - -# Show Core tokens -if [ -f "$SECRET_CORE_FILE" ] && [ -s "$SECRET_CORE_FILE" ]; then - token_preview=$(head -c 20 "$SECRET_CORE_FILE") - echo " $SECRET_CORE_FILE: ${token_preview}..." -fi - -# Show Enterprise tokens -if [ -f "$SECRET_ENT_FILE" ] && [ -s "$SECRET_ENT_FILE" ]; then - token_preview=$(head -c 20 "$SECRET_ENT_FILE") - echo " $SECRET_ENT_FILE: ${token_preview}..." -fi \ No newline at end of file diff --git a/package.json b/package.json index fc09f72a5..e57beed19 100644 --- a/package.json +++ b/package.json @@ -55,12 +55,7 @@ "test:codeblocks:v2": "docker compose run --rm --name v2-pytest v2-pytest", "test:codeblocks:stop-monitors": "./test/scripts/monitor-tests.sh stop cloud-dedicated-pytest && ./test/scripts/monitor-tests.sh stop clustered-pytest", "test:e2e": "node cypress/support/run-e2e-specs.js", - "test:shortcode-examples": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/example.md", - "audit:cli": "node ./helper-scripts/influxdb3-monolith/audit-cli-documentation.js both local", - "audit:cli:3core": "node ./helper-scripts/influxdb3-monolith/audit-cli-documentation.js core local", - "audit:cli:3ent": "node ./helper-scripts/influxdb3-monolith/audit-cli-documentation.js enterprise local", - "audit:cli:apply": "node ./helper-scripts/influxdb3-monolith/apply-cli-patches.js both", - "audit:cli:apply:dry": "node ./helper-scripts/influxdb3-monolith/apply-cli-patches.js both --dry-run" + "test:shortcode-examples": "node cypress/support/run-e2e-specs.js --spec \"cypress/e2e/content/article-links.cy.js\" content/example.md" }, "type": "module", "browserslist": [ From 9a0d4035d89b844b9537e2f7ad70486398ba6719 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 19 Aug 2025 10:57:24 -0500 Subject: [PATCH 
101/179] config(link-checker): exclude Docker Hub URLs Add exclusion pattern for hub.docker.com to both production and default link-checker configurations. Docker Hub often implements rate limiting and bot detection that causes false positive link validation failures in CI environments. --- .ci/link-checker/default.lycherc.toml | 3 +++ .ci/link-checker/production.lycherc.toml | 3 +++ 2 files changed, 6 insertions(+) diff --git a/.ci/link-checker/default.lycherc.toml b/.ci/link-checker/default.lycherc.toml index 259efd76a..f769afc36 100644 --- a/.ci/link-checker/default.lycherc.toml +++ b/.ci/link-checker/default.lycherc.toml @@ -55,6 +55,9 @@ exclude = [ "^https?://stackoverflow\\.com", "^https?://.*\\.stackoverflow\\.com", + # Docker Hub URLs (rate limiting and bot detection) + "^https?://hub\\.docker\\.com", + # Common documentation placeholders "YOUR_.*", "REPLACE_.*", diff --git a/.ci/link-checker/production.lycherc.toml b/.ci/link-checker/production.lycherc.toml index f8410208c..37f692e47 100644 --- a/.ci/link-checker/production.lycherc.toml +++ b/.ci/link-checker/production.lycherc.toml @@ -63,6 +63,9 @@ exclude = [ "^https?://stackoverflow\\.com", "^https?://.*\\.stackoverflow\\.com", + # Docker Hub URLs (rate limiting and bot detection) + "^https?://hub\\.docker\\.com", + # InfluxData support URLs (certificate/SSL issues in CI) "^https?://support\\.influxdata\\.com", From fe455525d45452066fd0fa9e6a6262155746a089 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 19 Aug 2025 10:57:58 -0500 Subject: [PATCH 102/179] fix(v2): broken link fragment --- content/influxdb/v2/install/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb/v2/install/_index.md b/content/influxdb/v2/install/_index.md index 60b60d938..d9398a993 100644 --- a/content/influxdb/v2/install/_index.md +++ b/content/influxdb/v2/install/_index.md @@ -112,7 +112,7 @@ _If `gpg` isn't available on your system, see The following steps guide you through 
using GPG to verify InfluxDB binary releases: -1. [Choose the InfluxData key-pair for your OS version](#choose-the-influxdata-key-pair-for-your-system). +1. [Choose the InfluxData key-pair for your OS version](#choose-the-influxdata-key-pair-for-your-os-version). 2. Download and import the InfluxData public key. `gpg --import` outputs to stderr. From c1de7e71be8710c617a73c0a8bc72720d16864ec Mon Sep 17 00:00:00 2001 From: David Rusnak Date: Tue, 19 Aug 2025 15:24:43 -0400 Subject: [PATCH 103/179] docs: add artifacts and release notes for clustered release 20250814 --- .../reference/release-notes/clustered.md | 25 + .../20250814-1819052/app-instance-schema.json | 3255 +++++++++++++++++ .../20250814-1819052/example-customer.yml | 342 ++ 3 files changed, 3622 insertions(+) create mode 100644 static/downloads/clustered-release-artifacts/20250814-1819052/app-instance-schema.json create mode 100644 static/downloads/clustered-release-artifacts/20250814-1819052/example-customer.yml diff --git a/content/influxdb3/clustered/reference/release-notes/clustered.md b/content/influxdb3/clustered/reference/release-notes/clustered.md index a82fa5c9b..81167f85f 100644 --- a/content/influxdb3/clustered/reference/release-notes/clustered.md +++ b/content/influxdb3/clustered/reference/release-notes/clustered.md @@ -61,6 +61,31 @@ directory. This new directory contains artifacts associated with the specified r --- +## 20250814-1819052 + +### Quickstart + +```yaml +spec: + package: + image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20250814-1819052 +``` + +### Bug Fixes + +- Fix incorrect service address for tokens in Clustered auth sidecar. If you were overriding the `AUTHZ_TOKEN_SVC_ADDRESS` environment variable in your `AppInstance`, you can now remove that override. +- Remove default `fallbackScrapeProtocol` environment variable for prometheus-operator. +- Update Grafana to `12.1.1` to address CVE-2025-6023 and CVE-2025-6197. 
+ +### Changes + +#### Database Engine + +- Update DataFusion to `48`. +- Tweak compaction to reduce write amplification and querier cache churn in some circumstances. + +--- + ## 20250721-1796368 {date="2025-07-21"} ### Quickstart diff --git a/static/downloads/clustered-release-artifacts/20250814-1819052/app-instance-schema.json b/static/downloads/clustered-release-artifacts/20250814-1819052/app-instance-schema.json new file mode 100644 index 000000000..51eb13f3b --- /dev/null +++ b/static/downloads/clustered-release-artifacts/20250814-1819052/app-instance-schema.json @@ -0,0 +1,3255 @@ +{ + "additionalProperties": false, + "properties": { + "apiVersion": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "metadata": { + "type": "object" + }, + "spec": { + "additionalProperties": false, + "properties": { + "imagePullSecrets": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "package": { + "properties": { + "apiVersion": { + "type": "string" + }, + "image": { + "type": "string" + }, + "spec": { + "additionalProperties": false, + "properties": { + "admin": { + "additionalProperties": false, + "description": "OAuth configuration for restricting access to Clustered", + "properties": { + "dsn": { + "additionalProperties": false, + "description": "The dsn for the postgres compatible database", + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." 
+ ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "identityProvider": { + "description": "The identity provider to be used e.g. \"keycloak\", \"auth0\", \"azure\"", + "type": "string" + }, + "internalSigningKey": { + "description": "Internal JWT secrets", + "properties": { + "id": { + "additionalProperties": false, + "description": "random ID that uniquely identifies this keypair. Generally a UUID.", + "examples": [ + "value: ...", + "valueFrom: ..." 
+ ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "privateKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. 
Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "publicKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "required": [ + "id", + "privateKey", + "publicKey" + ], + "type": "object" + }, + "jwksEndpoint": { + "description": "The JWKS endpoint given by your identity provider. 
This should look like \"https://{identityProviderDomain}/.well-known/jwks.json\"", + "type": "string" + }, + "users": { + "description": "The list of users to grant access to Clustered via influxctl", + "item": { + "properties": { + "email": { + "description": "The email of the user within your identity provider.", + "type": "string" + }, + "firstName": { + "description": "The first name of the user that will be used in Clustered.", + "type": "string" + }, + "id": { + "description": "The identifier of the user within your identity provider.", + "type": "string" + }, + "lastName": { + "description": "The last name of the user that will be used in Clustered.", + "type": "string" + }, + "userGroups": { + "description": "Optional list of user groups to assign to the user, rather than the default groups. The following groups are currently supported: Admin, Auditor, Member", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "firstName", + "lastName", + "email", + "id" + ], + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "catalog": { + "additionalProperties": false, + "description": "Configuration for the postgres-compatible database that is used as a catalog/metadata store", + "properties": { + "dsn": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." 
+ ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "type": "object" + }, + "components": { + "additionalProperties": false, + "properties": { + "catalog": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "common": { + "additionalProperties": false, + "description": "Common configuration to all components. They will be overridden by component-specific configuration.\nAny value defined in the component-specific settings will be merged with values defined in the common settings.\n", + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "compactor": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "garbage-collector": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "granite": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "ingester": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "querier": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "router": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "egress": { + "additionalProperties": false, + "description": "Configuration for how external resources are accessed from Clustered components", + "properties": { + "customCertificates": { + "additionalProperties": false, + "description": "Custom certificate or CA Bundle. Used to verify outbound connections performed by influxdb, such as OIDC servers,\npostgres databases, or object store API endpoints.\n\nEquivalent to the SSL_CERT_FILE environment variable used by OpenSSL.\n", + "examples": [ + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "type": "object" + }, + "featureFlags": { + "description": "An array of feature flag names. Feature flags (aka feature gates) control features that\nhave not yet been released. They can be experimental to varying degrees (alpha, beta, rc).\n", + "properties": { + "clusteredAuth": { + "description": "Use the authorization service optimized for Clustered deployments.\n\nThis authorization service communicates directly with the locally deployed\ngranite service, which allows it to become ready to validate access tokens\npromptly on pod start up. It also offers more control over the invalidation\nschedule for cached tokens, and may slightly reduce query latency.\n", + "type": "string" + }, + "enableDefaultResourceLimits": { + "description": "Enable Default Resource Limits for Containers\n\nWhen enabled, all containers will have `requests.cpu`, `requests.memory`,\n`limits.cpu`, and `limits.memory` defined. This is particularily useful\nfor namespaces that include a ResourceQuota. When enabling this feature\nflag, make sure to specify the resource limits and requests for the IOx\ncomponents as the defaults may not be properly sized for your cluster.\n", + "type": "string" + }, + "grafana": { + "description": "An experimental, minimal installation of a Grafana Deployment to use alongside Clustered.\n\nOnly this flag if you do not have your own metric visualisation setup and wish\nto experiment with Clustered. 
It is tested with Grafana v12.1.1.\n", + "type": "string" + }, + "localTracing": { + "description": "Experimental installation of Jaeger for tracing capabilities with InfluxDB 3.\n\nOnly enable this flag when instructed to do so by the support team.\n", + "type": "string" + }, + "noGrpcProbes": { + "description": "Remove gRPC liveness/readiness probes for debug service", + "type": "string" + }, + "noMinReadySeconds": { + "description": "Experimental flag for Kubernetes clusters that are lower than v1.25.\n\nNo longer uses minReadySeconds for workloads, this will cause downtime.\n", + "type": "string" + }, + "noPrometheus": { + "description": "Disable the install of the default bare-bones Prometheus StatefulSet installation alongside Clustered.\n\nThis feature flag is useful when you already have a monitoring setup and wish to utilise it.\n\nNOTE: In future releases, the `debug-service` will have a partial, minor, dependency on a Prometheus instance being available.\nIf you do not wish for this service to utilise your own installation of Prometheus, disabling it here may cause issues.\n", + "type": "string" + }, + "serviceMonitor": { + "description": "Deprecated. Use observability.serviceMonitor instead.\n\nCreate a ServiceMonitor resource for InfluxDB3.\n", + "type": "string" + }, + "useLicensedBinaries": { + "description": "This flag is deprecated and no longer has any effect. 
Licensed binaries are now always used.\n", + "type": "string" + } + }, + "type": "array" + }, + "hostingEnvironment": { + "additionalProperties": false, + "description": "Environment or cloud-specific configuration elements which are utilised by InfluxDB Clustered.", + "properties": { + "aws": { + "additionalProperties": false, + "description": "Configuration for hosting on AWS.", + "properties": { + "eksRoleArn": { + "default": "", + "description": "IAM role ARN to apply to the IOx ServiceAccount, used with EKS IRSA.", + "type": "string" + } + }, + "type": "object" + }, + "gke": { + "additionalProperties": false, + "description": "Configuration for hosting on Google Kubernetes Engine (GKE).", + "properties": { + "workloadIdentity": { + "additionalProperties": false, + "description": "Authentication via GKE workload identity. This will annotate the relevant Kubernetes ServiceAccount objects.\nSee https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity for further details.\n", + "properties": { + "serviceAccountEmail": { + "description": "Google IAM Service Account email, this should be in the format \"NAME@PROJECT_ID.iam.gserviceaccount.com\".", + "type": "string" + } + }, + "required": [ + "serviceAccountEmail" + ], + "type": "object" + } + }, + "type": "object" + }, + "openshift": { + "additionalProperties": false, + "description": "Configuration for hosting on Red Hat OpenShift.", + "properties": { }, + "type": "object" + } + }, + "type": "object" + }, + "images": { + "description": "Manipulate how images are retrieved for Clustered. 
This is typically useful for air-gapped environments when you need to use an internal registry.", + "properties": { + "overrides": { + "description": "Override specific images using the contained predicate fields.\n\nThis takes precedence over the registryOverride field.\n", + "item": { + "description": "Remaps an image matching naming predicates\n", + "properties": { + "name": { + "description": "Naming predicate: the part of the image name that comes after the registry name, e.g.\nIf the image name is \"oci.influxdata.com/foo/bar:1234\", the name field matches \"foo/bar\"\n", + "type": "string" + }, + "newFQIN": { + "description": "Rewrite expression: when a naming predicate matches this image, rewrite the image reference\nusing this Fully Qualified Image Name. i.e. this replaces the whole registry/imagename:tag@digest\nparts of the input image reference.\n", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "registryOverride": { + "default": "", + "description": "Place a new registry prefix infront of all Clustered component images.\n\nThis is used when you wish to maintain the original registry path for images and simply relocate them underneath\nyour own registry.\n\nExample:\nregistryOverride: 'newReg' means 'myregistry/test' becomes 'newReg/myregistry/test'\n", + "type": "string" + } + }, + "type": "object" + }, + "ingesterStorage": { + "additionalProperties": false, + "description": "Storage configuration for the Clustered ingesters.", + "properties": { + "storage": { + "description": "A higher value provides more disk space for the Write-Ahead Log (WAL) to each ingester, allowing for a greater set of leading edge data to be maintained in-memory.\nThis also reduces the frequency of WAL rotations, leading to better query performance and less burden on the compactor.\n\nNote that at 90% capacity, an ingester will stop accepting writes in order to persist its active WAL into the configured object store as parquet files.\n", + "type": 
"string" + }, + "storageClassName": { + "default": "", + "type": "string" + } + }, + "required": [ + "storage" + ], + "type": "object" + }, + "ingress": { + "additionalProperties": false, + "description": "Configuration for how Clustered components are accessed.", + "properties": { + "grpc": { + "additionalProperties": false, + "description": "Configuration for components which utilise gRPC", + "properties": { + "className": { + "default": "", + "type": "string" + } + }, + "type": "object" + }, + "hosts": { + "description": "A number of hosts/domains to use as entrypoints within the Ingress resources.", + "type": "array" + }, + "http": { + "additionalProperties": false, + "description": "Configuration for components which utilise HTTP", + "properties": { + "className": { + "default": "", + "type": "string" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "description": "Template to apply across configured Ingress-type resources.\nThis allows you to specify a range of third party annotations onto the created Ingress objects and/or\nalter the kind of Ingress you would like to use, e.g. 'Route'.\n", + "oneOf": [ + { + "properties": { + "apiVersion": { + "const": "networking.istio.io/v1beta1" + }, + "kind": { + "const": "Gateway" + }, + "selector": { + "default": { }, + "description": "This selector determines which Istio ingress gateway pods will be chosen\nto handle traffic for the created Gateway resources. 
A blank selector means that all\ngateway pods in the cluster will handle traffic.\n\nFor more details, see https://istio.io/latest/docs/reference/config/networking/gateway/#Gateway\n", + "type": "object" + } + }, + "required": [ + "apiVersion", + "kind" + ] + }, + { + "properties": { + "apiVersion": { + "enum": [ + "networking.k8s.io/v1", + "route.openshift.io/v1" + ], + "type": "string" + }, + "kind": { + "enum": [ + "Ingress", + "Route" + ], + "type": "string" + } + } + } + ], + "properties": { + "apiVersion": { + "default": "networking.k8s.io/v1", + "enum": [ + "networking.k8s.io/v1", + "route.openshift.io/v1", + "networking.istio.io/v1beta1" + ], + "type": "string" + }, + "kind": { + "default": "Ingress", + "enum": [ + "Ingress", + "Route", + "Gateway" + ], + "type": "string" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations to place onto the objects which enable ingress.", + "type": "object" + } + }, + "type": "object" + }, + "selector": { + "description": "Selector to specify which gateway deployment utilises the configured ingress configuration.\n\nNote that this is only for Istio Gateway, see https://istio.io/latest/docs/reference/config/networking/gateway/#Gateway for further details\n", + "type": "object" + } + }, + "type": "object" + }, + "tlsSecretName": { + "default": "", + "description": "Kubernetes Secret name which contains TLS certificates.\n\nIf you are using cert-manager, this is the name of the Secret to create containing certificates.\nNote that cert-manager is externally managed and is not apart of a Clustered configuration.\n", + "type": "string" + } + }, + "type": "object" + }, + "monitoringStorage": { + "additionalProperties": false, + "description": "Storage configuration for the Prometheus instance shipped alongside Clustered for basic monitoring purposes.", + "properties": { + "storage": { + "description": "The amount of storage to provision for the 
attached volume, e.g. \"10Gi\".", + "type": "string" + }, + "storageClassName": { + "default": "", + "type": "string" + } + }, + "required": [ + "storage" + ], + "type": "object" + }, + "objectStore": { + "additionalProperties": false, + "description": "Configuration for the backing object store of IOx.", + "oneOf": [ + { + "required": [ + "bucket", + "region" + ] + }, + { + "required": [ + "s3", + "bucket" + ] + }, + { + "required": [ + "azure", + "bucket" + ] + }, + { + "required": [ + "google", + "bucket" + ] + } + ], + "properties": { + "accessKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "allowHttp": { + "default": "false", + "type": "string" + }, + "azure": { + "additionalProperties": false, + "description": "Configuration for Azure Blob Storage.", + "properties": { + "accessKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." 
+ ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "account": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." 
+ ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "required": [ + "accessKey", + "account" + ], + "type": "object" + }, + "bucket": { + "type": "string" + }, + "endpoint": { + "default": "", + "type": "string" + }, + "google": { + "additionalProperties": false, + "description": "Configuration for Google Cloud Storage.", + "properties": { + "serviceAccountSecret": { + "additionalProperties": false, + "description": "Authentication via Google IAM Service Account credentials file using a Kubernetes Secret name and key.\nSee https://cloud.google.com/kubernetes-engine/docs/tutorials/authenticating-to-cloud-platform for further details.\n\nIf you wish to use GKE IAM annotations, refer to the hostingEnviornment section of the schema.\n", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "region": { + "default": "", + "description": "The region in which the bucket resides. This may not be required dependent on your object store provider.", + "type": "string" + }, + "s3": { + "additionalProperties": false, + "description": "Configuration for AWS S3 (compatible) object stores.", + "properties": { + "accessKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." 
+ ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "allowHttp": { + "description": "Allow the S3 client to accept insecure HTTP, as well as HTTPS connections to object store.", + "type": "string" + }, + "endpoint": { + "default": "", + "description": "S3 bucket region, see https://docs.aws.amazon.com/general/latest/gr/s3.html#s3_region for further details.", + "type": "string" + }, + "region": { + "description": "AWS region for the bucket, such as us-east-1.", + "type": "string" + }, + "secretKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "required": [ + "region" + ], + "type": "object" + }, + "secretKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." 
+ ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "type": "object" + }, + "observability": { + "additionalProperties": false, + "default": { }, + "description": "Configuration for gaining operational insight into Clustered components", + "properties": { + "retention": { + "default": "12h", + "description": "The retention period for prometheus", + "type": "string" + }, + "serviceMonitor": { + "additionalProperties": false, + "description": "Configure a ServiceMonitor resource to easily expose InfluxDB metrics via the Prometheus Operator.\nSee the Prometheus Operator documentation for usage:\nhttps://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md\n", + "properties": { + "fallbackScrapeProtocol": { + "default": null, + "description": "Specifies which protocol to use when scraping endpoints that return a blank or invalid Content-Type header.\n\nRequired for Prometheus v3.0.0+ only, which enforces Content-Type validation (unlike v2).\n\nFor most standard Prometheus metrics endpoints, including InfluxDB, use \"PrometheusText0.0.4\".\n", + "type": "string" + }, + "interval": { + "default": "30s", + "description": "A duration string that controls the length of time between scrape attempts, ex: '15s', or '1m'", + "type": "string" + }, + "scrapeTimeout": { + "default": null, + "description": "A duration string that controls the scrape timeout duration, ex: '10s'", + "type": "string" + } + }, + "required": [ ], + "type": "object" + } + }, + "type": "object" + }, + "resources": { + "additionalProperties": false, + "properties": { + "catalog": 
{ + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "4", + "type": "string" + }, + "memory": { + "default": "16Gi", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "compactor": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "8", + "type": "string" + }, + "memory": { + "default": "32Gi", + "type": "string" + }, + "replicas": { + "default": 1, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "garbage-collector": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the 
maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + }, + "replicas": { + "const": 1, + "description": "Replica configuration for the Garbage Collector.\nNOTE: This component does not support horizontal scaling at this time.\nRefer to https://docs.influxdata.com/influxdb/clustered/reference/internals/storage-engine/#garbage-collector-scaling-strategies\nfor more details.\n", + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "granite": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": "500M", + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "0.5", + "type": "string" + }, + "memory": { + "default": "500M", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "ingester": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + 
"additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "6", + "type": "string" + }, + "memory": { + "default": "24Gi", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "prometheus": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "500m", + "type": "string" + }, + "memory": { + "default": "512Mi", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "querier": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + 
}, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "8", + "type": "string" + }, + "memory": { + "default": "32Gi", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "router": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "1", + "type": "string" + }, + "memory": { + "default": "2Gi", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "required": [ + "catalog", + "objectStore", + "ingesterStorage", + "monitoringStorage" + ], + "type": "object" + } + }, + "required": [ + "image", + "apiVersion" + ], + "type": "object" + }, + "pause": { + "default": false, + "type": "boolean" + } + }, + "type": "object" + }, + "status": { + "additionalProperties": true, + "type": "object" + } + }, + "type": "object" +} + diff --git a/static/downloads/clustered-release-artifacts/20250814-1819052/example-customer.yml b/static/downloads/clustered-release-artifacts/20250814-1819052/example-customer.yml new file mode 100644 index 000000000..2a0aeb736 --- /dev/null +++ b/static/downloads/clustered-release-artifacts/20250814-1819052/example-customer.yml @@ -0,0 +1,342 
@@ +# yaml-language-server: $schema=app-instance-schema.json +apiVersion: kubecfg.dev/v1alpha1 +kind: AppInstance +metadata: + name: influxdb + namespace: influxdb +spec: + # One or more secrets that are used to pull the images from an authenticated registry. + # This will either be the secret provided to you, if using our registry, or a secret for your own registry + # if self-hosting the images. + imagePullSecrets: + - name: + package: + # The version of the clustered package that will be used. + # This determines the version of all of the individual components. + # When a new version of the product is released, this version should be updated and any + # new config options should be updated below. + image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20250814-1819052 + apiVersion: influxdata.com/v1alpha1 + spec: + # # Provides a way to pass down hosting environment specific configuration, such as a role ARN when using EKS IRSA. + # # This section contains three mutually-exclusive "blocks". Uncomment the block named after the hosting environment + # # you run: "aws", "openshift" or "gke". + # hostingEnvironment: + # # # Uncomment this block if you're running in EKS. + # # aws: + # # eksRoleArn: 'arn:aws:iam::111111111111:role/your-influxdb-clustered-role' + # # + # # # Uncomment this block if you're running inside OpenShift. + # # # Note: there are currently no OpenShift-specific parameters. You have to pass an empty object + # # # as a marker that you're choosing OpenShift as hosting environment. + # # openshift: {} + # # + # # # Uncomment this block if you're running in GKE: + # # gke: + # # # Authenticate to Google Cloud services via workload identity, this + # # # annotates the 'iox' ServiceAccount with the role name you specify. + # # # NOTE: This setting just enables GKE specific authentication mechanism, + # # # You still need to enable `spec.objectStore.google` below if you want to use GCS. 
+ # # workloadIdentity: + # # # Google Service Account name to use for the workload identity. + # # serviceAccountEmail: @.iam.gserviceaccount.com + catalog: + # A postgresql style DSN that points at a postgresql compatible database. + # eg: postgres://[user[:password]@][netloc][:port][/dbname][?param1=value1&...] + dsn: + valueFrom: + secretKeyRef: + name: + key: + + # images: + # # This can be used to override a specific image name with its FQIN + # # (Fully Qualified Image Name) for testing. eg. + # overrides: + # - name: influxdb2-artifacts/iox/iox + # newFQIN: mycompany/test-iox-build:aninformativetag + # + # # Set this variable to the prefix of your internal registry. This will be prefixed to all expected images. + # # eg. us-docker.pkg.dev/iox:latest => registry.mycompany.io/us-docker.pkg.dev/iox:latest + # registryOverride: + + objectStore: + # Bucket that the parquet files will be stored in + bucket: + + # Uncomment one of the following (s3, azure) + # to enable the configuration of your object store + s3: + # URL for S3 Compatible object store + endpoint: + + # Set to true to allow communication over HTTP (instead of HTTPS) + allowHttp: "false" + + # S3 Access Key + # This can also be provided as a valueFrom: secretKeyRef: + accessKey: + value: + + # S3 Secret Key + # This can also be provided as a valueFrom: secretKeyRef: + secretKey: + value: + + # This value is required for AWS S3, it may or may not be required for other providers. + region: + + # azure: + # Azure Blob Storage Access Key + # This can also be provided as a valueFrom: secretKeyRef: + # accessKey: + # value: + + # Azure Blob Storage Account + # This can also be provided as a valueFrom: secretKeyRef: + # account: + # value: + + # There are two main ways you can access a Google: + # + # a) GKE Workload Identity: configure workload identity in the top level `hostingEnvironment.gke` section. 
+ # b) Explicit service account secret (JSON) file: use the `serviceAccountSecret` field here + # + # If you pick (a) you may not need to uncomment anything else in this section, + # but you still need to tell influxdb that you intend to use Google Cloud Storage. + # so you need to specify an empty object. Uncomment the following line: + # + # google: {} + # + # + # If you pick (b), uncomment the following block: + # + # google: + # # If you're authenticating to Google Cloud service using a Service Account credentials file, as opposed + # # to using workload identity (see above) you need to provide a reference to a k8s secret containing the credentials file. + # serviceAccountSecret: + # # Kubernetes Secret name containing the credentials for a Google IAM Service Account. + # name: + # # The key within the Secret containing the credentials. + # key: + + # Parameters to tune observability configuration, such as Prometheus ServiceMonitor's. + observability: {} + # retention: 12h + # serviceMonitor: + # interval: 10s + # scrapeTimeout: 30s + + # Ingester pods have a volume attached. + ingesterStorage: + # (Optional) Set the storage class. This will differ based on the K8s environment and desired storage characteristics. + # If not set, the default storage class will be used. + # storageClassName: + # Set the storage size (minimum 2Gi recommended) + storage: + + # Monitoring pods have a volume attached. + monitoringStorage: + # (Optional) Set the storage class. This will differ based on the K8s environment and desired storage characteristics. + # If not set, the default storage class will be used. + # storageClassName: + # Set the storage size (minimum 10Gi recommended) + storage: + + # Uncomment the following block if using our provided Ingress. 
+ # + # We currently only support the ingress NGINX ingress controller: https://github.com/kubernetes/ingress-nginx + # + # ingress: + # hosts: + # # This is the host on which you will access Influxdb 3.0, for both reads and writes + # - + + # (Optional) + # The name of the Kubernetes Secret containing a TLS certificate, this should exist in the same namespace as the Clustered installation. + # If you are using cert-manager, enter a name for the Secret it should create. + # tlsSecretName: + + # http: + # # Usually you have only one ingress controller installed in a given cluster. + # # In case you have more than one, you have to specify the "class name" of the ingress controller you want to use + # className: nginx + + # grpc: + # # Usually you have only one ingress controller installed in a given cluster. + # # In case you have more than one, you have to specify the "class name" of the ingress controller you want to use + # className: nginx + # + # Enables specifying which 'type' of Ingress to use, alongside whether to place additional annotations + # onto those objects, this is useful for third party software in your environment, such as cert-manager. + # template: + # apiVersion: 'route.openshift.io/v1' + # kind: 'Route' + # metadata: + # annotations: + # 'example-annotation': 'annotation-value' + + # Enables specifying customizations for the various components in InfluxDB 3.0. + # components: + # # router: + # # template: + # # containers: + # # iox: + # # env: + # # INFLUXDB_IOX_MAX_HTTP_REQUESTS: "5000" + # # nodeSelector: + # # disktype: ssd + # # tolerations: + # # - effect: NoSchedule + # # key: example + # # operator: Exists + # # Common customizations for all components go in a pseudo-component called "common" + # # common: + # # template: + # # # Metadata contains custom annotations (and labels) to be added to a component. 
E.g.: + # # metadata: + # # annotations: + # # telegraf.influxdata.com/class: "foo" + + # Example of setting nodeAffinity for the querier component to ensure it runs on nodes with specific labels + # components: + # # querier: + # # template: + # # affinity: + # # nodeAffinity: + # # requiredDuringSchedulingIgnoredDuringExecution: + # # Node must have these labels to be considered for scheduling + # # nodeSelectorTerms: + # # - matchExpressions: + # # - key: required + # # operator: In + # # values: + # # - ssd + # # preferredDuringSchedulingIgnoredDuringExecution: + # # Scheduler will prefer nodes with these labels but they're not required + # # - weight: 1 + # # preference: + # # matchExpressions: + # # - key: preferred + # # operator: In + # # values: + # # - postgres + + # Example of setting podAntiAffinity for the querier component to ensure it runs on nodes with specific labels + # components: + # # querier: + # # template: + # # affinity: + # # podAntiAffinity: + # # requiredDuringSchedulingIgnoredDuringExecution: + # # Ensures that the pod will not be scheduled on a node if another pod matching the labelSelector is already running there + # # - labelSelector: + # # matchExpressions: + # # - key: app + # # operator: In + # # values: + # # - querier + # # topologyKey: "kubernetes.io/hostname" + # # preferredDuringSchedulingIgnoredDuringExecution: + # # Scheduler will prefer not to schedule pods together but may do so if necessary + # # - weight: 1 + # # podAffinityTerm: + # # labelSelector: + # # matchExpressions: + # # - key: app + # # operator: In + # # values: + # # - querier + # # topologyKey: "kubernetes.io/hostname" + + # Uncomment the following block to tune the various pods for their cpu/memory/replicas based on workload needs. + # Only uncomment the specific resources you want to change, anything uncommented will use the package default. 
+ # (You can read more about k8s resources and limits in https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits) + # + # resources: + # # The ingester handles data being written + # ingester: + # requests: + # cpu: + # memory: + # replicas: # The default for ingesters is 3 to increase availability + # + # # optionally you can specify the resource limits which improves isolation. + # # (see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits) + # # limits: + # # cpu: + # # memory: + + # # The compactor reorganizes old data to improve query and storage efficiency. + # compactor: + # requests: + # cpu: + # memory: + # replicas: # the default is 1 + + # # The querier handles querying data. + # querier: + # requests: + # cpu: + # memory: + # replicas: # the default is 3 + + # # The router performs some api routing. + # router: + # requests: + # cpu: + # memory: + # replicas: # the default is 3 + + admin: + # The list of users to grant access to Clustered via influxctl + users: + # First name of user + - firstName: + # Last name of user + lastName: + # Email of user + email: + # The ID that the configured Identity Provider uses for the user in oauth flows + id: + # Optional list of user groups to assign to the user, rather than the default groups. The following groups are currently supported: Admin, Auditor, Member + userGroups: + - + + # The dsn for the postgres compatible database (note this is the same as defined above) + dsn: + valueFrom: + secretKeyRef: + name: + key: + # The identity provider to be used e.g. 
"keycloak", "auth0", "azure", etc + # Note for Azure Active Directory it must be exactly "azure" + identityProvider: + # The JWKS endpoint provided by the Identity Provider + jwksEndpoint: + + # # This (optional) section controls how InfluxDB issues outbound requests to other services + # egress: + # # If you're using a custom CA you will need to specify the full custom CA bundle here. + # # + # # NOTE: the custom CA is currently only honoured for outbound requests used to obtain + # # the JWT public keys from your identity provider (see `jwksEndpoint`). + # customCertificates: + # valueFrom: + # configMapKeyRef: + # key: ca.pem + # name: custom-ca + + # We also include the ability to enable some features that are not yet ready for general availability + # or for which we don't yet have a proper place to turn on an optional feature in the configuration file. + # To turn on these you should include the name of the feature flag in the `featureFlag` array. + # + # featureFlags: + # # Uncomment to install a Grafana deployment. + # # Depends on one of the prometheus features being deployed. + # # - grafana + + # # The following 2 flags should be uncommented for k8s API 1.21 support. + # # Note that this is an experimental configuration. + # # - noMinReadySeconds + # # - noGrpcProbes From 52ea0bf2cc27877173fcb6a3b6ec56975b677ef2 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Wed, 20 Aug 2025 07:52:27 -0500 Subject: [PATCH 104/179] Update _index.md Previous Kapacitor version in menu --- content/kapacitor/v1/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/kapacitor/v1/_index.md b/content/kapacitor/v1/_index.md index bd71793fd..5f9ecda8b 100644 --- a/content/kapacitor/v1/_index.md +++ b/content/kapacitor/v1/_index.md @@ -5,7 +5,7 @@ description: > create alerts, run ETL jobs and detect anomalies. 
menu: kapacitor_v1: - name: Kapacitor v1.7 + name: Kapacitor v1.8 weight: 1 --- From 73bb35d4de129338efb38bacc140588304ec35d1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Aug 2025 14:42:57 +0000 Subject: [PATCH 105/179] chore(deps): bump mermaid from 11.9.0 to 11.10.0 Bumps [mermaid](https://github.com/mermaid-js/mermaid) from 11.9.0 to 11.10.0. - [Release notes](https://github.com/mermaid-js/mermaid/releases) - [Commits](https://github.com/mermaid-js/mermaid/compare/mermaid@11.9.0...mermaid@11.10.0) --- updated-dependencies: - dependency-name: mermaid dependency-version: 11.10.0 dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- package.json | 2 +- yarn.lock | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/package.json b/package.json index e57beed19..10896b237 100644 --- a/package.json +++ b/package.json @@ -36,7 +36,7 @@ "js-yaml": "^4.1.0", "lefthook": "^1.10.10", "markdown-link": "^0.1.1", - "mermaid": "^11.4.1", + "mermaid": "^11.10.0", "vanillajs-datepicker": "^1.3.4" }, "scripts": { diff --git a/yarn.lock b/yarn.lock index 00856c0cd..a17503f82 100644 --- a/yarn.lock +++ b/yarn.lock @@ -3667,10 +3667,10 @@ merge2@^1.3.0: resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae" integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== -mermaid@^11.4.1: - version "11.9.0" - resolved "https://registry.yarnpkg.com/mermaid/-/mermaid-11.9.0.tgz#fdc055d0f2a7f2afc13a78cb3e3c9b1374614e2e" - integrity sha512-YdPXn9slEwO0omQfQIsW6vS84weVQftIyyTGAZCwM//MGhPzL1+l6vO6bkf0wnP4tHigH1alZ5Ooy3HXI2gOag== +mermaid@^11.10.0: + version "11.10.0" + resolved "https://registry.yarnpkg.com/mermaid/-/mermaid-11.10.0.tgz#4949f98d08cfdc4cda429372ed2f843a64c99946" + integrity sha512-oQsFzPBy9xlpnGxUqLbVY8pvknLlsNIJ0NWwi8SUJjhbP1IT0E0o1lfhU4iYV3ubpy+xkzkaOyDUQMn06vQElQ== 
dependencies: "@braintree/sanitize-url" "^7.0.4" "@iconify/utils" "^2.1.33" From 8151caa4f3d1e8ac8743765cc6c0355767bce02c Mon Sep 17 00:00:00 2001 From: David Rusnak Date: Wed, 20 Aug 2025 12:30:40 -0400 Subject: [PATCH 106/179] docs: add date to release notes --- .../influxdb3/clustered/reference/release-notes/clustered.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb3/clustered/reference/release-notes/clustered.md b/content/influxdb3/clustered/reference/release-notes/clustered.md index 81167f85f..0973ace20 100644 --- a/content/influxdb3/clustered/reference/release-notes/clustered.md +++ b/content/influxdb3/clustered/reference/release-notes/clustered.md @@ -61,7 +61,7 @@ directory. This new directory contains artifacts associated with the specified r --- -## 20250814-1819052 +## 20250814-1819052 {date="2025-08-14"} ### Quickstart From 29ff1ba739f73a1d4b9975e413c33ff39adeaec2 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Wed, 20 Aug 2025 16:16:48 -0500 Subject: [PATCH 107/179] chore(tools): remove minor version from product name in sidebarDisplay Kapacitor v1 instead of Kapacitor v1. --- content/chronograf/v1/_index.md | 2 +- content/kapacitor/v1/_index.md | 2 +- content/telegraf/v1/_index.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/content/chronograf/v1/_index.md b/content/chronograf/v1/_index.md index ca0c40807..244caf8a3 100644 --- a/content/chronograf/v1/_index.md +++ b/content/chronograf/v1/_index.md @@ -6,7 +6,7 @@ description: > monitoring data and easily create alerting and automation rules. menu: chronograf_v1: - name: Chronograf v1.10 + name: Chronograf v1 weight: 1 --- diff --git a/content/kapacitor/v1/_index.md b/content/kapacitor/v1/_index.md index 5f9ecda8b..b4eef1de8 100644 --- a/content/kapacitor/v1/_index.md +++ b/content/kapacitor/v1/_index.md @@ -5,7 +5,7 @@ description: > create alerts, run ETL jobs and detect anomalies. 
menu: kapacitor_v1: - name: Kapacitor v1.8 + name: Kapacitor v1 weight: 1 --- diff --git a/content/telegraf/v1/_index.md b/content/telegraf/v1/_index.md index 2ef2e0fb2..e089b9dfc 100644 --- a/content/telegraf/v1/_index.md +++ b/content/telegraf/v1/_index.md @@ -5,7 +5,7 @@ description: > time series platform, used to collect and report metrics. Telegraf supports four categories of plugins -- input, output, aggregator, and processor. menu: telegraf_v1: - name: Telegraf v1.35 + name: Telegraf v1 weight: 1 related: - /resources/videos/intro-to-telegraf/ From 2a56bec0c9fa5c60bc62a1f1285a80cf8a300f30 Mon Sep 17 00:00:00 2001 From: Phil Bracikowski <13472206+philjb@users.noreply.github.com> Date: Wed, 20 Aug 2025 14:53:15 -0700 Subject: [PATCH 108/179] Adjust wording for the two types of tsi index: inmen and on disk tsi1 --- content/influxdb/v1/administration/upgrading.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb/v1/administration/upgrading.md b/content/influxdb/v1/administration/upgrading.md index f67dd08e4..3fffdadaf 100644 --- a/content/influxdb/v1/administration/upgrading.md +++ b/content/influxdb/v1/administration/upgrading.md @@ -9,7 +9,7 @@ menu: --- -We recommend enabling Time Series Index (TSI) (step 3 of Upgrade to InfluxDB 1.11.x). [Switch between TSM and TSI](#switch-index-types) as needed. To learn more about TSI, see: +We recommend enabling Time Series Index (TSI) (step 3 of Upgrade to InfluxDB 1.11.x). [Switch between TSI and inmem index types](#switch-index-types) as needed. 
To learn more about TSI, see: - [Time Series Index (TSI) overview](/influxdb/v1/concepts/time-series-index/) - [Time Series Index (TSI) details](/influxdb/v1/concepts/tsi-details/) From 127b15b6e2a7f126fde0ce5184b946663e394cb4 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Wed, 20 Aug 2025 17:25:15 -0500 Subject: [PATCH 109/179] fix(v1): improve TSI recommendation clarity and fix grammar typos - Make TSI recommendation more actionable by explaining benefits (removes RAM limits, better performance for high-cardinality data) - Fix "from to" grammar typos in index switching instructions - Add specific scenarios for when to switch between TSI and inmem index types - Remove "above" directional language per style guidelines Source: Verified against InfluxDB v1 documentation via MCP docs verification - TSI details: https://docs.influxdata.com/influxdb/v1/concepts/tsi-details/ - TSI overview: https://docs.influxdata.com/influxdb/v1/concepts/time-series-index/ --- content/influxdb/v1/administration/upgrading.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/content/influxdb/v1/administration/upgrading.md b/content/influxdb/v1/administration/upgrading.md index 3fffdadaf..4b9fa82b0 100644 --- a/content/influxdb/v1/administration/upgrading.md +++ b/content/influxdb/v1/administration/upgrading.md @@ -9,7 +9,7 @@ menu: --- -We recommend enabling Time Series Index (TSI) (step 3 of Upgrade to InfluxDB 1.11.x). [Switch between TSI and inmem index types](#switch-index-types) as needed. To learn more about TSI, see: +We recommend enabling Time Series Index (TSI) (step 3 of Upgrade to InfluxDB 1.11.x) because it removes RAM-based limits on series cardinality and provides better performance for high-cardinality datasets compared to the default in-memory index. [Switch between TSI and inmem index types](#switch-index-types) as needed. 
To learn more about TSI, see: - [Time Series Index (TSI) overview](/influxdb/v1/concepts/time-series-index/) - [Time Series Index (TSI) details](/influxdb/v1/concepts/tsi-details/) @@ -53,8 +53,8 @@ Run the `buildtsi` command using the user account that you are going to run the Switch index types at any time by doing one of the following: -- To switch from to `inmem` to `tsi1`, complete steps 3 and 4 above in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x). -- To switch from to `tsi1` to `inmem`, change `tsi1` to `inmem` by completing steps 3a-3c and 4 above in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x). +- To switch from `inmem` to `tsi1` (for example, when experiencing high memory usage or out-of-memory errors with high-cardinality data), complete steps 3 and 4 in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x). +- To switch from `tsi1` to `inmem` (for example, for small datasets where memory is not a constraint), change `tsi1` to `inmem` by completing steps 3a-3c and 4 in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x). ## Downgrade InfluxDB From e42df8a4adb24665e24d073362fb29a2647de66e Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Wed, 20 Aug 2025 17:48:58 -0500 Subject: [PATCH 110/179] docs(v1): restructure upgrade guide for better UX and progressive disclosure - Restructure content flow to follow progressive disclosure principles - Move index type decision to Important callout after basic upgrade steps - Improve headings with active voice ("Switch index types anytime") - Enhance callout formatting (Important, Tip, Warning callouts) - Consolidate Enterprise upgrade information into dedicated section - Improve information hierarchy and scanability Changes are primarily formatting and phrasing improvements to enhance developer experience and follow Google Developer Documentation best practices. 
--- .../influxdb/v1/administration/upgrading.md | 67 ++++++++++++------- 1 file changed, 43 insertions(+), 24 deletions(-) diff --git a/content/influxdb/v1/administration/upgrading.md b/content/influxdb/v1/administration/upgrading.md index 4b9fa82b0..33de0ea92 100644 --- a/content/influxdb/v1/administration/upgrading.md +++ b/content/influxdb/v1/administration/upgrading.md @@ -6,22 +6,12 @@ menu: name: Upgrade InfluxDB weight: 25 parent: Administration +related: + - /enterprise_influxdb/v1/guides/migration/ + - /enterprise_influxdb/v1/administration/upgrading/ --- - -We recommend enabling Time Series Index (TSI) (step 3 of Upgrade to InfluxDB 1.11.x) because it removes RAM-based limits on series cardinality and provides better performance for high-cardinality datasets compared to the default in-memory index. [Switch between TSI and inmem index types](#switch-index-types) as needed. To learn more about TSI, see: - -- [Time Series Index (TSI) overview](/influxdb/v1/concepts/time-series-index/) -- [Time Series Index (TSI) details](/influxdb/v1/concepts/tsi-details/) - -> **_Note:_** The default configuration continues to use TSM-based shards with in-memory indexes (as in earlier versions). - -{{% note %}} -### Upgrade to InfluxDB Enterprise - -To upgrade from InfluxDB OSS to InfluxDB Enterprise, [contact InfluxData Sales](https://www.influxdata.com/contact-sales/) -and see [Migrate to InfluxDB Enterprise](/enterprise_influxdb/v1/guides/migration/). -{{% /note %}} +Upgrade to the latest version of InfluxDB OSS v1. ## Upgrade to InfluxDB 1.11.x @@ -29,7 +19,26 @@ and see [Migrate to InfluxDB Enterprise](/enterprise_influxdb/v1/guides/migratio 2. Migrate configuration file customizations from your existing configuration file to the InfluxDB 1.11.x [configuration file](/influxdb/v1/administration/config/). Add or modify your environment variables as needed. -3. 
To enable TSI in InfluxDB 1.11.x, complete the following steps: +> [!Important] +> #### Choose your index type +> InfluxDB 1.11.x supports two index types: +> +> - **Time Series Index (TSI)** - Recommended for most users. Removes RAM-based limits on series cardinality and provides better performance for high-cardinality datasets. +> - **In-memory index (inmem)** - Default option that maintains compatibility with earlier versions but has RAM limitations. +> +> **When to use TSI:** +> - High-cardinality datasets (many unique tag combinations) +> - Experiencing high memory usage or out-of-memory errors +> - Large production deployments +> +> **When to use inmem:** +> - Small datasets where memory is not a constraint +> - Development or testing environments +> - Maintaining compatibility with existing tooling +> +> To learn more about TSI, see [Time Series Index overview](/influxdb/v1/concepts/time-series-index/) and [TSI details](/influxdb/v1/concepts/tsi-details/). + +3. **Optional:** To enable TSI in InfluxDB 1.11.x, complete the following steps: 1. If using the InfluxDB configuration file, find the `[data]` section, uncomment `index-version = "inmem"` and change the value to `tsi1`. @@ -43,26 +52,36 @@ and see [Migrate to InfluxDB Enterprise](/enterprise_influxdb/v1/guides/migratio ``` 4. Build TSI by running the [influx_inspect buildtsi](/influxdb/v1/tools/influx_inspect/#buildtsi) command. - {{% note %}} -Run the `buildtsi` command using the user account that you are going to run the database as, or ensure that the permissions match afterward. - {{% /note %}} + > [!Important] + > Run the `buildtsi` command using the user account that you are going to run the database as, or ensure that the permissions match afterward. 4. Restart the `influxdb` service. +> [!Tip] +> #### Switch index types anytime +> +> The default configuration continues to use TSM-based shards with in-memory indexes (as in earlier versions). 
You can [switch between TSI and inmem index types](#switch-index-types) at any time. + ## Switch index types -Switch index types at any time by doing one of the following: +You can switch between index types at any time after upgrading: -- To switch from `inmem` to `tsi1` (for example, when experiencing high memory usage or out-of-memory errors with high-cardinality data), complete steps 3 and 4 in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x). -- To switch from `tsi1` to `inmem` (for example, for small datasets where memory is not a constraint), change `tsi1` to `inmem` by completing steps 3a-3c and 4 in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x). +**Switch from inmem to TSI:** +- Complete steps 3 and 4 in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x) +- Recommended when experiencing high memory usage or out-of-memory errors with high-cardinality data + +**Switch from TSI to inmem:** +- Change `tsi1` to `inmem` by completing steps 3a-3c and 4 in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x) +- Suitable for small datasets where memory is not a constraint ## Downgrade InfluxDB To downgrade to an earlier version, complete the procedures above in [Upgrade to InfluxDB 1.11.x](#upgrade-to-influxdb-111x), replacing the version numbers with the version that you want to downgrade to. After downloading the release, migrating your configuration settings, and enabling TSI or TSM, make sure to [rebuild your index](/influxdb/v1/administration/rebuild-tsi-index/). ->**Note:** Some versions of InfluxDB may have breaking changes that impact your ability to upgrade and downgrade. For example, you cannot downgrade from InfluxDB 1.3 or later to an earlier version. Please review the applicable version of release notes to check for compatibility issues between releases. +> [!Warning] +> Some versions of InfluxDB may have breaking changes that impact your ability to upgrade and downgrade. 
For example, you cannot downgrade from InfluxDB 1.3 or later to an earlier version. Please review the applicable version of release notes to check for compatibility issues between releases. -## Upgrade InfluxDB Enterprise clusters +## Upgrade to InfluxDB Enterprise -See [Upgrading InfluxDB Enterprise clusters](/enterprise_influxdb/v1/administration/upgrading/). +To upgrade from InfluxDB OSS to InfluxDB Enterprise, [contact InfluxData Sales](https://www.influxdata.com/contact-sales/). From a816f51c299a70c34e5cfb9a670456b395e250c3 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Thu, 21 Aug 2025 08:48:53 -0500 Subject: [PATCH 111/179] Apply suggestions from code review Co-authored-by: Scott Anderson --- content/chronograf/v1/_index.md | 2 +- content/kapacitor/v1/_index.md | 2 +- content/telegraf/v1/_index.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/content/chronograf/v1/_index.md b/content/chronograf/v1/_index.md index 244caf8a3..b447ab871 100644 --- a/content/chronograf/v1/_index.md +++ b/content/chronograf/v1/_index.md @@ -6,7 +6,7 @@ description: > monitoring data and easily create alerting and automation rules. menu: chronograf_v1: - name: Chronograf v1 + name: Chronograf weight: 1 --- diff --git a/content/kapacitor/v1/_index.md b/content/kapacitor/v1/_index.md index b4eef1de8..23bd229de 100644 --- a/content/kapacitor/v1/_index.md +++ b/content/kapacitor/v1/_index.md @@ -5,7 +5,7 @@ description: > create alerts, run ETL jobs and detect anomalies. menu: kapacitor_v1: - name: Kapacitor v1 + name: Kapacitor weight: 1 --- diff --git a/content/telegraf/v1/_index.md b/content/telegraf/v1/_index.md index e089b9dfc..0e9898b0b 100644 --- a/content/telegraf/v1/_index.md +++ b/content/telegraf/v1/_index.md @@ -5,7 +5,7 @@ description: > time series platform, used to collect and report metrics. Telegraf supports four categories of plugins -- input, output, aggregator, and processor. 
menu: telegraf_v1: - name: Telegraf v1 + name: Telegraf weight: 1 related: - /resources/videos/intro-to-telegraf/ From 0742ced3c96262a610764dd77e0f09f9002e5a66 Mon Sep 17 00:00:00 2001 From: Peter Barnett Date: Thu, 21 Aug 2025 11:34:55 -0400 Subject: [PATCH 112/179] chore: add more clarity to Explorer quick start --- content/influxdb3/explorer/_index.md | 3 +++ content/influxdb3/explorer/get-started.md | 1 + 2 files changed, 4 insertions(+) diff --git a/content/influxdb3/explorer/_index.md b/content/influxdb3/explorer/_index.md index a0827d43e..fd10bd1d8 100644 --- a/content/influxdb3/explorer/_index.md +++ b/content/influxdb3/explorer/_index.md @@ -42,7 +42,10 @@ docker run --detach \ --publish 8889:8888 \ influxdata/influxdb3-ui:{{% latest-patch %}} \ --mode=admin + +# Visit http://localhost:8888 to begin using Explorer ``` + Install and run InfluxDB 3 Explorer Get started with InfluxDB 3 Explorer diff --git a/content/influxdb3/explorer/get-started.md b/content/influxdb3/explorer/get-started.md index 863ecbfec..a91a4a122 100644 --- a/content/influxdb3/explorer/get-started.md +++ b/content/influxdb3/explorer/get-started.md @@ -35,6 +35,7 @@ InfluxDB 3 Explorer supports the following InfluxDB 3 products: - **Server URL**: The URL used to connect to your InfluxDB 3 server. - Select the protocol to use (http or https). - Provide the host and, if necessary, the port. + - _If connecting to a local, non-Docker instance, use host.docker.internal_ - **Token**: The authorization token to use to connect to your InfluxDB 3 server. We recommend using an InfluxDB 3 _admin_ token. 
From f2ebfde75b3a51e8cac6ebcb1f1765435e74ed2b Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Thu, 21 Aug 2025 09:58:49 -0600 Subject: [PATCH 113/179] hotfix: remove top messaging from explorer docs --- layouts/partials/article/special-state.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/layouts/partials/article/special-state.html b/layouts/partials/article/special-state.html index 6a89cd7d4..3661c392f 100644 --- a/layouts/partials/article/special-state.html +++ b/layouts/partials/article/special-state.html @@ -5,7 +5,7 @@ {{ $productKey := cond (eq $product "influxdb3") (print "influxdb3_" (replaceRE "-" "_" $version)) $product }} {{ $productData := index $.Site.Data.products $productKey }} {{ $displayName := $productData.name }} -{{ $earlyAccessList := slice "influxdb3/explorer" }} +{{ $earlyAccessList := slice "" }} {{ if in $earlyAccessList (print $product "/" $version )}}
From 0281e51c2c921e8f940caad43479b9334ce941a4 Mon Sep 17 00:00:00 2001 From: peterbarnett03 Date: Thu, 21 Aug 2025 13:18:39 -0400 Subject: [PATCH 114/179] Update content/influxdb3/explorer/_index.md Co-authored-by: Jason Stirnaman --- content/influxdb3/explorer/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb3/explorer/_index.md b/content/influxdb3/explorer/_index.md index fd10bd1d8..f08e8f18a 100644 --- a/content/influxdb3/explorer/_index.md +++ b/content/influxdb3/explorer/_index.md @@ -48,4 +48,4 @@ docker run --detach \ Install and run InfluxDB 3 Explorer -Get started with InfluxDB 3 Explorer +Get started using InfluxDB 3 Explorer From 1de1a6158934a0f6416c917df82dacef3f35913e Mon Sep 17 00:00:00 2001 From: peterbarnett03 Date: Thu, 21 Aug 2025 13:18:45 -0400 Subject: [PATCH 115/179] Update content/influxdb3/explorer/get-started.md Co-authored-by: Jason Stirnaman --- content/influxdb3/explorer/get-started.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb3/explorer/get-started.md b/content/influxdb3/explorer/get-started.md index a91a4a122..6cb6baf57 100644 --- a/content/influxdb3/explorer/get-started.md +++ b/content/influxdb3/explorer/get-started.md @@ -35,7 +35,7 @@ InfluxDB 3 Explorer supports the following InfluxDB 3 products: - **Server URL**: The URL used to connect to your InfluxDB 3 server. - Select the protocol to use (http or https). - Provide the host and, if necessary, the port. - - _If connecting to a local, non-Docker instance, use host.docker.internal_ + - _If connecting to a local, non-Docker instance, use `host.docker.internal`._ For more information about host.docker.internal, see the [Docker documentation](https://docs.docker.com/desktop/features/networking). - **Token**: The authorization token to use to connect to your InfluxDB 3 server. We recommend using an InfluxDB 3 _admin_ token. 
From e2823b768841fe9ab253bddee2506901dfe70538 Mon Sep 17 00:00:00 2001 From: peterbarnett03 Date: Thu, 21 Aug 2025 13:18:57 -0400 Subject: [PATCH 116/179] Update content/influxdb3/explorer/_index.md Co-authored-by: Jason Stirnaman --- content/influxdb3/explorer/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb3/explorer/_index.md b/content/influxdb3/explorer/_index.md index f08e8f18a..dd5965be8 100644 --- a/content/influxdb3/explorer/_index.md +++ b/content/influxdb3/explorer/_index.md @@ -47,5 +47,5 @@ docker run --detach \ ``` -Install and run InfluxDB 3 Explorer +For installation and configuration options, see [Install and run InfluxDB 3 Explorer](/influxdb3/explorer/install/). Get started using InfluxDB 3 Explorer From ca8777614724b03302d9d0c42185877ec1aea332 Mon Sep 17 00:00:00 2001 From: peterbarnett03 Date: Thu, 21 Aug 2025 13:19:03 -0400 Subject: [PATCH 117/179] Update content/influxdb3/explorer/_index.md Co-authored-by: Jason Stirnaman --- content/influxdb3/explorer/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb3/explorer/_index.md b/content/influxdb3/explorer/_index.md index dd5965be8..1fae003e1 100644 --- a/content/influxdb3/explorer/_index.md +++ b/content/influxdb3/explorer/_index.md @@ -43,7 +43,7 @@ docker run --detach \ influxdata/influxdb3-ui:{{% latest-patch %}} \ --mode=admin -# Visit http://localhost:8888 to begin using Explorer +# Visit http://localhost:8888 in your browser to begin using InfluxDB 3 Explorer ``` From 3aa4c0eae1c4ff0b7038bb58e1d1c2d6604babb9 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Thu, 21 Aug 2025 14:50:44 -0500 Subject: [PATCH 118/179] Update content/influxdb/v1/administration/upgrading.md --- content/influxdb/v1/administration/upgrading.md | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/content/influxdb/v1/administration/upgrading.md b/content/influxdb/v1/administration/upgrading.md index 
33de0ea92..cff4beca5 100644 --- a/content/influxdb/v1/administration/upgrading.md +++ b/content/influxdb/v1/administration/upgrading.md @@ -27,14 +27,15 @@ Upgrade to the latest version of InfluxDB OSS v1. > - **In-memory index (inmem)** - Default option that maintains compatibility with earlier versions but has RAM limitations. > > **When to use TSI:** -> - High-cardinality datasets (many unique tag combinations) -> - Experiencing high memory usage or out-of-memory errors -> - Large production deployments -> +> - General purpose production instances. +> - Especially recommended for: +> - High-cardinality datasets (many unique tag combinations) +> - Experiencing high memory usage or out-of-memory errors +> - Large production deployments +> > **When to use inmem:** -> - Small datasets where memory is not a constraint -> - Development or testing environments -> - Maintaining compatibility with existing tooling +> - Small datasets when memory is not a constraint +> - Ephemeral deployments such as development or testing environments > > To learn more about TSI, see [Time Series Index overview](/influxdb/v1/concepts/time-series-index/) and [TSI details](/influxdb/v1/concepts/tsi-details/). From e7e59322acf12111ace325589c845c35d89a44f2 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Thu, 21 Aug 2025 14:57:22 -0500 Subject: [PATCH 119/179] Apply suggestions from code review Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- content/influxdb/v1/administration/upgrading.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb/v1/administration/upgrading.md b/content/influxdb/v1/administration/upgrading.md index cff4beca5..de12b4046 100644 --- a/content/influxdb/v1/administration/upgrading.md +++ b/content/influxdb/v1/administration/upgrading.md @@ -24,7 +24,7 @@ Upgrade to the latest version of InfluxDB OSS v1. > InfluxDB 1.11.x supports two index types: > > - **Time Series Index (TSI)** - Recommended for most users. 
Removes RAM-based limits on series cardinality and provides better performance for high-cardinality datasets. -> - **In-memory index (inmem)** - Default option that maintains compatibility with earlier versions but has RAM limitations. +> - **In-memory index (inmem)** - Default option that maintains compatibility with earlier versions but is limited by available system RAM (series cardinality is limited by available RAM). > > **When to use TSI:** > - General purpose production instances. From d49d69ba2612485984df21984f4c9ed6d1367a41 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Fri, 22 Aug 2025 11:08:48 -0600 Subject: [PATCH 120/179] fix: add note to distributed about duplicate points on flush (#6330) --- .../best-practices/optimize-writes.md | 16 +- .../best-practices/optimize-writes.md | 16 +- .../reference/syntax/line-protocol.md | 261 +----------------- .../best-practices/optimize-writes.md | 19 +- content/shared/v3-line-protocol.md | 18 +- 5 files changed, 66 insertions(+), 264 deletions(-) diff --git a/content/influxdb3/cloud-dedicated/write-data/best-practices/optimize-writes.md b/content/influxdb3/cloud-dedicated/write-data/best-practices/optimize-writes.md index 71917fe4c..8f0de2ca7 100644 --- a/content/influxdb3/cloud-dedicated/write-data/best-practices/optimize-writes.md +++ b/content/influxdb3/cloud-dedicated/write-data/best-practices/optimize-writes.md @@ -416,9 +416,23 @@ The following example creates sample data for two series (the combination of mea ### Avoid sending duplicate data -Use Telegraf and the [Dedup processor plugin](/telegraf/v1/plugins/#processor-dedup) to filter data whose field values are exact repetitions of previous values. +When writing duplicate points (points with the same timestamp and tag set), +InfluxDB deduplicates the data by creating a union of the duplicate points. Deduplicating your data can reduce your write payload size and resource usage. 
+> [!Important] +> #### Write ordering for duplicate points +> +> InfluxDB attempts to honor write ordering for duplicate points, with the most +> recently written point taking precedence. However, when data is flushed from +> the in-memory buffer to Parquet files—typically every 15 minutes, but +> sometimes sooner—this ordering is not guaranteed if duplicate points are flushed +> at the same time. As a result, the last written duplicate point may not always +> be retained in storage. + +Use Telegraf and the [Dedup processor plugin](/telegraf/v1/plugins/#processor-dedup) +to filter data whose field values are exact repetitions of previous values. + The following example shows how to use Telegraf to remove points that repeat field values, and then write the data to InfluxDB: 1. In your terminal, enter the following command to create the sample data file and calculate the number of seconds between the earliest timestamp and _now_. diff --git a/content/influxdb3/cloud-serverless/write-data/best-practices/optimize-writes.md b/content/influxdb3/cloud-serverless/write-data/best-practices/optimize-writes.md index bb4c9fe92..ed01029d2 100644 --- a/content/influxdb3/cloud-serverless/write-data/best-practices/optimize-writes.md +++ b/content/influxdb3/cloud-serverless/write-data/best-practices/optimize-writes.md @@ -430,9 +430,23 @@ The following example creates sample data for two series (the combination of mea ### Avoid sending duplicate data -Use Telegraf and the [Dedup processor plugin](/telegraf/v1/plugins/#processor-dedup) to filter data whose field values are exact repetitions of previous values. +When writing duplicate points (points with the same timestamp and tag set), +InfluxDB deduplicates the data by creating a union of the duplicate points. Deduplicating your data can reduce your write payload size and resource usage. 
+> [!Important] +> #### Write ordering for duplicate points +> +> InfluxDB attempts to honor write ordering for duplicate points, with the most +> recently written point taking precedence. However, when data is flushed from +> the in-memory buffer to Parquet files—typically every 15 minutes, but +> sometimes sooner—this ordering is not guaranteed if duplicate points are flushed +> at the same time. As a result, the last written duplicate point may not always +> be retained in storage. + +Use Telegraf and the [Dedup processor plugin](/telegraf/v1/plugins/#processor-dedup) +to filter data whose field values are exact repetitions of previous values. + The following example shows how to use Telegraf to remove points that repeat field values, and then write the data to InfluxDB: 1. In your terminal, enter the following command to create the sample data file and calculate the number of seconds between the earliest timestamp and _now_. diff --git a/content/influxdb3/clustered/reference/syntax/line-protocol.md b/content/influxdb3/clustered/reference/syntax/line-protocol.md index 7947d9e93..87ff87707 100644 --- a/content/influxdb3/clustered/reference/syntax/line-protocol.md +++ b/content/influxdb3/clustered/reference/syntax/line-protocol.md @@ -2,7 +2,7 @@ title: Line protocol reference description: > InfluxDB uses line protocol to write data points. - It is a text-based format that provides the measurement, tag set, field set, and timestamp of a data point. + It is a text-based format that provides the table, tag set, field set, and timestamp of a data point. menu: influxdb3_clustered: name: Line protocol @@ -11,261 +11,8 @@ weight: 102 influxdb3/clustered/tags: [write, line protocol, syntax] related: - /influxdb3/clustered/write-data/ +source: /shared/v3-line-protocol.md --- -InfluxDB uses line protocol to write data points. -It is a text-based format that provides the measurement, tag set, field set, and timestamp of a data point. 
- -- [Elements of line protocol](#elements-of-line-protocol) -- [Data types and format](#data-types-and-format) -- [Quotes](#quotes) -- [Special characters](#special-characters) -- [Comments](#comments) -- [Naming restrictions](#naming-restrictions) -- [Duplicate points](#duplicate-points) - -```js -// Syntax -[,=[,=]] =[,=] [] - -// Example -myMeasurement,tag1=value1,tag2=value2 fieldKey="fieldValue" 1556813561098000000 -``` - -Lines separated by the newline character `\n` represent a single point -in InfluxDB. Line protocol is whitespace sensitive. - -> [!Note] -> Line protocol does not support the newline character `\n` in tag or field values. - -## Elements of line protocol - -{{< influxdb/line-protocol commas=false whitespace=false >}} - -### Measurement -({{< req >}}) -The measurement name. -InfluxDB accepts one measurement per point. -_Measurement names are case-sensitive and subject to [naming restrictions](#naming-restrictions)._ - -_**Data type:** [String](#string)_ - - -### Tag set -_**Optional**_ – -All tag key-value pairs for the point. -Key-value relationships are denoted with the `=` operand. -Multiple tag key-value pairs are comma-delimited. -_Tag keys and tag values are case-sensitive. -Tag keys are subject to [naming restrictions](#naming-restrictions). -Tag values cannot be empty; instead, omit the tag from the tag set._ - -_**Key data type:** [String](#string)_ -_**Value data type:** [String](#string)_ - -### Field set -({{< req >}}) -All field key-value pairs for the point. -Points must have at least one field. -_Field keys and string values are case-sensitive. -Field keys are subject to [naming restrictions](#naming-restrictions)._ - -_**Key data type:** [String](#string)_ -_**Value data type:** [Float](#float) | [Integer](#integer) | [UInteger](#uinteger) | [String](#string) | [Boolean](#boolean)_ - -> [!Note] -> _Always double quote string field values. 
More on quotes [below](#quotes)._ -> -> ```sh -> measurementName fieldKey="field string value" 1556813561098000000 -> ``` - -### Timestamp -_**Optional**_ – -The [unix timestamp](/influxdb/v2/reference/glossary/#unix-timestamp) for the data point. -InfluxDB accepts one timestamp per point. -If no timestamp is provided, InfluxDB uses the system time (UTC) of its host machine. - -_**Data type:** [Unix timestamp](#unix-timestamp)_ - -> [!Note] -> #### Important notes about timestamps -> -> - To ensure a data point includes the time a metric is observed (not received by InfluxDB), -> include the timestamp. -> - If your timestamps are not in nanoseconds, specify the precision of your timestamps -> when [writing the data to InfluxDB](/influxdb/v2/write-data/#timestamp-precision). - -### Whitespace -Whitespace in line protocol determines how InfluxDB interprets the data point. -The **first unescaped space** delimits the measurement and the tag set from the field set. -The **second unescaped space** delimits the field set from the timestamp. - -{{< influxdb/line-protocol elements=false commas=false >}} - -## Data types and format - -### Float -IEEE-754 64-bit floating-point numbers. -Default numerical type. -_InfluxDB supports scientific notation in float field values._ - -##### Float field value examples -```js -myMeasurement fieldKey=1.0 -myMeasurement fieldKey=1 -myMeasurement fieldKey=-1.234456e+78 -``` - -### Integer -Signed 64-bit integers. -Trailing `i` on the number specifies an integer. - -| Minimum integer | Maximum integer | -| --------------- | --------------- | -| `-9223372036854775808i` | `9223372036854775807i` | - -##### Integer field value examples -```js -myMeasurement fieldKey=1i -myMeasurement fieldKey=12485903i -myMeasurement fieldKey=-12485903i -``` - -### UInteger -Unsigned 64-bit integers. -Trailing `u` on the number specifies an unsigned integer. 
- -| Minimum uinteger | Maximum uinteger | -| ---------------- | ---------------- | -| `0u` | `18446744073709551615u` | - -##### UInteger field value examples -```js -myMeasurement fieldKey=1u -myMeasurement fieldKey=12485903u -``` - -### String -Plain text string. -Length limit 64KB. - -##### String example -```sh -# String measurement name, field key, and field value -myMeasurement fieldKey="this is a string" -``` - -### Boolean -Stores `true` or `false` values. - -| Boolean value | Accepted syntax | -|:-------------:|:--------------- | -| True | `t`, `T`, `true`, `True`, `TRUE` | -| False | `f`, `F`, `false`, `False`, `FALSE` | - -##### Boolean field value examples -```js -myMeasurement fieldKey=true -myMeasurement fieldKey=false -myMeasurement fieldKey=t -myMeasurement fieldKey=f -myMeasurement fieldKey=TRUE -myMeasurement fieldKey=FALSE -``` - -> [!Note] -> Do not quote boolean field values. -> Quoted field values are interpreted as strings. - -### Unix timestamp -Unix timestamp in a [specified precision](/influxdb/v2/reference/glossary/#unix-timestamp). -Default precision is nanoseconds (`ns`). 
- -| Minimum timestamp | Maximum timestamp | -| ----------------- | ----------------- | -| `-9223372036854775806` | `9223372036854775806` | - -##### Unix timestamp example -```js -myMeasurementName fieldKey="fieldValue" 1556813561098000000 -``` - -## Quotes -Line protocol supports single and double quotes as described in the following table: - -| Element | Double quotes | Single quotes | -| :------ | :------------: |:-------------: | -| Measurement | _Limited_ * | _Limited_ * | -| Tag key | _Limited_ * | _Limited_ * | -| Tag value | _Limited_ * | _Limited_ * | -| Field key | _Limited_ * | _Limited_ * | -| Field value | **Strings only** | Never | -| Timestamp | Never | Never | - -\* _Line protocol accepts double and single quotes in -measurement names, tag keys, tag values, and field keys, but interprets them as -part of the name, key, or value._ - -## Special Characters -Line protocol supports special characters in [string elements](#string). -In the following contexts, it requires escaping certain characters with a backslash (`\`): - -| Element | Escape characters | -|:------- |:----------------- | -| Measurement | Comma, Space | -| Tag key | Comma, Equals Sign, Space | -| Tag value | Comma, Equals Sign, Space | -| Field key | Comma, Equals Sign, Space | -| Field value | Double quote, Backslash | - -You do not need to escape other special characters. - -##### Examples of special characters in line protocol -```sh -# Measurement name with spaces -my\ Measurement fieldKey="string value" - -# Double quotes in a string field value -myMeasurement fieldKey="\"string\" within a string" - -# Tag keys and values with spaces -myMeasurement,tag\ Key1=tag\ Value1,tag\ Key2=tag\ Value2 fieldKey=100 - -# Emojis -myMeasurement,tagKey=🍭 fieldKey="Launch 🚀" 1556813561098000000 -``` - -### Escaping backslashes -Line protocol supports both literal backslashes and backslashes as an escape character. -With two contiguous backslashes, the first is interpreted as an escape character. 
-For example: - -| Backslashes | Interpreted as | -|:-----------:|:-------------:| -| `\` | `\` | -| `\\` | `\` | -| `\\\` | `\\` | -| `\\\\` | `\\` | -| `\\\\\` | `\\\` | -| `\\\\\\` | `\\\` | - -## Comments -Line protocol interprets `#` at the beginning of a line as a comment character -and ignores all subsequent characters until the next newline `\n`. - -```sh -# This is a comment -myMeasurement fieldKey="string value" 1556813561098000000 -``` - -## Naming restrictions -Measurement names, tag keys, and field keys cannot begin with an underscore `_`. -The `_` namespace is reserved for InfluxDB system use. - -## Duplicate points -A point is uniquely identified by the measurement name, tag set, and timestamp. -If you submit line protocol with the same measurement, tag set, and timestamp, -but with a different field set, the field set becomes the union of the old -field set and the new field set, where any conflicts favor the new field set. - + diff --git a/content/influxdb3/clustered/write-data/best-practices/optimize-writes.md b/content/influxdb3/clustered/write-data/best-practices/optimize-writes.md index 9e9dff460..b19502238 100644 --- a/content/influxdb3/clustered/write-data/best-practices/optimize-writes.md +++ b/content/influxdb3/clustered/write-data/best-practices/optimize-writes.md @@ -14,7 +14,8 @@ related: - /influxdb3/clustered/write-data/use-telegraf/ --- -Use these tips to optimize performance and system overhead when writing data to InfluxDB. +Use these tips to optimize performance and system overhead when writing data to +{{% product-name %}}. - [Batch writes](#batch-writes) - [Sort tags by key](#sort-tags-by-key) @@ -422,9 +423,23 @@ The following example creates sample data for two series (the combination of mea ### Avoid sending duplicate data -Use Telegraf and the [Dedup processor plugin](/telegraf/v1/plugins/#processor-dedup) to filter data whose field values are exact repetitions of previous values. 
+When writing duplicate points (points with the same timestamp and tag set), +InfluxDB deduplicates the data by creating a union of the duplicate points. Deduplicating your data can reduce your write payload size and resource usage. +> [!Important] +> #### Write ordering for duplicate points +> +> InfluxDB attempts to honor write ordering for duplicate points, with the most +> recently written point taking precedence. However, when data is flushed from +> the in-memory buffer to Parquet files—typically every 15 minutes, but +> sometimes sooner—this ordering is not guaranteed if duplicate points are flushed +> at the same time. As a result, the last written duplicate point may not always +> be retained in storage. + +Use Telegraf and the [Dedup processor plugin](/telegraf/v1/plugins/#processor-dedup) +to filter data whose field values are exact repetitions of previous values. + The following example shows how to use Telegraf to remove points that repeat field values, and then write the data to InfluxDB: 1. In your terminal, enter the following command to create the sample data file and calculate the number of seconds between the earliest timestamp and _now_. diff --git a/content/shared/v3-line-protocol.md b/content/shared/v3-line-protocol.md index 0f197053d..323fb8d26 100644 --- a/content/shared/v3-line-protocol.md +++ b/content/shared/v3-line-protocol.md @@ -44,7 +44,7 @@ _**Data type:** [String](#string)_ ### Tag set -_**Optional**_ – +(_**Optional**_) All tag key-value pairs for the point. Key-value relationships are denoted with the `=` operand. Multiple tag key-value pairs are comma-delimited. @@ -75,8 +75,8 @@ _**Value data type:** [Float](#float) | [Integer](#integer) | [UInteger](#uinteg ### Timestamp -_**Optional**_ – -The [unix timestamp](/influxdb3/version/reference/glossary/#unix-timestamp) for the data point. +(_**Optional**_) +The [Unix timestamp](/influxdb3/version/reference/glossary/#unix-timestamp) for the data point. 
InfluxDB accepts one timestamp per point. If no timestamp is provided, InfluxDB uses the system time (UTC) of its host machine. @@ -282,3 +282,15 @@ A point is uniquely identified by the table name, tag set, and timestamp. If you submit line protocol with the same table, tag set, and timestamp, but with a different field set, the field set becomes the union of the old field set and the new field set, where any conflicts favor the new field set. + +{{% show-in "cloud-dedicated,clustered" %}} +> [!Important] +> #### Write ordering for duplicate points +> +> {{% product-name %}} attempts to honor write ordering for duplicate points, +> with the most recently written point taking precedence. However, when data is +> flushed from the in-memory buffer to Parquet files—typically every 15 minutes, +> but sometimes sooner—this ordering is not guaranteed if duplicate points are +> flushed at the same time. As a result, the last written duplicate point may +> not always be retained in storage. +{{% /show-in %}} From 0ddd5a22578c9d0ec92bf170555c7c5513eb2735 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Fri, 22 Aug 2025 16:14:22 -0500 Subject: [PATCH 121/179] feat(influxdb3): Back up and restore Core and Enterprise:- Recommends a process for back up and recovery of files in object storage.- Provides examples for Core and Enterprise'- Prescribes the order to stop, back up, and restart nodes. 
--- .../influxdb3/core/admin/backup-restore.md | 23 + .../enterprise/admin/backup-restore.md | 25 + .../shared/influxdb3-admin/backup-restore.md | 494 ++++++++++++++++++ 3 files changed, 542 insertions(+) create mode 100644 content/influxdb3/core/admin/backup-restore.md create mode 100644 content/influxdb3/enterprise/admin/backup-restore.md create mode 100644 content/shared/influxdb3-admin/backup-restore.md diff --git a/content/influxdb3/core/admin/backup-restore.md b/content/influxdb3/core/admin/backup-restore.md new file mode 100644 index 000000000..9da134730 --- /dev/null +++ b/content/influxdb3/core/admin/backup-restore.md @@ -0,0 +1,23 @@ +--- +title: Back up and restore data +seotitle: Back up and restore {{< product-name >}} +description: > + Manually back up and restore your {{< product-name >}} instance by copying + object storage files in the recommended order. +menu: + influxdb3_core: + name: Back up and restore + parent: Administer InfluxDB Core +weight: 105 +influxdb3/core/tags: [backup, restore, administration, object storage] +related: + - /influxdb3/core/admin/databases/ + - /influxdb3/core/reference/cli/influxdb3/serve/ + - /influxdb3/core/install/ + - /influxdb3/core/reference/internals/durability/ +source: /shared/influxdb3-admin/backup-restore.md +--- + + diff --git a/content/influxdb3/enterprise/admin/backup-restore.md b/content/influxdb3/enterprise/admin/backup-restore.md new file mode 100644 index 000000000..a8338f322 --- /dev/null +++ b/content/influxdb3/enterprise/admin/backup-restore.md @@ -0,0 +1,25 @@ +--- +title: Back up and restore data +seotitle: Back up and restore {{< product-name >}} +description: > + Manually back up and restore your {{< product-name >}} cluster by copying + object storage files in the recommended order for each node type. 
+menu: + influxdb3_enterprise: + name: Back up and restore + parent: Administer InfluxDB Enterprise +weight: 105 +influxdb3/enterprise/tags: [backup, restore, administration, cluster, object storage] +related: + - /influxdb3/enterprise/admin/databases/ + - /influxdb3/enterprise/admin/license/ + - /influxdb3/enterprise/reference/cli/influxdb3/serve/ + - /influxdb3/enterprise/install/ + - /influxdb3/enterprise/get-started/multi-server/ + - /influxdb3/enterprise/reference/internals/durability/ +source: /shared/influxdb3-admin/backup-restore.md +--- + + diff --git a/content/shared/influxdb3-admin/backup-restore.md b/content/shared/influxdb3-admin/backup-restore.md new file mode 100644 index 000000000..37e9fe98c --- /dev/null +++ b/content/shared/influxdb3-admin/backup-restore.md @@ -0,0 +1,494 @@ + +{{% product-name %}} persists all data and metadata to object storage. +Back up your data by copying object storage files in a specific order to ensure consistency and reliability. + +> [!Warning] +> Currently, {{% product-name %}} does not include built-in backup and restore tools. +> Because copying files during periods of activity is a transient process, the manual backup process _cannot guarantee 100% reliability_. +> Follow the recommended procedures and copy order to minimize risk of creating inconsistent backups. + +## Supported object storage + +InfluxDB 3 supports the following object storage backends for data persistence: + +- **File system** (local directory) +- **AWS S3** and S3-compatible storage ([MinIO](/influxdb3/version/object-storage/minio/)) +- **Azure Blob Storage** +- **Google Cloud Storage** + +> [!Note] +> Backup and restore procedures don't apply to memory-based [object stores](/influxdb3/version/reference/config-options/#object-store). 
+ +## File structure + +{{% show-in "core" %}} + +| Location | Description | +| ----------------------------------------- | --------------------------------------------------------------------------------------------- | +| `/` | Root directory for all node state | +| `/_catalog_checkpoint` | Catalog state checkpoint file | +| `/catalogs/` | Catalog log files tracking catalog state changes | +| `/wal/` | [Write-ahead log files](/influxdb3/core/reference/internals/durability/#write-ahead-log-wal-persistence) containing written data | +| `/snapshots/` | Snapshot files summarizing persisted [Parquet files](/influxdb3/core/reference/internals/durability/#parquet-storage) | +| `/dbs////` | [Parquet files](/influxdb3/core/reference/internals/durability/#parquet-storage) organized by [database](/influxdb3/core/admin/databases/), [table](/influxdb3/core/admin/tables/), and time | +| `/table-snapshots//
/` | Table snapshot files (regenerated on restart, optional for backup) | + +{{% /show-in %}} +{{% show-in "enterprise" %}} + +| Location | Description | +| ----------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **Cluster files** | | +| `/_catalog_checkpoint` | Catalog state checkpoint file | +| `/catalogs/` | Catalog log files | +| `/commercial_license` | Commercial [license](/influxdb3/enterprise/admin/license/) file (if applicable) | +| `/trial_or_home_license` | Trial or home [license](/influxdb3/enterprise/admin/license/) file (if applicable) | +| `/enterprise` | Enterprise configuration file | +| **Node files** | | +| `/wal/` | [Write-ahead log files](/influxdb3/enterprise/reference/internals/durability/#write-ahead-log-wal-persistence) containing written data | +| `/snapshots/` | Snapshot files | +| `/dbs//
//` | [Parquet files](/influxdb3/enterprise/reference/internals/durability/#parquet-storage) organized by [database](/influxdb3/enterprise/admin/databases/), [table](/influxdb3/enterprise/admin/tables/), and time | +| `/table-snapshots//
/` | Table snapshot files (regenerated on restart, optional for backup) | +| **Compactor node additional files** | | +| `/cs` | Compaction summary files | +| `/cd` | Compaction detail files | +| `/c` | Generation detail and [Parquet files](/influxdb3/enterprise/reference/internals/durability/#parquet-storage) | +{{% /show-in %}} + +## Backup process + +> [!Important] +> Copy files in the recommended order to reduce risk of creating inconsistent backups. Perform backups during downtime or minimal load periods when possible. + +{{% show-in "core" %}} + +**Recommended backup order:** +1. Snapshots directory +2. Database (dbs) directory +3. WAL directory +4. Catalogs directory +5. Catalog checkpoint file + +{{< tabs-wrapper >}} +{{% tabs %}} +[File system](#) +[S3](#) +{{% /tabs %}} +{{% tab-content %}} + + +```bash { placeholders="NODE_ID" } +#!/bin/bash +NODE_ID="NODE_ID" +DATA_DIR="/path/to/data" +BACKUP_DIR="/backup/$(date +%Y%m%d-%H%M%S)" + +mkdir -p "$BACKUP_DIR" + +# Copy in recommended order +cp -r $DATA_DIR/${NODE_ID}/snapshots "$BACKUP_DIR/" +cp -r $DATA_DIR/${NODE_ID}/dbs "$BACKUP_DIR/" +cp -r $DATA_DIR/${NODE_ID}/wal "$BACKUP_DIR/" +cp -r $DATA_DIR/${NODE_ID}/catalogs "$BACKUP_DIR/" +cp $DATA_DIR/${NODE_ID}/_catalog_checkpoint "$BACKUP_DIR/" + +echo "Backup completed to $BACKUP_DIR" +``` + +Replace {{% code-placeholder-key %}}`NODE_ID`{{% /code-placeholder-key %}} with your [node ID](/influxdb3/core/reference/config-options/#node-id). + +> [!Note] +> This example works with Docker containers that use volume mounts for data persistence. Adjust the `DATA_DIR` path to match your volume mount configuration. 
+ + +{{% /tab-content %}} +{{% tab-content %}} + + +```bash { placeholders="NODE_ID|SOURCE_BUCKET|BACKUP_BUCKET" } +#!/bin/bash +NODE_ID="NODE_ID" +SOURCE_BUCKET="SOURCE_BUCKET" +BACKUP_BUCKET="BACKUP_BUCKET" +BACKUP_PREFIX="backup-$(date +%Y%m%d-%H%M%S)" + +# Copy in recommended order +aws s3 sync s3://${SOURCE_BUCKET}/${NODE_ID}/snapshots \ + s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${NODE_ID}/snapshots/ + +aws s3 sync s3://${SOURCE_BUCKET}/${NODE_ID}/dbs \ + s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${NODE_ID}/dbs/ + +aws s3 sync s3://${SOURCE_BUCKET}/${NODE_ID}/wal \ + s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${NODE_ID}/wal/ + +aws s3 sync s3://${SOURCE_BUCKET}/${NODE_ID}/catalogs \ + s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${NODE_ID}/catalogs/ + +aws s3 cp s3://${SOURCE_BUCKET}/${NODE_ID}/_catalog_checkpoint \ + s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${NODE_ID}/ + +echo "Backup completed to s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}" +``` + +Replace the following: +- {{% code-placeholder-key %}}`NODE_ID`{{% /code-placeholder-key %}}: your [node ID](/influxdb3/core/reference/config-options/#node-id) +- {{% code-placeholder-key %}}`SOURCE_BUCKET`{{% /code-placeholder-key %}}: your InfluxDB data bucket +- {{% code-placeholder-key %}}`BACKUP_BUCKET`{{% /code-placeholder-key %}}: your backup destination bucket + + +{{% /tab-content %}} +{{< /tabs-wrapper >}} +{{% /show-in %}} + +{{% show-in "enterprise" %}} + +**Recommended backup order:** +1. Compactor node directories (cs, cd, c) +2. All nodes' snapshots, dbs, wal directories +3. Cluster catalog and checkpoint +4. 
License files + +{{< tabs-wrapper >}} +{{% tabs %}} +[S3](#) +[File system](#) +{{% /tabs %}} +{{% tab-content %}} + + +```bash { placeholders="CLUSTER_ID|COMPACTOR_NODE|NODE1|NODE2|NODE3|SOURCE_BUCKET|BACKUP_BUCKET" } +#!/bin/bash +CLUSTER_ID="CLUSTER_ID" +COMPACTOR_NODE="COMPACTOR_NODE" +DATA_NODES=("NODE1" "NODE2" "NODE3") +SOURCE_BUCKET="SOURCE_BUCKET" +BACKUP_BUCKET="BACKUP_BUCKET" +BACKUP_PREFIX="backup-$(date +%Y%m%d-%H%M%S)" + +# 1. Backup compactor node first +echo "Backing up compactor node..." +aws s3 sync s3://${SOURCE_BUCKET}/${COMPACTOR_NODE}/cs \ + s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${COMPACTOR_NODE}/cs/ + +aws s3 sync s3://${SOURCE_BUCKET}/${COMPACTOR_NODE}/cd \ + s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${COMPACTOR_NODE}/cd/ + +aws s3 sync s3://${SOURCE_BUCKET}/${COMPACTOR_NODE}/c \ + s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${COMPACTOR_NODE}/c/ + +# 2. Backup all nodes (including compactor) +ALL_NODES=("${DATA_NODES[@]}" "$COMPACTOR_NODE") +for NODE_ID in "${ALL_NODES[@]}"; do + echo "Backing up node: ${NODE_ID}" + aws s3 sync s3://${SOURCE_BUCKET}/${NODE_ID}/snapshots \ + s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${NODE_ID}/snapshots/ + + aws s3 sync s3://${SOURCE_BUCKET}/${NODE_ID}/dbs \ + s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${NODE_ID}/dbs/ + + aws s3 sync s3://${SOURCE_BUCKET}/${NODE_ID}/wal \ + s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${NODE_ID}/wal/ +done + +# 3. Backup cluster catalog +echo "Backing up cluster catalog..." +aws s3 sync s3://${SOURCE_BUCKET}/${CLUSTER_ID}/catalogs \ + s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${CLUSTER_ID}/catalogs/ + +aws s3 cp s3://${SOURCE_BUCKET}/${CLUSTER_ID}/_catalog_checkpoint \ + s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${CLUSTER_ID}/ + +aws s3 cp s3://${SOURCE_BUCKET}/${CLUSTER_ID}/enterprise \ + s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${CLUSTER_ID}/ + +# 4. 
Backup license files (may not exist)
aws s3 cp s3://${SOURCE_BUCKET}/${CLUSTER_ID}/commercial_license \
  s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${CLUSTER_ID}/ 2>/dev/null || true

aws s3 cp s3://${SOURCE_BUCKET}/${CLUSTER_ID}/trial_or_home_license \
  s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${CLUSTER_ID}/ 2>/dev/null || true

echo "Backup completed to s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}"
```

Replace the following:
- {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: your [cluster ID](/influxdb3/enterprise/reference/config-options/#cluster-id)
- {{% code-placeholder-key %}}`COMPACTOR_NODE`{{% /code-placeholder-key %}}: your [compactor](/influxdb3/enterprise/get-started/multi-server/#high-availability-with-a-dedicated-compactor) node ID
- {{% code-placeholder-key %}}`NODE1`, `NODE2`, `NODE3`{{% /code-placeholder-key %}}: your data [node IDs](/influxdb3/enterprise/reference/config-options/#node-id)
- {{% code-placeholder-key %}}`SOURCE_BUCKET`{{% /code-placeholder-key %}}: your InfluxDB data bucket
- {{% code-placeholder-key %}}`BACKUP_BUCKET`{{% /code-placeholder-key %}}: your backup destination bucket


{{% /tab-content %}}
{{% tab-content %}}


```bash { placeholders="CLUSTER_ID|COMPACTOR_NODE|NODE1|NODE2|NODE3" }
#!/bin/bash
CLUSTER_ID="CLUSTER_ID"
COMPACTOR_NODE="COMPACTOR_NODE"
DATA_NODES=("NODE1" "NODE2" "NODE3")
DATA_DIR="/path/to/data"
BACKUP_DIR="/backup/$(date +%Y%m%d-%H%M%S)"

mkdir -p "$BACKUP_DIR"

# 1. Backup compactor node first
echo "Backing up compactor node..."
mkdir -p "$BACKUP_DIR/${COMPACTOR_NODE}"
cp -r $DATA_DIR/${COMPACTOR_NODE}/cs "$BACKUP_DIR/${COMPACTOR_NODE}/"
cp -r $DATA_DIR/${COMPACTOR_NODE}/cd "$BACKUP_DIR/${COMPACTOR_NODE}/"
cp -r $DATA_DIR/${COMPACTOR_NODE}/c "$BACKUP_DIR/${COMPACTOR_NODE}/"

# 2. 
Backup all nodes +ALL_NODES=("${DATA_NODES[@]}" "$COMPACTOR_NODE") +for NODE_ID in "${ALL_NODES[@]}"; do + echo "Backing up node: ${NODE_ID}" + mkdir -p "$BACKUP_DIR/${NODE_ID}" + cp -r $DATA_DIR/${NODE_ID}/snapshots "$BACKUP_DIR/${NODE_ID}/" + cp -r $DATA_DIR/${NODE_ID}/dbs "$BACKUP_DIR/${NODE_ID}/" + cp -r $DATA_DIR/${NODE_ID}/wal "$BACKUP_DIR/${NODE_ID}/" +done + +# 3. Backup cluster catalog +echo "Backing up cluster catalog..." +mkdir -p "$BACKUP_DIR/${CLUSTER_ID}" +cp -r $DATA_DIR/${CLUSTER_ID}/catalogs "$BACKUP_DIR/${CLUSTER_ID}/" +cp $DATA_DIR/${CLUSTER_ID}/_catalog_checkpoint "$BACKUP_DIR/${CLUSTER_ID}/" +cp $DATA_DIR/${CLUSTER_ID}/enterprise "$BACKUP_DIR/${CLUSTER_ID}/" + +# 4. Backup license files (if they exist) +[ -f "$DATA_DIR/${CLUSTER_ID}/commercial_license" ] && \ + cp $DATA_DIR/${CLUSTER_ID}/commercial_license "$BACKUP_DIR/${CLUSTER_ID}/" +[ -f "$DATA_DIR/${CLUSTER_ID}/trial_or_home_license" ] && \ + cp $DATA_DIR/${CLUSTER_ID}/trial_or_home_license "$BACKUP_DIR/${CLUSTER_ID}/" + +echo "Backup completed to $BACKUP_DIR" +``` + +Replace the following: +- {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: your [cluster ID](/influxdb3/enterprise/reference/config-options/#cluster-id) +- {{% code-placeholder-key %}}`COMPACTOR_NODE`{{% /code-placeholder-key %}}: your [compactor](/influxdb3/enterprise/get-started/multi-server/#high-availability-with-a-dedicated-compactor) node ID +- {{% code-placeholder-key %}}`NODE1`, `NODE2`, `NODE3`{{% /code-placeholder-key %}}: your data [node IDs](/influxdb3/enterprise/reference/config-options/#node-id) + + +{{% /tab-content %}} +{{< /tabs-wrapper >}} +{{% /show-in %}} + +## Restore process + +> [!Warning] +> Restoring overwrites existing data. Always verify you have correct backups before proceeding. 
+ +{{% show-in "core" %}} + +#### File system restore example + +```bash { placeholders="NODE_ID|BACKUP_DATE" } +#!/bin/bash +NODE_ID="NODE_ID" +BACKUP_DIR="/backup/BACKUP_DATE" +DATA_DIR="/path/to/data" + +# 1. Stop InfluxDB +systemctl stop influxdb3 || docker stop influxdb3-core + +# 2. Optional: Clear existing data for clean restore +rm -rf ${DATA_DIR}/${NODE_ID}/* + +# 3. Restore in reverse order of backup +mkdir -p ${DATA_DIR}/${NODE_ID} +cp ${BACKUP_DIR}/_catalog_checkpoint ${DATA_DIR}/${NODE_ID}/ +cp -r ${BACKUP_DIR}/catalogs ${DATA_DIR}/${NODE_ID}/ +cp -r ${BACKUP_DIR}/wal ${DATA_DIR}/${NODE_ID}/ +cp -r ${BACKUP_DIR}/dbs ${DATA_DIR}/${NODE_ID}/ +cp -r ${BACKUP_DIR}/snapshots ${DATA_DIR}/${NODE_ID}/ + +# 4. Set correct permissions (important for Docker) +chown -R influxdb:influxdb ${DATA_DIR}/${NODE_ID} + +# 5. Start InfluxDB +systemctl start influxdb3 || docker start influxdb3-core +``` + +Replace the following: +- {{% code-placeholder-key %}}`NODE_ID`{{% /code-placeholder-key %}}: your [node ID](/influxdb3/core/reference/config-options/#node-id) +- {{% code-placeholder-key %}}`BACKUP_DATE`{{% /code-placeholder-key %}}: backup directory timestamp (for example, 20240115-143022) + +#### S3 restore example + +```bash { placeholders="NODE_ID|BACKUP_DATE|BACKUP_BUCKET|TARGET_BUCKET" } +#!/bin/bash +NODE_ID="NODE_ID" +BACKUP_BUCKET="BACKUP_BUCKET" +BACKUP_PREFIX="backup-BACKUP_DATE" +TARGET_BUCKET="TARGET_BUCKET" + +# 1. Stop InfluxDB +# Implementation depends on your deployment method + +# 2. Optional: Clear existing data for clean restore +aws s3 rm s3://${TARGET_BUCKET}/${NODE_ID} --recursive + +# 3. Restore from backup +aws s3 sync s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${NODE_ID}/ \ + s3://${TARGET_BUCKET}/${NODE_ID}/ + +# 4. 
Start InfluxDB +# Implementation depends on your deployment method +``` + +Replace the following: +- {{% code-placeholder-key %}}`NODE_ID`{{% /code-placeholder-key %}}: your node ID +- {{% code-placeholder-key %}}`BACKUP_DATE`{{% /code-placeholder-key %}}: backup timestamp +- {{% code-placeholder-key %}}`BACKUP_BUCKET`{{% /code-placeholder-key %}}: bucket containing backup +- {{% code-placeholder-key %}}`TARGET_BUCKET`{{% /code-placeholder-key %}}: target bucket for restoration +{{% /show-in %}} + +{{% show-in "enterprise" %}} + +#### S3 restore example + +```bash { placeholders="CLUSTER_ID|COMPACTOR_NODE|NODE1|NODE2|NODE3|BACKUP_DATE|BACKUP_BUCKET|TARGET_BUCKET" } +#!/bin/bash +CLUSTER_ID="CLUSTER_ID" +COMPACTOR_NODE="COMPACTOR_NODE" +DATA_NODES=("NODE1" "NODE2" "NODE3") +BACKUP_BUCKET="BACKUP_BUCKET" +BACKUP_PREFIX="backup-BACKUP_DATE" +TARGET_BUCKET="TARGET_BUCKET" + +# 1. Stop all InfluxDB 3 Enterprise nodes +# Implementation depends on your orchestration + +# 2. Restore cluster catalog and license first +aws s3 cp s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${CLUSTER_ID}/_catalog_checkpoint \ + s3://${TARGET_BUCKET}/${CLUSTER_ID}/ + +aws s3 sync s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${CLUSTER_ID}/catalogs \ + s3://${TARGET_BUCKET}/${CLUSTER_ID}/catalogs/ + +aws s3 cp s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${CLUSTER_ID}/enterprise \ + s3://${TARGET_BUCKET}/${CLUSTER_ID}/ + +# Restore license files if they exist +aws s3 cp s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${CLUSTER_ID}/commercial_license \ + s3://${TARGET_BUCKET}/${CLUSTER_ID}/ 2>/dev/null || true + +aws s3 cp s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${CLUSTER_ID}/trial_or_home_license \ + s3://${TARGET_BUCKET}/${CLUSTER_ID}/ 2>/dev/null || true + +# 3. Restore all nodes +ALL_NODES=("${DATA_NODES[@]}" "$COMPACTOR_NODE") +for NODE_ID in "${ALL_NODES[@]}"; do + echo "Restoring node: ${NODE_ID}" + aws s3 sync s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}/${NODE_ID}/ \ + s3://${TARGET_BUCKET}/${NODE_ID}/ +done + +# 4. 
Start InfluxDB Enterprise nodes +# Start in order: data nodes, compactor +``` + +Replace the following: +- {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: your [cluster ID](/influxdb3/enterprise/reference/config-options/#cluster-id) +- {{% code-placeholder-key %}}`COMPACTOR_NODE`{{% /code-placeholder-key %}}: your [compactor](/influxdb3/enterprise/get-started/multi-server/#high-availability-with-a-dedicated-compactor) node ID +- {{% code-placeholder-key %}}`NODE1`, `NODE2`, `NODE3`{{% /code-placeholder-key %}}: your data [node IDs](/influxdb3/enterprise/reference/config-options/#node-id) +- {{% code-placeholder-key %}}`BACKUP_DATE`{{% /code-placeholder-key %}}: backup timestamp +- {{% code-placeholder-key %}}`BACKUP_BUCKET`{{% /code-placeholder-key %}}: bucket containing backup +- {{% code-placeholder-key %}}`TARGET_BUCKET`{{% /code-placeholder-key %}}: target bucket for restoration + +#### File system restore example + +```bash { placeholders="CLUSTER_ID|COMPACTOR_NODE|NODE1|NODE2|NODE3|BACKUP_DATE" } +#!/bin/bash +CLUSTER_ID="CLUSTER_ID" +COMPACTOR_NODE="COMPACTOR_NODE" +DATA_NODES=("NODE1" "NODE2" "NODE3") +BACKUP_DIR="/backup/BACKUP_DATE" +DATA_DIR="/path/to/data" + +# 1. Stop all InfluxDB 3 Enterprise nodes +# Implementation depends on your deployment method + +# 2. Optional: Clear existing data +ALL_NODES=("${DATA_NODES[@]}" "$COMPACTOR_NODE") +for NODE_ID in "${ALL_NODES[@]}"; do + rm -rf ${DATA_DIR}/${NODE_ID}/* +done +rm -rf ${DATA_DIR}/${CLUSTER_ID}/* + +# 3. 
Restore cluster catalog and license +mkdir -p ${DATA_DIR}/${CLUSTER_ID} +cp ${BACKUP_DIR}/${CLUSTER_ID}/_catalog_checkpoint ${DATA_DIR}/${CLUSTER_ID}/ +cp -r ${BACKUP_DIR}/${CLUSTER_ID}/catalogs ${DATA_DIR}/${CLUSTER_ID}/ +cp ${BACKUP_DIR}/${CLUSTER_ID}/enterprise ${DATA_DIR}/${CLUSTER_ID}/ + +# Restore license files if they exist +[ -f "${BACKUP_DIR}/${CLUSTER_ID}/commercial_license" ] && \ + cp ${BACKUP_DIR}/${CLUSTER_ID}/commercial_license ${DATA_DIR}/${CLUSTER_ID}/ +[ -f "${BACKUP_DIR}/${CLUSTER_ID}/trial_or_home_license" ] && \ + cp ${BACKUP_DIR}/${CLUSTER_ID}/trial_or_home_license ${DATA_DIR}/${CLUSTER_ID}/ + +# 4. Restore all nodes +for NODE_ID in "${ALL_NODES[@]}"; do + echo "Restoring node: ${NODE_ID}" + mkdir -p ${DATA_DIR}/${NODE_ID} + cp -r ${BACKUP_DIR}/${NODE_ID}/* ${DATA_DIR}/${NODE_ID}/ +done + +# 5. Set correct permissions +chown -R influxdb:influxdb ${DATA_DIR} + +# 6. Start InfluxDB Enterprise nodes +# Start in order: data nodes, compactor +``` + +Replace the following: +- {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: your [cluster ID](/influxdb3/enterprise/reference/config-options/#cluster-id) +- {{% code-placeholder-key %}}`COMPACTOR_NODE`{{% /code-placeholder-key %}}: your [compactor](/influxdb3/enterprise/get-started/multi-server/#high-availability-with-a-dedicated-compactor) node ID +- {{% code-placeholder-key %}}`NODE1`, `NODE2`, `NODE3`{{% /code-placeholder-key %}}: your data [node IDs](/influxdb3/enterprise/reference/config-options/#node-id) +- {{% code-placeholder-key %}}`BACKUP_DATE`{{% /code-placeholder-key %}}: backup directory timestamp +{{% /show-in %}} + +## Important considerations + +### Recovery expectations + +> [!Warning] +> Recovery succeeds to a consistent point in time, which is the **latest snapshot included** in the backup. Data written after that snapshot may not be present if its WAL was deleted after the backup. Any Parquet files without a snapshot reference are ignored. 
+ +{{% show-in "enterprise" %}} +### License files + +> [!Important] +> License files are tied to: +> - The specific cloud provider (AWS, Azure, GCS) +> - The specific bucket name +> - For file storage: the exact file path +> +> You cannot restore a license file to a different bucket or path. Contact InfluxData support if you need to migrate to a different bucket. +{{% /show-in %}} + +### Docker considerations + +When running {{% product-name %}} in containers: +- **Volume consistency**: Use the same volume mounts for backup and restore operations +- **File permissions**: Ensure container user can read restored files (use `chown` if needed) +- **Backup access**: Mount a backup directory to copy files from containers to the host +{{% show-in "enterprise" %}}- **Node coordination**: Stop and start all Enterprise nodes (querier, ingester, compactor) in the correct order{{% /show-in %}} + +### Table snapshot files + +Files in `/table-snapshots/` are intentionally excluded from backup: +- These files are periodically overwritten +- They regenerate automatically on server restart +- Including them doesn't harm but increases backup size unnecessarily + +### Timing recommendations + +- Perform backups during downtime or minimal load periods +- Copying files while the database is active may create inconsistent backups +- Consider using filesystem or storage snapshots if available +- Compression is optional but recommended for long-term storage From f975f164fd4166810130d437cf2e16dfd188c922 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Fri, 22 Aug 2025 16:58:29 -0500 Subject: [PATCH 122/179] chore(influxdb3): adjust Admin weights for the sidebar nav --- content/influxdb3/core/admin/backup-restore.md | 4 ++-- content/influxdb3/core/admin/mcp-server.md | 2 +- content/influxdb3/enterprise/admin/backup-restore.md | 4 ++-- content/influxdb3/enterprise/admin/mcp-server.md | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git 
a/content/influxdb3/core/admin/backup-restore.md b/content/influxdb3/core/admin/backup-restore.md index 9da134730..7f8cb23b9 100644 --- a/content/influxdb3/core/admin/backup-restore.md +++ b/content/influxdb3/core/admin/backup-restore.md @@ -7,8 +7,8 @@ description: > menu: influxdb3_core: name: Back up and restore - parent: Administer InfluxDB Core -weight: 105 + parent: Administer InfluxDB +weight: 120 influxdb3/core/tags: [backup, restore, administration, object storage] related: - /influxdb3/core/admin/databases/ diff --git a/content/influxdb3/core/admin/mcp-server.md b/content/influxdb3/core/admin/mcp-server.md index 65eb0ae4e..e9a680abf 100644 --- a/content/influxdb3/core/admin/mcp-server.md +++ b/content/influxdb3/core/admin/mcp-server.md @@ -8,7 +8,7 @@ menu: influxdb3_core: name: Use the InfluxDB MCP server parent: Administer InfluxDB -weight: 110 +weight: 205 influxdb3/core/tags: [MCP, LLM, AI] related: - https://github.com/influxdata/influxdb3_mcp_server, InfluxDB 3 MCP Server GitHub Repository diff --git a/content/influxdb3/enterprise/admin/backup-restore.md b/content/influxdb3/enterprise/admin/backup-restore.md index a8338f322..a82b41f7b 100644 --- a/content/influxdb3/enterprise/admin/backup-restore.md +++ b/content/influxdb3/enterprise/admin/backup-restore.md @@ -7,8 +7,8 @@ description: > menu: influxdb3_enterprise: name: Back up and restore - parent: Administer InfluxDB Enterprise -weight: 105 + parent: Administer InfluxDB +weight: 120 influxdb3/enterprise/tags: [backup, restore, administration, cluster, object storage] related: - /influxdb3/enterprise/admin/databases/ diff --git a/content/influxdb3/enterprise/admin/mcp-server.md b/content/influxdb3/enterprise/admin/mcp-server.md index 4f6760e80..a4a9e45ec 100644 --- a/content/influxdb3/enterprise/admin/mcp-server.md +++ b/content/influxdb3/enterprise/admin/mcp-server.md @@ -8,7 +8,7 @@ menu: influxdb3_enterprise: name: Use the InfluxDB MCP server parent: Administer InfluxDB -weight: 110 +weight: 
205 influxdb3/enterprise/tags: [MCP, LLM, AI] related: - https://github.com/influxdata/influxdb3_mcp_server, InfluxDB 3 MCP Server GitHub Repository From 372b9b5d422fdc45bf592b5df0b6364b49545035 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Fri, 22 Aug 2025 17:05:28 -0500 Subject: [PATCH 123/179] Update content/shared/influxdb3-admin/backup-restore.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- content/shared/influxdb3-admin/backup-restore.md | 1 + 1 file changed, 1 insertion(+) diff --git a/content/shared/influxdb3-admin/backup-restore.md b/content/shared/influxdb3-admin/backup-restore.md index 37e9fe98c..c8bff55ed 100644 --- a/content/shared/influxdb3-admin/backup-restore.md +++ b/content/shared/influxdb3-admin/backup-restore.md @@ -439,6 +439,7 @@ for NODE_ID in "${ALL_NODES[@]}"; do done # 5. Set correct permissions +# NOTE: Adjust 'influxdb:influxdb' to match your actual deployment user/group configuration. chown -R influxdb:influxdb ${DATA_DIR} # 6. 
Start InfluxDB Enterprise nodes From 194bf0446d4737be651868e29d3f80370d09ace8 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Fri, 22 Aug 2025 17:30:36 -0500 Subject: [PATCH 124/179] fix(ci): update link-checker to v1.2.3 to fix false positives for new content - Use link-checker-v1.2.3 which includes fix for base URL detection - Prevents false positives when checking local files for new pages - Resolves broken link errors for pages that exist locally but not in production yet --- .github/workflows/pr-link-check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr-link-check.yml b/.github/workflows/pr-link-check.yml index 5f5dacca8..e5af97bbe 100644 --- a/.github/workflows/pr-link-check.yml +++ b/.github/workflows/pr-link-check.yml @@ -95,7 +95,7 @@ jobs: curl -L -H "Accept: application/vnd.github+json" \ -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ -o link-checker-info.json \ - "https://api.github.com/repos/influxdata/docs-v2/releases/tags/link-checker-v1.2.2" + "https://api.github.com/repos/influxdata/docs-v2/releases/tags/link-checker-v1.2.3" # Extract download URL for linux binary DOWNLOAD_URL=$(jq -r '.assets[] | select(.name | test("link-checker.*linux")) | .url' link-checker-info.json) From 081952249b1a8f1804a67318f20d8564a76baea9 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Fri, 22 Aug 2025 17:50:19 -0500 Subject: [PATCH 125/179] hotfix(influxdb3): processing engine source and description (closes #6335) --- content/influxdb3/core/reference/processing-engine.md | 10 ++++++---- .../enterprise/reference/processing-engine.md | 10 ++++++---- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/content/influxdb3/core/reference/processing-engine.md b/content/influxdb3/core/reference/processing-engine.md index f37ea9346..b38f80147 100644 --- a/content/influxdb3/core/reference/processing-engine.md +++ b/content/influxdb3/core/reference/processing-engine.md @@ -1,16 +1,18 @@ --- title: Processing 
engine reference description: > - The InfluxDB 3 Processing engine is an embedded Python virtual machine that runs inside the InfluxDB database. It executes Python code in response to database events without requiring external application servers or middleware. + The InfluxDB 3 Processing engine is an embedded Python virtual machine + that runs inside {{< product-name >}} to execute Python code in response to triggers you define without requiring external application servers or middleware. menu: influxdb3_core: name: Processing engine parent: Reference weight: 101 influxdb3/core/tags: [plugin, database, python] -source: /shared/influxdb3-processing-engine.md +source: /shared/influxdb3-reference/influxdb3-processing-engine.md --- \ No newline at end of file +The content of this file is at +//SOURCE - content/shared/influxdb3-reference/influxdb3-processing-engine.md +--> diff --git a/content/influxdb3/enterprise/reference/processing-engine.md b/content/influxdb3/enterprise/reference/processing-engine.md index 4cb27d28d..f776dd0fb 100644 --- a/content/influxdb3/enterprise/reference/processing-engine.md +++ b/content/influxdb3/enterprise/reference/processing-engine.md @@ -1,16 +1,18 @@ --- title: Processing engine reference description: > - The InfluxDB 3 Processing engine is an embedded Python virtual machine that runs inside the InfluxDB database. It executes Python code in response to database events without requiring external application servers or middleware. + The InfluxDB 3 Processing engine is an embedded Python virtual machine + that runs inside {{< product-name >}} to execute Python code in response to triggers you define without requiring external application servers or middleware. 
menu: influxdb3_enterprise: name: Processing engine parent: Reference weight: 101 influxdb3/core/tags: [plugin, database, python] -source: /shared/influxdb3-processing-engine.md +source: /shared/influxdb3-reference/influxdb3-processing-engine.md --- \ No newline at end of file +The content of this file is at +//SOURCE - content/shared/influxdb3-reference/influxdb3-processing-engine.md +--> From 311d5d01ea894d74657dea204b88d6c3a2bf80e2 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Sat, 23 Aug 2025 11:42:13 -0500 Subject: [PATCH 126/179] Apply suggestions from code review Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- content/shared/influxdb3-admin/backup-restore.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/shared/influxdb3-admin/backup-restore.md b/content/shared/influxdb3-admin/backup-restore.md index c8bff55ed..532c0bf50 100644 --- a/content/shared/influxdb3-admin/backup-restore.md +++ b/content/shared/influxdb3-admin/backup-restore.md @@ -40,7 +40,7 @@ InfluxDB 3 supports the following object storage backends for data persistence: | ----------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | **Cluster files** | | | `/_catalog_checkpoint` | Catalog state checkpoint file | -| `/catalogs/` | Catalog log files | +| `/catalogs/` | Catalog log files tracking catalog state changes | | `/commercial_license` | Commercial [license](/influxdb3/enterprise/admin/license/) file (if applicable) | | `/trial_or_home_license` | Trial or home [license](/influxdb3/enterprise/admin/license/) file (if applicable) | | `/enterprise` | Enterprise configuration file | From 942c76d0c89e5d5aad5a52af2be9b5478edd9c53 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Sun, 24 Aug 2025 21:29:26 -0500 Subject: [PATCH 127/179] fix: links with old 
hostname and path --- content/enterprise_influxdb/v1/flux/guides/scalar-values.md | 2 +- content/influxdb/v1/flux/guides/scalar-values.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/content/enterprise_influxdb/v1/flux/guides/scalar-values.md b/content/enterprise_influxdb/v1/flux/guides/scalar-values.md index 529bd2ef5..605336a02 100644 --- a/content/enterprise_influxdb/v1/flux/guides/scalar-values.md +++ b/content/enterprise_influxdb/v1/flux/guides/scalar-values.md @@ -217,7 +217,7 @@ The temperature was ${string(v: lastReported._value)}°F." The following sample data set represents fictional temperature metrics collected from three locations. -It's formatted in [annotated CSV](https://v2.docs.influxdata.com/v2.0/reference/syntax/annotated-csv/) and imported +It's formatted in [annotated CSV](/influxdb/v2/reference/syntax/annotated-csv/) and imported into the Flux query using the [`csv.from()` function](/flux/v0/stdlib/csv/from/). Place the following at the beginning of your query to use the sample data: diff --git a/content/influxdb/v1/flux/guides/scalar-values.md b/content/influxdb/v1/flux/guides/scalar-values.md index b1ac4fc3b..b2b6ccaa1 100644 --- a/content/influxdb/v1/flux/guides/scalar-values.md +++ b/content/influxdb/v1/flux/guides/scalar-values.md @@ -232,7 +232,7 @@ The temperature was ${string(v: lastReported._value)}°F." The following sample data set represents fictional temperature metrics collected from three locations. -It's formatted in [annotated CSV](https://v2.docs.influxdata.com/v2.0/reference/syntax/annotated-csv/) and imported +It's formatted in [annotated CSV](/influxdb/v2/reference/syntax/annotated-csv/) and imported into the Flux query using the [`csv.from()` function](/flux/v0/stdlib/csv/from/). 
Place the following at the beginning of your query to use the sample data: From f587fbaf4888e1c18dcd4da43b7881cc48449314 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Sun, 24 Aug 2025 21:32:26 -0500 Subject: [PATCH 128/179] fix: links containing hostname --- content/enterprise_influxdb/v1/features/_index.md | 2 +- content/enterprise_influxdb/v1/guides/hardware_sizing.md | 2 +- content/influxdb/cloud/account-management/billing.md | 2 +- content/influxdb/v1/tools/api.md | 2 +- content/influxdb/v2/reference/release-notes/influxdb.md | 2 +- .../influxdb3/cloud-dedicated/admin/monitor-your-cluster.md | 2 +- .../influxdb3/cloud-dedicated/reference/internals/security.md | 2 +- content/influxdb3/clustered/reference/internals/security.md | 4 ++-- .../influxdb3/clustered/reference/release-notes/clustered.md | 2 +- .../kapacitor/v1/reference/about_the_project/release-notes.md | 2 +- .../influxdata-platform/tools/measurements-internal.md | 2 +- content/resources/how-to-guides/alert-to-zapier.md | 4 ++-- .../how-to-guides/state-changes-across-task-executions.md | 2 +- .../influxdb3-query-guides/execute-queries/influxdb3-api.md | 2 +- .../telegraf/v1/data_formats/input/prometheus-remote-write.md | 2 +- 15 files changed, 17 insertions(+), 17 deletions(-) diff --git a/content/enterprise_influxdb/v1/features/_index.md b/content/enterprise_influxdb/v1/features/_index.md index 063d0479d..cd398a4ec 100644 --- a/content/enterprise_influxdb/v1/features/_index.md +++ b/content/enterprise_influxdb/v1/features/_index.md @@ -29,7 +29,7 @@ Certain configurations (e.g., 3 meta and 2 data node) provide high-availability while making certain tradeoffs in query performance when compared to a single node. Further increasing the number of nodes can improve performance in both respects. 
-For example, a cluster with 4 data nodes and a [replication factor](https://docs.influxdata.com/enterprise_influxdb/v1/concepts/glossary/#replication-factor) +For example, a cluster with 4 data nodes and a [replication factor](/enterprise_influxdb/v1/concepts/glossary/#replication-factor) of 2 can support a higher volume of write traffic than a single node could. It can also support a higher *query* workload, as the data is replicated in two locations. Performance of the queries may be on par with a single diff --git a/content/enterprise_influxdb/v1/guides/hardware_sizing.md b/content/enterprise_influxdb/v1/guides/hardware_sizing.md index b969143be..6f6336705 100644 --- a/content/enterprise_influxdb/v1/guides/hardware_sizing.md +++ b/content/enterprise_influxdb/v1/guides/hardware_sizing.md @@ -18,7 +18,7 @@ Review configuration and hardware guidelines for InfluxDB Enterprise: * [Recommended cluster configurations](#recommended-cluster-configurations) * [Storage: type, amount, and configuration](#storage-type-amount-and-configuration) -For InfluxDB OSS instances, see [OSS hardware sizing guidelines](https://docs.influxdata.com/influxdb/v1/guides/hardware_sizing/). +For InfluxDB OSS instances, see [OSS hardware sizing guidelines](/influxdb/v1/guides/hardware_sizing/). > **Disclaimer:** Your numbers may vary from recommended guidelines. Guidelines provide estimated benchmarks for implementing the most performant system for your business. diff --git a/content/influxdb/cloud/account-management/billing.md b/content/influxdb/cloud/account-management/billing.md index 3034fcae8..07c3b2a53 100644 --- a/content/influxdb/cloud/account-management/billing.md +++ b/content/influxdb/cloud/account-management/billing.md @@ -103,7 +103,7 @@ If you exceed your plan's [adjustable quotas or limits](/influxdb/cloud/account- If you exceed the series cardinality limit, InfluxDB adds a rate limit event warning on the **Usage** page, and begins to reject write requests with new series. 
To start processing write requests again, do the following as needed: -- **Series cardinality limits**: If you exceed the series cardinality limit, see how to [resolve high series cardinality](https://docs.influxdata.com/influxdb/v2/write-data/best-practices/resolve-high-cardinality/). +- **Series cardinality limits**: If you exceed the series cardinality limit, see how to [resolve high series cardinality](/influxdb/v2/write-data/best-practices/resolve-high-cardinality/). - **Free plan**: To raise rate limits, [upgrade to a Usage-based Plan](#upgrade-to-usage-based-plan). #### Write and query limits (HTTP response code) diff --git a/content/influxdb/v1/tools/api.md b/content/influxdb/v1/tools/api.md index eb5d1fa24..cbb7fb70a 100644 --- a/content/influxdb/v1/tools/api.md +++ b/content/influxdb/v1/tools/api.md @@ -20,7 +20,7 @@ Responses use standard HTTP response codes and JSON format. To send API requests, you can use the [InfluxDB v1 client libraries](/influxdb/v1/tools/api_client_libraries/), the [InfluxDB v2 client libraries](/influxdb/v1/tools/api_client_libraries/), -[Telegraf](https://docs.influxdata.com/telegraf/v1/), +[Telegraf](/telegraf/v1/), or the client of your choice. 
{{% note %}} diff --git a/content/influxdb/v2/reference/release-notes/influxdb.md b/content/influxdb/v2/reference/release-notes/influxdb.md index 9633439d6..2de95e4ce 100644 --- a/content/influxdb/v2/reference/release-notes/influxdb.md +++ b/content/influxdb/v2/reference/release-notes/influxdb.md @@ -643,7 +643,7 @@ to migrate InfluxDB key-value metadata schemas to earlier 2.x versions when nece #### Telegraf -- Add the following new [Telegraf plugins](https://docs.influxdata.com/telegraf/v1/plugins/) to the Load Data page: +- Add the following new [Telegraf plugins](/telegraf/v1/plugins/) to the Load Data page: - Alibaba (Aliyun) CloudMonitor Service Statistics (`aliyuncms`) - AMD ROCm System Management Interface (SMI) (`amd_rocm_smi`) - Counter-Strike: Global Offensive (CS:GO) (`csgo`) diff --git a/content/influxdb3/cloud-dedicated/admin/monitor-your-cluster.md b/content/influxdb3/cloud-dedicated/admin/monitor-your-cluster.md index 27ccd7c2a..d4ae8c139 100644 --- a/content/influxdb3/cloud-dedicated/admin/monitor-your-cluster.md +++ b/content/influxdb3/cloud-dedicated/admin/monitor-your-cluster.md @@ -328,7 +328,7 @@ following levels: - **L3**: 4 L2 files compacted together Parquet files store data partitioned by time and optionally tags -_(see [Manage data partition](https://docs.influxdata.com/influxdb3/cloud-dedicated/admin/custom-partitions/))_. +_(see [Manage data partition](/influxdb3/cloud-dedicated/admin/custom-partitions/))_. After four L0 files accumulate for a partition, they're eligible for compaction. If the compactor is keeping up with the incoming write load, all compaction events have exactly four files. 
diff --git a/content/influxdb3/cloud-dedicated/reference/internals/security.md b/content/influxdb3/cloud-dedicated/reference/internals/security.md index e5fa1e943..b1303af8b 100644 --- a/content/influxdb3/cloud-dedicated/reference/internals/security.md +++ b/content/influxdb3/cloud-dedicated/reference/internals/security.md @@ -67,7 +67,7 @@ by periodically creating, recording, and writing test data into test buckets. The service periodically executes queries to ensure the data hasn't been lost or corrupted. A separate instance of this service lives within each {{% product-name %}} cluster. Additionally, the service creates out-of-band backups in -[line protocol](https://docs.influxdata.com/influxdb/cloud/reference/syntax/line-protocol/), +[line protocol](/influxdb/cloud/reference/syntax/line-protocol/), and ensures the backup data matches the data on disk. ## Cloud infrastructure diff --git a/content/influxdb3/clustered/reference/internals/security.md b/content/influxdb3/clustered/reference/internals/security.md index c12776c27..38765b03e 100644 --- a/content/influxdb3/clustered/reference/internals/security.md +++ b/content/influxdb3/clustered/reference/internals/security.md @@ -62,7 +62,7 @@ by periodically creating, recording, and writing test data into test buckets. The service periodically executes queries to ensure the data hasn't been lost or corrupted. A separate instance of this service lives within each InfluxDB cluster. Additionally, the service creates out-of-band backups in -[line protocol](https://docs.influxdata.com/influxdb/cloud/reference/syntax/line-protocol/), +[line protocol](/influxdb/cloud/reference/syntax/line-protocol/), and ensures the backup data matches the data on disk. ## Cloud infrastructure @@ -229,7 +229,7 @@ User accounts can be created by InfluxData on the InfluxDB Clustered system via User accounts can create database tokens with data read and/or write permissions. 
API requests from custom applications require a database token with sufficient permissions. For more information on the types of tokens and ways to create them, see -[Manage tokens](https://docs.influxdata.com/influxdb3/clustered/admin/tokens/). +[Manage tokens](/influxdb3/clustered/admin/tokens/). ### Role-based access controls (RBAC) diff --git a/content/influxdb3/clustered/reference/release-notes/clustered.md b/content/influxdb3/clustered/reference/release-notes/clustered.md index 0973ace20..e81fca982 100644 --- a/content/influxdb3/clustered/reference/release-notes/clustered.md +++ b/content/influxdb3/clustered/reference/release-notes/clustered.md @@ -1419,7 +1419,7 @@ This now enables the use of Azure blob storage. The "Install InfluxDB Clustered" instructions (formerly known as "GETTING_STARTED") are now available on the public -[InfluxDB Clustered documentation](https://docs.influxdata.com/influxdb3/clustered/install/). +[InfluxDB Clustered documentation](/influxdb3/clustered/install/). The `example-customer.yml` (also known as `myinfluxdb.yml`) example configuration file still lives in the release bundle alongside the `RELEASE_NOTES`. diff --git a/content/kapacitor/v1/reference/about_the_project/release-notes.md b/content/kapacitor/v1/reference/about_the_project/release-notes.md index 567d00928..2ae546029 100644 --- a/content/kapacitor/v1/reference/about_the_project/release-notes.md +++ b/content/kapacitor/v1/reference/about_the_project/release-notes.md @@ -162,7 +162,7 @@ aliases: - Add new `auto-attributes` configuration option to BigPanda node. - Ability to add new headers to HTTP posts directly in `env var` config. - `Topic queue length` is now configurable. This allows you to set a `topic-buffer-length` parameter in the Kapacitor config file in the -[alert](https://docs.influxdata.com/kapacitor/v1/administration/configuration/#alert) section. The default is 5000. Minimum length +[alert](/kapacitor/v1/administration/configuration/#alert) section. 
The default is 5000. Minimum length is 1000. - Add new `address template` to email alert. Email addresses no longer need to be hardcoded; can be derived directly from data. diff --git a/content/platform/monitoring/influxdata-platform/tools/measurements-internal.md b/content/platform/monitoring/influxdata-platform/tools/measurements-internal.md index 8a027b218..2b4669c0e 100644 --- a/content/platform/monitoring/influxdata-platform/tools/measurements-internal.md +++ b/content/platform/monitoring/influxdata-platform/tools/measurements-internal.md @@ -444,7 +444,7 @@ Tracks the disk usage of all hinted handoff queues for a given node (not the byt a lag occurs between when bytes are processed and when they're removed from the disk. `queueTotalSize` is used to determine when a node's hinted handoff queue has reached the -maximum size configured in the [hinted-handoff max-size](https://docs.influxdata.com/enterprise_influxdb/v1.9/administration/configure/config-data-nodes/#max-size) parameter. +maximum size configured in the [hinted-handoff max-size](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#max-size) parameter. --- diff --git a/content/resources/how-to-guides/alert-to-zapier.md b/content/resources/how-to-guides/alert-to-zapier.md index 17d5c6633..529530b43 100644 --- a/content/resources/how-to-guides/alert-to-zapier.md +++ b/content/resources/how-to-guides/alert-to-zapier.md @@ -40,11 +40,11 @@ Zapier, August 2022 ## Create an InfluxDB check -[Create an InfluxDB check](https://docs.influxdata.com/influxdb/cloud/monitor-alert/checks/create) to query and alert on a metric you want to monitor. +[Create an InfluxDB check](/influxdb/cloud/monitor-alert/checks/create) to query and alert on a metric you want to monitor. Use a default **threshold** check as the task. 
_It is possible to use your own task written in Flux code, but for this guide, use the InfluxDB UI to create the check._ -Once the check is completed, [create a notification endpoint](https://docs.influxdata.com/influxdb/cloud/monitor-alert/notification-endpoints/create/). Select **HTTP** as an endpoint. +Once the check is completed, [create a notification endpoint](/influxdb/cloud/monitor-alert/notification-endpoints/create/). Select **HTTP** as an endpoint. {{< img-hd src="static/img/resources/notification-endpoint.png" alt="Create a check" />}} {{% caption %}} diff --git a/content/resources/how-to-guides/state-changes-across-task-executions.md b/content/resources/how-to-guides/state-changes-across-task-executions.md index 31ca6ea4f..ef2cf5858 100644 --- a/content/resources/how-to-guides/state-changes-across-task-executions.md +++ b/content/resources/how-to-guides/state-changes-across-task-executions.md @@ -38,7 +38,7 @@ Create a task where you: 1. Import packages and define task options and secrets. Import the following packages: - [Flux Telegram package](/flux/v0/stdlib/contrib/sranka/telegram/): This package - [Flux InfluxDB secrets package](/flux/v0/stdlib/influxdata/influxdb/secrets/): This package contains the [secrets.get()](/flux/v0/stdlib/influxdata/influxdb/secrets/get/) function which allows you to retrieve secrets from the InfluxDB secret store. Learn how to [manage secrets](/influxdb/v2/admin/secrets/) in InfluxDB to use this package. - - [Flux InfluxDB monitoring package](https://docs.influxdata.com/flux/v0/stdlib/influxdata/influxdb/monitor/): This package contains functions and tools for monitoring your data. + - [Flux InfluxDB monitoring package](/flux/v0/stdlib/influxdata/influxdb/monitor/): This package contains functions and tools for monitoring your data. 
```js diff --git a/content/shared/influxdb3-query-guides/execute-queries/influxdb3-api.md b/content/shared/influxdb3-query-guides/execute-queries/influxdb3-api.md index ef8f0c6a1..fa18d6586 100644 --- a/content/shared/influxdb3-query-guides/execute-queries/influxdb3-api.md +++ b/content/shared/influxdb3-query-guides/execute-queries/influxdb3-api.md @@ -62,7 +62,7 @@ about your database server and table schemas in {{% product-name %}}. > In examples, tables with `"table_name":"system_` are user-created tables for CPU, memory, disk, > network, and other resource statistics collected and written > by the user--for example, using the `psutil` Python library or -> [Telegraf](https://docs.influxdata.com/telegraf/v1/get-started/) to collect +> [Telegraf](/telegraf/v1/get-started/) to collect > and write system metrics to an InfluxDB 3 database. ##### Show tables diff --git a/content/telegraf/v1/data_formats/input/prometheus-remote-write.md b/content/telegraf/v1/data_formats/input/prometheus-remote-write.md index ec6f018b0..33620938e 100644 --- a/content/telegraf/v1/data_formats/input/prometheus-remote-write.md +++ b/content/telegraf/v1/data_formats/input/prometheus-remote-write.md @@ -64,6 +64,6 @@ prompb.WriteRequest{ prometheus_remote_write,instance=localhost:9090,job=prometheus,quantile=0.99 go_gc_duration_seconds=4.63 1614889298859000000 ``` -## For alignment with the [InfluxDB v1.x Prometheus Remote Write Spec](https://docs.influxdata.com/influxdb/v1/supported_protocols/prometheus/#how-prometheus-metrics-are-parsed-in-influxdb) +## For alignment with the [InfluxDB v1.x Prometheus Remote Write Spec](/influxdb/v1/supported_protocols/prometheus/#how-prometheus-metrics-are-parsed-in-influxdb) - Use the [Starlark processor rename prometheus remote write script](https://github.com/influxdata/telegraf/blob/master/plugins/processors/starlark/testdata/rename_prometheus_remote_write.star) to rename the measurement name to the fieldname and rename the fieldname to value. 
From 3a4db18f6b76514254a59d9238d0cfa8648ccd4e Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Sun, 24 Aug 2025 22:30:59 -0500 Subject: [PATCH 129/179] chore: use version in links --- .../shared/influxdb3-admin/backup-restore.md | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/content/shared/influxdb3-admin/backup-restore.md b/content/shared/influxdb3-admin/backup-restore.md index 532c0bf50..1ea93c0f6 100644 --- a/content/shared/influxdb3-admin/backup-restore.md +++ b/content/shared/influxdb3-admin/backup-restore.md @@ -28,9 +28,9 @@ InfluxDB 3 supports the following object storage backends for data persistence: | `/` | Root directory for all node state | | `/_catalog_checkpoint` | Catalog state checkpoint file | | `/catalogs/` | Catalog log files tracking catalog state changes | -| `/wal/` | [Write-ahead log files](/influxdb3/core/reference/internals/durability/#write-ahead-log-wal-persistence) containing written data | -| `/snapshots/` | Snapshot files summarizing persisted [Parquet files](/influxdb3/core/reference/internals/durability/#parquet-storage) | -| `/dbs//
//` | [Parquet files](/influxdb3/core/reference/internals/durability/#parquet-storage) organized by [database](/influxdb3/core/admin/databases/), [table](/influxdb3/core/admin/tables/), and time | +| `/wal/` | [Write-ahead log files](/influxdb3/version/reference/internals/durability/#write-ahead-log-wal-persistence) containing written data | +| `/snapshots/` | Snapshot files summarizing persisted [Parquet files](/influxdb3/version/reference/internals/durability/#parquet-storage) | +| `/dbs//
//` | [Parquet files](/influxdb3/version/reference/internals/durability/#parquet-storage) organized by [database](/influxdb3/version/admin/databases/), [table](/influxdb3/version/admin/tables/), and time | | `/table-snapshots//
/` | Table snapshot files (regenerated on restart, optional for backup) | {{% /show-in %}} @@ -41,18 +41,18 @@ InfluxDB 3 supports the following object storage backends for data persistence: | **Cluster files** | | | `/_catalog_checkpoint` | Catalog state checkpoint file | | `/catalogs/` | Catalog log files tracking catalog state changes | -| `/commercial_license` | Commercial [license](/influxdb3/enterprise/admin/license/) file (if applicable) | -| `/trial_or_home_license` | Trial or home [license](/influxdb3/enterprise/admin/license/) file (if applicable) | +| `/commercial_license` | Commercial [license](/influxdb3/version/admin/license/) file (if applicable) | +| `/trial_or_home_license` | Trial or home [license](/influxdb3/version/admin/license/) file (if applicable) | | `/enterprise` | Enterprise configuration file | | **Node files** | | -| `/wal/` | [Write-ahead log files](/influxdb3/enterprise/reference/internals/durability/#write-ahead-log-wal-persistence) containing written data | +| `/wal/` | [Write-ahead log files](/influxdb3/version/reference/internals/durability/#write-ahead-log-wal-persistence) containing written data | | `/snapshots/` | Snapshot files | -| `/dbs//
//` | [Parquet files](/influxdb3/enterprise/reference/internals/durability/#parquet-storage) organized by [database](/influxdb3/enterprise/admin/databases/), [table](/influxdb3/enterprise/admin/tables/), and time | +| `/dbs//
//` | [Parquet files](/influxdb3/version/reference/internals/durability/#parquet-storage) organized by [database](/influxdb3/version/admin/databases/), [table](/influxdb3/version/admin/tables/), and time | | `/table-snapshots//
/` | Table snapshot files (regenerated on restart, optional for backup) | | **Compactor node additional files** | | | `/cs` | Compaction summary files | | `/cd` | Compaction detail files | -| `/c` | Generation detail and [Parquet files](/influxdb3/enterprise/reference/internals/durability/#parquet-storage) | +| `/c` | Generation detail and [Parquet files](/influxdb3/version/reference/internals/durability/#parquet-storage) | {{% /show-in %}} ## Backup process @@ -95,7 +95,7 @@ cp $DATA_DIR/${NODE_ID}/_catalog_checkpoint "$BACKUP_DIR/" echo "Backup completed to $BACKUP_DIR" ``` -Replace {{% code-placeholder-key %}}`NODE_ID`{{% /code-placeholder-key %}} with your [node ID](/influxdb3/core/reference/config-options/#node-id). +Replace {{% code-placeholder-key %}}`NODE_ID`{{% /code-placeholder-key %}} with your [node ID](/influxdb3/version/reference/config-options/#node-id). > [!Note] > This example works with Docker containers that use volume mounts for data persistence. Adjust the `DATA_DIR` path to match your volume mount configuration. 
@@ -132,7 +132,7 @@ echo "Backup completed to s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}" ``` Replace the following: -- {{% code-placeholder-key %}}`NODE_ID`{{% /code-placeholder-key %}}: your [node ID](/influxdb3/core/reference/config-options/#node-id) +- {{% code-placeholder-key %}}`NODE_ID`{{% /code-placeholder-key %}}: your [node ID](/influxdb3/version/reference/config-options/#node-id) - {{% code-placeholder-key %}}`SOURCE_BUCKET`{{% /code-placeholder-key %}}: your InfluxDB data bucket - {{% code-placeholder-key %}}`BACKUP_BUCKET`{{% /code-placeholder-key %}}: your backup destination bucket @@ -213,9 +213,9 @@ echo "Backup completed to s3://${BACKUP_BUCKET}/${BACKUP_PREFIX}" ``` Replace the following: -- {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: your [cluster ID](/influxdb3/enterprise/reference/config-options/#cluster-id) -- {{% code-placeholder-key %}}`COMPACTOR_NODE`{{% /code-placeholder-key %}}: your [compactor](/influxdb3/enterprise/get-started/multi-server/#high-availability-with-a-dedicated-compactor) node ID -- {{% code-placeholder-key %}}`NODE1`, `NODE2`, `NODE3`{{% /code-placeholder-key %}}: your data [node IDs](/influxdb3/enterprise/reference/config-options/#node-id) +- {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: your [cluster ID](/influxdb3/version/reference/config-options/#cluster-id) +- {{% code-placeholder-key %}}`COMPACTOR_NODE`{{% /code-placeholder-key %}}: your [compactor](/influxdb3/version/get-started/multi-server/#high-availability-with-a-dedicated-compactor) node ID +- {{% code-placeholder-key %}}`NODE1`, `NODE2`, `NODE3`{{% /code-placeholder-key %}}: your data [node IDs](/influxdb3/version/reference/config-options/#node-id) - {{% code-placeholder-key %}}`SOURCE_BUCKET`{{% /code-placeholder-key %}}: your InfluxDB data bucket - {{% code-placeholder-key %}}`BACKUP_BUCKET`{{% /code-placeholder-key %}}: your backup destination bucket @@ -267,9 +267,9 @@ echo "Backup completed to $BACKUP_DIR" 
``` Replace the following: -- {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: your [cluster ID](/influxdb3/enterprise/reference/config-options/#cluster-id) -- {{% code-placeholder-key %}}`COMPACTOR_NODE`{{% /code-placeholder-key %}}: your [compactor](/influxdb3/enterprise/get-started/multi-server/#high-availability-with-a-dedicated-compactor) node ID -- {{% code-placeholder-key %}}`NODE1`, `NODE2`, `NODE3`{{% /code-placeholder-key %}}: your data [node IDs](/influxdb3/enterprise/reference/config-options/#node-id) +- {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: your [cluster ID](/influxdb3/version/reference/config-options/#cluster-id) +- {{% code-placeholder-key %}}`COMPACTOR_NODE`{{% /code-placeholder-key %}}: your [compactor](/influxdb3/version/get-started/multi-server/#high-availability-with-a-dedicated-compactor) node ID +- {{% code-placeholder-key %}}`NODE1`, `NODE2`, `NODE3`{{% /code-placeholder-key %}}: your data [node IDs](/influxdb3/version/reference/config-options/#node-id) {{% /tab-content %}} @@ -313,7 +313,7 @@ systemctl start influxdb3 || docker start influxdb3-core ``` Replace the following: -- {{% code-placeholder-key %}}`NODE_ID`{{% /code-placeholder-key %}}: your [node ID](/influxdb3/core/reference/config-options/#node-id) +- {{% code-placeholder-key %}}`NODE_ID`{{% /code-placeholder-key %}}: your [node ID](/influxdb3/version/reference/config-options/#node-id) - {{% code-placeholder-key %}}`BACKUP_DATE`{{% /code-placeholder-key %}}: backup directory timestamp (for example, 20240115-143022) #### S3 restore example @@ -392,9 +392,9 @@ done ``` Replace the following: -- {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: your [cluster ID](/influxdb3/enterprise/reference/config-options/#cluster-id) -- {{% code-placeholder-key %}}`COMPACTOR_NODE`{{% /code-placeholder-key %}}: your [compactor](/influxdb3/enterprise/get-started/multi-server/#high-availability-with-a-dedicated-compactor) 
node ID -- {{% code-placeholder-key %}}`NODE1`, `NODE2`, `NODE3`{{% /code-placeholder-key %}}: your data [node IDs](/influxdb3/enterprise/reference/config-options/#node-id) +- {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: your [cluster ID](/influxdb3/version/reference/config-options/#cluster-id) +- {{% code-placeholder-key %}}`COMPACTOR_NODE`{{% /code-placeholder-key %}}: your [compactor](/influxdb3/version/get-started/multi-server/#high-availability-with-a-dedicated-compactor) node ID +- {{% code-placeholder-key %}}`NODE1`, `NODE2`, `NODE3`{{% /code-placeholder-key %}}: your data [node IDs](/influxdb3/version/reference/config-options/#node-id) - {{% code-placeholder-key %}}`BACKUP_DATE`{{% /code-placeholder-key %}}: backup timestamp - {{% code-placeholder-key %}}`BACKUP_BUCKET`{{% /code-placeholder-key %}}: bucket containing backup - {{% code-placeholder-key %}}`TARGET_BUCKET`{{% /code-placeholder-key %}}: target bucket for restoration @@ -447,9 +447,9 @@ chown -R influxdb:influxdb ${DATA_DIR} ``` Replace the following: -- {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: your [cluster ID](/influxdb3/enterprise/reference/config-options/#cluster-id) -- {{% code-placeholder-key %}}`COMPACTOR_NODE`{{% /code-placeholder-key %}}: your [compactor](/influxdb3/enterprise/get-started/multi-server/#high-availability-with-a-dedicated-compactor) node ID -- {{% code-placeholder-key %}}`NODE1`, `NODE2`, `NODE3`{{% /code-placeholder-key %}}: your data [node IDs](/influxdb3/enterprise/reference/config-options/#node-id) +- {{% code-placeholder-key %}}`CLUSTER_ID`{{% /code-placeholder-key %}}: your [cluster ID](/influxdb3/version/reference/config-options/#cluster-id) +- {{% code-placeholder-key %}}`COMPACTOR_NODE`{{% /code-placeholder-key %}}: your [compactor](/influxdb3/version/get-started/multi-server/#high-availability-with-a-dedicated-compactor) node ID +- {{% code-placeholder-key %}}`NODE1`, `NODE2`, `NODE3`{{% 
/code-placeholder-key %}}: your data [node IDs](/influxdb3/version/reference/config-options/#node-id) - {{% code-placeholder-key %}}`BACKUP_DATE`{{% /code-placeholder-key %}}: backup directory timestamp {{% /show-in %}} From 38083f4425efff78b607d8d1b20c5c8b8ee63ec3 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Sun, 24 Aug 2025 22:33:41 -0500 Subject: [PATCH 130/179] chore: ignore Claude URLs and full production URLs (esp. canonical meta links) --- .ci/link-checker/production.lycherc.toml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.ci/link-checker/production.lycherc.toml b/.ci/link-checker/production.lycherc.toml index 37f692e47..c838590df 100644 --- a/.ci/link-checker/production.lycherc.toml +++ b/.ci/link-checker/production.lycherc.toml @@ -69,6 +69,15 @@ exclude = [ # InfluxData support URLs (certificate/SSL issues in CI) "^https?://support\\.influxdata\\.com", + # AI platforms (often block automated requests) + "^https?://claude\\.ai", + "^https?://.*\\.claude\\.ai", + + # Production site URLs (when testing locally, these should be relative) + # This excludes canonical URLs and other absolute production URLs + # TODO: Remove after fixing canonical URL generation or link-checker domain replacement + "^https://docs\\.influxdata\\.com/", + # Common documentation placeholders "YOUR_.*", "REPLACE_.*", From 7fcd338b2ee1d97f2400700c260f87de116edf44 Mon Sep 17 00:00:00 2001 From: Peter Barnett Date: Mon, 25 Aug 2025 09:32:48 -0400 Subject: [PATCH 131/179] update: fix some wording and call out restore process --- content/influxdb3/core/admin/backup-restore.md | 2 +- .../enterprise/admin/backup-restore.md | 2 +- .../shared/influxdb3-admin/backup-restore.md | 17 +++++++++++++++-- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/content/influxdb3/core/admin/backup-restore.md b/content/influxdb3/core/admin/backup-restore.md index 7f8cb23b9..da22891ee 100644 --- a/content/influxdb3/core/admin/backup-restore.md +++ 
b/content/influxdb3/core/admin/backup-restore.md @@ -2,7 +2,7 @@ title: Back up and restore data seotitle: Back up and restore {{< product-name >}} description: > - Manually back up and restore your {{< product-name >}} instance by copying + Back up and restore your {{< product-name >}} instance by copying object storage files in the recommended order. menu: influxdb3_core: diff --git a/content/influxdb3/enterprise/admin/backup-restore.md b/content/influxdb3/enterprise/admin/backup-restore.md index a82b41f7b..c066fb8d1 100644 --- a/content/influxdb3/enterprise/admin/backup-restore.md +++ b/content/influxdb3/enterprise/admin/backup-restore.md @@ -2,7 +2,7 @@ title: Back up and restore data seotitle: Back up and restore {{< product-name >}} description: > - Manually back up and restore your {{< product-name >}} cluster by copying + Back up and restore your {{< product-name >}} cluster by copying object storage files in the recommended order for each node type. menu: influxdb3_enterprise: diff --git a/content/shared/influxdb3-admin/backup-restore.md b/content/shared/influxdb3-admin/backup-restore.md index 1ea93c0f6..8aed7dd78 100644 --- a/content/shared/influxdb3-admin/backup-restore.md +++ b/content/shared/influxdb3-admin/backup-restore.md @@ -4,8 +4,7 @@ Back up your data by copying object storage files in a specific order to ensure > [!Warning] > Currently, {{% product-name %}} does not include built-in backup and restore tools. -> Because copying files during periods of activity is a transient process, the manual backup process _cannot guarantee 100% reliability_. -> Follow the recommended procedures and copy order to minimize risk of creating inconsistent backups. +> Because copying files during periods of activity is a transient process, we highly recommend that you follow the procedures and copy order below to minimize risk of creating inconsistent backups. 
## Supported object storage @@ -283,6 +282,13 @@ Replace the following: {{% show-in "core" %}} +**Recommended restore order:** +1. Catalog checkpoint file +2. Catalogs directory +3. WAL directory +4. Database (dbs) directory +5. Snapshots directory + #### File system restore example ```bash { placeholders="NODE_ID|BACKUP_DATE" } @@ -348,6 +354,13 @@ Replace the following: {{% show-in "enterprise" %}} +**Recommended restore order:** +1. Cluster catalog and checkpoint +2. License files +3. All nodes' snapshots, dbs, wal directories +4. Compactor node directories (cs, cd, c) + + #### S3 restore example ```bash { placeholders="CLUSTER_ID|COMPACTOR_NODE|NODE1|NODE2|NODE3|BACKUP_DATE|BACKUP_BUCKET|TARGET_BUCKET" } From 3df82f362e4c809940d4711b2e51309b171b626d Mon Sep 17 00:00:00 2001 From: peterbarnett03 Date: Wed, 27 Aug 2025 10:32:06 -0400 Subject: [PATCH 132/179] fix: remove buffer-mem-limit-mb (#6341) --- .../core/reference/cli/influxdb3/serve.md | 1 - .../reference/cli/influxdb3/serve.md | 1 - .../shared/influxdb3-cli/config-options.md | 19 ------------------- hugo_stats.json | 1 - 4 files changed, 22 deletions(-) diff --git a/content/influxdb3/core/reference/cli/influxdb3/serve.md b/content/influxdb3/core/reference/cli/influxdb3/serve.md index debeb8ddc..ba971238d 100644 --- a/content/influxdb3/core/reference/cli/influxdb3/serve.md +++ b/content/influxdb3/core/reference/cli/influxdb3/serve.md @@ -48,7 +48,6 @@ influxdb3 serve [OPTIONS] --node-id | | `--azure-storage-access-key` | _See [configuration options](/influxdb3/core/reference/config-options/#azure-storage-access-key)_ | | | `--azure-storage-account` | _See [configuration options](/influxdb3/core/reference/config-options/#azure-storage-account)_ | | | `--bucket` | _See [configuration options](/influxdb3/core/reference/config-options/#bucket)_ | -| | `--buffer-mem-limit-mb` | _See [configuration options](/influxdb3/core/reference/config-options/#buffer-mem-limit-mb)_ | | | `--data-dir` | _See [configuration 
options](/influxdb3/core/reference/config-options/#data-dir)_ | | | `--datafusion-config` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-config)_ | | | `--datafusion-max-parquet-fanout` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-max-parquet-fanout)_ | diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md b/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md index 7fb25d5d9..40a97edd4 100644 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md @@ -49,7 +49,6 @@ influxdb3 serve [OPTIONS] \ | | `--azure-storage-access-key` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-storage-access-key)_ | | | `--azure-storage-account` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-storage-account)_ | | | `--bucket` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#bucket)_ | -| | `--buffer-mem-limit-mb` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#buffer-mem-limit-mb)_ | | | `--catalog-sync-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#catalog-sync-interval)_ | | {{< req "\*" >}} | `--cluster-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#cluster-id)_ | | | `--compaction-check-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-check-interval)_ | diff --git a/content/shared/influxdb3-cli/config-options.md b/content/shared/influxdb3-cli/config-options.md index 13d98106c..863538a2e 100644 --- a/content/shared/influxdb3-cli/config-options.md +++ b/content/shared/influxdb3-cli/config-options.md @@ -111,7 +111,6 @@ influxdb3 serve - [admin-token-recovery-http-bind](#admin-token-recovery-http-bind) - [Memory](#memory) - 
[exec-mem-pool-bytes](#exec-mem-pool-bytes) - - [buffer-mem-limit-mb](#buffer-mem-limit-mb) - [force-snapshot-mem-threshold](#force-snapshot-mem-threshold) - [Write-Ahead Log (WAL)](#write-ahead-log-wal) - [wal-flush-interval](#wal-flush-interval) @@ -997,7 +996,6 @@ influxdb3 create token --admin --regenerate --host http://127.0.0.1:8182 ### Memory - [exec-mem-pool-bytes](#exec-mem-pool-bytes) -- [buffer-mem-limit-mb](#buffer-mem-limit-mb) - [force-snapshot-mem-threshold](#force-snapshot-mem-threshold) #### exec-mem-pool-bytes @@ -1013,23 +1011,6 @@ example: `8000000000` or `10%`). | :---------------------- | :------------------------------ | | `--exec-mem-pool-bytes` | `INFLUXDB3_EXEC_MEM_POOL_BYTES` | -{{% show-in "core" %}} ---- - -#### buffer-mem-limit-mb - - -Specifies the size limit of the buffered data in MB. If this limit is exceeded, -the server forces a snapshot. - -**Default:** `5000` - -| influxdb3 serve option | Environment variable | -| :---------------------- | :------------------------------ | -| `--buffer-mem-limit-mb` | `INFLUXDB3_BUFFER_MEM_LIMIT_MB` | - -{{% /show-in %}} - --- #### force-snapshot-mem-threshold diff --git a/hugo_stats.json b/hugo_stats.json index 7550a0b1f..3c062cc6b 100644 --- a/hugo_stats.json +++ b/hugo_stats.json @@ -2212,7 +2212,6 @@ "buckets-api", "buckets-total", "buffer", - "buffer-mem-limit-mb", "bug-fixes", "bug-fixes-1", "bug-fixes-10", From e5c6d1b015aa4ab8525d64f1c1c758eb4c7db776 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Wed, 27 Aug 2025 11:42:41 -0600 Subject: [PATCH 133/179] InfluxDB Core and Enterprise 3.4 (#6344) * update: add 3.4 notes * update: 3.4 notes * Update content/shared/v3-core-enterprise-release-notes/_index.md Co-authored-by: Scott Anderson * Update content/shared/v3-core-enterprise-release-notes/_index.md Co-authored-by: Scott Anderson * Update content/shared/v3-core-enterprise-release-notes/_index.md Co-authored-by: Scott Anderson * Update 
content/shared/v3-core-enterprise-release-notes/_index.md Co-authored-by: Scott Anderson * Update content/shared/v3-core-enterprise-release-notes/_index.md Co-authored-by: Scott Anderson * Update content/shared/v3-core-enterprise-release-notes/_index.md Co-authored-by: Scott Anderson * Update content/shared/v3-core-enterprise-release-notes/_index.md Co-authored-by: Scott Anderson * Update content/shared/v3-core-enterprise-release-notes/_index.md Co-authored-by: Scott Anderson * Update content/shared/v3-core-enterprise-release-notes/_index.md Co-authored-by: Scott Anderson * feat(influxdb-3.4): added cli updates for influxdb 3.4 * Apply suggestions from code review Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * fix: add whitespace to influxdb3 write doc * fix(data): update patch versions for core and enterprise * Apply suggestions from code review Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --------- Co-authored-by: Peter Barnett Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../shared/influxdb3-cli/config-options.md | 143 ++++++++++++++---- content/shared/influxdb3-cli/write.md | 48 ++++-- .../_index.md | 46 ++++++ data/products.yml | 4 +- 4 files changed, 196 insertions(+), 45 deletions(-) diff --git a/content/shared/influxdb3-cli/config-options.md b/content/shared/influxdb3-cli/config-options.md index 863538a2e..522bc7b5d 100644 --- a/content/shared/influxdb3-cli/config-options.md +++ b/content/shared/influxdb3-cli/config-options.md @@ -42,9 +42,7 @@ influxdb3 serve - [General](#general) {{% show-in "enterprise" %}} - [cluster-id](#cluster-id){{% /show-in %}} - [data-dir](#data-dir) -{{% show-in "enterprise" %}} - [license-email](#license-email) - - [license-file](#license-file) - - [mode](#mode){{% /show-in %}} +{{% show-in "enterprise" %}} - [mode](#mode){{% /show-in %}} - [node-id](#node-id) {{% show-in "enterprise" %}} - [node-id-from-env](#node-id-from-env){{% /show-in %}} - 
[object-store](#object-store) @@ -56,7 +54,11 @@ influxdb3 serve {{% show-in "enterprise" %}} - [num-database-limit](#num-database-limit) - [num-table-limit](#num-table-limit) - - [num-total-columns-per-table-limit](#num-total-columns-per-table-limit){{% /show-in %}} + - [num-total-columns-per-table-limit](#num-total-columns-per-table-limit) +- [Licensing](#licensing) + - [license-email](#license-email) + - [license-file](#license-file) + - [license-type](#license-type){{% /show-in %}} - [AWS](#aws) - [aws-access-key-id](#aws-access-key-id) - [aws-secret-access-key](#aws-secret-access-key) @@ -65,11 +67,14 @@ influxdb3 serve - [aws-session-token](#aws-session-token) - [aws-allow-http](#aws-allow-http) - [aws-skip-signature](#aws-skip-signature) + - [aws-credentials-file](#aws-credentials-file) - [Google Cloud Service](#google-cloud-service) - [google-service-account](#google-service-account) - [Microsoft Azure](#microsoft-azure) - [azure-storage-account](#azure-storage-account) - [azure-storage-access-key](#azure-storage-access-key) + - [azure-endpoint](#azure-endpoint) + - [azure-allow-http](#azure-allow-http) - [Object Storage](#object-storage) - [bucket](#bucket) - [object-store-connection-limit](#object-store-connection-limit) @@ -181,8 +186,6 @@ influxdb3 serve {{% /show-in %}} - [data-dir](#data-dir) {{% show-in "enterprise" %}} -- [license-email](#license-email) -- [license-file](#license-file) - [mode](#mode) {{% /show-in %}} - [node-id](#node-id) @@ -217,30 +220,6 @@ Required when using the `file` [object store](#object-store). --- {{% show-in "enterprise" %}} -#### license-email - -Specifies the email address to associate with your {{< product-name >}} license -and automatically responds to the interactive email prompt when the server starts. -This option is mutually exclusive with [license-file](#license-file). 
- -| influxdb3 serve option | Environment variable | -| :--------------------- | :----------------------------------- | -| `--license-email` | `INFLUXDB3_ENTERPRISE_LICENSE_EMAIL` | - ---- - -#### license-file - -Specifies the path to a license file for {{< product-name >}}. When provided, the license -file's contents are used instead of requesting a new license. -This option is mutually exclusive with [license-email](#license-email). - -| influxdb3 serve option | Environment variable | -| :--------------------- | :----------------------------------- | -| `--license-file` | `INFLUXDB3_ENTERPRISE_LICENSE_FILE` | - ---- - #### mode Sets the mode to start the server in. @@ -274,6 +253,8 @@ configuration--for example, the same bucket. | :--------------------- | :--------------------------------- | | `--node-id` | `INFLUXDB3_NODE_IDENTIFIER_PREFIX` | +--- + {{% show-in "enterprise" %}} #### node-id-from-env @@ -402,8 +383,52 @@ Default is {{% influxdb3/limit "column" %}}. | :------------------------------------ | :------------------------------------------------------- | | `--num-total-columns-per-table-limit` | `INFLUXDB3_ENTERPRISE_NUM_TOTAL_COLUMNS_PER_TABLE_LIMIT` | {{% /show-in %}} + --- +{{% show-in "enterprise" %}} +### Licensing + +#### license-email + +Specifies the email address to associate with your {{< product-name >}} license +and automatically responds to the interactive email prompt when the server starts. +This option is mutually exclusive with [license-file](#license-file). + +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------------------- | +| `--license-email` | `INFLUXDB3_ENTERPRISE_LICENSE_EMAIL` | + +--- + +#### license-file + +Specifies the path to a license file for {{< product-name >}}. When provided, the license +file's contents are used instead of requesting a new license. +This option is mutually exclusive with [license-email](#license-email). 
+ +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------------------- | +| `--license-file` | `INFLUXDB3_ENTERPRISE_LICENSE_FILE` | + +--- + +#### license-type + +Specifies the type of {{% product-name %}} license to use and bypasses the +interactive license prompt. Provide one of the following license types: + +- `home` +- `trial` +- `commercial` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------------------- | +| `--license-type` | `INFLUXDB3_ENTERPRISE_LICENSE_TYPE` | + +--- +{{% /show-in %}} + ### AWS - [aws-access-key-id](#aws-access-key-id) @@ -413,6 +438,7 @@ Default is {{% influxdb3/limit "column" %}}. - [aws-session-token](#aws-session-token) - [aws-allow-http](#aws-allow-http) - [aws-skip-signature](#aws-skip-signature) +- [aws-credentials-file](#aws-credentials-file) #### aws-access-key-id @@ -490,6 +516,37 @@ If enabled, S3 object stores do not fetch credentials and do not sign requests. --- +#### aws-credentials-file + +Specifies the path to your S3 credentials file. +When using a credentials file, settings in the file override the corresponding +CLI flags. + +S3 credential files are JSON-formatted and should contain the following: + +```json { placeholders="AWS_(ACCESS_KEY_ID|SECRET_ACCESS_KEY|SESSION_TOKEN)|UNIX_SECONDS_TIMESTAMP" } +{ + "aws_access_key_id": "AWS_ACCESS_KEY_ID", + "aws_secret_access_key": "AWS_SECRET_ACCESS_KEY", + "aws_session_token": "AWS_SESSION_TOKEN", + "expiry": "UNIX_SECONDS_TIMESTAMP" +} +``` + +The `aws_session_token` and `expiry` fields are optional. +The file is automatically checked for updates at the expiry time or at 1-hour +intervals. + +If the object store returns an "Unauthenticated" error, InfluxDB will attempt to +update its in-memory credentials from this file and then retry the object store +request. 
+ +| influxdb3 serve option | Environment variable | +| :----------------------- | :--------------------- | +| `--aws-credentials-file` | `AWS_CREDENTIALS_FILE` | + +--- + ### Google Cloud Service - [google-service-account](#google-service-account) @@ -509,6 +566,8 @@ JSON file that contains the Google credentials. - [azure-storage-account](#azure-storage-account) - [azure-storage-access-key](#azure-storage-access-key) +- [azure-endpoint](#azure-endpoint) +- [azure-allow-http](#azure-allow-http) #### azure-storage-account @@ -532,6 +591,30 @@ values in the Storage account's **Settings > Access keys**. --- +#### azure-endpoint + +When using Microsoft Azure as the object store, set this to the Azure Blob +Storage endpoint. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--azure-endpoint` | `AZURE_ENDPOINT` | + +--- + +#### azure-allow-http + +When using Microsoft Azure as the object store, allow unencrypted HTTP requests +to Azure Blob Storage. + +**Default:** `false` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--azure-allow-http` | `AZURE_ALLOW_HTTP` | + +--- + ### Object Storage - [bucket](#bucket) diff --git a/content/shared/influxdb3-cli/write.md b/content/shared/influxdb3-cli/write.md index c19242f6f..d87e5b6e0 100644 --- a/content/shared/influxdb3-cli/write.md +++ b/content/shared/influxdb3-cli/write.md @@ -31,7 +31,8 @@ influxdb3 write [OPTIONS] --database [LINE_PROTOCOL]... 
| | `--token` | _({{< req >}})_ Authentication token | | `-f` | `--file` | A file that contains line protocol to write | | | `--accept-partial` | Accept partial writes | -| | `--precision` | Precision of data timestamps (`ns`, `us`, `ms`, or `s`) | | +| | `--no-sync` | Do not wait for WAL sync before acknowledging the write request | +| | `--precision` | Precision of data timestamps (`ns`, `us`, `ms`, or `s`) | | | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) | | `-h` | `--help` | Print help information | | | `--help-all` | Print detailed help information | @@ -50,6 +51,8 @@ You can use the following environment variables to set command options: - [Write line protocol to your InfluxDB 3 server](#write-line-protocol-to-your-influxdb-3-server) - [Write line protocol and accept partial writes](#write-line-protocol-and-accept-partial-writes) +- [Write line protocol with specific timestamp precision](#write-line-protocol-with-specific-timestamp-precision) +- [Write line protocol and immediately return a response](#write-line-protocol-and-immediately-return-a-response) In the examples below, replace the following: @@ -58,8 +61,6 @@ In the examples below, replace the following: - {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: Authentication token -{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}} - ### Write line protocol to your InfluxDB 3 server {{< code-tabs-wrapper >}} @@ -72,7 +73,7 @@ In the examples below, replace the following: {{% influxdb/custom-timestamps %}} -```bash +```bash { placeholders="DATABASE_NAME|AUTH_TOKEN" } influxdb3 write \ --database DATABASE_NAME \ --token AUTH_TOKEN \ @@ -83,7 +84,7 @@ influxdb3 write \ {{% code-tab-content %}} -```bash +```bash { placeholders="DATABASE_NAME|AUTH_TOKEN" } influxdb3 write \ --database DATABASE_NAME \ --token AUTH_TOKEN \ @@ -93,7 +94,7 @@ influxdb3 write \ {{% code-tab-content %}} -```bash +```bash { 
placeholders="DATABASE_NAME|AUTH_TOKEN" } cat ./data.lp | influxdb3 write \ --database DATABASE_NAME \ --token AUTH_TOKEN @@ -113,7 +114,7 @@ cat ./data.lp | influxdb3 write \ {{% influxdb/custom-timestamps %}} -```bash +```bash { placeholders="DATABASE_NAME|AUTH_TOKEN" } influxdb3 write \ --accept-partial \ --database DATABASE_NAME \ @@ -125,7 +126,7 @@ influxdb3 write \ {{% code-tab-content %}} -```bash +```bash { placeholders="DATABASE_NAME|AUTH_TOKEN" } influxdb3 write \ --accept-partial \ --database DATABASE_NAME \ @@ -136,7 +137,7 @@ influxdb3 write \ {{% code-tab-content %}} -```bash +```bash { placeholders="DATABASE_NAME|AUTH_TOKEN" } cat ./data.lp | influxdb3 write \ --accept-partial \ --database DATABASE_NAME \ @@ -145,14 +146,15 @@ cat ./data.lp | influxdb3 write \ {{% /code-tab-content %}} {{< /code-tabs-wrapper >}} -## Write line protocol with specific timestamp precision +### Write line protocol with specific timestamp precision -By default, in CLI and HTTP API write requests, {{% product-name %}} uses the timestamp magnitude to auto-detect the precision. +By default, in CLI and HTTP API write requests, {{% product-name %}} uses the +timestamp magnitude to auto-detect the precision. To avoid any ambiguity, specify the `--precision {ns|us|ms|s}` option: -```bash +```bash { placeholders="DATABASE_NAME|AUTH_TOKEN" } influxdb3 write \ --database DATABASE_NAME \ --token AUTH_TOKEN \ @@ -163,4 +165,24 @@ home,room=Living\ Room temp=21.4,hum=35.9,co=0i 1641027600 home,room=Kitchen temp=23.0,hum=36.2,co=0i 1641027600 ' ``` -{{% /code-placeholders %}} + +### Write line protocol and immediately return a response + +By default, {{% product-name %}} waits to respond to write requests until the +written data is flushed from the Write-Ahead Log (WAL) to object storage +(every 1s by default). +Use the `--no-sync` option to immediately return a response without waiting for +the WAL to flush. 
This improves perceived write response times, but may hide certain +types of write errors--for example: malformed line protocol or type conflicts. + +> [!Tip] +> Only use `--no-sync` when low write latency is more important than guaranteed data durability. +> Avoid using this option for critical or irreplaceable data, as it increases the risk of silent data loss. + +```bash { placeholders="DATABASE_NAME|AUTH_TOKEN" } +influxdb3 write \ + --database DATABASE_NAME \ + --token AUTH_TOKEN \ + --no-sync \ + 'home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1641024000' +``` diff --git a/content/shared/v3-core-enterprise-release-notes/_index.md b/content/shared/v3-core-enterprise-release-notes/_index.md index 896487a73..205d41e2f 100644 --- a/content/shared/v3-core-enterprise-release-notes/_index.md +++ b/content/shared/v3-core-enterprise-release-notes/_index.md @@ -5,6 +5,52 @@ > All updates to Core are automatically included in Enterprise. > The Enterprise sections below only list updates exclusive to Enterprise. +## v3.4.0 {date="2025-08-27"} + +### Core + +#### Features + +- **Token Provisioning**: + - Generate admin tokens offline and use them when starting the database if tokens do not already exist. + This is meant for automated deployments and containerized environments. + ([#26734](https://github.com/influxdata/influxdb/pull/26734)) +- **Azure Endpoint**: + - Use the `--azure-endpoint` option with `influxdb3 serve` to specify the Azure Blob Storage endpoint for object store connections. ([#26687](https://github.com/influxdata/influxdb/pull/26687)) +- **No_Sync via CLI**: + - Use the `--no-sync` option with `influxdb3 write` to skip waiting for WAL persistence on write and immediately return a response to the write request. 
([#26703](https://github.com/influxdata/influxdb/pull/26703)) + +#### Bug Fixes +- Validate tag and field names when creating tables ([#26641](https://github.com/influxdata/influxdb/pull/26641)) +- Using GROUP BY twice on the same column no longer causes incorrect data ([#26732](https://github.com/influxdata/influxdb/pull/26732)) + +#### Security & Misc +- Reduce verbosity of the TableIndexCache log. ([#26709](https://github.com/influxdata/influxdb/pull/26709)) +- WAL replay concurrency limit defaults to number of CPU cores, preventing possible OOMs. ([#26715](https://github.com/influxdata/influxdb/pull/26715)) +- Remove unsafe signal_handler code. ([#26685](https://github.com/influxdata/influxdb/pull/26685)) +- Upgrade Python version to 3.13.7-20250818. ([#26686](https://github.com/influxdata/influxdb/pull/26686), [#26700](https://github.com/influxdata/influxdb/pull/26700)) +- Tags with `/` in the name no longer break the primary key. + + +### Enterprise + +All Core updates are included in Enterprise. Additional Enterprise-specific features and fixes: + +#### Features + +- **Token Provisioning**: + - Generate _resource_ and _admin_ tokens offline and use them when starting the database. + +- Select a home or trial license without using an interactive terminal. + Use `--license-type` [home | trial | commercial] option to the `influxdb3 serve` command to automate the selection of the license type. + +#### Bug Fixes + +- Don't initialize the Processing Engine when the specified `--mode` does not require it. +- Don't panic when `INFLUXDB3_PLUGIN_DIR` is set in containers without the Processing Engine enabled. 
+ + + ## v3.3.0 {date="2025-07-29"} ### Core diff --git a/data/products.yml b/data/products.yml index 62b027b66..5b9d4fa61 100644 --- a/data/products.yml +++ b/data/products.yml @@ -6,7 +6,7 @@ influxdb3_core: versions: [core] list_order: 2 latest: core - latest_patch: 3.3.0 + latest_patch: 3.4.0 placeholder_host: localhost:8181 ai_sample_questions: - How do I install and run InfluxDB 3 Core? @@ -21,7 +21,7 @@ influxdb3_enterprise: versions: [enterprise] list_order: 2 latest: enterprise - latest_patch: 3.3.0 + latest_patch: 3.4.0 placeholder_host: localhost:8181 ai_sample_questions: - How do I install and run InfluxDB 3 Enterprise? From e32a8f62cfdaf66d8eb3ad40d872edf68795114a Mon Sep 17 00:00:00 2001 From: peterbarnett03 Date: Wed, 27 Aug 2025 19:35:11 -0400 Subject: [PATCH 134/179] chore:update version numbers (#6345) --- data/products.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/products.yml b/data/products.yml index 5b9d4fa61..99778363e 100644 --- a/data/products.yml +++ b/data/products.yml @@ -35,7 +35,7 @@ influxdb3_explorer: menu_category: tools list_order: 1 latest: explorer - latest_patch: 1.1.0 + latest_patch: 1.2.0 placeholder_host: localhost:8888 ai_sample_questions: - How do I query data using InfluxDB 3 Explorer? 
From 3222c7a9fbb470b720ba94ace6e0eb2fbdbbfbea Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Thu, 28 Aug 2025 07:03:03 -0600 Subject: [PATCH 135/179] InfluxDB 3.4 offline tokens (#6346) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * update: add 3.4 notes * update: 3.4 notes * Update content/shared/v3-core-enterprise-release-notes/_index.md Co-authored-by: Scott Anderson * Update content/shared/v3-core-enterprise-release-notes/_index.md Co-authored-by: Scott Anderson * Update content/shared/v3-core-enterprise-release-notes/_index.md Co-authored-by: Scott Anderson * Update content/shared/v3-core-enterprise-release-notes/_index.md Co-authored-by: Scott Anderson * Update content/shared/v3-core-enterprise-release-notes/_index.md Co-authored-by: Scott Anderson * Update content/shared/v3-core-enterprise-release-notes/_index.md Co-authored-by: Scott Anderson * Update content/shared/v3-core-enterprise-release-notes/_index.md Co-authored-by: Scott Anderson * Update content/shared/v3-core-enterprise-release-notes/_index.md Co-authored-by: Scott Anderson * Update content/shared/v3-core-enterprise-release-notes/_index.md Co-authored-by: Scott Anderson * feat(influxdb-3.4): added cli updates for influxdb 3.4 * Apply suggestions from code review Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * fix: add whitespace to influxdb3 write doc * feat(offline-tokens): add offline tokens to config options page * feat(offline-tokens): add task-based offline token docs * fix(links): fixed broken links and added TOC to offline tokens guide * Apply suggestions from code review Co-authored-by: マルコメ * chore(offline-tokens): add token string security standards * chore(offline-tokens): add note about token string prefix --------- Co-authored-by: Peter Barnett Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: マルコメ --- .vscode/settings.json | 3 +- .../core/admin/tokens/admin/preconfigured.md | 
17 + .../cli/influxdb3/create/token/admin.md | 2 +- .../core/reference/cli/influxdb3/serve.md | 4 + .../admin/tokens/admin/preconfigured.md | 17 + .../admin/tokens/resource/preconfigured.md | 168 ++++++++++ .../cli/influxdb3/create/token/admin.md | 2 +- .../cli/influxdb3/create/token/permission.md | 108 +++++- .../reference/cli/influxdb3/serve.md | 5 + .../tokens/admin/preconfigured.md | 115 +++++++ .../shared/influxdb3-cli/config-options.md | 314 ++++++++++++++---- .../influxdb3-cli/create/token/admin.md | 79 ++++- 12 files changed, 739 insertions(+), 95 deletions(-) create mode 100644 content/influxdb3/core/admin/tokens/admin/preconfigured.md create mode 100644 content/influxdb3/enterprise/admin/tokens/admin/preconfigured.md create mode 100644 content/influxdb3/enterprise/admin/tokens/resource/preconfigured.md create mode 100644 content/shared/influxdb3-admin/tokens/admin/preconfigured.md diff --git a/.vscode/settings.json b/.vscode/settings.json index c827452b9..e52c927b3 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -15,6 +15,7 @@ "vale.valeCLI.config": "${workspaceFolder}/.vale.ini", "vale.valeCLI.minAlertLevel": "warning", "cSpell.words": [ - "influxctl" + "influxctl", + "preconfigured" ] } \ No newline at end of file diff --git a/content/influxdb3/core/admin/tokens/admin/preconfigured.md b/content/influxdb3/core/admin/tokens/admin/preconfigured.md new file mode 100644 index 000000000..5e319be4d --- /dev/null +++ b/content/influxdb3/core/admin/tokens/admin/preconfigured.md @@ -0,0 +1,17 @@ +--- +title: Use a preconfigured admin token +description: > + Start {{% product-name %}} with a preconfigured "offline" admin token file. + If no admin tokens already exist, InfluxDB automatically creates an admin token + using the provided admin token file. 
+menu: + influxdb3_core: + parent: Admin tokens + name: Use preconfigured admin token +weight: 202 +source: /shared/influxdb3-admin/tokens/admin/preconfigured.md +--- + + diff --git a/content/influxdb3/core/reference/cli/influxdb3/create/token/admin.md b/content/influxdb3/core/reference/cli/influxdb3/create/token/admin.md index 37f212eff..9ab898932 100644 --- a/content/influxdb3/core/reference/cli/influxdb3/create/token/admin.md +++ b/content/influxdb3/core/reference/cli/influxdb3/create/token/admin.md @@ -1,5 +1,5 @@ --- -title: influxdb3 create token --admin +title: influxdb3 create token \--admin description: > The `influxdb3 create token --admin` subcommand creates an operator token or named admin token with full administrative privileges for server actions. menu: diff --git a/content/influxdb3/core/reference/cli/influxdb3/serve.md b/content/influxdb3/core/reference/cli/influxdb3/serve.md index ba971238d..a66a59209 100644 --- a/content/influxdb3/core/reference/cli/influxdb3/serve.md +++ b/content/influxdb3/core/reference/cli/influxdb3/serve.md @@ -38,13 +38,17 @@ influxdb3 serve [OPTIONS] --node-id | | `--object-store` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store)_ | | | `--admin-token-recovery-http-bind` | _See [configuration options](/influxdb3/core/reference/config-options/#admin-token-recovery-http-bind)_ | | | `--admin-token-recovery-tcp-listener-file-path` | _See [configuration options](/influxdb3/core/reference/config-options/#admin-token-recovery-tcp-listener-file-path)_ | +| | `--admin-token-file` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#admin-token-file)_ | | | `--aws-access-key-id` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-access-key-id)_ | | | `--aws-allow-http` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-allow-http)_ | +| | `--aws-credentials-file` | _See [configuration 
options](/influxdb3/enterprise/reference/config-options/#aws-credentials-file)_ | | | `--aws-default-region` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-default-region)_ | | | `--aws-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-endpoint)_ | | | `--aws-secret-access-key` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-secret-access-key)_ | | | `--aws-session-token` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-session-token)_ | | | `--aws-skip-signature` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-skip-signature)_ | +| | `--azure-allow-http` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-allow-http)_ | +| | `--azure-endpoint` | _See [configuration options](/influxdb3/enterprise/reference/config-options/##azure-endpoint)_ | | | `--azure-storage-access-key` | _See [configuration options](/influxdb3/core/reference/config-options/#azure-storage-access-key)_ | | | `--azure-storage-account` | _See [configuration options](/influxdb3/core/reference/config-options/#azure-storage-account)_ | | | `--bucket` | _See [configuration options](/influxdb3/core/reference/config-options/#bucket)_ | diff --git a/content/influxdb3/enterprise/admin/tokens/admin/preconfigured.md b/content/influxdb3/enterprise/admin/tokens/admin/preconfigured.md new file mode 100644 index 000000000..0eeabd02d --- /dev/null +++ b/content/influxdb3/enterprise/admin/tokens/admin/preconfigured.md @@ -0,0 +1,17 @@ +--- +title: Use a preconfigured admin token +description: > + Start {{% product-name %}} with a preconfigured "offline" admin token file. + If no admin tokens already exist, InfluxDB automatically creates an admin token + using the provided admin token file. 
+menu: + influxdb3_enterprise: + parent: Admin tokens + name: Use preconfigured admin token +weight: 202 +source: /shared/influxdb3-admin/tokens/admin/preconfigured.md +--- + + diff --git a/content/influxdb3/enterprise/admin/tokens/resource/preconfigured.md b/content/influxdb3/enterprise/admin/tokens/resource/preconfigured.md new file mode 100644 index 000000000..a8d636cb7 --- /dev/null +++ b/content/influxdb3/enterprise/admin/tokens/resource/preconfigured.md @@ -0,0 +1,168 @@ +--- +title: Use a preconfigured permission (resource) tokens +description: > + Start {{% product-name %}} with a preconfigured "offline" permission (resource) tokens file. + If no tokens already exist, InfluxDB automatically creates resource tokens + specified in the provided permissions (resource) tokens file. +menu: + influxdb3_enterprise: + parent: Resource tokens + name: Use preconfigured resource tokens +weight: 202 +--- + +Start {{% product-name %}} with a preconfigured "offline" permission (resource) tokens file. +If no tokens already exist, InfluxDB automatically creates resource tokens +specified in the provided permission (resource) tokens file. + +- [Generate an offline permissions (resource) tokens file](#generate-an-offline-permissions-resource-tokens-file) + - [Offline permission tokens file schema](#offline-permission-tokens-file-schema) +- [Start InfluxDB with the preconfigured permission tokens](#start-influxdb-with-the-preconfigured-permission-tokens) + +## Generate an offline permissions (resource) tokens file + +Use the `influxdb3 create token` command to generate an offline permission (resource) +tokens file. You can also specify corresponding databases to create when starting InfluxDB. 
+Include the following options: + +{{% req type="key" %}} + +- {{% req "\*" %}} `--name`: The name of the admin token + _(replace {{% code-placeholder-key %}}`TOKEN_NAME`{{% /code-placeholder-key %}})_ +- {{% req "\*" %}} `--permissions`: + The [token permissions](/influxdb3/enterprise/reference/cli/influxdb3/create/token/permission/#permission-format) + _(replace {{% code-placeholder-key %}}`TOKEN_PERMISSIONS`{{% /code-placeholder-key %}})_ +- `--expiry`: Duration for the token to remain valid, in + [humantime](https://docs.rs/humantime/latest/humantime/fn.parse_duration.html) + format--for example `10d` for 10 days or `1y` for 1 year + _(replace {{% code-placeholder-key %}}`DURATION`{{% /code-placeholder-key %}})_ +- {{% req "\*" %}} `--offline` +- `--create-databases`: Comma separated list of database names to + create when starting the server + _(replace {{% code-placeholder-key %}}`DATABASE_LIST`{{% /code-placeholder-key %}})_ +- {{% req "\*" %}} `--output-file`: File path to use for the generated token file + _(replace {{% code-placeholder-key %}}`path/to/tokens.json`{{% /code-placeholder-key %}})_ + + + +```bash { placeholders="TOKEN_(NAME|PERMISSIONS)|DURATION|DATABASE_LIST|path/to/tokens.json" } +influxdb3 create token \ + --name TOKEN_NAME \ + --permission "TOKEN_PERMISSIONS" \ + --expiry DURATION \ + --offline \ + --create-databases DATABASE_LIST \ + --output-file path/to/tokens.json +``` + +> [!Note] +> #### Add multiple tokens to a permission tokens file +> +> If you write a new offline permission token to an existing permission token file, +> the command appends the new token to the existing output file. +> +> #### You can write or generate your own permission tokens file +> +> The `influxdb3 create token --offline` command makes generating an +> offline permission tokens file easy, but it is not required. +> You can write or generate your own permission tokens file using the +> [required JSON schema](#offline-permission-tokens-file-schema). 
+> +> ##### Token string security standards +> +> If writing or generating your own permission tokens file, ensure that token +> strings are sufficiently secure. We recommend the following: +> +> - Use a cryptographically secure pseudorandom number generator. +> - Ensure sufficient length and entropy. Generate and base64-encode a random +> string of at least 16 bytes (128 bits). +> - Prepend the generated string with `apiv3_` for InfluxDB compatibility. + +> [!Important] +> #### Token file permissions +> +> Token file permissions should be restricted `0600` to protect the tokens. + +### Offline permission tokens file schema + +An offline permission tokens file is a JSON-formatted file that contains a single +object with the following fields: + +- **create_databases**: (Optional) + _Array of database names to create when starting the server_ + +- **tokens**: _Array of token objects_ + - **token**: The raw token string (must begin with `apiv3_`) + - **name**: A unique token name + - **expiry_millis**: (Optional) Token expiration time as a + millisecond Unix timestamp + - **permissions**: Array of [token permission](/influxdb3/enterprise/reference/cli/influxdb3/create/token/permission/#permission-format) strings. 
+ +```json +{ + "create_databases": [ + "db1", + "db2", + "db3", + "db4" + ], + "tokens": [ + { + "token": "apiv3_0XXXX-xxxXxXxxxXX_OxxxX...", + "name": "token-1", + "expiry_millis": 1756400061529, + "permissions": [ + "db:db1,db2:read,write", + "db:db3:read" + ] + }, + { + "token": "apiv3_0XXXX-xxxXxXxxxXX_OxxxX...", + "name": "token-2", + "expiry_millis": 1756400061529, + "permissions": [ + "db:db4:read,write" + ] + } + ] +} +``` + +## Start InfluxDB with the preconfigured permission tokens + +When starting {{% product-name %}}, include the `--permission-tokens-file` +option with the `influxdb3 serve` command or set the +`INFLUXDB3_PERMISSION_TOKENS_FILE` environment +variable to provide the preconfigured offline permission tokens file: + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[CLI option](#) +[Environment variable](#) +{{% /code-tabs %}} +{{% code-tab-content %}} + + +```bash { placeholders="path/to/admin-token.json" } +influxdb3 serve \ + # ... \ + --permission-tokens-file path/to/admin-token.json +``` + +{{% /code-tab-content %}} +{{% code-tab-content %}} + + +```bash { placeholders="path/to/admin-token.json" } +INFLUXDB3_PERMISSION_TOKENS_FILE=path/to/admin-token.json + +influxdb3 serve \ + # ... \ +``` + +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +When the server starts, you can use the preconfigured permission (resource) tokens +to write data to and query data from with your {{% product-name %}} instance or +cluster. 
diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/create/token/admin.md b/content/influxdb3/enterprise/reference/cli/influxdb3/create/token/admin.md index e6a0f8168..da56a933f 100644 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/create/token/admin.md +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/create/token/admin.md @@ -1,5 +1,5 @@ --- -title: influxdb3 create token --admin +title: influxdb3 create token \--admin description: > The `influxdb3 create token --admin` subcommand creates an operator token or named admin token with full administrative privileges for server actions. menu: diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/create/token/permission.md b/content/influxdb3/enterprise/reference/cli/influxdb3/create/token/permission.md index b3e03a926..4961adc22 100644 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/create/token/permission.md +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/create/token/permission.md @@ -1,5 +1,5 @@ --- -title: influxdb3 create token --permission +title: influxdb3 create token \--permission description: > The `influxdb3 create token` command with the `--permission` option creates a new authentication token with fine-grained access permissions for specific resources in {{< product-name >}}. @@ -25,19 +25,19 @@ influxdb3 create token --permission --name [OPTIONS] ## Options -| Option | | Description | -| :----- | :----------- | :----------------------------- | -| | `--permission ` | Permissions in `RESOURCE_TYPE:RESOURCE_NAMES:ACTIONS` format--for example, `db:*:read,write`, `system:*:read`. 
`--permission` may be specified multiple times | -| | `--name ` | Name of the token | -| `-H` | `--host ` | The host URL of the running InfluxDB 3 Enterprise server [env: INFLUXDB3_HOST_URL=] [default: http://127.0.0.1:8181] | -| | `--token ` | The {{% token-link "enterprise" "admin" %}} [env: INFLUXDB3_AUTH_TOKEN=] | -| | `--expiry ` | The token expiration time as a duration (for example, 1h, 7d, 1y). If not set, the token does not expire until revoked | -| | `--tls-ca ` | An optional arg to use a custom CA for testing with self-signed certs [env: INFLUXDB3_TLS_CA=] | -| | `--format ` | Output format (`json` or `text` _(default)_) | -| `-h` | `--help` | Print help information | -| | `--help-all` | Print detailed help information | +| Option | | Description | +| :----- | :-------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| | `--permission ` | Permissions in `RESOURCE_TYPE:RESOURCE_NAMES:ACTIONS` format--for example, `db:*:read,write`, `system:*:read`. `--permission` may be specified multiple times | +| | `--name ` | Name of the token | +| `-H` | `--host ` | The host URL of the running InfluxDB 3 Enterprise server [env: INFLUXDB3_HOST_URL=] [default: http://127.0.0.1:8181] | +| | `--token ` | The {{% token-link "enterprise" "admin" %}} [env: INFLUXDB3_AUTH_TOKEN=] | +| | `--expiry ` | The token expiration time as a duration (for example, 1h, 7d, 1y). If not set, the token does not expire until revoked | +| | `--tls-ca ` | An optional arg to use a custom CA for testing with self-signed certs [env: INFLUXDB3_TLS_CA=] | +| | `--format ` | Output format (`json` or `text` _(default)_) | +| `-h` | `--help` | Print help information | +| | `--help-all` | Print detailed help information | -## Permission Format +## Permission format The `--permission` option takes a value in the format `RESOURCE_TYPE:RESOURCE_NAMES:ACTIONS`. 
@@ -50,6 +50,16 @@ The `--permission` option takes a value in the format `RESOURCE_TYPE:RESOURCE_NA ## Examples +- [Create a token with read and write access to a database](#create-a-token-with-read-and-write-access-to-a-database) +- [Create a token with read-only access to a database](#create-a-token-with-read-only-access-to-a-database) +- [Create a token with access to multiple databases](#create-a-token-with-access-to-multiple-databases) +- [Create a token with access to all databases](#create-a-token-with-access-to-all-databases) +- [Create a token that expires in seven days](#create-a-token-that-expires-in-seven-days) +- [Create a system token for health information](#create-a-system-token-for-health-information) +- [Create a token with access to all system information](#create-a-token-with-access-to-all-system-information) +- [Create a token with multiple permissions](#create-a-token-with-multiple-permissions) +- [Generate an offline permission (resource) tokens file](#generate-an-offline-permission-resource-tokens-file) + ### Create a token with read and write access to a database ```bash @@ -115,3 +125,75 @@ influxdb3 create token \ --permission "system:health:read" \ --name "Multi-permission token" ``` + +### Generate an offline permission (resource) tokens file + +Generate an offline permission (resource) tokens file to use if no resource +tokens exist when the server starts. Once started, you can interact with the +server using the provided tokens. Offline permission tokens are designed to help +with automated deployments. 
+ +Include the following options: + +- `--name` _({{% req %}})_ +- `--permissions` _({{% req %}})_ +- `--offline` _({{% req %}})_ +- `--output-file` _({{% req %}})_ +- `--create-databases` _(Optional)_ +- `--expiry` _(Optional)_ + + +```bash { placeholders="TOKEN_(NAME|PERMISSIONS)|DURATION|DATABASE_LIST|path/to/tokens.json" } +influxdb3 create token \ + --name TOKEN_NAME \ + --permission "TOKEN_PERMISSIONS" \ + --expiry DURATION \ + --offline \ + --create-databases DATABASE_LIST \ + --output-file path/to/tokens.json +``` + +Replace the following: + +- {{% code-placeholder-key %}}`TOKEN_NAME`{{% /code-placeholder-key %}}: + Name for your offline permission token +- {{% code-placeholder-key %}}`TOKEN_PERMISSIONS`{{% /code-placeholder-key %}}: + [Token permissions](#permission-format). +- {{% code-placeholder-key %}}`DURATION`{{% /code-placeholder-key %}}: + Duration for the token to remain valid, in + [humantime](https://docs.rs/humantime/latest/humantime/fn.parse_duration.html) + format (for example, `10d` for 10 days or `1y` for 1 year). +- {{% code-placeholder-key %}}`DATABASE_LIST`{{% /code-placeholder-key %}}: + Comma-separated list of database names to create when starting the + {{% product-name %}} server using the generated tokens file +- {{% code-placeholder-key %}}`path/to/tokens.json`{{% /code-placeholder-key %}}: + File path to use for the generated tokens file + +{{< expand-wrapper >}} +{{% expand "View example offline permission tokens file" %}} +```json +{ + "create_databases": [ + "db1", + "db2", + "db3" + ], + "tokens": [ + { + "token": "apiv3_0XXXX-xxxXxXxxxXX_OxxxX...", + "name": "example-token", + "expiry_millis": 1756400061529, + "permissions": [ + "db:db1,db2:read,write", + "db:db3:read" + ] + } + ] +} +``` +{{% /expand %}} +{{< /expand-wrapper >}} + +> [!Note] +> If you write a new offline permission token to an existing permission token file, +> the command appends the new token to the existing output file. 
diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md b/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md index 40a97edd4..c44102dd3 100644 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md @@ -39,13 +39,17 @@ influxdb3 serve [OPTIONS] \ | :--------------- | :--------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------ | | | `--admin-token-recovery-http-bind` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#admin-token-recovery-http-bind)_ | | | `--admin-token-recovery-tcp-listener-file-path` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#admin-token-recovery-tcp-listener-file-path)_ | +| | `--admin-token-file` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#admin-token-file)_ | | | `--aws-access-key-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#aws-access-key-id)_ | | | `--aws-allow-http` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#aws-allow-http)_ | +| | `--aws-credentials-file` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#aws-credentials-file)_ | | | `--aws-default-region` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#aws-default-region)_ | | | `--aws-endpoint` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#aws-endpoint)_ | | | `--aws-secret-access-key` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#aws-secret-access-key)_ | | | `--aws-session-token` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#aws-session-token)_ | | | `--aws-skip-signature` | _See [configuration 
options](/influxdb3/enterprise/reference/config-options/#aws-skip-signature)_ | +| | `--azure-allow-http` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-allow-http)_ | +| | `--azure-endpoint` | _See [configuration options](/influxdb3/enterprise/reference/config-options/##azure-endpoint)_ | | | `--azure-storage-access-key` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-storage-access-key)_ | | | `--azure-storage-account` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-storage-account)_ | | | `--bucket` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#bucket)_ | @@ -111,6 +115,7 @@ influxdb3 serve [OPTIONS] \ | | `--parquet-mem-cache-prune-percentage` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#parquet-mem-cache-prune-percentage)_ | | | `--parquet-mem-cache-query-path-duration` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#parquet-mem-cache-query-path-duration)_ | | | `--parquet-mem-cache-size` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#parquet-mem-cache-size)_ | +| | `--permission-tokens-file` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#permission-tokens-file)_ | | | `--plugin-dir` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#plugin-dir)_ | | | `--preemptive-cache-age` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#preemptive-cache-age)_ | | | `--query-file-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#query-file-limit)_ | diff --git a/content/shared/influxdb3-admin/tokens/admin/preconfigured.md b/content/shared/influxdb3-admin/tokens/admin/preconfigured.md new file mode 100644 index 000000000..00c1f1130 --- /dev/null +++ b/content/shared/influxdb3-admin/tokens/admin/preconfigured.md 
@@ -0,0 +1,115 @@ + +Start {{% product-name %}} with a preconfigured "offline" admin token file. +If no admin tokens already exist, InfluxDB automatically creates an admin token +using the provided admin token file. +Offline tokens are designed to help with automated deployments. + +- [Generate an offline admin token file](#generate-an-offline-admin-token-file) + - [Offline admin token file schema](#offline-admin-token-file-schema) +- [Start InfluxDB with the preconfigured admin token](#start-influxdb-with-the-preconfigured-admin-token) + +## Generate an offline admin token file + +Use the `influxdb3 create token --admin` command to generate an offline admin +token file. Include the following options: + +{{% req type="key" %}} + +- `--name`: The name of the admin token _(default is `_admin`)_ + _(replace {{% code-placeholder-key %}}`TOKEN_NAME`{{% /code-placeholder-key %}})_ +- `--expiry`: Duration for the token to remain valid, in + [humantime](https://docs.rs/humantime/latest/humantime/fn.parse_duration.html) + format (for example, `10d` for 10 days or `1y` for 1 year). + _(replace {{% code-placeholder-key %}}`DURATION`{{% /code-placeholder-key %}})_ +- {{% req "\*" %}} `--offline` +- {{% req "\*" %}} `--output-file`: File path to use for the generated token file + _(replace {{% code-placeholder-key %}}`path/to/tokens.json`{{% /code-placeholder-key %}})_ + + + +```bash { placeholders="TOKEN_NAME|DURATION|path/to/admin-token.json" } +influxdb3 create token --admin \ + --name TOKEN_NAME \ + --expiry DURATION \ + --offline \ + --output-file path/to/admin-token.json +``` + +> [!Note] +> #### You can write or generate your own admin token file +> +> The `influxdb3 create token --admin --offline` command makes generating +> offline admin token files easy, but it is not required. +> You can also write or generate your own admin token files using the +> [required JSON schema](#offline-admin-token-file-schema). 
+> +> ##### Token string security standards +> +> If writing or generating your own admin token file, ensure that the token +> string is sufficiently secure. We recommend the following: +> +> - Use a cryptographically secure pseudorandom number generator. +> - Ensure sufficient length and entropy. Generate and base64-encode a random +> string of at least 16 bytes (128 bits). +> - Prepend the generated string with `apiv3_` for InfluxDB compatibility. + +> [!Important] +> #### Token file permissions +> +> Token file permissions should be restricted `0600` to protect the token. + +### Offline admin token file schema + +An offline admin token file is a JSON-formatted file that contains a single +object with the following fields: + +- **token**: The raw token string (must begin with `apiv3_`) +- **name**: The token name (default is `_admin`) +- **expiry_millis**: (Optional) Token expiration time as a + millisecond Unix timestamp + +```json +{ + "token": "apiv3_0XXXX-xxxXxXxxxXX_OxxxX...", + "name": "_admin", + "expiry_millis": 1756400061529 +} +``` + +## Start InfluxDB with the preconfigured admin token + +When starting {{% product-name %}}, include the `--admin-token-file` option with the +`influxdb3 serve` command or set the `INFLUXDB3_ADMIN_TOKEN_FILE` environment +variable to provide the preconfigured offline admin token file: + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[CLI option](#) +[Environment variable](#) +{{% /code-tabs %}} +{{% code-tab-content %}} + + +```bash { placeholders="path/to/admin-token.json" } +influxdb3 serve \ + # ... \ + --admin-token-file path/to/admin-token.json +``` + +{{% /code-tab-content %}} +{{% code-tab-content %}} + + +```bash { placeholders="path/to/admin-token.json" } +INFLUXDB3_ADMIN_TOKEN_FILE=path/to/admin-token.json + +influxdb3 serve \ + # ... 
\ +``` + +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +When the server starts, you can use the preconfigured admin token to interact with +your {{% product-name %}}{{% show-in "enterprise" %}} cluster or{{% /show-in %}} +instance. diff --git a/content/shared/influxdb3-cli/config-options.md b/content/shared/influxdb3-cli/config-options.md index 522bc7b5d..be8ce2e7e 100644 --- a/content/shared/influxdb3-cli/config-options.md +++ b/content/shared/influxdb3-cli/config-options.md @@ -46,11 +46,6 @@ influxdb3 serve - [node-id](#node-id) {{% show-in "enterprise" %}} - [node-id-from-env](#node-id-from-env){{% /show-in %}} - [object-store](#object-store) - - [tls-key](#tls-key) - - [tls-cert](#tls-cert) - - [tls-minimum-versions](#tls-minimum-version) - - [without-auth](#without-auth) - - [disable-authz](#disable-authz) {{% show-in "enterprise" %}} - [num-database-limit](#num-database-limit) - [num-table-limit](#num-table-limit) @@ -59,6 +54,15 @@ influxdb3 serve - [license-email](#license-email) - [license-file](#license-file) - [license-type](#license-type){{% /show-in %}} +- [Security](#security) + - [tls-key](#tls-key) + - [tls-cert](#tls-cert) + - [tls-minimum-versions](#tls-minimum-version) + - [without-auth](#without-auth) + - [disable-authz](#disable-authz) + - [admin-token-recovery-http-bind](#admin-token-recovery-http-bind) + - [admin-token-file](#admin-token-file) + {{% show-in "enterprise" %}}- [permission-tokens-file](#permission-tokens-file){{% /show-in %}} - [AWS](#aws) - [aws-access-key-id](#aws-access-key-id) - [aws-secret-access-key](#aws-secret-access-key) @@ -113,7 +117,6 @@ influxdb3 serve - [HTTP](#http) - [max-http-request-size](#max-http-request-size) - [http-bind](#http-bind) - - [admin-token-recovery-http-bind](#admin-token-recovery-http-bind) - [Memory](#memory) - [exec-mem-pool-bytes](#exec-mem-pool-bytes) - [force-snapshot-mem-threshold](#force-snapshot-mem-threshold) @@ -295,8 +298,97 @@ This option supports the following values: | 
:--------------------- | :----------------------- | | `--object-store` | `INFLUXDB3_OBJECT_STORE` | +{{% show-in "enterprise" %}} --- +#### num-database-limit + +Limits the total number of active databases. +Default is {{% influxdb3/limit "database" %}}. + +| influxdb3 serve option | Environment variable | +| :---------------------- | :---------------------------------------- | +| `--num-database-limit` | `INFLUXDB3_ENTERPRISE_NUM_DATABASE_LIMIT` | + +--- + +#### num-table-limit + +Limits the total number of active tables across all databases. +Default is {{% influxdb3/limit "table" %}}. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------------------------- | +| `--num-table-limit` | `INFLUXDB3_ENTERPRISE_NUM_TABLE_LIMIT` | + +--- + +#### num-total-columns-per-table-limit + +Limits the total number of columns per table. +Default is {{% influxdb3/limit "column" %}}. + +| influxdb3 serve option | Environment variable | +| :------------------------------------ | :------------------------------------------------------- | +| `--num-total-columns-per-table-limit` | `INFLUXDB3_ENTERPRISE_NUM_TOTAL_COLUMNS_PER_TABLE_LIMIT` | +{{% /show-in %}} + +--- + +{{% show-in "enterprise" %}} +### Licensing + +#### license-email + +Specifies the email address to associate with your {{< product-name >}} license +and automatically responds to the interactive email prompt when the server starts. +This option is mutually exclusive with [license-file](#license-file). + +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------------------- | +| `--license-email` | `INFLUXDB3_ENTERPRISE_LICENSE_EMAIL` | + +--- + +#### license-file + +Specifies the path to a license file for {{< product-name >}}. When provided, the license +file's contents are used instead of requesting a new license. +This option is mutually exclusive with [license-email](#license-email). 
+ +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------------------- | +| `--license-file` | `INFLUXDB3_ENTERPRISE_LICENSE_FILE` | + +--- + +#### license-type + +Specifies the type of {{% product-name %}} license to use and bypasses the +interactive license prompt. Provide one of the following license types: + +- `home` +- `trial` +- `commercial` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------------------- | +| `--license-type` | `INFLUXDB3_ENTERPRISE_LICENSE_TYPE` | + +--- +{{% /show-in %}} + +### Security + +- [tls-key](#tls-key) +- [tls-cert](#tls-cert) +- [tls-minimum-versions](#tls-minimum-version) +- [without-auth](#without-auth) +- [disable-authz](#disable-authz) +- [admin-token-recovery-http-bind](#admin-token-recovery-http-bind) +- [admin-token-file](#admin-token-file) +{{% show-in "enterprise" %}}- [permission-tokens-file](#permission-tokens-file){{% /show-in %}} + #### tls-key The path to a key file for TLS to be enabled. @@ -349,43 +441,169 @@ Valid values are `health`, `ping`, and `metrics`. | :--------------------- | :------------------------ | | `--disable-authz` | `INFLUXDB3_DISABLE_AUTHZ` | +--- + +#### admin-token-recovery-http-bind + +Enables an admin token recovery HTTP server on a separate port. +This server allows regenerating lost admin tokens without existing authentication. +The server automatically shuts down after a successful token regeneration. + +> [!Warning] +> This option creates an unauthenticated endpoint that can regenerate admin tokens. +> Only use this when you have lost access to your admin token and ensure the +> server is only accessible from trusted networks. 
+ +**Default:** `127.0.0.1:8182` (when enabled) + +| influxdb3 serve option | Environment variable | +| :--------------------------------- | :----------------------------------------- | +| `--admin-token-recovery-http-bind` | `INFLUXDB3_ADMIN_TOKEN_RECOVERY_HTTP_BIND` | + +##### Example usage + +```bash +# Start server with recovery endpoint +influxdb3 serve --admin-token-recovery-http-bind + +# In another terminal, regenerate the admin token +influxdb3 create token --admin --regenerate --host http://127.0.0.1:8182 +``` + +--- + +#### admin-token-file + +Specifies an offline admin token file to use if no tokens exist when the server +starts. Once started, you can interact with the server using the provided token. +Offline admin tokens are designed to help with automated deployments. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :--------------------------- | +| `--admin-token-file` | `INFLUXDB3_ADMIN_TOKEN_FILE` | + +Offline admin tokens are defined in a JSON-formatted file. 
+Use the following command to generate an offline admin token file: + + +```bash { placeholders="./path/to/admin-token.json" } +influxdb3 create token --admin \ + --name "example-admin-token" \ + --expiry 1d \ + --offline \ + --output-file ./path/to/admin-token.json +``` + +{{< expand-wrapper >}} +{{% expand "View example offline admin token file" %}} +```json +{ + "token": "apiv3_0XXXX-xxxXxXxxxXX_OxxxX...", + "name": "example-admin-token", + "expiry_millis": 1756400061529 +} +``` +{{% /expand %}} +{{< /expand-wrapper >}} + +##### Example usage + + + +```bash { placeholders="./path/to/admin-token.json" } +# Generate and admin token offline +influxdb3 create token \ + --admin \ + --name "example-admin-token" \ + --expiry 1d \ + --offline \ + --output-file ./path/to/admin-token.json + +# Start {{% product-name %}} using the generated token +influxdb3 serve --admin-token-file ./path/to/admin-token.json +``` + +--- + {{% show-in "enterprise" %}} ---- +#### permission-tokens-file -#### num-database-limit +Specifies an offline permission (resource) tokens file to use if no resource +tokens exist when the server starts. Once started, you can interact with the +server using the provided tokens. Offline permission tokens are designed to help +with automated deployments. -Limits the total number of active databases. -Default is {{% influxdb3/limit "database" %}}. +| influxdb3 serve option | Environment variable | +| :------------------------- | :--------------------------------- | +| `--permission-tokens-file` | `INFLUXDB3_PERMISSION_TOKENS_FILE` | -| influxdb3 serve option | Environment variable | -| :---------------------- | :---------------------------------------- | -| `--num-database-limit` | `INFLUXDB3_ENTERPRISE_NUM_DATABASE_LIMIT` | +Multiple tokens with database-level permissions can be defined. +You can also specify databases to create at startup. 
+Use the a command similar to the following to generate an offline permission +token file: + +```bash { placeholders="./path/to/tokens.json" } +influxdb3 create token \ + --name "example-token" \ + --permission "db:db1,db2:read,write" \ + --permission "db:db3:read" \ + --expiry 1d \ + --offline \ + --create-databases db1,db2 \ + --output-file ./path/to/tokens.json +``` + +{{< expand-wrapper >}} +{{% expand "View example offline permission tokens file" %}} +```json +{ + "create_databases": [ + "db1", + "db2", + "d3" + ], + "tokens": [ + { + "token": "apiv3_0XXXX-xxxXxXxxxXX_OxxxX...", + "name": "example-token", + "expiry_millis": 1756400061529, + "permissions": [ + "db:db1,db2:read,write", + "db:db3:read" + ] + } + ] +} +``` +{{% /expand %}} +{{< /expand-wrapper >}} + +> [!Note] +> If you write a new offline permission token to an existing permission token file, +> the command appends the new token to the existing output file. + +##### Example usage + + + +```bash { placeholders="./path/to/tokens.json" } +# Generate and admin token offline +influxdb3 create token \ + --name "example-token" \ + --permission "db:db1,db2:read,write" \ + --permission "db:db3:read" \ + --expiry 1d \ + --offline \ + --create-databases db1,db2 \ + --output-file ./path/to/tokens.json + +# Start {{% product-name %}} using the generated token +influxdb3 serve --permission-tokens-file ./path/to/tokens.json +``` --- - -#### num-table-limit - -Limits the total number of active tables across all databases. -Default is {{% influxdb3/limit "table" %}}. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------------------------- | -| `--num-table-limit` | `INFLUXDB3_ENTERPRISE_NUM_TABLE_LIMIT` | - ---- - -#### num-total-columns-per-table-limit - -Limits the total number of columns per table. -Default is {{% influxdb3/limit "column" %}}. 
- -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :------------------------------------------------------- | -| `--num-total-columns-per-table-limit` | `INFLUXDB3_ENTERPRISE_NUM_TOTAL_COLUMNS_PER_TABLE_LIMIT` | {{% /show-in %}} ---- - {{% show-in "enterprise" %}} ### Licensing @@ -1025,7 +1243,6 @@ Provides custom configuration to DataFusion as a comma-separated list of - [max-http-request-size](#max-http-request-size) - [http-bind](#http-bind) -- [admin-token-recovery-http-bind](#admin-token-recovery-http-bind) #### max-http-request-size @@ -1051,31 +1268,6 @@ Defines the address on which InfluxDB serves HTTP API requests. --- -#### admin-token-recovery-http-bind - -Enables an admin token recovery HTTP server on a separate port. This server allows regenerating lost admin tokens without existing authentication. The server automatically shuts down after a successful token regeneration. - -> [!Warning] -> This option creates an unauthenticated endpoint that can regenerate admin tokens. Only use this when you have lost access to your admin token and ensure the server is only accessible from trusted networks. 
- -**Default:** `127.0.0.1:8182` (when enabled) - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--admin-token-recovery-http-bind` | `INFLUXDB3_ADMIN_TOKEN_RECOVERY_HTTP_BIND` | - -##### Example usage - -```bash -# Start server with recovery endpoint -influxdb3 serve --admin-token-recovery-http-bind - -# In another terminal, regenerate the admin token -influxdb3 create token --admin --regenerate --host http://127.0.0.1:8182 -``` - ---- - ### Memory - [exec-mem-pool-bytes](#exec-mem-pool-bytes) diff --git a/content/shared/influxdb3-cli/create/token/admin.md b/content/shared/influxdb3-cli/create/token/admin.md index 6a91594d8..dc333f9e0 100644 --- a/content/shared/influxdb3-cli/create/token/admin.md +++ b/content/shared/influxdb3-cli/create/token/admin.md @@ -9,20 +9,27 @@ influxdb3 create token --admin [OPTIONS] ## Options {.no-shorthand} -| Option | Description | -|:-------|:------------| -| `--regenerate` | Regenerates the operator token. Requires `--token` and the current operator token | -| `--name ` | Name of the token | -| `--expiry ` | Expires in `duration`--for example, 10d for 10 days 1y for 1 year | -| `--host ` | The host URL of the running InfluxDB 3 server [env: `INFLUXDB3_HOST_URL=`] [default: `http://127.0.0.1:8181`] | -| `--token ` | An existing admin token for the InfluxDB 3 server | -| `--tls-ca ` | An optional arg to use a custom ca for useful for testing with self signed certs | -| `--format ` | Output format for token [possible values: `json`, `text`] | -| `-h`, `--help` | Print help information | -| `--help-all` | Print more detailed help information | +| Option | Description | +| :-------------- | :------------------------------------------------------------------------------------------------------------ | +| `--regenerate` | Regenerates the operator token. 
Requires `--token` and the current operator token | +| `--name` | Name of the token | +| `--expiry` | Expires in `duration`--for example, 10d for 10 days 1y for 1 year | +| `--host` | The host URL of the running InfluxDB 3 server [env: `INFLUXDB3_HOST_URL=`] [default: `http://127.0.0.1:8181`] | +| `--token` | An existing admin token for the InfluxDB 3 server | +| `--tls-ca` | An optional arg to use a custom ca for useful for testing with self signed certs | +| `--format` | Output format for token [possible values: `json`, `text`] | +| `--offline` | Generate token without connecting to server (for automation) | +| `--output-file` | File path to save the token (required with `--offline`) | +| `-h`, `--help` | Print help information | +| `--help-all` | Print more detailed help information | ## Examples +- [Create an operator token](#create-an-operator-token) +- [Use the operator token to create a named admin token](#use-the-operator-token-to-create-a-named-admin-token) +- [Use the token to create a database](#use-the-token-to-create-a-database) +- [Generate an offline admin token](#generate-an-offline-admin-token) + ### Create an operator token The operator token is a special token that has full administrative privileges on the InfluxDB server and doesn't expire. 
@@ -40,17 +47,15 @@ For CLI commands, use the `--token` option or the `INFLUXDB3_AUTH_TOKEN` environ ### Use the operator token to create a named admin token -{{% code-placeholders "OPERATOR_TOKEN|TOKEN_NAME|EXPIRY" %}} -```bash +```bash { placeholders="OPERATOR_TOKEN|TOKEN_NAME|EXPIRY" } influxdb3 create token \ --admin \ --token OPERATOR_TOKEN \ --name TOKEN_NAME \ --expiry DURATION ``` -{{% /code-placeholders %}} Replace the following: @@ -60,16 +65,13 @@ Replace the following: ### Use the token to create a database -{{% code-placeholders "YOUR_ADMIN_TOKEN|DATABASE_NAME" %}} - -```bash +```bash { placeholders="YOUR_ADMIN_TOKEN|DATABASE_NAME" } influxdb3 create database \ --token ADMIN_TOKEN \ DATABASE_NAME ``` -{{% /code-placeholders %}} Replace the following: @@ -83,3 +85,44 @@ Replace the following: > ```bash > export INFLUXDB3_AUTH_TOKEN=ADMIN_TOKEN > ``` + +### Generate an offline admin token + +Generate an offline admin token file to use if no tokens exist when the server +starts. Once started, you can interact with the server using the provided token. +Offline admin tokens are designed to help with automated deployments. + +Include the following options: + +- `--offline` _({{% req %}})_ +- `--output-file` _({{% req %}})_ +- `--name` _(default is `_admin`)_ +- `--expiry` _(Optional)_ + + + +```bash { placeholders="TOKEN_NAME|DURATION|path/to/admin-token.json" } +influxdb3 create token --admin \ + --name TOKEN_NAME \ + --expiry DURATION \ + --offline \ + --output-file path/to/admin-token.json +``` + +Replace the following: + +- {{% code-placeholder-key %}}`TOKEN_NAME`{{% /code-placeholder-key %}}: Name for your offline admin token +- {{% code-placeholder-key %}}`DURATION`{{% /code-placeholder-key %}}: Duration for the token to remain valid, in [humantime](https://docs.rs/humantime/latest/humantime/fn.parse_duration.html) format (for example, `10d` for 10 days or `1y` for 1 year). 
+- {{% code-placeholder-key %}}`path/to/admin-token.json`{{% /code-placeholder-key %}}: File path to use for the generated token file + +{{< expand-wrapper >}} +{{% expand "View example offline admin token file" %}} +```json +{ + "token": "apiv3_0XXXX-xxxXxXxxxXX_OxxxX...", + "name": "example-admin-token", + "expiry_millis": 1756400061529 +} +``` +{{% /expand %}} +{{< /expand-wrapper >}} From deb63070933d87cfa29b07a2a5fe60b4fe460fc2 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Thu, 28 Aug 2025 07:14:37 -0600 Subject: [PATCH 136/179] hotfix: add related links to offline token guides --- content/influxdb3/core/admin/tokens/admin/preconfigured.md | 5 ++++- .../enterprise/admin/tokens/admin/preconfigured.md | 6 +++++- .../enterprise/admin/tokens/resource/preconfigured.md | 6 +++++- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/content/influxdb3/core/admin/tokens/admin/preconfigured.md b/content/influxdb3/core/admin/tokens/admin/preconfigured.md index 5e319be4d..95fe96a5d 100644 --- a/content/influxdb3/core/admin/tokens/admin/preconfigured.md +++ b/content/influxdb3/core/admin/tokens/admin/preconfigured.md @@ -8,7 +8,10 @@ menu: influxdb3_core: parent: Admin tokens name: Use preconfigured admin token -weight: 202 +weight: 202 +related: + - /influxdb3/core/reference/config-options/#admin-token-file, Configuration options > admin-token-file + - /influxdb3/core/reference/cli/influxdb3/create/token/admin/ source: /shared/influxdb3-admin/tokens/admin/preconfigured.md --- diff --git a/content/influxdb3/enterprise/admin/tokens/admin/preconfigured.md b/content/influxdb3/enterprise/admin/tokens/admin/preconfigured.md index 0eeabd02d..72c54fc6e 100644 --- a/content/influxdb3/enterprise/admin/tokens/admin/preconfigured.md +++ b/content/influxdb3/enterprise/admin/tokens/admin/preconfigured.md @@ -8,7 +8,11 @@ menu: influxdb3_enterprise: parent: Admin tokens name: Use preconfigured admin token -weight: 202 +weight: 202 +related: + - 
/influxdb3/enterprise/admin/tokens/resource/preconfigured/ + - /influxdb3/enterprise/reference/config-options/#admin-token-file, Configuration options > admin-token-file + - /influxdb3/enterprise/reference/cli/influxdb3/create/token/admin/ source: /shared/influxdb3-admin/tokens/admin/preconfigured.md --- diff --git a/content/influxdb3/enterprise/admin/tokens/resource/preconfigured.md b/content/influxdb3/enterprise/admin/tokens/resource/preconfigured.md index a8d636cb7..a5e2e140b 100644 --- a/content/influxdb3/enterprise/admin/tokens/resource/preconfigured.md +++ b/content/influxdb3/enterprise/admin/tokens/resource/preconfigured.md @@ -8,7 +8,11 @@ menu: influxdb3_enterprise: parent: Resource tokens name: Use preconfigured resource tokens -weight: 202 +weight: 202 +related: + - /influxdb3/enterprise/admin/tokens/admin/preconfigured/ + - /influxdb3/enterprise/reference/config-options/#permission-tokens-file, Configuration options > permission-tokens-file + - /influxdb3/enterprise/reference/cli/influxdb3/create/token/permission/ --- Start {{% product-name %}} with a preconfigured "offline" permission (resource) tokens file. From a44a06051d85340af8de61b6dbce2f6584d95d9f Mon Sep 17 00:00:00 2001 From: jaal2001 Date: Thu, 28 Aug 2025 17:00:28 +0200 Subject: [PATCH 137/179] Update config-options.md Provide both available options for "log-destination" to make it more obvious that no filesystem logs are supported. --- content/shared/influxdb3-cli/config-options.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/content/shared/influxdb3-cli/config-options.md b/content/shared/influxdb3-cli/config-options.md index be8ce2e7e..395009514 100644 --- a/content/shared/influxdb3-cli/config-options.md +++ b/content/shared/influxdb3-cli/config-options.md @@ -939,6 +939,11 @@ Sets the filter directive for logs. Specifies the destination for logs. 
+This option supports the following values: + +- `stdout` _(default)_ +- `stderr` + **Default:** `stdout` | influxdb3 serve option | Environment variable | @@ -1993,4 +1998,4 @@ Enables the experimental PachaTree storage engine for improved performance. | :---------------------- | :------------------------------------- | | `--use-pacha-tree` | `INFLUXDB3_ENTERPRISE_USE_PACHA_TREE` | -{{% /show-in %}} \ No newline at end of file +{{% /show-in %}} From 9e53443f3ab4bda63623276aef4d8dbaddbdab13 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Thu, 28 Aug 2025 10:04:27 -0600 Subject: [PATCH 138/179] hotfix: add influxdb 3.4 notification --- data/notifications.yaml | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/data/notifications.yaml b/data/notifications.yaml index 9bdb29622..13766334e 100644 --- a/data/notifications.yaml +++ b/data/notifications.yaml @@ -40,22 +40,23 @@ # - [The plan for InfluxDB 3.0 Open Source](https://influxdata.com/blog/the-plan-for-influxdb-3-0-open-source) # - [InfluxDB 3.0 benchmarks](https://influxdata.com/blog/influxdb-3-0-is-2.5x-45x-faster-compared-to-influxdb-open-source/) -- id: influxdb3.3-explorer-1.1 +- id: influxdb3.4-explorer-1.2 level: note scope: - / - title: New in InfluxDB 3.3 + title: New in InfluxDB 3.4 slug: | - Key enhancements in InfluxDB 3.3 and the InfluxDB 3 Explorer 1.1. + Key enhancements in InfluxDB 3.4 and the InfluxDB 3 Explorer 1.2. - See the Blog Post + See the Blog Post message: | - InfluxDB 3.3 is now available for both Core and Enterprise, which introduces new - managed plugins for the Processing Engine. This makes it easier to address common - time series tasks with just a plugin. InfluxDB 3 Explorer 1.1 is also available, - which includes InfluxDB plugin management and other new features. 
+ InfluxDB 3.4 is now available for both Core and Enterprise, which introduces + offline token generation for use in automated deployments and configurable + license type selection that lets you bypass the interactive license prompt. + InfluxDB 3 Explorer 1.2 is also available, which includes InfluxDB cache + management and other new features. For more information, check out: - - [See the announcement blog post](https://www.influxdata.com/blog/influxdb-3-3/) + - [See the announcement blog post](https://www.influxdata.com/blog/influxdb-3-4/) - [Get Started with InfluxDB 3 Explorer](/influxdb3/explorer/get-started/) From de8f5d2b340cab8d39f75414f52b7241fb13bc72 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Thu, 28 Aug 2025 17:39:57 -0600 Subject: [PATCH 139/179] hotfix(clustered): add missing release artifacts to latest clustered release notes --- .../clustered/reference/release-notes/clustered.md | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/content/influxdb3/clustered/reference/release-notes/clustered.md b/content/influxdb3/clustered/reference/release-notes/clustered.md index e81fca982..3b382e8c2 100644 --- a/content/influxdb3/clustered/reference/release-notes/clustered.md +++ b/content/influxdb3/clustered/reference/release-notes/clustered.md @@ -71,6 +71,11 @@ spec: image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20250814-1819052 ``` +#### Release artifacts +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20250814-1819052/app-instance-schema.json) +- [example-customer.yml](/downloads/clustered-release-artifacts/20250814-1819052/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + ### Bug Fixes - Fix incorrect service address for tokens in Clustered auth sidecar. 
If you were overriding the `AUTHZ_TOKEN_SVC_ADDRESS` environment variable in your `AppInstance`, you can now remove that override. @@ -81,7 +86,7 @@ spec: #### Database Engine -- Update DataFusion to `48`. +- Update DataFusion to v48. - Tweak compaction to reduce write amplification and querier cache churn in some circumstances. --- @@ -212,8 +217,8 @@ spec: - Change the default of `INFLUXDB_IOX_CREATE_CATALOG_BACKUP_INTERVAL` from `1h` to `4h`. - Introduce the following environment variables to help in cases where the - object store is large enough that the the garbage collector cannot keep up - when cleaning obsolete objects: + object store is large enough that the garbage collector cannot keep up when + cleaning obsolete objects: - `INFLUXDB_IOX_GC_PRIMARY_OBJECTSTORE_PARTITIONS` - `INFLUXDB_IOX_GC_SECONDARY_OBJECTSTORE_PARTITIONS` From 93bdc26e6b0dea8e3e075099ccc7b269e23da983 Mon Sep 17 00:00:00 2001 From: Peter Barnett Date: Fri, 29 Aug 2025 09:50:33 -0400 Subject: [PATCH 140/179] chore: updates to 3.4.1 + Explorer page updates --- content/influxdb3/explorer/_index.md | 14 +++----------- .../v3-core-enterprise-release-notes/_index.md | 14 +++++++++++--- data/products.yml | 4 ++-- layouts/partials/article/feedback.html | 4 ++-- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/content/influxdb3/explorer/_index.md b/content/influxdb3/explorer/_index.md index 1fae003e1..026d64c03 100644 --- a/content/influxdb3/explorer/_index.md +++ b/content/influxdb3/explorer/_index.md @@ -11,19 +11,11 @@ weight: 1 InfluxDB 3 Explorer is the standalone web application designed for visualizing, querying, and managing your data stored in InfluxDB 3 Core and Enterprise. Explorer provides an intuitive interface for interacting with your time series data, streamlining database operations and enhancing data insights. 
-> [!Important] -> #### InfluxDB 3 Core or Enterprise v3.1.0 or later required -> -> InfluxDB 3 Explorer is compatible with the following: -> -> - [InfluxDB 3 Core v3.1.0 or later](/influxdb3/core/install/) -> - [InfluxDB 3 Enterprise v3.1.0 or later](/influxdb3/enterprise/install/) - ## Key features Use InfluxDB 3 Explorer for: -- **Database and query management**: Create and manage InfluxDB 3 databases, admin and resource tokens, and configure new InfluxDB 3 Enterprise instances +- **Database management**: Create and manage InfluxDB 3 instances, databases, tokens, plugins, and more - **Data visualization and analysis**: Query data with a built-in visualizer for enhanced data insights - **Data ingestion**: Write new data and setup Telegraf configurations @@ -33,14 +25,14 @@ Run the Docker image to start InfluxDB 3 Explorer: ```sh # Pull the Docker image -docker pull influxdata/influxdb3-ui:{{% latest-patch %}} +docker pull influxdata/influxdb3-ui # Run the Docker container docker run --detach \ --name influxdb3-explorer \ --publish 8888:80 \ --publish 8889:8888 \ - influxdata/influxdb3-ui:{{% latest-patch %}} \ + influxdata/influxdb3-ui \ --mode=admin # Visit http://localhost:8888 in your browser to begin using InfluxDB 3 Explorer diff --git a/content/shared/v3-core-enterprise-release-notes/_index.md b/content/shared/v3-core-enterprise-release-notes/_index.md index 205d41e2f..c13d3c770 100644 --- a/content/shared/v3-core-enterprise-release-notes/_index.md +++ b/content/shared/v3-core-enterprise-release-notes/_index.md @@ -5,6 +5,14 @@ > All updates to Core are automatically included in Enterprise. > The Enterprise sections below only list updates exclusive to Enterprise. 
+## v3.4.1 {date="2025-08-28"} + +### Core + +#### Bug Fixes +- Upgrading from 3.3.0 to 3.4.x no longer causes possible catalog migration issues ([#26756](https://github.com/influxdata/influxdb/pull/26756)) + + ## v3.4.0 {date="2025-08-27"} ### Core @@ -68,7 +76,7 @@ All Core updates are included in Enterprise. Additional Enterprise-specific feat #### Bug Fixes - **Database reliability**: - - Fix URL encoded table name handling failures ([#26586](https://github.com/influxdata/influxdb/pull/26586)) + - Fix url encoded table name handling failures ([#26586](https://github.com/influxdata/influxdb/pull/26586)) - Allow hard deletion of existing soft-deleted schema ([#26574](https://github.com/influxdata/influxdb/pull/26574)) - **Authentication**: Fix AWS S3 API error handling when tokens are expired ([#1013](https://github.com/influxdata/influxdb/pull/1013)) - **Query processing**: Set nanosecond precision as default for V1 query API CSV output ([#26577](https://github.com/influxdata/influxdb/pull/26577)) @@ -172,8 +180,8 @@ All Core updates are included in Enterprise. 
Additional Enterprise-specific feat - **License management improvements**: - New `influxdb3 show license` command to display current license information - **Table-level retention period support**: Add retention period support for individual tables in addition to database-level retention, providing granular data lifecycle management - - New CLI commands: `create table --retention-period` and `update table --retention-period` - - Set or clear table-specific retention policies independent of database settings + - New CLI commands: `create table --retention-period` and `update table --retention-period` + - Set or clear table-specific retention policies independent of database settings - **Compaction improvements**: - Address compactor restart issues for better reliability - Track compacted generation durations in catalog for monitoring diff --git a/data/products.yml b/data/products.yml index 99778363e..6fef7096b 100644 --- a/data/products.yml +++ b/data/products.yml @@ -6,7 +6,7 @@ influxdb3_core: versions: [core] list_order: 2 latest: core - latest_patch: 3.4.0 + latest_patch: 1 placeholder_host: localhost:8181 ai_sample_questions: - How do I install and run InfluxDB 3 Core? @@ -21,7 +21,7 @@ influxdb3_enterprise: versions: [enterprise] list_order: 2 latest: enterprise - latest_patch: 3.4.0 + latest_patch: 3.4.1 placeholder_host: localhost:8181 ai_sample_questions: - How do I install and run InfluxDB 3 Enterprise? diff --git a/layouts/partials/article/feedback.html b/layouts/partials/article/feedback.html index 950cce937..0fcf1dd22 100644 --- a/layouts/partials/article/feedback.html +++ b/layouts/partials/article/feedback.html @@ -55,8 +55,8 @@
  • InfluxDB Discord Server (Preferred)
  • InfluxDB Community Slack
  • {{ else }} -
  • InfluxDB Community Slack (Preferred)
  • -
  • InfluxDB Discord Server
  • +
  • InfluxDB Community Slack (Preferred)
  • +
  • InfluxDB Discord Server
  • {{ end }}
  • InfluxData Community
  • InfluxDB Subreddit
  • From 8c0dbacdfccc8738d8cce1c29dea9281a8f14261 Mon Sep 17 00:00:00 2001 From: Peter Barnett Date: Fri, 29 Aug 2025 10:07:01 -0400 Subject: [PATCH 141/179] fix: 1 -> 3.4.1 --- data/products.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/products.yml b/data/products.yml index 6fef7096b..9d9403904 100644 --- a/data/products.yml +++ b/data/products.yml @@ -6,7 +6,7 @@ influxdb3_core: versions: [core] list_order: 2 latest: core - latest_patch: 1 + latest_patch: 3.4.1 placeholder_host: localhost:8181 ai_sample_questions: - How do I install and run InfluxDB 3 Core? From ebe1ec9750e510e4f3398a964d488e31b27acab1 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Fri, 29 Aug 2025 08:44:59 -0600 Subject: [PATCH 142/179] InfluxDB 3 Explorer 1.2 (#6350) * feat(explorer): WIP cache management docs * feat(explorer): add cache management guides, fix js bugs * Apply suggestions from code review Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- assets/js/custom-timestamps.js | 86 +++++++++++-------- .../core/admin/distinct-value-cache/_index.md | 1 + .../core/admin/distinct-value-cache/create.md | 1 + .../core/admin/distinct-value-cache/delete.md | 1 + .../core/admin/distinct-value-cache/query.md | 1 + .../core/admin/distinct-value-cache/show.md | 2 + .../core/admin/last-value-cache/_index.md | 1 + .../core/admin/last-value-cache/create.md | 1 + .../core/admin/last-value-cache/delete.md | 1 + .../core/admin/last-value-cache/query.md | 1 + .../core/admin/last-value-cache/show.md | 2 + .../admin/distinct-value-cache/_index.md | 1 + .../admin/distinct-value-cache/create.md | 1 + .../admin/distinct-value-cache/delete.md | 1 + .../admin/distinct-value-cache/query.md | 1 + .../admin/distinct-value-cache/show.md | 2 + .../admin/last-value-cache/_index.md | 1 + .../admin/last-value-cache/create.md | 1 + .../admin/last-value-cache/delete.md | 1 + 
.../admin/last-value-cache/query.md | 1 + .../enterprise/admin/last-value-cache/show.md | 2 + .../explorer/manage-caches/_index.md | 21 +++++ .../manage-caches/distinct-value-caches.md | 72 ++++++++++++++++ .../manage-caches/last-value-caches.md | 74 ++++++++++++++++ .../influxdb3-sample-data/sample-data.md | 17 ++-- .../footer/widgets/custom-time-trigger.html | 2 +- 26 files changed, 249 insertions(+), 47 deletions(-) create mode 100644 content/influxdb3/explorer/manage-caches/_index.md create mode 100644 content/influxdb3/explorer/manage-caches/distinct-value-caches.md create mode 100644 content/influxdb3/explorer/manage-caches/last-value-caches.md diff --git a/assets/js/custom-timestamps.js b/assets/js/custom-timestamps.js index 3cde0a6ad..e4411ee25 100644 --- a/assets/js/custom-timestamps.js +++ b/assets/js/custom-timestamps.js @@ -43,7 +43,7 @@ function getStartDate() { //////////////////////////////////////////////////////////////////////////////// -// If the user has not set the startDate cookie, default the startDate to yesterday +// If the user has not set the startDate cookie, default startDate to yesterday var startDate = getStartDate() || yesterday(); // Convert a time value to a Unix timestamp (seconds) @@ -109,6 +109,49 @@ const defaultTimes = [ }, // 1641067200 ]; +// Helper function to update text while preserving code placeholder elements +function updateTextNode(node, times) { + if (node.nodeType === Node.TEXT_NODE) { + let text = node.textContent; + times.forEach(function (x) { + const oldDatePart = datePart(x.rfc3339.replace(/T.*$/, '')); + const newDatePart = datePart(x.rfc3339_new.replace(/T.*$/, '')); + const rfc3339Regex = new RegExp( + `${oldDatePart.year}(.*?)${oldDatePart.month}(.*?)${oldDatePart.day}`, + 'g' + ); + const rfc3339Repl = `${newDatePart.year}$1${newDatePart.month}$2${newDatePart.day}`; + + text = text + .replaceAll(x.unix, x.unix_new) + .replace(rfc3339Regex, rfc3339Repl); + }); + node.textContent = text; + } +} + +// 
Recursively update timestamps in DOM while preserving structure +function updateTimestampsInElement(element, times) { + // Skip code placeholder elements to preserve their functionality + if (element.classList && element.classList.contains('code-placeholder')) { + return; + } + + // Skip elements with data-component attribute (preserves all components) + if (element.hasAttribute && element.hasAttribute('data-component')) { + return; + } + + const childNodes = Array.from(element.childNodes); + childNodes.forEach((child) => { + if (child.nodeType === Node.TEXT_NODE) { + updateTextNode(child, times); + } else if (child.nodeType === Node.ELEMENT_NODE) { + updateTimestampsInElement(child, times); + } + }); +} + function updateTimestamps(newStartDate, seedTimes = defaultTimes) { // Update the times array with replacement times const times = seedTimes.map((x) => { @@ -129,40 +172,14 @@ function updateTimestamps(newStartDate, seedTimes = defaultTimes) { '.custom-timestamps table', ]; + // Update block elements while preserving DOM structure $(updateBlockElWhitelist.join()).each(function () { - var wrapper = $(this)[0]; - - times.forEach(function (x) { - const oldDatePart = datePart(x.rfc3339.replace(/T.*$/, '')); - const newDatePart = datePart(x.rfc3339_new.replace(/T.*$/, '')); - const rfc3339Regex = new RegExp( - `${oldDatePart.year}(.*?)${oldDatePart.month}(.*?)${oldDatePart.day}`, - 'g' - ); - const rfc3339Repl = `${newDatePart.year}$1${newDatePart.month}$2${newDatePart.day}`; - - wrapper.innerHTML = wrapper.innerHTML - .replaceAll(x.unix, x.unix_new) - .replaceAll(rfc3339Regex, rfc3339Repl); - }); + updateTimestampsInElement(this, times); }); + // Update span elements $('span.custom-timestamps').each(function () { - var wrapper = $(this)[0]; - - times.forEach(function (x) { - const oldDatePart = datePart(x.rfc3339.replace(/T.*$/, '')); - const newDatePart = datePart(x.rfc3339_new.replace(/T.*$/, '')); - const rfc3339Regex = new RegExp( - 
`${oldDatePart.year}-${oldDatePart.month}-${oldDatePart.day}`, - 'g' - ); - const rfc3339Repl = `${newDatePart.year}-${newDatePart.month}-${newDatePart.day}`; - - wrapper.innerHTML = wrapper.innerHTML - .replaceAll(x.unix, x.unix_new) - .replaceAll(rfc3339Regex, rfc3339Repl); - }); + updateTimestampsInElement(this, times); }); // Create a new seed times array with new start time for next change @@ -196,10 +213,11 @@ function CustomTimeTrigger({ component }) { prevArrow: '<', }); - //////////////////////////////////// ACTIONS /////////////////////////////////// + /////////////////////////////////// ACTIONS ////////////////////////////////// - // Initial update to yesterdays date ON PAGE LOAD - // Conditionally set the start date cookie it startDate is equal to the default value + // Initial update to yesterday's date ON PAGE LOAD + // Conditionally set the start date cookie if startDate is equal to the + // default value let updatedTimes = updateTimestamps(startDate, defaultTimes); if (startDate === yesterday()) { diff --git a/content/influxdb3/core/admin/distinct-value-cache/_index.md b/content/influxdb3/core/admin/distinct-value-cache/_index.md index 5e9004280..7d21ca602 100644 --- a/content/influxdb3/core/admin/distinct-value-cache/_index.md +++ b/content/influxdb3/core/admin/distinct-value-cache/_index.md @@ -12,6 +12,7 @@ weight: 105 influxdb3/core/tags: [cache] related: - /influxdb3/core/reference/sql/functions/cache/#distinct_cache, distinct_cache SQL function + - /influxdb3/explorer/manage-caches/distinct-value-caches/ source: /shared/influxdb3-admin/distinct-value-cache/_index.md --- diff --git a/content/influxdb3/core/admin/distinct-value-cache/create.md b/content/influxdb3/core/admin/distinct-value-cache/create.md index cda0d9ccd..038dbf0c2 100644 --- a/content/influxdb3/core/admin/distinct-value-cache/create.md +++ b/content/influxdb3/core/admin/distinct-value-cache/create.md @@ -10,6 +10,7 @@ weight: 201 influxdb3/core/tags: [cache] related: - 
/influxdb3/core/reference/cli/influxdb3/create/distinct_cache/ + - /influxdb3/explorer/manage-caches/distinct-value-caches/ list_code_example: | {{% show-in "core" %}} diff --git a/content/influxdb3/core/admin/distinct-value-cache/delete.md b/content/influxdb3/core/admin/distinct-value-cache/delete.md index ffbfb1374..02aeb00e0 100644 --- a/content/influxdb3/core/admin/distinct-value-cache/delete.md +++ b/content/influxdb3/core/admin/distinct-value-cache/delete.md @@ -20,6 +20,7 @@ list_code_example: | ``` related: - /influxdb3/core/reference/cli/influxdb3/delete/distinct_cache/ + - /influxdb3/explorer/manage-caches/distinct-value-caches/ source: /shared/influxdb3-admin/distinct-value-cache/delete.md --- diff --git a/content/influxdb3/core/admin/distinct-value-cache/query.md b/content/influxdb3/core/admin/distinct-value-cache/query.md index ea499869e..8cf6f21a4 100644 --- a/content/influxdb3/core/admin/distinct-value-cache/query.md +++ b/content/influxdb3/core/admin/distinct-value-cache/query.md @@ -19,6 +19,7 @@ list_code_example: | > InfluxQL does not support the `distinct_cache()` function. 
related: - /influxdb3/core/reference/sql/functions/cache/#distinct_cache, distinct_cache SQL function + - /influxdb3/explorer/manage-caches/distinct-value-caches/ source: /shared/influxdb3-admin/distinct-value-cache/query.md --- diff --git a/content/influxdb3/core/admin/distinct-value-cache/show.md b/content/influxdb3/core/admin/distinct-value-cache/show.md index 5cd73312a..a596d2464 100644 --- a/content/influxdb3/core/admin/distinct-value-cache/show.md +++ b/content/influxdb3/core/admin/distinct-value-cache/show.md @@ -18,6 +18,8 @@ list_code_example: | --token 00xoXX0xXXx0000XxxxXx0Xx0xx0 \ table distinct_caches ``` +related: + - /influxdb3/explorer/manage-caches/distinct-value-caches/ source: /shared/influxdb3-admin/distinct-value-cache/show.md --- diff --git a/content/influxdb3/core/admin/last-value-cache/_index.md b/content/influxdb3/core/admin/last-value-cache/_index.md index 2560c1ead..e0b68978d 100644 --- a/content/influxdb3/core/admin/last-value-cache/_index.md +++ b/content/influxdb3/core/admin/last-value-cache/_index.md @@ -13,6 +13,7 @@ weight: 104 influxdb3/core/tags: [cache] related: - /influxdb3/core/reference/sql/functions/cache/#last_cache, last_cache SQL function + - /influxdb3/explorer/manage-caches/last-value-caches/ source: /shared/influxdb3-admin/last-value-cache/_index.md --- diff --git a/content/influxdb3/core/admin/last-value-cache/create.md b/content/influxdb3/core/admin/last-value-cache/create.md index 6a6d98b4a..2cca8a876 100644 --- a/content/influxdb3/core/admin/last-value-cache/create.md +++ b/content/influxdb3/core/admin/last-value-cache/create.md @@ -10,6 +10,7 @@ weight: 201 influxdb3/core/tags: [cache] related: - /influxdb3/core/reference/cli/influxdb3/create/last_cache/ + - /influxdb3/explorer/manage-caches/last-value-caches/ list_code_example: | {{% show-in "core" %}} diff --git a/content/influxdb3/core/admin/last-value-cache/delete.md b/content/influxdb3/core/admin/last-value-cache/delete.md index db0060262..df0032d47 100644 --- 
a/content/influxdb3/core/admin/last-value-cache/delete.md +++ b/content/influxdb3/core/admin/last-value-cache/delete.md @@ -20,6 +20,7 @@ list_code_example: | ``` related: - /influxdb3/core/reference/cli/influxdb3/delete/last_cache/ + - /influxdb3/explorer/manage-caches/last-value-caches/ source: /shared/influxdb3-admin/last-value-cache/delete.md --- diff --git a/content/influxdb3/core/admin/last-value-cache/query.md b/content/influxdb3/core/admin/last-value-cache/query.md index a634d3600..5d5e1ae4c 100644 --- a/content/influxdb3/core/admin/last-value-cache/query.md +++ b/content/influxdb3/core/admin/last-value-cache/query.md @@ -19,6 +19,7 @@ list_code_example: | > InfluxQL does not support the `last_cache()` function. related: - /influxdb3/core/reference/sql/functions/cache/#last_cache, last_cache SQL function + - /influxdb3/explorer/manage-caches/last-value-caches/ source: /shared/influxdb3-admin/last-value-cache/query.md --- diff --git a/content/influxdb3/core/admin/last-value-cache/show.md b/content/influxdb3/core/admin/last-value-cache/show.md index 9d66333dd..96d484656 100644 --- a/content/influxdb3/core/admin/last-value-cache/show.md +++ b/content/influxdb3/core/admin/last-value-cache/show.md @@ -18,6 +18,8 @@ list_code_example: | --token 00xoXX0xXXx0000XxxxXx0Xx0xx0 \ table last_caches ``` +related: + - /influxdb3/explorer/manage-caches/last-value-caches/ source: /shared/influxdb3-admin/last-value-cache/show.md --- diff --git a/content/influxdb3/enterprise/admin/distinct-value-cache/_index.md b/content/influxdb3/enterprise/admin/distinct-value-cache/_index.md index 78d4e1762..8e190a678 100644 --- a/content/influxdb3/enterprise/admin/distinct-value-cache/_index.md +++ b/content/influxdb3/enterprise/admin/distinct-value-cache/_index.md @@ -12,6 +12,7 @@ weight: 106 influxdb3/enterprise/tags: [cache] related: - /influxdb3/enterprise/reference/sql/functions/cache/#distinct_cache, distinct_cache SQL function + - 
/influxdb3/explorer/manage-caches/distinct-value-caches/ source: /shared/influxdb3-admin/distinct-value-cache/_index.md --- diff --git a/content/influxdb3/enterprise/admin/distinct-value-cache/create.md b/content/influxdb3/enterprise/admin/distinct-value-cache/create.md index 51e1ae109..d52eadc46 100644 --- a/content/influxdb3/enterprise/admin/distinct-value-cache/create.md +++ b/content/influxdb3/enterprise/admin/distinct-value-cache/create.md @@ -10,6 +10,7 @@ weight: 201 influxdb3/enterprise/tags: [cache] related: - /influxdb3/enterprise/reference/cli/influxdb3/create/distinct_cache/ + - /influxdb3/explorer/manage-caches/distinct-value-caches/ list_code_example: | {{% show-in "core" %}} diff --git a/content/influxdb3/enterprise/admin/distinct-value-cache/delete.md b/content/influxdb3/enterprise/admin/distinct-value-cache/delete.md index 4ad6f867d..34902649c 100644 --- a/content/influxdb3/enterprise/admin/distinct-value-cache/delete.md +++ b/content/influxdb3/enterprise/admin/distinct-value-cache/delete.md @@ -20,6 +20,7 @@ list_code_example: | ``` related: - /influxdb3/enterprise/reference/cli/influxdb3/delete/distinct_cache/ + - /influxdb3/explorer/manage-caches/distinct-value-caches/ source: /shared/influxdb3-admin/distinct-value-cache/delete.md --- diff --git a/content/influxdb3/enterprise/admin/distinct-value-cache/query.md b/content/influxdb3/enterprise/admin/distinct-value-cache/query.md index aaee0c6f5..fe39c4d67 100644 --- a/content/influxdb3/enterprise/admin/distinct-value-cache/query.md +++ b/content/influxdb3/enterprise/admin/distinct-value-cache/query.md @@ -19,6 +19,7 @@ list_code_example: | > InfluxQL does not support the `distinct_cache()` function. 
related: - /influxdb3/enterprise/reference/sql/functions/cache/#distinct_cache, distinct_cache SQL function + - /influxdb3/explorer/manage-caches/distinct-value-caches/ source: /shared/influxdb3-admin/distinct-value-cache/query.md --- diff --git a/content/influxdb3/enterprise/admin/distinct-value-cache/show.md b/content/influxdb3/enterprise/admin/distinct-value-cache/show.md index 53fa3a9a3..f3100655e 100644 --- a/content/influxdb3/enterprise/admin/distinct-value-cache/show.md +++ b/content/influxdb3/enterprise/admin/distinct-value-cache/show.md @@ -18,6 +18,8 @@ list_code_example: | --token 00xoXX0xXXx0000XxxxXx0Xx0xx0 \ table distinct_caches ``` +related: + - /influxdb3/explorer/manage-caches/distinct-value-caches/ source: /shared/influxdb3-admin/distinct-value-cache/show.md --- diff --git a/content/influxdb3/enterprise/admin/last-value-cache/_index.md b/content/influxdb3/enterprise/admin/last-value-cache/_index.md index c8221c224..570432067 100644 --- a/content/influxdb3/enterprise/admin/last-value-cache/_index.md +++ b/content/influxdb3/enterprise/admin/last-value-cache/_index.md @@ -13,6 +13,7 @@ weight: 105 influxdb3/enterprise/tags: [cache] related: - /influxdb3/enterprise/reference/sql/functions/cache/#last_cache, last_cache SQL function + - /influxdb3/explorer/manage-caches/last-value-caches/ source: /shared/influxdb3-admin/last-value-cache/_index.md --- diff --git a/content/influxdb3/enterprise/admin/last-value-cache/create.md b/content/influxdb3/enterprise/admin/last-value-cache/create.md index 66940789d..2c9fb6956 100644 --- a/content/influxdb3/enterprise/admin/last-value-cache/create.md +++ b/content/influxdb3/enterprise/admin/last-value-cache/create.md @@ -10,6 +10,7 @@ weight: 201 influxdb3/enterprise/tags: [cache] related: - /influxdb3/enterprise/reference/cli/influxdb3/create/last_cache/ + - /influxdb3/explorer/manage-caches/last-value-caches/ list_code_example: | {{% show-in "core" %}} diff --git 
a/content/influxdb3/enterprise/admin/last-value-cache/delete.md b/content/influxdb3/enterprise/admin/last-value-cache/delete.md index 9361bf9b0..c515c4027 100644 --- a/content/influxdb3/enterprise/admin/last-value-cache/delete.md +++ b/content/influxdb3/enterprise/admin/last-value-cache/delete.md @@ -20,6 +20,7 @@ list_code_example: | ``` related: - /influxdb3/enterprise/reference/cli/influxdb3/delete/last_cache/ + - /influxdb3/explorer/manage-caches/last-value-caches/ source: /shared/influxdb3-admin/last-value-cache/delete.md --- diff --git a/content/influxdb3/enterprise/admin/last-value-cache/query.md b/content/influxdb3/enterprise/admin/last-value-cache/query.md index e94e85e97..171145439 100644 --- a/content/influxdb3/enterprise/admin/last-value-cache/query.md +++ b/content/influxdb3/enterprise/admin/last-value-cache/query.md @@ -19,6 +19,7 @@ list_code_example: | > InfluxQL does not support the `last_cache()` function. related: - /influxdb3/enterprise/reference/sql/functions/cache/#last_cache, last_cache SQL function + - /influxdb3/explorer/manage-caches/last-value-caches/ source: /shared/influxdb3-admin/last-value-cache/query.md --- diff --git a/content/influxdb3/enterprise/admin/last-value-cache/show.md b/content/influxdb3/enterprise/admin/last-value-cache/show.md index 6e981ed38..231fa6574 100644 --- a/content/influxdb3/enterprise/admin/last-value-cache/show.md +++ b/content/influxdb3/enterprise/admin/last-value-cache/show.md @@ -18,6 +18,8 @@ list_code_example: | --token 00xoXX0xXXx0000XxxxXx0Xx0xx0 \ table last_caches ``` +related: + - /influxdb3/explorer/manage-caches/last-value-caches/ source: /shared/influxdb3-admin/last-value-cache/show.md --- diff --git a/content/influxdb3/explorer/manage-caches/_index.md b/content/influxdb3/explorer/manage-caches/_index.md new file mode 100644 index 000000000..6ae47b24a --- /dev/null +++ b/content/influxdb3/explorer/manage-caches/_index.md @@ -0,0 +1,21 @@ +--- +title: Manage caches with InfluxDB 3 Explorer 
+seotitle: Manage InfluxDB caches with InfluxDB 3 Explorer +description: > + Use InfluxDB 3 Explorer to manage Last Value Caches and Distinct Value Caches + in an InfluxDB 3 instance or cluster. +menu: + influxdb3_explorer: + name: Manage caches +weight: 6 +related: + - /influxdb3/enterprise/admin/last-value-cache/, Manage the Last Value Cache in InfluxDB 3 Enterprise + - /influxdb3/enterprise/admin/distinct-value-cache/, Manage the Distinct Value Cache in InfluxDB 3 Enterprise + - /influxdb3/core/admin/last-value-cache/, Manage the Last Value Cache in InfluxDB 3 Core + - /influxdb3/core/admin/distinct-value-cache/, Manage the Distinct Value Cache in InfluxDB 3 Core +--- + +Use InfluxDB 3 Explorer to manage Last Value Caches and Distinct Value Caches +in an InfluxDB 3 instance or cluster. + +{{< children >}} diff --git a/content/influxdb3/explorer/manage-caches/distinct-value-caches.md b/content/influxdb3/explorer/manage-caches/distinct-value-caches.md new file mode 100644 index 000000000..a887f1483 --- /dev/null +++ b/content/influxdb3/explorer/manage-caches/distinct-value-caches.md @@ -0,0 +1,72 @@ +--- +title: Manage Distinct Value Caches with InfluxDB 3 Explorer +list_title: Manage Distinct Value Caches +description: > + Use InfluxDB 3 Explorer to manage Distinct Value Caches in an InfluxDB 3 + instance or cluster. +menu: + influxdb3_explorer: + name: Distinct Value Caches + parent: Manage caches +weight: 102 +related: + - /influxdb3/enterprise/admin/distinct-value-cache/, Manage the Distinct Value Cache in InfluxDB 3 Enterprise + - /influxdb3/core/admin/distinct-value-cache/, Manage the Distinct Value Cache in InfluxDB 3 Core +--- + +Use InfluxDB 3 Explorer to manage Distinct Value Caches (DVCs) in an InfluxDB 3 +instance or cluster. To navigate to the **Distinct Value Cache management page**: + +1. In the left navigation bar, select **Configure** > **Caches**. +2. Select the **Distinct Value Caches** tab. 
+
+- [View Distinct Value Caches](#view-distinct-value-caches)
+- [Create a Distinct Value Cache](#create-a-distinct-value-cache)
+- [Query a Distinct Value Cache](#query-a-distinct-value-cache)
+- [Delete a Distinct Value Cache](#delete-a-distinct-value-cache)
+
+## View Distinct Value Caches
+
+To view DVCs associated with a database, navigate to the
+**Distinct Value Cache management page** and select the database from the
+**Select Database** dropdown menu. The page lists all DVCs associated with the
+selected database.
+
+## Create a Distinct Value Cache
+
+On the **Distinct Value Cache management page**:
+
+1. Click **+ Create Cache**.
+2. Provide the following:
+
+   - **Cache name**: A unique name for the cache.
+   - **Database**: The database the cache is associated with.
+   - **Table**: The target table for the cache. As data is written to the table,
+     it populates the cache.
+     _You must select a database before you can select a table._
+   - **Column names**: Select columns to cache distinct values from.
+     These are typically InfluxDB tags, but you can also use fields.
+   - **Max cardinality**: Specify the maximum number of distinct value
+     combinations to cache. Once this limit is exceeded, InfluxDB drops the
+     oldest cached distinct values.
+   - **Max Age**: Specify the maximum age of cached values as a duration in
+     [humantime](https://docs.rs/humantime/latest/humantime/fn.parse_duration.html)
+     form. The default is `24h`.
+
+   > [!Note]
+   > Higher cardinality (more distinct values) in a DVC increases memory usage.
+
+3. Click **Create**.
+
+## Query a Distinct Value Cache
+
+Use the `distinct_cache` SQL function to query a DVC. For more information, see
+[Query a Distinct Value Cache](/influxdb3/enterprise/admin/distinct-value-cache/query/).
+
+## Delete a Distinct Value Cache
+
+On the **Distinct Value Cache management page**:
+
+1. Select the database associated with the cache you want to delete from the
+   **Select Database** dropdown menu.
+2. 
In the **Active Caches** table, click the {{% icon "trash" %}} icon next to + the cache you want to delete. \ No newline at end of file diff --git a/content/influxdb3/explorer/manage-caches/last-value-caches.md b/content/influxdb3/explorer/manage-caches/last-value-caches.md new file mode 100644 index 000000000..89db6c19c --- /dev/null +++ b/content/influxdb3/explorer/manage-caches/last-value-caches.md @@ -0,0 +1,74 @@ +--- +title: Manage Last Value Caches with InfluxDB 3 Explorer +list_title: Manage Last Value Caches +description: > + Use InfluxDB 3 Explorer to manage Last Value Caches in an InfluxDB 3 instance + or cluster. +menu: + influxdb3_explorer: + name: Last Value Caches + parent: Manage caches +weight: 101 +related: + - /influxdb3/enterprise/admin/last-value-cache/, Manage the Last Value Cache in InfluxDB 3 Enterprise + - /influxdb3/core/admin/last-value-cache/, Manage the Last Value Cache in InfluxDB 3 Core +--- + +Use InfluxDB 3 Explorer to manage Last Value Caches (LVCs) in an InfluxDB 3 +instance or cluster. To navigate to the **Last Value Cache management page**, in +the left navigation bar, select **Configure** > **Caches**. + +- [View Last Value Caches](#view-last-value-caches) +- [Create a Last Value Cache](#create-a-last-value-cache) +- [Query a Last Value Cache](#query-a-last-value-cache) +- [Delete a Last Value Cache](#delete-a-last-value-cache) + +## View Last Value Caches + +To view LVCs associated with a database, navigate to the +**Last Value Cache management page** and select the database from the +**Select Database** dropdown menu. The page lists all LVCs associated with the +selected database. + +## Create a Last Value Cache + +On the **Last Value Cache management page**: + +1. Click **+ Create Cache**. +2. Provide the following: + + - **Cache name**: A unique name for the cache. + - **Database**: The database the cache is associated with. + - **Table**: The target table for the cache. 
As data is written to the table, + it populates the cache. + _You must select a database before you can select a table._ + - **Key columns**: Select string-typed column columns to use as the primary + key for the cache. These are typically InfluxDB tags, but you can also use + fields. Each unique combination of key column values represents a distinct + series. LVCs cache N (count) values per series. + - **Value columns**: Select columns to cache values for. These are + typically InfluxDB fields, but can also be tags. If no columns are + selected as value columns, all non-key columns are used as value columns + (excluding `time`). + - **Count**: Specify the number of recently written values to cache per series. + + > [!Note] + > Higher cardinality (more unique series) in an LVC increases memory usage. + > Be selective about key columns and the number of values to cache per + > series to optimize performance. + +3. Click **Create**. + +## Query a Last Value Cache + +Use the `last_cache` SQL function to query an LVC. For more information, see +[Query a Last Value Cache](/influxdb3/enterprise/admin/last-value-cache/query/). + +## Delete a Last Value Cache + +On the **Last Value Cache management page**: + +1. Select the database associated with the cache you want to delete from the + **Select Database** dropdown menu. +2. In the **Active Caches** table, click the {{% icon "trash" %}} icon next to + the cache you want to delete. diff --git a/content/shared/influxdb3-sample-data/sample-data.md b/content/shared/influxdb3-sample-data/sample-data.md index 213806595..04530e922 100644 --- a/content/shared/influxdb3-sample-data/sample-data.md +++ b/content/shared/influxdb3-sample-data/sample-data.md @@ -53,8 +53,8 @@ home sensor sample data to {{< product-name >}}. 
{{% code-tab-content %}} {{% influxdb/custom-timestamps %}} -{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME" %}} -```sh + +```bash { placeholders="AUTH_TOKEN|DATABASE_NAME" } influxdb3 write \ --token AUTH_TOKEN \ --database DATABASE_NAME \ @@ -85,15 +85,13 @@ home,room=Kitchen temp=23.1,hum=36.6,co=22i 1641063600 home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1641067200 home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200' ``` -{{% /code-placeholders %}} {{% /influxdb/custom-timestamps %}} {{% /code-tab-content %}} {{% code-tab-content %}} {{% influxdb/custom-timestamps %}} -{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME" %}} -```sh +```bash { placeholders="AUTH_TOKEN|DATABASE_NAME" } curl -v "http://localhost:8181/api/v3/write_lp?db=sensors&precision=auto&accept_partial=true" \ --data-raw "home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1735545600 home,room=Kitchen temp=21.0,hum=35.9,co=0i 1735545600 @@ -122,15 +120,13 @@ home,room=Kitchen temp=23.1,hum=36.6,co=22i 1735585200 home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1735588800 home,room=Kitchen temp=22.7,hum=36.5,co=26i 1735588800" ``` -{{% /code-placeholders %}} {{% /influxdb/custom-timestamps %}} {{% /code-tab-content %}} {{% code-tab-content %}} {{% influxdb/custom-timestamps %}} -{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME" %}} -```sh +```bash { placeholders="AUTH_TOKEN|DATABASE_NAME" } curl --request POST \ http://{{< influxdb/host >}}/api/v2/write?bucket=DATABASE_NAME&precision=s \ --header "Authorization: Bearer AUTH_TOKEN" \ @@ -165,15 +161,13 @@ home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1641067200 home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200 " ``` -{{% /code-placeholders %}} {{% /influxdb/custom-timestamps %}} {{% /code-tab-content %}} {{% code-tab-content %}} {{% influxdb/custom-timestamps %}} -{{% code-placeholders "AUTH_TOKEN|DATABASE_NAME" %}} -```sh +```bash { placeholders="AUTH_TOKEN|DATABASE_NAME" } curl --request POST \ http://{{< influxdb/host 
>}}/write?db=DATABASE_NAME&precision=s \ --header "Authorization: Bearer AUTH_TOKEN" \ @@ -207,7 +201,6 @@ home,room=Living\ Room temp=22.2,hum=36.4,co=17i 1641067200 home,room=Kitchen temp=22.7,hum=36.5,co=26i 1641067200 " ``` -{{% /code-placeholders %}} {{% /influxdb/custom-timestamps %}} {{% /code-tab-content %}} diff --git a/layouts/partials/footer/widgets/custom-time-trigger.html b/layouts/partials/footer/widgets/custom-time-trigger.html index 1e10d96f2..378065ef6 100644 --- a/layouts/partials/footer/widgets/custom-time-trigger.html +++ b/layouts/partials/footer/widgets/custom-time-trigger.html @@ -1,6 +1,6 @@ {{ if or (.Page.HasShortcode "influxdb/custom-timestamps") (.Page.HasShortcode "influxdb/custom-timestamps-span") }} From 5af8e57a1b3b851b4e1d8f5f1cdb50471efdf70e Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 2 Sep 2025 11:33:02 -0500 Subject: [PATCH 143/179] hotfix-6352-formatting --- content/shared/v3-core-enterprise-release-notes/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/shared/v3-core-enterprise-release-notes/_index.md b/content/shared/v3-core-enterprise-release-notes/_index.md index c13d3c770..1b1f1060e 100644 --- a/content/shared/v3-core-enterprise-release-notes/_index.md +++ b/content/shared/v3-core-enterprise-release-notes/_index.md @@ -76,7 +76,7 @@ All Core updates are included in Enterprise. 
Additional Enterprise-specific feat #### Bug Fixes - **Database reliability**: - - Fix url encoded table name handling failures ([#26586](https://github.com/influxdata/influxdb/pull/26586)) + - Fix URL-encoded table name handling failures ([#26586](https://github.com/influxdata/influxdb/pull/26586)) - Allow hard deletion of existing soft-deleted schema ([#26574](https://github.com/influxdata/influxdb/pull/26574)) - **Authentication**: Fix AWS S3 API error handling when tokens are expired ([#1013](https://github.com/influxdata/influxdb/pull/1013)) - **Query processing**: Set nanosecond precision as default for V1 query API CSV output ([#26577](https://github.com/influxdata/influxdb/pull/26577)) From ef1c598ad21cba2577596dc358a29aade5d93e00 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 2 Sep 2025 14:06:57 -0500 Subject: [PATCH 144/179] chore: yarn upgrade --- .husky/_/pre-commit | 16 +- .husky/_/pre-push | 16 +- .husky/_/prepare-commit-msg | 16 +- yarn.lock | 490 ++++++++++++++++++------------------ 4 files changed, 287 insertions(+), 251 deletions(-) diff --git a/.husky/_/pre-commit b/.husky/_/pre-commit index 4855f6124..710b28856 100755 --- a/.husky/_/pre-commit +++ b/.husky/_/pre-commit @@ -33,6 +33,9 @@ call_lefthook() then "$dir/node_modules/lefthook/bin/index.js" "$@" + elif go tool lefthook -h >/dev/null 2>&1 + then + go tool lefthook "$@" elif bundle exec lefthook -h >/dev/null 2>&1 then bundle exec lefthook "$@" @@ -42,12 +45,21 @@ call_lefthook() elif pnpm lefthook -h >/dev/null 2>&1 then pnpm lefthook "$@" - elif swift package plugin lefthook >/dev/null 2>&1 + elif swift package lefthook >/dev/null 2>&1 then - swift package --disable-sandbox plugin lefthook "$@" + swift package --build-path .build/lefthook --disable-sandbox lefthook "$@" elif command -v mint >/dev/null 2>&1 then mint run csjones/lefthook-plugin "$@" + elif uv run lefthook -h >/dev/null 2>&1 + then + uv run lefthook "$@" + elif mise exec -- lefthook -h >/dev/null 2>&1 + then + 
mise exec -- lefthook "$@" + elif devbox run lefthook -h >/dev/null 2>&1 + then + devbox run lefthook "$@" else echo "Can't find lefthook in PATH" fi diff --git a/.husky/_/pre-push b/.husky/_/pre-push index a0d96ef93..17b532e00 100755 --- a/.husky/_/pre-push +++ b/.husky/_/pre-push @@ -33,6 +33,9 @@ call_lefthook() then "$dir/node_modules/lefthook/bin/index.js" "$@" + elif go tool lefthook -h >/dev/null 2>&1 + then + go tool lefthook "$@" elif bundle exec lefthook -h >/dev/null 2>&1 then bundle exec lefthook "$@" @@ -42,12 +45,21 @@ call_lefthook() elif pnpm lefthook -h >/dev/null 2>&1 then pnpm lefthook "$@" - elif swift package plugin lefthook >/dev/null 2>&1 + elif swift package lefthook >/dev/null 2>&1 then - swift package --disable-sandbox plugin lefthook "$@" + swift package --build-path .build/lefthook --disable-sandbox lefthook "$@" elif command -v mint >/dev/null 2>&1 then mint run csjones/lefthook-plugin "$@" + elif uv run lefthook -h >/dev/null 2>&1 + then + uv run lefthook "$@" + elif mise exec -- lefthook -h >/dev/null 2>&1 + then + mise exec -- lefthook "$@" + elif devbox run lefthook -h >/dev/null 2>&1 + then + devbox run lefthook "$@" else echo "Can't find lefthook in PATH" fi diff --git a/.husky/_/prepare-commit-msg b/.husky/_/prepare-commit-msg index 2655902bc..6efab23a3 100755 --- a/.husky/_/prepare-commit-msg +++ b/.husky/_/prepare-commit-msg @@ -33,6 +33,9 @@ call_lefthook() then "$dir/node_modules/lefthook/bin/index.js" "$@" + elif go tool lefthook -h >/dev/null 2>&1 + then + go tool lefthook "$@" elif bundle exec lefthook -h >/dev/null 2>&1 then bundle exec lefthook "$@" @@ -42,12 +45,21 @@ call_lefthook() elif pnpm lefthook -h >/dev/null 2>&1 then pnpm lefthook "$@" - elif swift package plugin lefthook >/dev/null 2>&1 + elif swift package lefthook >/dev/null 2>&1 then - swift package --disable-sandbox plugin lefthook "$@" + swift package --build-path .build/lefthook --disable-sandbox lefthook "$@" elif command -v mint >/dev/null 2>&1 then 
mint run csjones/lefthook-plugin "$@" + elif uv run lefthook -h >/dev/null 2>&1 + then + uv run lefthook "$@" + elif mise exec -- lefthook -h >/dev/null 2>&1 + then + mise exec -- lefthook "$@" + elif devbox run lefthook -h >/dev/null 2>&1 + then + devbox run lefthook "$@" else echo "Can't find lefthook in PATH" fi diff --git a/yarn.lock b/yarn.lock index a17503f82..427259e77 100644 --- a/yarn.lock +++ b/yarn.lock @@ -144,15 +144,15 @@ debug "^4.3.1" minimatch "^3.1.2" -"@eslint/config-helpers@^0.3.0": - version "0.3.0" - resolved "https://registry.yarnpkg.com/@eslint/config-helpers/-/config-helpers-0.3.0.tgz#3e09a90dfb87e0005c7694791e58e97077271286" - integrity sha512-ViuymvFmcJi04qdZeDc2whTHryouGcDlaxPqarTD0ZE10ISpxGUVZGZDx4w01upyIynL3iu6IXH2bS1NhclQMw== +"@eslint/config-helpers@^0.3.1": + version "0.3.1" + resolved "https://registry.yarnpkg.com/@eslint/config-helpers/-/config-helpers-0.3.1.tgz#d316e47905bd0a1a931fa50e669b9af4104d1617" + integrity sha512-xR93k9WhrDYpXHORXpxVL5oHj3Era7wo6k/Wd8/IsQNnZUTzkGS29lyn3nAT05v6ltUuTFVCCYDEGfy2Or/sPA== -"@eslint/core@^0.15.0", "@eslint/core@^0.15.1": - version "0.15.1" - resolved "https://registry.yarnpkg.com/@eslint/core/-/core-0.15.1.tgz#d530d44209cbfe2f82ef86d6ba08760196dd3b60" - integrity sha512-bkOp+iumZCCbt1K1CmWf0R9pM5yKpDv+ZXtvSyQpudrI9kuFLp+bM2WOPXImuD/ceQuaa8f5pj93Y7zyECIGNA== +"@eslint/core@^0.15.2": + version "0.15.2" + resolved "https://registry.yarnpkg.com/@eslint/core/-/core-0.15.2.tgz#59386327d7862cc3603ebc7c78159d2dcc4a868f" + integrity sha512-78Md3/Rrxh83gCxoUc0EiciuOHsIITzLy53m3d9UyiW8y9Dj2D29FeETqyKA+BRK76tnTp6RXWb3pCay8Oyomg== dependencies: "@types/json-schema" "^7.0.15" @@ -171,28 +171,28 @@ minimatch "^3.1.2" strip-json-comments "^3.1.1" -"@eslint/js@9.32.0", "@eslint/js@^9.18.0": - version "9.32.0" - resolved "https://registry.yarnpkg.com/@eslint/js/-/js-9.32.0.tgz#a02916f58bd587ea276876cb051b579a3d75d091" - integrity 
sha512-BBpRFZK3eX6uMLKz8WxFOBIFFcGFJ/g8XuwjTHCqHROSIsopI+ddn/d5Cfh36+7+e5edVS8dbSHnBNhrLEX0zg== +"@eslint/js@9.34.0", "@eslint/js@^9.18.0": + version "9.34.0" + resolved "https://registry.yarnpkg.com/@eslint/js/-/js-9.34.0.tgz#fc423168b9d10e08dea9088d083788ec6442996b" + integrity sha512-EoyvqQnBNsV1CWaEJ559rxXL4c8V92gxirbawSmVUOWXlsRxxQXl6LmCpdUblgxgSkDIqKnhzba2SjRTI/A5Rw== "@eslint/object-schema@^2.1.6": version "2.1.6" resolved "https://registry.yarnpkg.com/@eslint/object-schema/-/object-schema-2.1.6.tgz#58369ab5b5b3ca117880c0f6c0b0f32f6950f24f" integrity sha512-RBMg5FRL0I0gs51M/guSAj5/e14VQ4tpZnQNWwuDT66P14I43ItmPfIZRhO9fUVIPOAQXU47atlywZ/czoqFPA== -"@eslint/plugin-kit@^0.3.4": - version "0.3.4" - resolved "https://registry.yarnpkg.com/@eslint/plugin-kit/-/plugin-kit-0.3.4.tgz#c6b9f165e94bf4d9fdd493f1c028a94aaf5fc1cc" - integrity sha512-Ul5l+lHEcw3L5+k8POx6r74mxEYKG5kOb6Xpy2gCRW6zweT6TEhAf8vhxGgjhqrd/VO/Dirhsb+1hNpD1ue9hw== +"@eslint/plugin-kit@^0.3.5": + version "0.3.5" + resolved "https://registry.yarnpkg.com/@eslint/plugin-kit/-/plugin-kit-0.3.5.tgz#fd8764f0ee79c8ddab4da65460c641cefee017c5" + integrity sha512-Z5kJ+wU3oA7MMIqVR9tyZRtjYPr4OC004Q4Rw7pgOKUOKkJfZ3O24nz3WYfGRpMDNmcOi3TwQOmgm7B7Tpii0w== dependencies: - "@eslint/core" "^0.15.1" + "@eslint/core" "^0.15.2" levn "^0.4.1" "@evilmartians/lefthook@^1.7.1": - version "1.12.2" - resolved "https://registry.yarnpkg.com/@evilmartians/lefthook/-/lefthook-1.12.2.tgz#f03b449bc7dfdc8d4ceae1b3cafc0b39c9b61edc" - integrity sha512-PQ7ZE08JiRNWydJpDeVijg8GjoTFiXQTy5/mVXf5U7epNZWYUEfnwjfXFa+HvsegodQcTenl05twrRxB/vOR8Q== + version "1.12.3" + resolved "https://registry.yarnpkg.com/@evilmartians/lefthook/-/lefthook-1.12.3.tgz#081eca59a6d33646616af844244ce6842cd6b5a5" + integrity sha512-MtXIt8h+EVTv5tCGLzh9UwbA/LRv6esdPJOHlxr8NDKHbFnbo8PvU5uVQcm3PAQTd4DZN3HoyokqrwGwntoq6w== "@humanfs/core@^0.19.1": version "0.19.1" @@ -539,11 +539,11 @@ integrity 
sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ== "@types/node@*": - version "24.1.0" - resolved "https://registry.yarnpkg.com/@types/node/-/node-24.1.0.tgz#0993f7dc31ab5cc402d112315b463e383d68a49c" - integrity sha512-ut5FthK5moxFKH2T1CUOC6ctR67rQRvvHdFLCD2Ql6KXmMuCrjsSsRI9UsLCm9M18BMwClv4pn327UvB7eeO1w== + version "24.3.0" + resolved "https://registry.yarnpkg.com/@types/node/-/node-24.3.0.tgz#89b09f45cb9a8ee69466f18ee5864e4c3eb84dec" + integrity sha512-aPTXCrfwnDLj4VvXrm+UUCQjNEvJgNA8s5F1cvwQU+3KNltTOkBm1j30uNLyqqPNe7gE3KFzImYoZEfLhp4Yow== dependencies: - undici-types "~7.8.0" + undici-types "~7.10.0" "@types/normalize-package-data@^2.4.1": version "2.4.4" @@ -556,9 +556,9 @@ integrity sha512-0kSuKjAS0TrGLJ0M/+8MaFkGsQhZpB6pxOmvS3K8FYI72K//YmdfoW9X2qPsAKh1mkwxGD5zib9s1FIFed6E8g== "@types/sizzle@^2.3.2": - version "2.3.9" - resolved "https://registry.yarnpkg.com/@types/sizzle/-/sizzle-2.3.9.tgz#d4597dbd4618264c414d7429363e3f50acb66ea2" - integrity sha512-xzLEyKB50yqCUPUJkIsrVvoWNfFUbIZI+RspLWt8u+tIW/BetMBZtgV2LY/2o+tYH8dRvQ+eoPf3NdhQCcLE2w== + version "2.3.10" + resolved "https://registry.yarnpkg.com/@types/sizzle/-/sizzle-2.3.10.tgz#277a542aff6776d8a9b15f2ac682a663e3e94bbd" + integrity sha512-TC0dmN0K8YcWEAEfiPi5gJP14eJe30TTGjkvek3iM/1NdHHsdCA/Td6GvNndMOo/iSnIsZ4HuuhrYPDAmbxzww== "@types/triple-beam@^1.3.2": version "1.3.5" @@ -577,79 +577,79 @@ dependencies: "@types/node" "*" -"@typescript-eslint/eslint-plugin@8.38.0": - version "8.38.0" - resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.38.0.tgz#6e5220d16f2691ab6d983c1737dd5b36e17641b7" - integrity sha512-CPoznzpuAnIOl4nhj4tRr4gIPj5AfKgkiJmGQDaq+fQnRJTYlcBjbX3wbciGmpoPf8DREufuPRe1tNMZnGdanA== +"@typescript-eslint/eslint-plugin@8.42.0": + version "8.42.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.42.0.tgz#2172d0496c42eee8c7294b6661681100953fa88f" + integrity 
sha512-Aq2dPqsQkxHOLfb2OPv43RnIvfj05nw8v/6n3B2NABIPpHnjQnaLo9QGMTvml+tv4korl/Cjfrb/BYhoL8UUTQ== dependencies: "@eslint-community/regexpp" "^4.10.0" - "@typescript-eslint/scope-manager" "8.38.0" - "@typescript-eslint/type-utils" "8.38.0" - "@typescript-eslint/utils" "8.38.0" - "@typescript-eslint/visitor-keys" "8.38.0" + "@typescript-eslint/scope-manager" "8.42.0" + "@typescript-eslint/type-utils" "8.42.0" + "@typescript-eslint/utils" "8.42.0" + "@typescript-eslint/visitor-keys" "8.42.0" graphemer "^1.4.0" ignore "^7.0.0" natural-compare "^1.4.0" ts-api-utils "^2.1.0" -"@typescript-eslint/parser@8.38.0": - version "8.38.0" - resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-8.38.0.tgz#6723a5ea881e1777956b1045cba30be5ea838293" - integrity sha512-Zhy8HCvBUEfBECzIl1PKqF4p11+d0aUJS1GeUiuqK9WmOug8YCmC4h4bjyBvMyAMI9sbRczmrYL5lKg/YMbrcQ== +"@typescript-eslint/parser@8.42.0": + version "8.42.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-8.42.0.tgz#20ea66f4867981fb5bb62cbe1454250fc4a440ab" + integrity sha512-r1XG74QgShUgXph1BYseJ+KZd17bKQib/yF3SR+demvytiRXrwd12Blnz5eYGm8tXaeRdd4x88MlfwldHoudGg== dependencies: - "@typescript-eslint/scope-manager" "8.38.0" - "@typescript-eslint/types" "8.38.0" - "@typescript-eslint/typescript-estree" "8.38.0" - "@typescript-eslint/visitor-keys" "8.38.0" + "@typescript-eslint/scope-manager" "8.42.0" + "@typescript-eslint/types" "8.42.0" + "@typescript-eslint/typescript-estree" "8.42.0" + "@typescript-eslint/visitor-keys" "8.42.0" debug "^4.3.4" -"@typescript-eslint/project-service@8.38.0": - version "8.38.0" - resolved "https://registry.yarnpkg.com/@typescript-eslint/project-service/-/project-service-8.38.0.tgz#4900771f943163027fd7d2020a062892056b5e2f" - integrity sha512-dbK7Jvqcb8c9QfH01YB6pORpqX1mn5gDZc9n63Ak/+jD67oWXn3Gs0M6vddAN+eDXBCS5EmNWzbSxsn9SzFWWg== +"@typescript-eslint/project-service@8.42.0": + version "8.42.0" + resolved 
"https://registry.yarnpkg.com/@typescript-eslint/project-service/-/project-service-8.42.0.tgz#636eb3418b6c42c98554dce884943708bf41a583" + integrity sha512-vfVpLHAhbPjilrabtOSNcUDmBboQNrJUiNAGoImkZKnMjs2TIcWG33s4Ds0wY3/50aZmTMqJa6PiwkwezaAklg== dependencies: - "@typescript-eslint/tsconfig-utils" "^8.38.0" - "@typescript-eslint/types" "^8.38.0" + "@typescript-eslint/tsconfig-utils" "^8.42.0" + "@typescript-eslint/types" "^8.42.0" debug "^4.3.4" -"@typescript-eslint/scope-manager@8.38.0": - version "8.38.0" - resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-8.38.0.tgz#5a0efcb5c9cf6e4121b58f87972f567c69529226" - integrity sha512-WJw3AVlFFcdT9Ri1xs/lg8LwDqgekWXWhH3iAF+1ZM+QPd7oxQ6jvtW/JPwzAScxitILUIFs0/AnQ/UWHzbATQ== +"@typescript-eslint/scope-manager@8.42.0": + version "8.42.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-8.42.0.tgz#36016757bc85b46ea42bae47b61f9421eddedde3" + integrity sha512-51+x9o78NBAVgQzOPd17DkNTnIzJ8T/O2dmMBLoK9qbY0Gm52XJcdJcCl18ExBMiHo6jPMErUQWUv5RLE51zJw== dependencies: - "@typescript-eslint/types" "8.38.0" - "@typescript-eslint/visitor-keys" "8.38.0" + "@typescript-eslint/types" "8.42.0" + "@typescript-eslint/visitor-keys" "8.42.0" -"@typescript-eslint/tsconfig-utils@8.38.0", "@typescript-eslint/tsconfig-utils@^8.38.0": - version "8.38.0" - resolved "https://registry.yarnpkg.com/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.38.0.tgz#6de4ce224a779601a8df667db56527255c42c4d0" - integrity sha512-Lum9RtSE3EroKk/bYns+sPOodqb2Fv50XOl/gMviMKNvanETUuUcC9ObRbzrJ4VSd2JalPqgSAavwrPiPvnAiQ== +"@typescript-eslint/tsconfig-utils@8.42.0", "@typescript-eslint/tsconfig-utils@^8.42.0": + version "8.42.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.42.0.tgz#21a3e74396fd7443ff930bc41b27789ba7e9236e" + integrity sha512-kHeFUOdwAJfUmYKjR3CLgZSglGHjbNTi1H8sTYRYV2xX6eNz4RyJ2LIgsDLKf8Yi0/GL1WZAC/DgZBeBft8QAQ== 
-"@typescript-eslint/type-utils@8.38.0": - version "8.38.0" - resolved "https://registry.yarnpkg.com/@typescript-eslint/type-utils/-/type-utils-8.38.0.tgz#a56cd84765fa6ec135fe252b5db61e304403a85b" - integrity sha512-c7jAvGEZVf0ao2z+nnz8BUaHZD09Agbh+DY7qvBQqLiz8uJzRgVPj5YvOh8I8uEiH8oIUGIfHzMwUcGVco/SJg== +"@typescript-eslint/type-utils@8.42.0": + version "8.42.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/type-utils/-/type-utils-8.42.0.tgz#d6733e7a9fbdf5af60c09c6038dffde13f4e4253" + integrity sha512-9KChw92sbPTYVFw3JLRH1ockhyR3zqqn9lQXol3/YbI6jVxzWoGcT3AsAW0mu1MY0gYtsXnUGV/AKpkAj5tVlQ== dependencies: - "@typescript-eslint/types" "8.38.0" - "@typescript-eslint/typescript-estree" "8.38.0" - "@typescript-eslint/utils" "8.38.0" + "@typescript-eslint/types" "8.42.0" + "@typescript-eslint/typescript-estree" "8.42.0" + "@typescript-eslint/utils" "8.42.0" debug "^4.3.4" ts-api-utils "^2.1.0" -"@typescript-eslint/types@8.38.0", "@typescript-eslint/types@^8.11.0", "@typescript-eslint/types@^8.38.0": - version "8.38.0" - resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-8.38.0.tgz#297351c994976b93c82ac0f0e206c8143aa82529" - integrity sha512-wzkUfX3plUqij4YwWaJyqhiPE5UCRVlFpKn1oCRn2O1bJ592XxWJj8ROQ3JD5MYXLORW84063z3tZTb/cs4Tyw== +"@typescript-eslint/types@8.42.0", "@typescript-eslint/types@^8.11.0", "@typescript-eslint/types@^8.42.0": + version "8.42.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-8.42.0.tgz#ae15c09cebda20473772902033328e87372db008" + integrity sha512-LdtAWMiFmbRLNP7JNeY0SqEtJvGMYSzfiWBSmx+VSZ1CH+1zyl8Mmw1TT39OrtsRvIYShjJWzTDMPWZJCpwBlw== -"@typescript-eslint/typescript-estree@8.38.0": - version "8.38.0" - resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-8.38.0.tgz#82262199eb6778bba28a319e25ad05b1158957df" - integrity sha512-fooELKcAKzxux6fA6pxOflpNS0jc+nOQEEOipXFNjSlBS6fqrJOVY/whSn70SScHrcJ2LDsxWrneFoWYSVfqhQ== 
+"@typescript-eslint/typescript-estree@8.42.0": + version "8.42.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-8.42.0.tgz#593c3af87d4462252c0d7239d1720b84a1b56864" + integrity sha512-ku/uYtT4QXY8sl9EDJETD27o3Ewdi72hcXg1ah/kkUgBvAYHLwj2ofswFFNXS+FL5G+AGkxBtvGt8pFBHKlHsQ== dependencies: - "@typescript-eslint/project-service" "8.38.0" - "@typescript-eslint/tsconfig-utils" "8.38.0" - "@typescript-eslint/types" "8.38.0" - "@typescript-eslint/visitor-keys" "8.38.0" + "@typescript-eslint/project-service" "8.42.0" + "@typescript-eslint/tsconfig-utils" "8.42.0" + "@typescript-eslint/types" "8.42.0" + "@typescript-eslint/visitor-keys" "8.42.0" debug "^4.3.4" fast-glob "^3.3.2" is-glob "^4.0.3" @@ -657,22 +657,22 @@ semver "^7.6.0" ts-api-utils "^2.1.0" -"@typescript-eslint/utils@8.38.0": - version "8.38.0" - resolved "https://registry.yarnpkg.com/@typescript-eslint/utils/-/utils-8.38.0.tgz#5f10159899d30eb92ba70e642ca6f754bddbf15a" - integrity sha512-hHcMA86Hgt+ijJlrD8fX0j1j8w4C92zue/8LOPAFioIno+W0+L7KqE8QZKCcPGc/92Vs9x36w/4MPTJhqXdyvg== +"@typescript-eslint/utils@8.42.0": + version "8.42.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/utils/-/utils-8.42.0.tgz#95f8e0c697ff2f7da5f72e16135011f878d815c0" + integrity sha512-JnIzu7H3RH5BrKC4NoZqRfmjqCIS1u3hGZltDYJgkVdqAezl4L9d1ZLw+36huCujtSBSAirGINF/S4UxOcR+/g== dependencies: "@eslint-community/eslint-utils" "^4.7.0" - "@typescript-eslint/scope-manager" "8.38.0" - "@typescript-eslint/types" "8.38.0" - "@typescript-eslint/typescript-estree" "8.38.0" + "@typescript-eslint/scope-manager" "8.42.0" + "@typescript-eslint/types" "8.42.0" + "@typescript-eslint/typescript-estree" "8.42.0" -"@typescript-eslint/visitor-keys@8.38.0": - version "8.38.0" - resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-8.38.0.tgz#a9765a527b082cb8fc60fd8a16e47c7ad5b60ea5" - integrity 
sha512-pWrTcoFNWuwHlA9CvlfSsGWs14JxfN1TH25zM5L7o0pRLhsoZkDnTsXfQRJBEWJoV5DL0jf+Z+sxiud+K0mq1g== +"@typescript-eslint/visitor-keys@8.42.0": + version "8.42.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-8.42.0.tgz#87c6caaa1ac307bc73a87c1fc469f88f0162f27e" + integrity sha512-3WbiuzoEowaEn8RSnhJBrxSwX8ULYE9CXaPepS2C2W3NSA5NNIvBaslpBSBElPq0UGr0xVJlXFWOAKIkyylydQ== dependencies: - "@typescript-eslint/types" "8.38.0" + "@typescript-eslint/types" "8.42.0" eslint-visitor-keys "^4.2.1" "@vvago/vale@^3.4.2": @@ -690,7 +690,7 @@ acorn-jsx@^5.3.2: resolved "https://registry.yarnpkg.com/acorn-jsx/-/acorn-jsx-5.3.2.tgz#7ed5bb55908b3b2f1bc55c6af1653bada7f07937" integrity sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ== -acorn@^8.14.0, acorn@^8.15.0: +acorn@^8.15.0: version "8.15.0" resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.15.0.tgz#a360898bc415edaac46c8241f6383975b930b816" integrity sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg== @@ -731,9 +731,9 @@ ansi-regex@^5.0.1: integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== ansi-regex@^6.0.1: - version "6.1.0" - resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.1.0.tgz#95ec409c69619d6cb1b8b34f14b660ef28ebd654" - integrity sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA== + version "6.2.0" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.2.0.tgz#2f302e7550431b1b7762705fffb52cf1ffa20447" + integrity sha512-TKY5pyBkHyADOPYlRT9Lx6F544mPl0vS5Ew7BJ45hA08Q+t3GjbueLliBWN3sMICk6+y7HdyxSzC4bWS8baBdg== ansi-styles@^4.0.0, ansi-styles@^4.1.0: version "4.3.0" @@ -1021,12 +1021,12 @@ braces@^3.0.3, braces@~3.0.2: fill-range "^7.1.1" browserslist@^4.24.4: - version "4.25.1" - resolved 
"https://registry.yarnpkg.com/browserslist/-/browserslist-4.25.1.tgz#ba9e8e6f298a1d86f829c9b975e07948967bb111" - integrity sha512-KGj0KoOMXLpSNkkEI6Z6mShmQy0bc1I+T7K9N81k4WWMrfz+6fQ6es80B/YLAeRoKvjYE1YSHHOW1qe9xIVzHw== + version "4.25.4" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.25.4.tgz#ebdd0e1d1cf3911834bab3a6cd7b917d9babf5af" + integrity sha512-4jYpcjabC606xJ3kw2QwGEZKX0Aw7sgQdZCvIK9dhVSPh76BKo+C+btT1RRofH7B+8iNpEbgGNVWiLki5q93yg== dependencies: - caniuse-lite "^1.0.30001726" - electron-to-chromium "^1.5.173" + caniuse-lite "^1.0.30001737" + electron-to-chromium "^1.5.211" node-releases "^2.0.19" update-browserslist-db "^1.1.3" @@ -1125,10 +1125,10 @@ callsites@^3.0.0: resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== -caniuse-lite@^1.0.30001702, caniuse-lite@^1.0.30001726: - version "1.0.30001731" - resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001731.tgz#277c07416ea4613ec564e5b0ffb47e7b60f32e2f" - integrity sha512-lDdp2/wrOmTRWuoB5DpfNkC0rJDU8DqRa6nYL6HK6sytw70QMopt/NIc/9SM7ylItlBWfACXk0tEn37UWM/+mg== +caniuse-lite@^1.0.30001702, caniuse-lite@^1.0.30001737: + version "1.0.30001739" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001739.tgz#b34ce2d56bfc22f4352b2af0144102d623a124f4" + integrity sha512-y+j60d6ulelrNSwpPyrHdl+9mJnQzHBr08xm48Qno0nSk4h3Qojh+ziv2qE6rXf4k3tadF4o1J/1tAbVm1NtnA== careful-downloader@^3.0.0: version "3.0.0" @@ -1163,9 +1163,9 @@ chalk@^4.0.0, chalk@^4.1.0: supports-color "^7.1.0" chalk@^5.0.0: - version "5.4.1" - resolved "https://registry.yarnpkg.com/chalk/-/chalk-5.4.1.tgz#1b48bf0963ec158dce2aacf69c093ae2dd2092d8" - integrity sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w== + version "5.6.0" + resolved 
"https://registry.yarnpkg.com/chalk/-/chalk-5.6.0.tgz#a1a8d294ea3526dbb77660f12649a08490e33ab8" + integrity sha512-46QrSQFyVSEyYAgQ22hQ+zDa60YHA4fBstHmtSApj1Y5vKtG27fWowW03jCk5KcbXEWPZUIR894aARCA/G1kfQ== check-more-types@^2.24.0: version "2.24.0" @@ -1405,9 +1405,9 @@ crypto-random-string@^4.0.0: type-fest "^1.0.1" cypress@^14.0.1: - version "14.5.3" - resolved "https://registry.yarnpkg.com/cypress/-/cypress-14.5.3.tgz#1b402bc1f6a3042d5068901ef9f9affd382ecf36" - integrity sha512-syLwKjDeMg77FRRx68bytLdlqHXDT4yBVh0/PPkcgesChYDjUZbwxLqMXuryYKzAyJsPsQHUDW1YU74/IYEUIA== + version "14.5.4" + resolved "https://registry.yarnpkg.com/cypress/-/cypress-14.5.4.tgz#d821fbb6220c3328e7413acc7724b75319c9e64d" + integrity sha512-0Dhm4qc9VatOcI1GiFGVt8osgpPdqJLHzRwcAB5MSD/CAAts3oybvPUPawHyvJZUd8osADqZe/xzMsZ8sDTjXw== dependencies: "@cypress/request" "^3.0.9" "@cypress/xvfb" "^1.2.4" @@ -1469,9 +1469,9 @@ cytoscape-fcose@^2.2.0: cose-base "^2.2.0" cytoscape@^3.29.3: - version "3.33.0" - resolved "https://registry.yarnpkg.com/cytoscape/-/cytoscape-3.33.0.tgz#c08136096f568d0f9b438406ec722f1a093b4e16" - integrity sha512-2d2EwwhaxLWC8ahkH1PpQwCyu6EY3xDRdcEJXrLTb4fOUtVc+YWQalHU67rFS1a6ngj1fgv9dQLtJxP/KAFZEw== + version "3.33.1" + resolved "https://registry.yarnpkg.com/cytoscape/-/cytoscape-3.33.1.tgz#449e05d104b760af2912ab76482d24c01cdd4c97" + integrity sha512-iJc4TwyANnOGR1OmWhsS9ayRS3s+XQ185FmuHObThD+5AeJCakAAbWv8KimMTt08xCCLNgneQwFp+JRJOr9qGQ== "d3-array@1 - 2": version "2.12.1" @@ -1792,9 +1792,9 @@ data-view-byte-offset@^1.0.1: is-data-view "^1.0.1" dayjs@^1.10.4, dayjs@^1.11.13: - version "1.11.13" - resolved "https://registry.yarnpkg.com/dayjs/-/dayjs-1.11.13.tgz#92430b0139055c3ebb60150aa13e860a4b5a366c" - integrity sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg== + version "1.11.18" + resolved "https://registry.yarnpkg.com/dayjs/-/dayjs-1.11.18.tgz#835fa712aac52ab9dec8b1494098774ed7070a11" + integrity 
sha512-zFBQ7WFRvVRhKcWoUh+ZA1g2HVgUbsZm9sbddh8EC5iv93sui8DVVz1Npvz+r6meo9VKfa8NyLWBsQK1VvIKPA== debug@^3.1.0, debug@^3.2.7: version "3.2.7" @@ -1963,10 +1963,10 @@ ecc-jsbn@~0.1.1: jsbn "~0.1.0" safer-buffer "^2.1.0" -electron-to-chromium@^1.5.173: - version "1.5.192" - resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.192.tgz#6dfc57a41846a57b18f9c0121821a6df1e165cc1" - integrity sha512-rP8Ez0w7UNw/9j5eSXCe10o1g/8B1P5SM90PCCMVkIRQn2R0LEHWz4Eh9RnxkniuDe1W0cTSOB3MLlkTGDcuCg== +electron-to-chromium@^1.5.211: + version "1.5.212" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.212.tgz#9b541f90d7d8415ccea94d4be4bb86e73e3f9547" + integrity sha512-gE7ErIzSW+d8jALWMcOIgf+IB6lpfsg6NwOhPVwKzDtN2qcBix47vlin4yzSregYDxTCXOUqAZjVY/Z3naS7ww== emoji-regex@^8.0.0: version "8.0.0" @@ -2225,18 +2225,18 @@ eslint-visitor-keys@^4.2.1: integrity sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ== eslint@^9.18.0: - version "9.32.0" - resolved "https://registry.yarnpkg.com/eslint/-/eslint-9.32.0.tgz#4ea28df4a8dbc454e1251e0f3aed4bcf4ce50a47" - integrity sha512-LSehfdpgMeWcTZkWZVIJl+tkZ2nuSkyyB9C27MZqFWXuph7DvaowgcTvKqxvpLW1JZIk8PN7hFY3Rj9LQ7m7lg== + version "9.34.0" + resolved "https://registry.yarnpkg.com/eslint/-/eslint-9.34.0.tgz#0ea1f2c1b5d1671db8f01aa6b8ce722302016f7b" + integrity sha512-RNCHRX5EwdrESy3Jc9o8ie8Bog+PeYvvSR8sDGoZxNFTvZ4dlxUB3WzQ3bQMztFrSRODGrLLj8g6OFuGY/aiQg== dependencies: "@eslint-community/eslint-utils" "^4.2.0" "@eslint-community/regexpp" "^4.12.1" "@eslint/config-array" "^0.21.0" - "@eslint/config-helpers" "^0.3.0" - "@eslint/core" "^0.15.0" + "@eslint/config-helpers" "^0.3.1" + "@eslint/core" "^0.15.2" "@eslint/eslintrc" "^3.3.1" - "@eslint/js" "9.32.0" - "@eslint/plugin-kit" "^0.3.4" + "@eslint/js" "9.34.0" + "@eslint/plugin-kit" "^0.3.5" "@humanfs/node" "^0.16.6" "@humanwhocodes/module-importer" "^1.0.1" "@humanwhocodes/retry" "^0.4.2" @@ 
-2409,9 +2409,9 @@ fd-slicer@~1.1.0: pend "~1.2.0" fdir@^6.4.4: - version "6.4.6" - resolved "https://registry.yarnpkg.com/fdir/-/fdir-6.4.6.tgz#2b268c0232697063111bbf3f64810a2a741ba281" - integrity sha512-hiFoqpyZcfNm1yc4u8oWCf9A2c4D3QjCrks3zmoVKVxpQRzmPNar1hUJcBG2RQHvEVGDN+Jm81ZheVLAQMK6+w== + version "6.5.0" + resolved "https://registry.yarnpkg.com/fdir/-/fdir-6.5.0.tgz#ed2ab967a331ade62f18d077dae192684d50d350" + integrity sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg== fecha@^4.2.0: version "4.2.3" @@ -2489,9 +2489,9 @@ fn.name@1.x.x: integrity sha512-GRnmB5gPyJpAhTQdSZTSp9uaPSvl09KoYcMQtsB9rQoOmzs9dH6ffeccH+Z+cv6P68Hu5bC6JjRh4Ah/mHSNRw== follow-redirects@^1.15.6: - version "1.15.9" - resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.9.tgz#a604fa10e443bf98ca94228d9eebcc2e8a2c8ee1" - integrity sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ== + version "1.15.11" + resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.11.tgz#777d73d72a92f8ec4d2e410eb47352a56b8e8340" + integrity sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ== for-each@^0.3.3, for-each@^0.3.5: version "0.3.5" @@ -2540,9 +2540,9 @@ fs-constants@^1.0.0: integrity sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow== fs-extra@^11.0.0, fs-extra@^11.1.1: - version "11.3.0" - resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-11.3.0.tgz#0daced136bbaf65a555a326719af931adc7a314d" - integrity sha512-Z4XaCL6dUDHfP/jT25jJKMmtxvuwbkrD1vNSMFlo9lNLY2c5FHYSQgHPRZUjAB26TpDEoW9HCOgplrdbaPV/ew== + version "11.3.1" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-11.3.1.tgz#ba7a1f97a85f94c6db2e52ff69570db3671d5a74" + integrity sha512-eXvGGwZ5CL17ZSwHWd3bbgk7UUpF6IFHtP57NYYakPvHOs8GDgDe5KJI36jIJzDkJ6eJjuzRA8eBQb6SkKue0g== dependencies: graceful-fs "^4.2.0" 
jsonfile "^6.0.1" @@ -2870,9 +2870,9 @@ http2-wrapper@^2.1.10: resolve-alpn "^1.2.0" hugo-extended@>=0.101.0: - version "0.148.2" - resolved "https://registry.yarnpkg.com/hugo-extended/-/hugo-extended-0.148.2.tgz#0b4b52fe3c621737a6c1bc6d959b65e28678f2f1" - integrity sha512-Mruej7lSoTcMI3ZhVmimC+cOhRh5jkh+aT5vnbDZpNHm9z0mlQJAYtXWtZ4OYFEg0dngi70sf1Cfgn4q8H2VnQ== + version "0.149.0" + resolved "https://registry.yarnpkg.com/hugo-extended/-/hugo-extended-0.149.0.tgz#a486e760c998bc616a4bcdd9a650dcab076e0c5b" + integrity sha512-NSCKH9C6SCk4E4LpRELqcyDO+fiGdut+tia8NJy6p180ZOSYWIlGeb/jtvXx0z0inAIutYKd90VNCxT76ifCLw== dependencies: careful-downloader "^3.0.0" log-symbols "^5.1.0" @@ -3329,9 +3329,9 @@ json5@^1.0.2: minimist "^1.2.0" jsonfile@^6.0.1: - version "6.1.0" - resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-6.1.0.tgz#bc55b2634793c679ec6403094eb13698a6ec0aae" - integrity sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ== + version "6.2.0" + resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-6.2.0.tgz#7c265bd1b65de6977478300087c99f1c84383f62" + integrity sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg== dependencies: universalify "^2.0.0" optionalDependencies: @@ -3434,71 +3434,71 @@ lazy-ass@^1.6.0: resolved "https://registry.yarnpkg.com/lazy-ass/-/lazy-ass-1.6.0.tgz#7999655e8646c17f089fdd187d150d3324d54513" integrity sha512-cc8oEVoctTvsFZ/Oje/kGnHbpWHYBe8IAJe4C0QNc3t8uM/0Y8+erSz/7Y1ALuXTEZTMvxXwO6YbX1ey3ujiZw== -lefthook-darwin-arm64@1.12.2: - version "1.12.2" - resolved "https://registry.yarnpkg.com/lefthook-darwin-arm64/-/lefthook-darwin-arm64-1.12.2.tgz#fd26f502901b4f6f823ea4b05e4cd90a30655fae" - integrity sha512-fTxeI9tEskrHjc3QyEO+AG7impBXY2Ed8V5aiRc3fw9POfYtVh9b5jRx90fjk2+ld5hf+Z1DsyyLq/vOHDFskQ== +lefthook-darwin-arm64@1.12.3: + version "1.12.3" + resolved 
"https://registry.yarnpkg.com/lefthook-darwin-arm64/-/lefthook-darwin-arm64-1.12.3.tgz#a36059416332e31098812de72dc6c35a24cdddc8" + integrity sha512-j1lwaosWRy3vhz8oQgCS1M6EUFN95aIYeNuqkczsBoAA6BDNAmVP1ctYEIYUK4bYaIgENbqbA9prYMAhyzh6Og== -lefthook-darwin-x64@1.12.2: - version "1.12.2" - resolved "https://registry.yarnpkg.com/lefthook-darwin-x64/-/lefthook-darwin-x64-1.12.2.tgz#0d9990d52425f2cb49d83dcb4fb40fc42b42a751" - integrity sha512-T1dCDKAAfdHgYZ8qtrS02SJSHoR52RFcrGArFNll9Mu4ZSV19Sp8BO+kTwDUOcLYdcPGNaqOp9PkRBQGZWQC7g== +lefthook-darwin-x64@1.12.3: + version "1.12.3" + resolved "https://registry.yarnpkg.com/lefthook-darwin-x64/-/lefthook-darwin-x64-1.12.3.tgz#179aad948487957435f1ebf9b0aa44136fe2023d" + integrity sha512-x6aWFfLQX4m5zQ4X9zh5+hHOE5XTvNjz2zB9DI+xbIBLs2RRg0xJNT3OfgSrBU1QtEBneJ5dRQP5nl47td9GDQ== -lefthook-freebsd-arm64@1.12.2: - version "1.12.2" - resolved "https://registry.yarnpkg.com/lefthook-freebsd-arm64/-/lefthook-freebsd-arm64-1.12.2.tgz#55d7483405d16382e5c9e79ddc7f58078a775cf4" - integrity sha512-2n9z7Q4BKeMBoB9cuEdv0UBQH82Z4GgBQpCrfjCtyzpDnYQwrH8Tkrlnlko4qPh9MM6nLLGIYMKsA5nltzo8Cg== +lefthook-freebsd-arm64@1.12.3: + version "1.12.3" + resolved "https://registry.yarnpkg.com/lefthook-freebsd-arm64/-/lefthook-freebsd-arm64-1.12.3.tgz#47faa99918b33bd5d9008f011b61ed22205000e1" + integrity sha512-41OmulLqVZ0EOHmmHouJrpL59SwDD7FLoso4RsQVIBPaf8fHacdLo07Ye28VWQ5XolZQvnWcr1YXKo4JhqQMyw== -lefthook-freebsd-x64@1.12.2: - version "1.12.2" - resolved "https://registry.yarnpkg.com/lefthook-freebsd-x64/-/lefthook-freebsd-x64-1.12.2.tgz#6f5e9c28caecfc62b4628230022f64b348b9a63a" - integrity sha512-1hNY/irY+/3kjRzKoJYxG+m3BYI8QxopJUK1PQnknGo1Wy5u302SdX+tR7pnpz6JM5chrNw4ozSbKKOvdZ5VEw== +lefthook-freebsd-x64@1.12.3: + version "1.12.3" + resolved "https://registry.yarnpkg.com/lefthook-freebsd-x64/-/lefthook-freebsd-x64-1.12.3.tgz#e93eed9ea86fe2fd34f6a3953a0d06179124591b" + integrity 
sha512-741/JRCJIS++hgYEH2uefN4FsH872V7gy2zDhcfQofiZnWP7+qhl4Wmwi8IpjIu4X7hLOC4cT18LOVU5L8KV9Q== -lefthook-linux-arm64@1.12.2: - version "1.12.2" - resolved "https://registry.yarnpkg.com/lefthook-linux-arm64/-/lefthook-linux-arm64-1.12.2.tgz#ba47f6f11ba1e47aec7214d2216b14c98649344e" - integrity sha512-1W4swYIVRkxq/LFTuuK4oVpd6NtTKY4E3VY2Uq2JDkIOJV46+8qGBF+C/QA9K3O9chLffgN7c+i+NhIuGiZ/Vw== +lefthook-linux-arm64@1.12.3: + version "1.12.3" + resolved "https://registry.yarnpkg.com/lefthook-linux-arm64/-/lefthook-linux-arm64-1.12.3.tgz#cfe91e9973d595f1a4b38ee85ccb28ae94aef616" + integrity sha512-BXIy1aDFZmFgmebJliNrEqZfX1lSOD4b/USvANv1UirFrNgTq5SRssd1CKfflT2PwKX6LsJTD4WabLLWZOxp9A== -lefthook-linux-x64@1.12.2: - version "1.12.2" - resolved "https://registry.yarnpkg.com/lefthook-linux-x64/-/lefthook-linux-x64-1.12.2.tgz#f77121df6404303d82afc1e5dd47d065163a0a8c" - integrity sha512-J6VGuMfhq5iCsg1Pv7xULbuXC63gP5LaikT0PhkyBNMi3HQneZFDJ8k/sp0Ue9HkQv6QfWIo3/FgB9gz38MCFw== +lefthook-linux-x64@1.12.3: + version "1.12.3" + resolved "https://registry.yarnpkg.com/lefthook-linux-x64/-/lefthook-linux-x64-1.12.3.tgz#6d296b0366f505acdfa27300c19af1ec2b4aa5a9" + integrity sha512-FRdwdj5jsQAP2eVrtkVUqMqYNCbQ2Ix84izy29/BvLlu/hVypAGbDfUkgFnsmAd6ZsCBeYCEtPuqyg3E3SO0Rg== -lefthook-openbsd-arm64@1.12.2: - version "1.12.2" - resolved "https://registry.yarnpkg.com/lefthook-openbsd-arm64/-/lefthook-openbsd-arm64-1.12.2.tgz#0695da717d498de315dcde975afa958ad931ec40" - integrity sha512-wncDRW3ml24DaOyH22KINumjvCohswbQqbxyH2GORRCykSnE859cTjOrRIchTKBIARF7PSeGPUtS7EK0+oDbaw== +lefthook-openbsd-arm64@1.12.3: + version "1.12.3" + resolved "https://registry.yarnpkg.com/lefthook-openbsd-arm64/-/lefthook-openbsd-arm64-1.12.3.tgz#d0f7f0479faf85745ed23178743d2547e5a2474c" + integrity sha512-tch5wXY4GOjKAYohH7OFoxNdVHuUSYt2Pulo2VTkMYEG8IrvJnRO5MkvgHtKDHzU5mfABQYv5+ccJykDx5hQWA== -lefthook-openbsd-x64@1.12.2: - version "1.12.2" - resolved 
"https://registry.yarnpkg.com/lefthook-openbsd-x64/-/lefthook-openbsd-x64-1.12.2.tgz#ea4579c8fff167f3b2fa46b82e21d45a74a63e6b" - integrity sha512-2jDOkCHNnc/oK/vR62hAf3vZb1EQ6Md2GjIlgZ/V7A3ztOsM8QZ5IxwYN3D1UOIR5ZnwMBy7PtmTJC/HJrig5w== +lefthook-openbsd-x64@1.12.3: + version "1.12.3" + resolved "https://registry.yarnpkg.com/lefthook-openbsd-x64/-/lefthook-openbsd-x64-1.12.3.tgz#cd97f94dd91030fdd1307ec6e731910dd14c0ef2" + integrity sha512-IHbHg/rUFXrAN7LnjcQEtutCHBaD49CZge96Hpk0GZ2eEG5GTCNRnUyEf+Kf3+RTqHFgwtADdpeDa/ZaGZTM4g== -lefthook-windows-arm64@1.12.2: - version "1.12.2" - resolved "https://registry.yarnpkg.com/lefthook-windows-arm64/-/lefthook-windows-arm64-1.12.2.tgz#54385a8fff1ee2f7a67f1182b84ea40aacc86e67" - integrity sha512-ZMH/q6UNSidhHEG/1QoqIl1n4yPTBWuVmKx5bONtKHicoz4QCQ+QEiNjKsG5OO4C62nfyHGThmweCzZVUQECJw== +lefthook-windows-arm64@1.12.3: + version "1.12.3" + resolved "https://registry.yarnpkg.com/lefthook-windows-arm64/-/lefthook-windows-arm64-1.12.3.tgz#3cec31e95fa343fed6c47427dbf07bee25a41158" + integrity sha512-wghcE5TSpb+mbtemUV6uAo9hEK09kxRzhf2nPdeDX+fw42cL2TGZsbaCnDyzaY144C+L2/wEWrLIHJMnZYkuqA== -lefthook-windows-x64@1.12.2: - version "1.12.2" - resolved "https://registry.yarnpkg.com/lefthook-windows-x64/-/lefthook-windows-x64-1.12.2.tgz#417771407ade25f634833084505b95fcc701b6c7" - integrity sha512-TqT2jIPcTQ9uwaw+v+DTmvnUHM/p7bbsSrPoPX+fRXSGLzFjyiY+12C9dObSwfCQq6rT70xqQJ9AmftJQsa5/Q== +lefthook-windows-x64@1.12.3: + version "1.12.3" + resolved "https://registry.yarnpkg.com/lefthook-windows-x64/-/lefthook-windows-x64-1.12.3.tgz#55bc17846de797d07805f24b165bb1c01f7d4650" + integrity sha512-7Co/L8e2x2hGC1L33jDJ4ZlTkO3PJm25GOGpLfN1kqwhGB/uzMLeTI/PBczjlIN8isUv26ouNd9rVR7Bibrwyg== lefthook@^1.10.10: - version "1.12.2" - resolved "https://registry.yarnpkg.com/lefthook/-/lefthook-1.12.2.tgz#5cc86475aa22203ad4b68f496126dc6c8633971a" - integrity sha512-2CeTu5NcmoT9YnqsHTq/TF36MlqlzHzhivGx3DrXHwcff4TdvrkIwUTA56huM3Nlo5ODAF/0hlPzaKLmNHCBnQ== + version 
"1.12.3" + resolved "https://registry.yarnpkg.com/lefthook/-/lefthook-1.12.3.tgz#8a99497ccff612bb2c73ac9146b57e29b3baee0b" + integrity sha512-huMg+mGp6wHPjkaLdchuOvxVRMzvz6OVdhivatiH2Qn47O5Zm46jwzbVPYIanX6N/8ZTjGLBxv8tZ0KYmKt/Jg== optionalDependencies: - lefthook-darwin-arm64 "1.12.2" - lefthook-darwin-x64 "1.12.2" - lefthook-freebsd-arm64 "1.12.2" - lefthook-freebsd-x64 "1.12.2" - lefthook-linux-arm64 "1.12.2" - lefthook-linux-x64 "1.12.2" - lefthook-openbsd-arm64 "1.12.2" - lefthook-openbsd-x64 "1.12.2" - lefthook-windows-arm64 "1.12.2" - lefthook-windows-x64 "1.12.2" + lefthook-darwin-arm64 "1.12.3" + lefthook-darwin-x64 "1.12.3" + lefthook-freebsd-arm64 "1.12.3" + lefthook-freebsd-x64 "1.12.3" + lefthook-linux-arm64 "1.12.3" + lefthook-linux-x64 "1.12.3" + lefthook-openbsd-arm64 "1.12.3" + lefthook-openbsd-x64 "1.12.3" + lefthook-windows-arm64 "1.12.3" + lefthook-windows-x64 "1.12.3" levn@^0.4.1: version "0.4.1" @@ -3538,13 +3538,13 @@ listr2@^3.8.3: wrap-ansi "^7.0.0" local-pkg@^1.0.0: - version "1.1.1" - resolved "https://registry.yarnpkg.com/local-pkg/-/local-pkg-1.1.1.tgz#f5fe74a97a3bd3c165788ee08ca9fbe998dc58dd" - integrity sha512-WunYko2W1NcdfAFpuLUoucsgULmgDBRkdxHxWQ7mK0cQqwPiy8E1enjuRBrhLtZkB5iScJ1XIPdhVEFK8aOLSg== + version "1.1.2" + resolved "https://registry.yarnpkg.com/local-pkg/-/local-pkg-1.1.2.tgz#c03d208787126445303f8161619dc701afa4abb5" + integrity sha512-arhlxbFRmoQHl33a0Zkle/YWlmNwoyt6QNZEIJcqNbdrsix5Lvc4HyyI3EnwxTYlZYc32EbYrQ8SzEZ7dqgg9A== dependencies: mlly "^1.7.4" - pkg-types "^2.0.1" - quansync "^0.2.8" + pkg-types "^2.3.0" + quansync "^0.2.11" locate-path@^6.0.0: version "6.0.0" @@ -3648,9 +3648,9 @@ markdown-link@^0.1.1: integrity sha512-TurLymbyLyo+kAUUAV9ggR9EPcDjP/ctlv9QAFiqUH7c+t6FlsbivPo9OKTU8xdOx9oNd2drW/Fi5RRElQbUqA== marked@^16.0.0: - version "16.1.1" - resolved "https://registry.yarnpkg.com/marked/-/marked-16.1.1.tgz#a7839dcf19fa5e349cad12c561f231320690acd4" - integrity 
sha512-ij/2lXfCRT71L6u0M29tJPhP0bM5shLL3u5BePhFwPELj2blMJ6GDtD7PfJhRLhJ/c2UwrK17ySVcDzy2YHjHQ== + version "16.2.1" + resolved "https://registry.yarnpkg.com/marked/-/marked-16.2.1.tgz#f4b82ffa8e6201bafebc59249492b88b2dcc949f" + integrity sha512-r3UrXED9lMlHF97jJByry90cwrZBBvZmjG1L68oYfuPMW+uDTnuMbyJDymCWwbTE+f+3LhpNDKfpR3a3saFyjA== math-intrinsics@^1.1.0: version "1.1.0" @@ -3668,9 +3668,9 @@ merge2@^1.3.0: integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== mermaid@^11.10.0: - version "11.10.0" - resolved "https://registry.yarnpkg.com/mermaid/-/mermaid-11.10.0.tgz#4949f98d08cfdc4cda429372ed2f843a64c99946" - integrity sha512-oQsFzPBy9xlpnGxUqLbVY8pvknLlsNIJ0NWwi8SUJjhbP1IT0E0o1lfhU4iYV3ubpy+xkzkaOyDUQMn06vQElQ== + version "11.10.1" + resolved "https://registry.yarnpkg.com/mermaid/-/mermaid-11.10.1.tgz#c62ee121f2080291ba175ae880a16c0838070689" + integrity sha512-0PdeADVWURz7VMAX0+MiMcgfxFKY4aweSGsjgFihe3XlMKNqmai/cugMrqTd3WNHM93V+K+AZL6Wu6tB5HmxRw== dependencies: "@braintree/sanitize-url" "^7.0.4" "@iconify/utils" "^2.1.33" @@ -3785,14 +3785,14 @@ mkdirp@^1.0.3: integrity sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw== mlly@^1.7.4: - version "1.7.4" - resolved "https://registry.yarnpkg.com/mlly/-/mlly-1.7.4.tgz#3d7295ea2358ec7a271eaa5d000a0f84febe100f" - integrity sha512-qmdSIPC4bDJXgZTCR7XosJiNKySV7O215tsPtDN9iEO/7q/76b/ijtgRu/+epFXSJhijtTCCGp3DWS549P3xKw== + version "1.8.0" + resolved "https://registry.yarnpkg.com/mlly/-/mlly-1.8.0.tgz#e074612b938af8eba1eaf43299cbc89cb72d824e" + integrity sha512-l8D9ODSRWLe2KHJSifWGwBqpTZXIXTeo8mlKjY+E2HAakaTeNpqAyBZ8GSqLzHgw4XmHmC8whvpjJNMbFZN7/g== dependencies: - acorn "^8.14.0" - pathe "^2.0.1" - pkg-types "^1.3.0" - ufo "^1.5.4" + acorn "^8.15.0" + pathe "^2.0.3" + pkg-types "^1.3.1" + ufo "^1.6.1" moo@^0.5.0: version "0.5.2" @@ -4140,7 +4140,7 @@ pinkie@^2.0.0: resolved 
"https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870" integrity sha512-MnUuEycAemtSaeFSjXKW/aroV7akBbY+Sv+RkyqFjgAe73F+MR0TBWKBRDkmfWq/HiFmdavfZ1G7h4SPZXaCSg== -pkg-types@^1.3.0: +pkg-types@^1.3.1: version "1.3.1" resolved "https://registry.yarnpkg.com/pkg-types/-/pkg-types-1.3.1.tgz#bd7cc70881192777eef5326c19deb46e890917df" integrity sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ== @@ -4149,10 +4149,10 @@ pkg-types@^1.3.0: mlly "^1.7.4" pathe "^2.0.1" -pkg-types@^2.0.1: - version "2.2.0" - resolved "https://registry.yarnpkg.com/pkg-types/-/pkg-types-2.2.0.tgz#049bf404f82a66c465200149457acf0c5fb0fb2d" - integrity sha512-2SM/GZGAEkPp3KWORxQZns4M+WSeXbC2HEvmOIJe3Cmiv6ieAJvdVhDldtHqM5J1Y7MrR1XhkBT/rMlhh9FdqQ== +pkg-types@^2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/pkg-types/-/pkg-types-2.3.0.tgz#037f2c19bd5402966ff6810e32706558cb5b5726" + integrity sha512-SIqCzDRg0s9npO5XQ3tNZioRY1uK06lA41ynBC1YmFTmnY6FjUjVt6s4LoADmwoig1qqD0oK8h1p/8mlMx8Oig== dependencies: confbox "^0.2.2" exsolve "^1.0.7" @@ -4293,10 +4293,10 @@ qs@6.14.0: dependencies: side-channel "^1.1.0" -quansync@^0.2.8: - version "0.2.10" - resolved "https://registry.yarnpkg.com/quansync/-/quansync-0.2.10.tgz#32053cf166fa36511aae95fc49796116f2dc20e1" - integrity sha512-t41VRkMYbkHyCYmOvx/6URnN80H7k4X0lLdBMGsz+maAwrJQYB1djpV6vHrQIBE0WBSGqhtEHrK9U3DWWH8v7A== +quansync@^0.2.11: + version "0.2.11" + resolved "https://registry.yarnpkg.com/quansync/-/quansync-0.2.11.tgz#f9c3adda2e1272e4f8cf3f1457b04cbdb4ee692a" + integrity sha512-AifT7QEbW9Nri4tAwR5M/uzpBuqfZf+zwaEM/QkzEjj7NBuFD2rBuy0K3dE+8wltbezDV7JMA0WfnCPYRSYbXA== queue-microtask@^1.2.2: version "1.2.3" @@ -4761,9 +4761,9 @@ spdx-expression-parse@^4.0.0: spdx-license-ids "^3.0.0" spdx-license-ids@^3.0.0: - version "3.0.21" - resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.21.tgz#6d6e980c9df2b6fc905343a3b2d702a6239536c3" - 
integrity sha512-Bvg/8F5XephndSK3JffaRqdT+gyhfqIPwDHpX80tJrF8QQRYMo8sNMeaZ2Dp5+jhwKnUmIOyFFQfHRkjJm5nXg== + version "3.0.22" + resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.22.tgz#abf5a08a6f5d7279559b669f47f0a43e8f3464ef" + integrity sha512-4PRT4nh1EImPbt2jASOKHX7PB7I+e4IWNLvkKFDxNhJlfjbYlleYQh285Z/3mPTHSAK/AvdMmw5BNNuYH8ShgQ== sprintf-js@~1.0.2: version "1.0.3" @@ -5046,9 +5046,9 @@ tldts@^6.1.32: tldts-core "^6.1.86" tmp@~0.2.3: - version "0.2.4" - resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.2.4.tgz#c6db987a2ccc97f812f17137b36af2b6521b0d13" - integrity sha512-UdiSoX6ypifLmrfQ/XfiawN6hkjSBpCjhKxxZcWlUUmoXLaCKQU0bx4HF/tdDK2uzRuchf1txGvrWBzYREssoQ== + version "0.2.5" + resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.2.5.tgz#b06bcd23f0f3c8357b426891726d16015abfd8f8" + integrity sha512-voyz6MApa1rQGUxT3E+BK7/ROe8itEx7vD8/HEvt4xwXucvQ5G5oeEiHkmHZJuBO21RpOf+YYm9MOivj709jow== to-buffer@^1.1.1: version "1.2.1" @@ -5198,21 +5198,21 @@ typed-array-length@^1.0.7: reflect.getprototypeof "^1.0.6" typescript-eslint@^8.32.1: - version "8.38.0" - resolved "https://registry.yarnpkg.com/typescript-eslint/-/typescript-eslint-8.38.0.tgz#e73af7618139f07b16e2fae715eedaabb41ee8b0" - integrity sha512-FsZlrYK6bPDGoLeZRuvx2v6qrM03I0U0SnfCLPs/XCCPCFD80xU9Pg09H/K+XFa68uJuZo7l/Xhs+eDRg2l3hg== + version "8.42.0" + resolved "https://registry.yarnpkg.com/typescript-eslint/-/typescript-eslint-8.42.0.tgz#e92f6c88569e202b361d5ca1655ad8e33a0554ea" + integrity sha512-ozR/rQn+aQXQxh1YgbCzQWDFrsi9mcg+1PM3l/z5o1+20P7suOIaNg515bpr/OYt6FObz/NHcBstydDLHWeEKg== dependencies: - "@typescript-eslint/eslint-plugin" "8.38.0" - "@typescript-eslint/parser" "8.38.0" - "@typescript-eslint/typescript-estree" "8.38.0" - "@typescript-eslint/utils" "8.38.0" + "@typescript-eslint/eslint-plugin" "8.42.0" + "@typescript-eslint/parser" "8.42.0" + "@typescript-eslint/typescript-estree" "8.42.0" + "@typescript-eslint/utils" "8.42.0" typescript@^5.8.3: - version "5.8.3" - resolved 
"https://registry.yarnpkg.com/typescript/-/typescript-5.8.3.tgz#92f8a3e5e3cf497356f4178c34cd65a7f5e8440e" - integrity sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ== + version "5.9.2" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.9.2.tgz#d93450cddec5154a2d5cabe3b8102b83316fb2a6" + integrity sha512-CWBzXQrc/qOkhidw1OzBTQuYRbfyxDXJMVJ1XNwUHGROVmuaeiEm3OslpZ1RV96d7SKKjZKrSJu3+t/xlw3R9A== -ufo@^1.5.4: +ufo@^1.6.1: version "1.6.1" resolved "https://registry.yarnpkg.com/ufo/-/ufo-1.6.1.tgz#ac2db1d54614d1b22c1d603e3aef44a85d8f146b" integrity sha512-9a4/uxlTWJ4+a5i0ooc1rU7C7YOw3wT+UGqdeNNHWnOF9qcMBgLRS+4IYUqbczewFx4mLEig6gawh7X6mFlEkA== @@ -5235,10 +5235,10 @@ unbzip2-stream@^1.0.9: buffer "^5.2.1" through "^2.3.8" -undici-types@~7.8.0: - version "7.8.0" - resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-7.8.0.tgz#de00b85b710c54122e44fbfd911f8d70174cd294" - integrity sha512-9UJ2xGDvQ43tYyVMpuHlsgApydB8ZKfVYTsLDhXkFL/6gfkp+U8xTGdh8pMJv1SpZna0zxG1DwsKZsreLbXBxw== +undici-types@~7.10.0: + version "7.10.0" + resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-7.10.0.tgz#4ac2e058ce56b462b056e629cc6a02393d3ff350" + integrity sha512-t5Fy/nfn+14LuOc2KNYg75vZqClpAiqscVvMygNnlsHBFpSXdJaYtXMcdNLpl/Qvc3P2cB3s6lOV51nqsFq4ag== unique-string@^3.0.0: version "3.0.0" @@ -5508,9 +5508,9 @@ yallist@^4.0.0: integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== yaml@^2.4.2: - version "2.8.0" - resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.8.0.tgz#15f8c9866211bdc2d3781a0890e44d4fa1a5fff6" - integrity sha512-4lLa/EcQCB0cJkyts+FpIRx5G/llPxfP6VQU5KByHEhLxY3IJCH0f0Hy1MHI8sClTvsIb8qwRJ6R/ZdlDJ/leQ== + version "2.8.1" + resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.8.1.tgz#1870aa02b631f7e8328b93f8bc574fac5d6c4d79" + integrity sha512-lcYcMxX2PO9XMGvAJkJ3OsNMw+/7FKes7/hgerGUYWIoWu5j/+YQqcZr5JnPZWzOsEBgMbSbiSTn/dv/69Mkpw== 
yargs-parser@^21.1.1: version "21.1.1" From f1c5a0b408f1f390da2b586e990baaa3690cdd90 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 2 Sep 2025 14:17:41 -0500 Subject: [PATCH 145/179] hotfix: minor change to invalidate JS --- assets/js/theme.js | 1 + 1 file changed, 1 insertion(+) diff --git a/assets/js/theme.js b/assets/js/theme.js index 8588d44a9..cdcfedc66 100644 --- a/assets/js/theme.js +++ b/assets/js/theme.js @@ -6,6 +6,7 @@ const PROPS = { style_domain: 'docs.influxdata.com', }; +// Get the user's theme preference function getPreferredTheme() { return `${getPreference(PROPS.style_preference_name)}-theme`; } From 44a2c95518e784f2d4ba5905d50516868897c3b0 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 2 Sep 2025 16:28:44 -0500 Subject: [PATCH 146/179] fix(askai): adjust X modal position for desktop widths --- assets/js/ask-ai.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/assets/js/ask-ai.js b/assets/js/ask-ai.js index f315711a2..9c145bab1 100644 --- a/assets/js/ask-ai.js +++ b/assets/js/ask-ai.js @@ -46,7 +46,7 @@ function initializeChat({ onChatLoad, chatAttributes }) { modalSize: '640px', modalWithOverlay: 'false', modalInnerMaxWidth: '800px', - modalXOffset: 'calc(100% - 800px - .5rem)', + modalXOffset: 'calc(100% - 800px - (40rem * var(--mantine-scale))', modalYOffset: '10vh', userAnalyticsFingerprintEnabled: 'true', fontFamily: 'Proxima Nova, sans-serif', From 69068a64bf508dd0026dd93d253d2991918344c0 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Wed, 3 Sep 2025 17:38:03 -0500 Subject: [PATCH 147/179] fix(askai): Ask AI modal position and font:Removes Ask AI modal style overrides to correct positioning in the window. For API reference docs, decreases the trigger button size and fixes the button and modal font. 
--- api-docs/template.hbs | 18 ++++++++++-------- assets/js/ask-ai.js | 2 -- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/api-docs/template.hbs b/api-docs/template.hbs index b98a20687..33868cffb 100755 --- a/api-docs/template.hbs +++ b/api-docs/template.hbs @@ -31,6 +31,9 @@ padding: 0; margin: 0; } + #kapa-widget-container { + font-family: 'Proxima Nova', sans-serif; + } {{#unless disableGoogleFont}} // Load Kapa AI widget after DOM content is loaded document.addEventListener('DOMContentLoaded', function() { + const fontFamily = 'Proxima Nova, sans-serif'; const askAI = document.createElement('script'); askAI.type = 'text/javascript'; askAI.async = true; @@ -53,8 +57,8 @@ askAI.setAttribute('data-project-logo', '/img/influx-logo-cubo-white.png'); askAI.setAttribute('data-modal-disclaimer', 'This AI can access [documentation for InfluxDB, clients, and related tools](https://docs.influxdata.com). Information you submit is used in accordance with our [Privacy Policy](https://www.influxdata.com/legal/privacy-policy/).'); askAI.setAttribute('data-modal-example-questions', 'How do I write and query data with the {{title}}?, How do I use client libraries for the {{title}}?'); - askAI.setAttribute('data-button-height', '65px'); - askAI.setAttribute('data-button-width', '65px'); + askAI.setAttribute('data-button-height', '50px'); + askAI.setAttribute('data-button-width', '50px'); if (window.matchMedia('(max-width: 800px)').matches) { // For mobile devices (smaller than 600px) askAI.setAttribute('data-button-position-bottom', '130px'); @@ -62,25 +66,23 @@ // For larger screens askAI.setAttribute('data-button-position-bottom', '20px'); } + askAI.setAttribute('data-button-text-font-family', fontFamily); + askAI.setAttribute('data-button-text-font-size', '12.8px'); askAI.setAttribute('data-button-text', 'Ask AI'); askAI.setAttribute('data-conversation-button-icons-only', 'true'); - askAI.setAttribute('data-font-family', 'Proxima Nova, sans-serif'); + 
askAI.setAttribute('data-font-family', fontFamily); askAI.setAttribute('data-modal-example-questions-col-span', '8'); askAI.setAttribute('data-modal-full-screen-on-mobile', 'true'); askAI.setAttribute('data-modal-header-bg-color', '#d30971'); askAI.setAttribute('data-modal-header-border-bottom', 'none'); askAI.setAttribute('data-modal-header-padding', '.5rem'); askAI.setAttribute('data-modal-header-text-color', '#ffffff'); - askAI.setAttribute('data-modal-x-offset', '0'); + askAI.setAttribute('data-modal-size', '640px') askAI.setAttribute('data-modal-y-offset', '0'); askAI.setAttribute('data-modal-with-overlay', 'false'); askAI.setAttribute('data-modal-inner-flex-direction', 'column'); askAI.setAttribute('data-modal-inner-justify-content', 'end'); - askAI.setAttribute('data-modal-inner-max-width', '600px'); - askAI.setAttribute('data-modal-inner-position-left', 'auto'); - askAI.setAttribute('data-modal-inner-position-right', '50px'); askAI.setAttribute('data-modal-inner-position-bottom', 'calc(2.5rem + 25px)'); - askAI.setAttribute('data-modal-size', '640px'); askAI.setAttribute('data-modal-title-color', '#fff'); askAI.setAttribute('data-modal-title-font-size', '1.25rem'); askAI.setAttribute('data-modal-lock-scroll', 'false'); diff --git a/assets/js/ask-ai.js b/assets/js/ask-ai.js index 9c145bab1..120047029 100644 --- a/assets/js/ask-ai.js +++ b/assets/js/ask-ai.js @@ -45,8 +45,6 @@ function initializeChat({ onChatLoad, chatAttributes }) { modalOverrideOpenClassAskAi: 'ask-ai-open', modalSize: '640px', modalWithOverlay: 'false', - modalInnerMaxWidth: '800px', - modalXOffset: 'calc(100% - 800px - (40rem * var(--mantine-scale))', modalYOffset: '10vh', userAnalyticsFingerprintEnabled: 'true', fontFamily: 'Proxima Nova, sans-serif', From 7b435690494e3485d2d525a2bbe70dd1a65ab1b2 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Wed, 3 Sep 2025 21:36:40 -0500 Subject: [PATCH 148/179] Update api-docs/template.hbs Co-authored-by: Copilot 
<175728472+Copilot@users.noreply.github.com> --- api-docs/template.hbs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-docs/template.hbs b/api-docs/template.hbs index 33868cffb..afed6e8be 100755 --- a/api-docs/template.hbs +++ b/api-docs/template.hbs @@ -77,7 +77,7 @@ askAI.setAttribute('data-modal-header-border-bottom', 'none'); askAI.setAttribute('data-modal-header-padding', '.5rem'); askAI.setAttribute('data-modal-header-text-color', '#ffffff'); - askAI.setAttribute('data-modal-size', '640px') + askAI.setAttribute('data-modal-size', '640px'); askAI.setAttribute('data-modal-y-offset', '0'); askAI.setAttribute('data-modal-with-overlay', 'false'); askAI.setAttribute('data-modal-inner-flex-direction', 'column'); From f2bed3b5d86e328c5138b20a6a587f0f69211ce6 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Thu, 4 Sep 2025 12:27:26 -0500 Subject: [PATCH 149/179] docs(influxdb3/explorer): improve installation documentation structure and flow - Include configuration property definitions with documentation links - Simplify Quick Start to 3 steps, allowing UI-based configuration - Add Docker Compose alternatives for all Docker run commands - Add prod vs dev examples, use --pull flags for automatic updates in prod examples --- content/influxdb3/explorer/install.md | 726 +++++++++++++++++--------- 1 file changed, 480 insertions(+), 246 deletions(-) diff --git a/content/influxdb3/explorer/install.md b/content/influxdb3/explorer/install.md index 9478137ec..c84147417 100644 --- a/content/influxdb3/explorer/install.md +++ b/content/influxdb3/explorer/install.md @@ -11,301 +11,535 @@ weight: 2 Use [Docker](https://docker.com) to install and run **InfluxDB 3 Explorer**. 
-- [Run the InfluxDB 3 Explorer Docker container](#run-the-influxdb-3-explorer-docker-container) -- [Enable TLS/SSL (HTTPS)](#enable-tlsssl-https) -- [Pre-configure InfluxDB connection settings](#pre-configure-influxdb-connection-settings) -- [Run in query or admin mode](#run-in-query-or-admin-mode) - - [Run in query mode](#run-in-query-mode) - - [Run in admin mode](#run-in-admin-mode) -- [Environment Variables](#environment-variables) -- [Volume Reference](#volume-reference) -- [Exposed Ports](#exposed-ports) - - [Custom port mapping](#custom-port-mapping) +- [Quick start](#quick-start) +- [Installation methods](#installation-methods) + - [Docker run](#docker-run) + - [Docker Compose](#docker-compose) +- [Configuration options](#configuration-options) + - [Persist data across restarts](#persist-data-across-restarts) + - [Pre-configure InfluxDB connections](#pre-configure-influxdb-connections) + - [Enable TLS/SSL (HTTPS)](#enable-tlsssl-https) + - [Choose operational mode](#choose-operational-mode) +- [Advanced configuration](#advanced-configuration) + - [Environment variables](#environment-variables) + - [Volume reference](#volume-reference) + - [Port reference](#port-reference) +- [Complete examples](#complete-examples) -## Run the InfluxDB 3 Explorer Docker container +## Quick start -1. **Install Docker** +Get {{% product-name %}} running in minutes: - If you haven't already, install [Docker](https://docs.docker.com/engine/) or - [Docker Desktop](https://docs.docker.com/desktop/). +1. **Run the Explorer container:** -2. **Pull the {{% product-name %}} Docker image** + ```bash + docker run --detach \ + --name influxdb3-explorer \ + --publish 8888:80 \ + influxdata/influxdb3-ui:{{% latest-patch %}} + ``` - ```bash - influxdata/influxdb3-ui:{{% latest-patch %}} - ``` +2. **Access the Explorer UI at ** -3. 
**Create local directories** _(optional)_ - - {{% product-name %}} can mount the following directories on your local - machine: - - | Directory | Description | Permissions | - | :--------- | :------------------------------------------------------------------------------------------------ | :---------: | - | `./db` | Stores {{% product-name %}} application data | 700 | - | `./config` | Stores [pre-configured InfluxDB connection settings](#pre-configure-influxdb-connection-settings) | 755 | - | `./ssl` | Stores TLS/SSL certificates _(Required when [using TLS/SSL](#enable-tlsssl-https))_ | 755 | - - > [!Important] - > If you don't create and mount a local `./db` directory, {{% product-name %}} - > stores application data in the container's file system. - > This data will be lost when the container is deleted. - - To create these directories with the appropriate permissions, run the - following commands from your current working directory: - - ```bash - mkdir -m 700 ./db - mkdir -m 755 ./config - mkdir -m 755 ./ssl - ``` - -4. **Run the {{% product-name %}} container** - - Use the `docker run` command to start the {{% product-name %}} container. - Include the following: - - - Port mappings: - - `8888` to `80` (or `443` if using TLS/SSL) - - `8889` to `8888` - - Mounted volumes: - - `$(pwd)/db:/db:rw` - - `$(pwd)/config:/app-root/config:ro` - - `$(pwd)/ssl:/etc/nginx/ssl:ro` - - Any of the available [environment variables](#environment-variables) - - > [!Note] - > To persist sessions across container restarts, see the detailed instructions - > on setting the [`SESSION_SECRET_KEY` environment variable](#session_secret_key). - - ```bash - docker run --detach \ - --name influxdb3-explorer \ - --publish 8888:80 \ - --publish 8889:8888 \ - --volume $(pwd)/config:/app-root/config:ro \ - --volume $(pwd)/db:/db:rw \ - --volume $(pwd)/ssl:/etc/nginx/ssl:ro \ - influxdata/influxdb3-ui:{{% latest-patch %}} \ - --mode=admin - ``` - -5. 
**Access the {{% product-name %}} user interface (UI) at **. +3. **[Configure your InfluxDB connection in the UI](/influxdb3/explorer/get-started)** --- -## Enable TLS/SSL (HTTPS) +## Installation methods -To enable TLS/SSL, mount valid certificate and key files into the container: +### Prerequisites -1. **Place your TLS/SSL certificate files your local `./ssl` directory** +Install [Docker](https://docs.docker.com/engine/) or [Docker Desktop](https://docs.docker.com/desktop/) if you haven't already. - Required files: +### Basic setup - - Certificate: `server.crt` or `fullchain.pem` - - Private key: `server.key` +> [!Tip] +> To get the latest updates, run the following command before starting the container: +> +> ```bash +> docker pull influxdata/influxdb3-ui:{{% latest-patch %}} +> ``` -2. **When running your container, mount the SSL directory and map port 443 to port 8888** - - Include the following options when running your Docker container: +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Docker run](#) +[Docker Compose](#) +{{% /code-tabs %}} - ```sh - --volume $(pwd)/ssl:/etc/nginx/ssl:ro \ - --publish 8888:443 - ``` +{{% code-tab-content %}} +```bash +docker run --detach \ + --name influxdb3-explorer \ + --publish 8888:80 \ + influxdata/influxdb3-ui:{{% latest-patch %}} +``` +{{% /code-tab-content %}} -The nginx web server automatically uses certificate files when they are present -in the mounted path. +{{% code-tab-content %}} +```yaml +# docker-compose.yml +version: '3.8' -> [!Note] -> You can use a custom location for certificate and key files. -> Use the [`SSL_CERT_PATH`](#ssl_cert_path) and [`SSL_KEY_PATH`](#ssl_key_path) -> environment variables to identify the custom location. -> Also update the SSL directory volume mount path inside the container. - - ---- - -## Pre-configure InfluxDB connection settings - -You can predefine InfluxDB connection settings using a `config.json` file. 
- -{{% code-placeholders "INFLUXDB3_HOST|INFLUXDB_DATABASE_NAME|INFLUXDB3_AUTH_TOKEN|INFLUXDB3_SERVER_NAME" %}} - -1. **Create a `config.json` file in your local `./config` directory** - - ```json - { - "DEFAULT_INFLUX_SERVER": "INFLUXDB3_HOST", - "DEFAULT_INFLUX_DATABASE": "INFLUXDB_DATABASE_NAME", - "DEFAULT_API_TOKEN": "INFLUXDB3_AUTH_TOKEN", - "DEFAULT_SERVER_NAME": "INFLUXDB3_SERVER_NAME" - } - ``` - - > [!Important] - > If connecting to an InfluxDB 3 Core or Enterprise instance running on - > localhost (outside of the container), use the internal Docker network to - > in your InfluxDB 3 host value--for example: - > - > ```txt - > http://host.docker.internal:8181 - > ``` - -2. **Mount the configuration directory** - - Include the following option when running your Docker container: - - ```sh - --volume $(pwd)/config:/app-root/config:ro - ``` - -{{% /code-placeholders %}} - -These settings will be used as defaults when the container starts. - ---- - -## Run in query or admin mode - -{{% product-name %}} has the following operational modes: - -- **Query mode (default):** Read-only UI and query interface -- **Admin mode:** Full UI and API access for administrators - -You can control the operational mode using the `--mode=` option in your -`docker run` command (after the image name). - -### Run in query mode {note="(default)"} - -```sh -docker run \ - ... - --mode=query +services: + explorer: + image: influxdata/influxdb3-ui:{{% latest-patch %}} + container_name: influxdb3-explorer + ports: + - "8888:80" + volumes: + - ./config:/app-root/config:ro + restart: unless-stopped ``` -### Run in admin mode +Start the container: -```sh -docker run \ - ... 
+```bash +docker-compose up -d +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +### Production setup + +For production deployments with persistence, admin mode, and automatic image updates: + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Docker run](#) +[Docker Compose](#) +{{% /code-tabs %}} + +{{% code-tab-content %}} +```bash +docker run --detach \ + --name influxdb3-explorer \ + --pull always \ + --publish 8888:80 \ + --volume $(pwd)/db:/db:rw \ + --volume $(pwd)/config:/app-root/config:ro \ + --env SESSION_SECRET_KEY=$(openssl rand -hex 32) \ + --restart unless-stopped \ + influxdata/influxdb3-ui:{{% latest-patch %}} \ --mode=admin ``` +{{% /code-tab-content %}} -If `--mode` is not set, the container defaults to query mode. +{{% code-tab-content %}} +```yaml +# docker-compose.yml +version: '3.8' + +services: + explorer: + image: influxdata/influxdb3-ui:{{% latest-patch %}} + container_name: influxdb3-explorer + pull_policy: always + command: ["--mode=admin"] + ports: + - "8888:80" + volumes: + - ./db:/db:rw + - ./config:/app-root/config:ro + - ./ssl:/etc/nginx/ssl:ro + environment: + SESSION_SECRET_KEY: ${SESSION_SECRET_KEY:-changeme123456789012345678901234} + restart: unless-stopped +``` + +Create a `.env` file that contains the following: + +```bash +SESSION_SECRET_KEY=your_32_char_hex_string_here +``` + +Start the container: + +```bash +docker-compose up -d +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} --- -## Environment Variables +## Configuration options -Use the following environment variables to customize {{% product-name %}} settings -in your container. +### Persist data across restarts -- [DATABASE_URL](#database_url) -- [SESSION_SECRET_KEY](#session_secret_key) -- [SSL_CERT_PATH](#ssl_cert_path) -- [SSL_KEY_PATH](#ssl_key_path) +{{% product-name %}} stores application data in a SQLite database. To persist this data across container restarts: -### DATABASE_URL +1. 
**Create a local directory:** -Path to SQLite DB inside container. The default is `/db/sqlite.db`. + ```bash + mkdir -m 700 ./db + ``` -{{< expand-wrapper >}} -{{% expand "View `DATABASE_URL` example" %}} - +2. **Mount the directory when running the container:** -```bash -docker run --detach \ - # ... - --volume $(pwd)/db:/custom/db-path:rw \ - --env DATABASE_URL=/custom/db-path/sqlite.db \ - influxdata/influxdb3-ui:{{% latest-patch %}} -``` -{{% /expand %}} -{{< /expand-wrapper >}} + {{< code-tabs-wrapper >}} + {{% code-tabs %}} + [Docker](#) + [Docker Compose](#) + {{% /code-tabs %}} -### SESSION_SECRET_KEY + {{% code-tab-content %}} + ```bash + docker run --detach \ + --name influxdb3-explorer \ + --publish 8888:80 \ + --volume $(pwd)/db:/db:rw \ + influxdata/influxdb3-ui:{{% latest-patch %}} + ``` + {{% /code-tab-content %}} -Specifies the secret key for session management. If none is provided, Explorer -uses a random 32-byte hex string as the session secret key. - -{{< expand-wrapper >}} -{{% expand "View `SESSION_SECRET_KEY` example" %}} - - -```bash -docker run --detach \ - # ... - --env SESSION_SECRET_KEY=xxX0Xx000xX0XxxxX0Xx000xX0XxX00x \ - influxdata/influxdb3-ui:{{% latest-patch %}} -``` -{{% /expand %}} -{{< /expand-wrapper >}} + {{% code-tab-content %}} + ```yaml + version: '3.8' + + services: + explorer: + image: influxdata/influxdb3-ui:{{% latest-patch %}} + container_name: influxdb3-explorer + ports: + - "8888:80" + volumes: + - ./db:/db:rw + restart: unless-stopped + ``` + {{% /code-tab-content %}} + {{< /code-tabs-wrapper >}} > [!Important] -> #### Always set SESSION_SECRET_KEY in production +> Without a mounted `./db` directory, application data is lost when the container is deleted. + +### Pre-configure InfluxDB connections + +Instead of configuring connections through the UI, you can pre-define connection settings using a `config.json` file. 
This is useful for: +- Automated deployments +- Shared team configurations +- Quick setup for known environments + +1. **Create a `config.json` file:** + + ```bash + mkdir -p config + cat > config/config.json << 'EOF' + { + "DEFAULT_INFLUX_SERVER": "http://host.docker.internal:8181", + "DEFAULT_INFLUX_DATABASE": "mydb", + "DEFAULT_API_TOKEN": "your-token-here", + "DEFAULT_SERVER_NAME": "Local InfluxDB 3" + } + EOF + ``` + + Customize the following properties for your InfluxDB 3 instance: + + - **`DEFAULT_INFLUX_SERVER`**: your [InfluxDB 3 Core](/influxdb3/core/reference/config-options/#http-bind) or [Enterprise](/influxdb3/enterprise/reference/config-options/#http-bind) server URL + - **`DEFAULT_INFLUX_DATABASE`**: the name of your [InfluxDB 3 Core](/influxdb3/core/admin/databases/) or [Enterprise](/influxdb3/enterprise/admin/databases/) database + - **`DEFAULT_API_TOKEN`**: your [InfluxDB 3 Core](/influxdb3/core/admin/tokens/) or [Enterprise](/influxdb3/enterprise/admin/tokens/) authorization token with the necessary permissions to access your server + - **`DEFAULT_SERVER_NAME`**: a display name (only used by Explorer) for your [InfluxDB 3 Core](/influxdb3/core/get-started/setup/#start-influxdb) or [Enterprise](/influxdb3/enterprise/get-started/setup/#start-influxdb) server + + > [!Note] + > If connecting to a local, _non-Docker_ instance, use `host.docker.internal` as your server host--for example: + > + > ```txt + > "DEFAULT_INFLUX_SERVER": "http://host.docker.internal:8181" + > ``` + > + > `host.docker.internal` allows the Docker container to connect to services running on your host machine. + > For more information, see the [Docker documentation](https://docs.docker.com/desktop/features/networking). + +2. 
**Mount the configuration directory:** + + {{< code-tabs-wrapper >}} + {{% code-tabs %}} + [Docker](#) + [Docker Compose](#) + {{% /code-tabs %}} + + {{% code-tab-content %}} + ```bash + docker run --detach \ + --name influxdb3-explorer \ + --publish 8888:80 \ + --volume $(pwd)/config:/app-root/config:ro \ + influxdata/influxdb3-ui:{{% latest-patch %}} + ``` + {{% /code-tab-content %}} + + {{% code-tab-content %}} + ```yaml + version: '3.8' + + services: + explorer: + image: influxdata/influxdb3-ui:{{% latest-patch %}} + container_name: influxdb3-explorer + ports: + - "8888:80" + volumes: + - ./config:/app-root/config:ro + restart: unless-stopped + ``` + {{% /code-tab-content %}} + {{< /code-tabs-wrapper >}} + +### Enable TLS/SSL (HTTPS) + +To enable TLS/SSL for secure connections: + +1. **Create SSL directory and add certificate files:** + + ```bash + mkdir -m 755 ./ssl + # Copy your certificate files to the ssl directory + cp /path/to/server.crt ./ssl/ + cp /path/to/server.key ./ssl/ + ``` + + Required files: + - Certificate: `server.crt` or `fullchain.pem` + - Private key: `server.key` + +2. **Run the container with SSL enabled:** + + {{< code-tabs-wrapper >}} + {{% code-tabs %}} + [Docker](#) + [Docker Compose](#) + {{% /code-tabs %}} + + {{% code-tab-content %}} + ```bash + docker run --detach \ + --name influxdb3-explorer \ + --publish 8888:443 \ + --volume $(pwd)/ssl:/etc/nginx/ssl:ro \ + influxdata/influxdb3-ui:{{% latest-patch %}} + ``` + {{% /code-tab-content %}} + + {{% code-tab-content %}} + ```yaml + version: '3.8' + + services: + explorer: + image: influxdata/influxdb3-ui:{{% latest-patch %}} + container_name: influxdb3-explorer + ports: + - "8888:443" + volumes: + - ./ssl:/etc/nginx/ssl:ro + restart: unless-stopped + ``` + {{% /code-tab-content %}} + {{< /code-tabs-wrapper >}} + +3. **Access the Explorer UI at ** + +> [!Note] +> The nginx web server automatically detects and uses certificate files in the mounted path. 
+ +### Choose operational mode + +{{% product-name %}} supports two operational modes: + +- **Query mode** (default): Read-only UI for querying data +- **Admin mode**: Full UI with administrative capabilities + +Set the mode using the `--mode` parameter: + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Docker](#) +[Docker Compose](#) +{{% /code-tabs %}} + +{{% code-tab-content %}} +```bash +# Query mode (default) +docker run --detach \ + --name influxdb3-explorer \ + --publish 8888:80 \ + influxdata/influxdb3-ui:{{% latest-patch %}} \ + --mode=query + +# Admin mode +docker run --detach \ + --name influxdb3-explorer \ + --publish 8888:80 \ + influxdata/influxdb3-ui:{{% latest-patch %}} \ + --mode=admin +``` +{{% /code-tab-content %}} + +{{% code-tab-content %}} +```yaml +version: '3.8' + +services: + explorer: + image: influxdata/influxdb3-ui:{{% latest-patch %}} + container_name: influxdb3-explorer + # For query mode (default), omit the command + # For admin mode, add: + command: ["--mode=admin"] + ports: + - "8888:80" + restart: unless-stopped +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +--- + +## Advanced configuration + +### Environment variables + +| Variable | Default | Description | +|----------|---------|-------------| +| `SESSION_SECRET_KEY` | _(random)_ | Secret key for session management. **Set this in production to persist sessions across restarts.** | +| `DATABASE_URL` | `/db/sqlite.db` | Path to SQLite database inside container | +| `SSL_CERT_PATH` | `/etc/nginx/ssl/cert.pem` | Path to SSL certificate file | +| `SSL_KEY_PATH` | `/etc/nginx/ssl/key.pem` | Path to SSL private key file | + +> [!Important] +> Always set `SESSION_SECRET_KEY` in production to persist user sessions across container restarts. +> Enter the following command to generate a secure key: > -> When you restart the container, {{% product-name %}} generates a new key if -> not explicitly set. 
For production use cases, always set the `SESSION_SECRET_KEY` -> environment variable to persist sessions across restarts. +> ```bash +> openssl rand -hex 32 +> ``` -### SSL_CERT_PATH +### Volume reference -Defines the path to the SSL certificate file inside the container. -Default is `/etc/nginx/ssl/cert.pem`. +| Container Path | Purpose | Permissions | Required | +|----------------|---------|-------------|----------| +| `/db` | SQLite database storage | 700 | No (but recommended) | +| `/app-root/config` | Connection configuration | 755 | No | +| `/etc/nginx/ssl` | TLS/SSL certificates | 755 | Only for HTTPS | -{{< expand-wrapper >}} -{{% expand "View `SSL_CERT_PATH` example" %}} - +### Port reference + +| Container Port | Protocol | Purpose | Common Host Mapping | +|----------------|----------|---------|---------------------| +| 80 | HTTP | Web UI (unencrypted) | 8888 | +| 443 | HTTPS | Web UI (encrypted) | 8888 | + +--- + +## Complete examples + +### Production setup with all features + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Docker](#) +[Docker Compose](#) +{{% /code-tabs %}} + +{{% code-tab-content %}} +```bash +# Create required directories +mkdir -m 700 ./db +mkdir -m 755 ./config ./ssl + +# Generate session secret +export SESSION_SECRET=$(openssl rand -hex 32) + +# Create configuration +cat > config/config.json << 'EOF' +{ + "DEFAULT_INFLUX_SERVER": "http://host.docker.internal:8181", + "DEFAULT_INFLUX_DATABASE": "production", + "DEFAULT_API_TOKEN": "your-production-token", + "DEFAULT_SERVER_NAME": "Production InfluxDB 3" +} +EOF + +# Run Explorer with all features +docker run --detach \ + --name influxdb3-explorer \ + --pull always \ + --publish 8888:443 \ + --volume $(pwd)/db:/db:rw \ + --volume $(pwd)/config:/app-root/config:ro \ + --volume $(pwd)/ssl:/etc/nginx/ssl:ro \ + --env SESSION_SECRET_KEY=$SESSION_SECRET \ + --restart unless-stopped \ + influxdata/influxdb3-ui:{{% latest-patch %}} \ + --mode=admin +``` +{{% /code-tab-content %}} + +{{% 
code-tab-content %}} +```yaml +# docker-compose.yml +version: '3.8' + +services: + explorer: + image: influxdata/influxdb3-ui:{{% latest-patch %}} + container_name: influxdb3-explorer + pull_policy: always + command: ["--mode=admin"] + ports: + - "8888:443" + volumes: + - ./db:/db:rw + - ./config:/app-root/config:ro + - ./ssl:/etc/nginx/ssl:ro + environment: + SESSION_SECRET_KEY: ${SESSION_SECRET_KEY} + restart: unless-stopped +``` + +Create a `.env` file that contains the following: ```bash -docker run --detach \ - # ... - --volume $(pwd)/ssl:/custom/ssl:ro \ - --env SSL_CERT_PATH=/custom/ssl/cert.pem \ - influxdata/influxdb3-ui:{{% latest-patch %}} +SESSION_SECRET_KEY=your_32_char_hex_string_here ``` -{{% /expand %}} -{{< /expand-wrapper >}} -### SSL_KEY_PATH - -Defines the path to the SSL private key file inside the container. -Default is `/etc/nginx/ssl/key.pem`. - -{{< expand-wrapper >}} -{{% expand "View `SSL_KEY_PATH` example" %}} - +Start the container: ```bash -docker run --detach \ - # ... 
- --volume $(pwd)/ssl:/custom/ssl:ro \ - --env SSL_KEY_PATH=/custom/ssl/key.pem \ +docker-compose up -d +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +### Development setup (minimal) + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Docker](#) +[Docker Compose](#) +{{% /code-tabs %}} + +{{% code-tab-content %}} +```bash +docker run --rm \ + --name influxdb3-explorer-dev \ + --publish 8888:80 \ influxdata/influxdb3-ui:{{% latest-patch %}} ``` -{{% /expand %}} -{{< /expand-wrapper >}} +{{% /code-tab-content %}} -## Volume Reference +{{% code-tab-content %}} +```yaml +# docker-compose.yml +version: '3.8' -| Container Path | Purpose | Host Example | -|----------------------|------------------------------|----------------------------| -| `/db` | SQLite DB storage | `./db` | -| `/app-root/config` | JSON config for defaults | `./config` | -| `/etc/nginx/ssl` | SSL certs for HTTPS | `./ssl` | - -## Exposed Ports - -| Port | Protocol | Purpose | -|------|----------|-------------------------| -| 80 | HTTP | Web access (unencrypted) | -| 443 | HTTPS | Web access (encrypted) | - -### Custom port mapping - -```sh -# Map ports to custom host values ---publish 8888:80 --publish 8443:443 +services: + explorer: + image: influxdata/influxdb3-ui:{{% latest-patch %}} + container_name: influxdb3-explorer-dev + ports: + - "8888:80" ``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} From 41ce560079da4f77bfeaf0ffcbd9c82ea1ec6115 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Thu, 4 Sep 2025 16:40:23 -0500 Subject: [PATCH 150/179] fix: remove link fragements --- content/influxdb3/explorer/install.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/content/influxdb3/explorer/install.md b/content/influxdb3/explorer/install.md index c84147417..e555511a2 100644 --- a/content/influxdb3/explorer/install.md +++ b/content/influxdb3/explorer/install.md @@ -13,8 +13,6 @@ Use [Docker](https://docker.com) to install and run **InfluxDB 3 Explorer**. 
- [Quick start](#quick-start) - [Installation methods](#installation-methods) - - [Docker run](#docker-run) - - [Docker Compose](#docker-compose) - [Configuration options](#configuration-options) - [Persist data across restarts](#persist-data-across-restarts) - [Pre-configure InfluxDB connections](#pre-configure-influxdb-connections) From 99c515a1b8f02c70075c29f523bb3b290f83ddb0 Mon Sep 17 00:00:00 2001 From: peterbarnett03 Date: Fri, 5 Sep 2025 09:58:30 -0400 Subject: [PATCH 151/179] Clean up config-options.md --- .../shared/influxdb3-cli/config-options.md | 22 ------------------- 1 file changed, 22 deletions(-) diff --git a/content/shared/influxdb3-cli/config-options.md b/content/shared/influxdb3-cli/config-options.md index be8ce2e7e..5baaea1c7 100644 --- a/content/shared/influxdb3-cli/config-options.md +++ b/content/shared/influxdb3-cli/config-options.md @@ -1972,25 +1972,3 @@ Specifies the TCP listener file path for admin token recovery operations. | influxdb3 serve option | Environment variable | | :---------------------------------------------- | :-------------------------------------------------------- | | `--admin-token-recovery-tcp-listener-file-path` | `INFLUXDB3_ADMIN_TOKEN_RECOVERY_TCP_LISTENER_FILE_PATH` | - -{{% show-in "enterprise" %}} ---- - -### Experimental Features - -- [use-pacha-tree](#use-pacha-tree) - -#### use-pacha-tree - -Enables the experimental PachaTree storage engine for improved performance. - -> [!Warning] -> This is an experimental feature and should not be used in production environments. 
- -**Default:** `false` - -| influxdb3 serve option | Environment variable | -| :---------------------- | :------------------------------------- | -| `--use-pacha-tree` | `INFLUXDB3_ENTERPRISE_USE_PACHA_TREE` | - -{{% /show-in %}} \ No newline at end of file From 3f9b54541b148d39e9a747ce72d763c88fb2eda2 Mon Sep 17 00:00:00 2001 From: Peter Barnett Date: Fri, 5 Sep 2025 10:07:07 -0400 Subject: [PATCH 152/179] fix: remove link --- content/shared/influxdb3-cli/config-options.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/content/shared/influxdb3-cli/config-options.md b/content/shared/influxdb3-cli/config-options.md index 5baaea1c7..189704e57 100644 --- a/content/shared/influxdb3-cli/config-options.md +++ b/content/shared/influxdb3-cli/config-options.md @@ -175,10 +175,6 @@ influxdb3 serve - [TCP Listeners](#tcp-listeners) - [tcp-listener-file-path](#tcp-listener-file-path) - [admin-token-recovery-tcp-listener-file-path](#admin-token-recovery-tcp-listener-file-path) -{{% show-in "enterprise" %}} -- [Experimental Features](#experimental-features) - - [use-pacha-tree](#use-pacha-tree) -{{% /show-in %}} --- From 8f729f95004ecd8f6913c895d04d4289bad7149a Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Fri, 5 Sep 2025 08:26:38 -0600 Subject: [PATCH 153/179] chore(sql): Update SQL reference with additional functions (#6359) * feat(sql): WIP added struct and map functions * chore(sql): update sql reference with new functions * chore(sql): migrate sql reference function updates to other projects * chore(sql): readd deleted cache functions pages * Update content/shared/sql-reference/functions/array.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * fix(sql): fixed broken anchor links * fix(sql): fixed typos * Apply suggestions from code review Co-authored-by: Jason Stirnaman --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: Jason Stirnaman --- .../reference/sql/functions/_index.md | 2 +- 
.../reference/sql/functions/aggregate.md | 2 +- .../reference/sql/functions/array.md | 17 + .../reference/sql/functions/binary-string.md | 3 +- .../reference/sql/functions/conditional.md | 2 +- .../reference/sql/functions/hashing.md | 5 +- .../reference/sql/functions/map.md | 17 + .../reference/sql/functions/math.md | 2 +- .../reference/sql/functions/misc.md | 4 +- .../sql/functions/regular-expression.md | 4 +- .../reference/sql/functions/selector.md | 2 +- .../reference/sql/functions/string.md | 2 +- .../reference/sql/functions/struct.md | 17 + .../reference/sql/functions/time-and-date.md | 2 +- .../reference/sql/functions/window.md | 4 +- .../reference/sql/functions/_index.md | 4 +- .../reference/sql/functions/aggregate.md | 2 +- .../reference/sql/functions/array.md | 17 + .../reference/sql/functions/binary-string.md | 3 +- .../reference/sql/functions/conditional.md | 2 +- .../reference/sql/functions/hashing.md | 5 +- .../reference/sql/functions/map.md | 17 + .../reference/sql/functions/math.md | 4 +- .../reference/sql/functions/misc.md | 6 +- .../sql/functions/regular-expression.md | 6 +- .../reference/sql/functions/selector.md | 2 +- .../reference/sql/functions/string.md | 2 +- .../reference/sql/functions/struct.md | 17 + .../reference/sql/functions/time-and-date.md | 2 +- .../reference/sql/functions/window.md | 4 +- .../reference/sql/functions/_index.md | 4 +- .../reference/sql/functions/aggregate.md | 2 +- .../reference/sql/functions/array.md | 17 + .../reference/sql/functions/binary-string.md | 3 +- .../reference/sql/functions/conditional.md | 2 +- .../reference/sql/functions/hashing.md | 5 +- .../clustered/reference/sql/functions/map.md | 17 + .../clustered/reference/sql/functions/math.md | 4 +- .../clustered/reference/sql/functions/misc.md | 6 +- .../sql/functions/regular-expression.md | 6 +- .../reference/sql/functions/selector.md | 2 +- .../reference/sql/functions/string.md | 2 +- .../reference/sql/functions/struct.md | 17 + 
.../reference/sql/functions/time-and-date.md | 2 +- .../reference/sql/functions/window.md | 4 +- .../core/reference/sql/functions/_index.md | 4 +- .../core/reference/sql/functions/aggregate.md | 2 +- .../core/reference/sql/functions/array.md | 17 + .../reference/sql/functions/binary-string.md | 3 +- .../core/reference/sql/functions/cache.md | 2 +- .../reference/sql/functions/conditional.md | 2 +- .../core/reference/sql/functions/hashing.md | 5 +- .../core/reference/sql/functions/map.md | 17 + .../core/reference/sql/functions/math.md | 4 +- .../core/reference/sql/functions/misc.md | 6 +- .../sql/functions/regular-expression.md | 6 +- .../core/reference/sql/functions/selector.md | 2 +- .../core/reference/sql/functions/string.md | 2 +- .../core/reference/sql/functions/struct.md | 17 + .../reference/sql/functions/time-and-date.md | 2 +- .../core/reference/sql/functions/window.md | 4 +- .../reference/sql/functions/_index.md | 4 +- .../reference/sql/functions/aggregate.md | 2 +- .../reference/sql/functions/array.md | 17 + .../reference/sql/functions/binary-string.md | 3 +- .../reference/sql/functions/cache.md | 2 +- .../reference/sql/functions/conditional.md | 2 +- .../reference/sql/functions/hashing.md | 5 +- .../enterprise/reference/sql/functions/map.md | 17 + .../reference/sql/functions/math.md | 4 +- .../reference/sql/functions/misc.md | 6 +- .../sql/functions/regular-expression.md | 6 +- .../reference/sql/functions/selector.md | 2 +- .../reference/sql/functions/string.md | 2 +- .../reference/sql/functions/struct.md | 17 + .../reference/sql/functions/time-and-date.md | 2 +- .../reference/sql/functions/window.md | 4 +- .../sql-reference/functions/aggregate.md | 6 +- .../shared/sql-reference/functions/array.md | 1860 +++++++++++++++++ .../sql-reference/functions/binary-string.md | 4 +- .../shared/sql-reference/functions/cache.md | 8 +- .../sql-reference/functions/conditional.md | 38 +- .../shared/sql-reference/functions/hashing.md | 12 +- 
content/shared/sql-reference/functions/map.md | 272 +++ .../shared/sql-reference/functions/math.md | 70 +- .../shared/sql-reference/functions/misc.md | 98 +- .../functions/regular-expression.md | 8 +- .../sql-reference/functions/selector.md | 8 +- .../shared/sql-reference/functions/string.md | 68 +- .../shared/sql-reference/functions/struct.md | 140 ++ .../sql-reference/functions/time-and-date.md | 38 +- .../shared/sql-reference/functions/window.md | 13 +- 92 files changed, 2823 insertions(+), 281 deletions(-) create mode 100644 content/influxdb3/cloud-dedicated/reference/sql/functions/array.md create mode 100644 content/influxdb3/cloud-dedicated/reference/sql/functions/map.md create mode 100644 content/influxdb3/cloud-dedicated/reference/sql/functions/struct.md create mode 100644 content/influxdb3/cloud-serverless/reference/sql/functions/array.md create mode 100644 content/influxdb3/cloud-serverless/reference/sql/functions/map.md create mode 100644 content/influxdb3/cloud-serverless/reference/sql/functions/struct.md create mode 100644 content/influxdb3/clustered/reference/sql/functions/array.md create mode 100644 content/influxdb3/clustered/reference/sql/functions/map.md create mode 100644 content/influxdb3/clustered/reference/sql/functions/struct.md create mode 100644 content/influxdb3/core/reference/sql/functions/array.md create mode 100644 content/influxdb3/core/reference/sql/functions/map.md create mode 100644 content/influxdb3/core/reference/sql/functions/struct.md create mode 100644 content/influxdb3/enterprise/reference/sql/functions/array.md create mode 100644 content/influxdb3/enterprise/reference/sql/functions/map.md create mode 100644 content/influxdb3/enterprise/reference/sql/functions/struct.md create mode 100644 content/shared/sql-reference/functions/array.md create mode 100644 content/shared/sql-reference/functions/map.md create mode 100644 content/shared/sql-reference/functions/struct.md diff --git 
a/content/influxdb3/cloud-dedicated/reference/sql/functions/_index.md b/content/influxdb3/cloud-dedicated/reference/sql/functions/_index.md index 643873b44..0a509ec36 100644 --- a/content/influxdb3/cloud-dedicated/reference/sql/functions/_index.md +++ b/content/influxdb3/cloud-dedicated/reference/sql/functions/_index.md @@ -14,5 +14,5 @@ source: /shared/sql-reference/functions/_index.md --- diff --git a/content/influxdb3/cloud-dedicated/reference/sql/functions/aggregate.md b/content/influxdb3/cloud-dedicated/reference/sql/functions/aggregate.md index c6d8c8e92..465e5d23d 100644 --- a/content/influxdb3/cloud-dedicated/reference/sql/functions/aggregate.md +++ b/content/influxdb3/cloud-dedicated/reference/sql/functions/aggregate.md @@ -15,5 +15,5 @@ source: /shared/sql-reference/functions/aggregate.md --- diff --git a/content/influxdb3/cloud-dedicated/reference/sql/functions/array.md b/content/influxdb3/cloud-dedicated/reference/sql/functions/array.md new file mode 100644 index 000000000..b527991ea --- /dev/null +++ b/content/influxdb3/cloud-dedicated/reference/sql/functions/array.md @@ -0,0 +1,17 @@ +--- +title: SQL array functions +list_title: Array functions +description: > + Use array functions to create and operate on Arrow arrays or lists in SQL queries. 
+menu: + influxdb3_cloud_dedicated: + name: Array + parent: sql-functions +weight: 309 + +source: /shared/sql-reference/functions/array.md +--- + + diff --git a/content/influxdb3/cloud-dedicated/reference/sql/functions/binary-string.md b/content/influxdb3/cloud-dedicated/reference/sql/functions/binary-string.md index e71ff2b84..3dd027195 100644 --- a/content/influxdb3/cloud-dedicated/reference/sql/functions/binary-string.md +++ b/content/influxdb3/cloud-dedicated/reference/sql/functions/binary-string.md @@ -14,6 +14,5 @@ source: /shared/sql-reference/functions/binary-string.md --- diff --git a/content/influxdb3/cloud-dedicated/reference/sql/functions/conditional.md b/content/influxdb3/cloud-dedicated/reference/sql/functions/conditional.md index 21b48f8e5..2d2dbd7bd 100644 --- a/content/influxdb3/cloud-dedicated/reference/sql/functions/conditional.md +++ b/content/influxdb3/cloud-dedicated/reference/sql/functions/conditional.md @@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/conditional.md --- diff --git a/content/influxdb3/cloud-dedicated/reference/sql/functions/hashing.md b/content/influxdb3/cloud-dedicated/reference/sql/functions/hashing.md index 1241011f5..badcb3f74 100644 --- a/content/influxdb3/cloud-dedicated/reference/sql/functions/hashing.md +++ b/content/influxdb3/cloud-dedicated/reference/sql/functions/hashing.md @@ -8,12 +8,11 @@ menu: influxdb3_cloud_dedicated: name: Hashing parent: sql-functions -weight: 309 +weight: 313 source: /shared/sql-reference/functions/hashing.md --- diff --git a/content/influxdb3/cloud-dedicated/reference/sql/functions/map.md b/content/influxdb3/cloud-dedicated/reference/sql/functions/map.md new file mode 100644 index 000000000..1421777d3 --- /dev/null +++ b/content/influxdb3/cloud-dedicated/reference/sql/functions/map.md @@ -0,0 +1,17 @@ +--- +title: SQL map functions +list_title: Map functions +description: > + Use map functions to create and operate on Arrow maps in SQL queries. 
+menu: + influxdb3_cloud_dedicated: + name: Map + parent: sql-functions +weight: 310 + +source: /shared/sql-reference/functions/map.md +--- + + diff --git a/content/influxdb3/cloud-dedicated/reference/sql/functions/math.md b/content/influxdb3/cloud-dedicated/reference/sql/functions/math.md index 09d4d7ff1..a49972614 100644 --- a/content/influxdb3/cloud-dedicated/reference/sql/functions/math.md +++ b/content/influxdb3/cloud-dedicated/reference/sql/functions/math.md @@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/math.md --- diff --git a/content/influxdb3/cloud-dedicated/reference/sql/functions/misc.md b/content/influxdb3/cloud-dedicated/reference/sql/functions/misc.md index 3fc1e1db1..aa8ee7eb6 100644 --- a/content/influxdb3/cloud-dedicated/reference/sql/functions/misc.md +++ b/content/influxdb3/cloud-dedicated/reference/sql/functions/misc.md @@ -7,11 +7,11 @@ menu: influxdb3_cloud_dedicated: name: Miscellaneous parent: sql-functions -weight: 310 +weight: 314 source: /shared/sql-reference/functions/misc.md --- diff --git a/content/influxdb3/cloud-dedicated/reference/sql/functions/regular-expression.md b/content/influxdb3/cloud-dedicated/reference/sql/functions/regular-expression.md index 67db2cf12..5089c80a9 100644 --- a/content/influxdb3/cloud-dedicated/reference/sql/functions/regular-expression.md +++ b/content/influxdb3/cloud-dedicated/reference/sql/functions/regular-expression.md @@ -7,12 +7,12 @@ menu: influxdb3_cloud_dedicated: name: Regular expression parent: sql-functions -weight: 308 +weight: 312 influxdb3/cloud-dedicated/tags: [regular expressions, sql] source: /shared/sql-reference/functions/regular-expression.md --- diff --git a/content/influxdb3/cloud-dedicated/reference/sql/functions/selector.md b/content/influxdb3/cloud-dedicated/reference/sql/functions/selector.md index f3149180e..812b026d1 100644 --- a/content/influxdb3/cloud-dedicated/reference/sql/functions/selector.md +++ 
b/content/influxdb3/cloud-dedicated/reference/sql/functions/selector.md @@ -15,5 +15,5 @@ source: /shared/sql-reference/functions/selector.md --- diff --git a/content/influxdb3/cloud-dedicated/reference/sql/functions/string.md b/content/influxdb3/cloud-dedicated/reference/sql/functions/string.md index 08bca3506..6e5535244 100644 --- a/content/influxdb3/cloud-dedicated/reference/sql/functions/string.md +++ b/content/influxdb3/cloud-dedicated/reference/sql/functions/string.md @@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/string.md --- diff --git a/content/influxdb3/cloud-dedicated/reference/sql/functions/struct.md b/content/influxdb3/cloud-dedicated/reference/sql/functions/struct.md new file mode 100644 index 000000000..95552dbcb --- /dev/null +++ b/content/influxdb3/cloud-dedicated/reference/sql/functions/struct.md @@ -0,0 +1,17 @@ +--- +title: SQL struct functions +list_title: Struct functions +description: > + Use struct functions to create Arrow structs in SQL queries. +menu: + influxdb3_cloud_dedicated: + name: Struct + parent: sql-functions +weight: 311 + +source: /shared/sql-reference/functions/struct.md +--- + + diff --git a/content/influxdb3/cloud-dedicated/reference/sql/functions/time-and-date.md b/content/influxdb3/cloud-dedicated/reference/sql/functions/time-and-date.md index 33949274a..dd5a8b762 100644 --- a/content/influxdb3/cloud-dedicated/reference/sql/functions/time-and-date.md +++ b/content/influxdb3/cloud-dedicated/reference/sql/functions/time-and-date.md @@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/time-and-date.md --- diff --git a/content/influxdb3/cloud-dedicated/reference/sql/functions/window.md b/content/influxdb3/cloud-dedicated/reference/sql/functions/window.md index b2f34e937..0c608a8e5 100644 --- a/content/influxdb3/cloud-dedicated/reference/sql/functions/window.md +++ b/content/influxdb3/cloud-dedicated/reference/sql/functions/window.md @@ -8,11 +8,11 @@ menu: influxdb3_cloud_dedicated: name: Window parent: 
sql-functions -weight: 309 +weight: 315 source: /shared/sql-reference/functions/window.md --- diff --git a/content/influxdb3/cloud-serverless/reference/sql/functions/_index.md b/content/influxdb3/cloud-serverless/reference/sql/functions/_index.md index 210a60d74..16137f7e5 100644 --- a/content/influxdb3/cloud-serverless/reference/sql/functions/_index.md +++ b/content/influxdb3/cloud-serverless/reference/sql/functions/_index.md @@ -14,5 +14,5 @@ source: /shared/sql-reference/functions/_index.md --- \ No newline at end of file +// SOURCE content/shared/sql-reference/functions/_index.md +--> diff --git a/content/influxdb3/cloud-serverless/reference/sql/functions/aggregate.md b/content/influxdb3/cloud-serverless/reference/sql/functions/aggregate.md index 9e890a3da..156b9bcf3 100644 --- a/content/influxdb3/cloud-serverless/reference/sql/functions/aggregate.md +++ b/content/influxdb3/cloud-serverless/reference/sql/functions/aggregate.md @@ -15,5 +15,5 @@ source: /shared/sql-reference/functions/aggregate.md --- diff --git a/content/influxdb3/cloud-serverless/reference/sql/functions/array.md b/content/influxdb3/cloud-serverless/reference/sql/functions/array.md new file mode 100644 index 000000000..e9479270b --- /dev/null +++ b/content/influxdb3/cloud-serverless/reference/sql/functions/array.md @@ -0,0 +1,17 @@ +--- +title: SQL array functions +list_title: Array functions +description: > + Use array functions to create and operate on Arrow arrays or lists in SQL queries. 
+menu: + influxdb3_cloud_serverless: + name: Array + parent: sql-functions +weight: 309 + +source: /shared/sql-reference/functions/array.md +--- + + diff --git a/content/influxdb3/cloud-serverless/reference/sql/functions/binary-string.md b/content/influxdb3/cloud-serverless/reference/sql/functions/binary-string.md index 25b216982..6b40233e6 100644 --- a/content/influxdb3/cloud-serverless/reference/sql/functions/binary-string.md +++ b/content/influxdb3/cloud-serverless/reference/sql/functions/binary-string.md @@ -14,6 +14,5 @@ source: /shared/sql-reference/functions/binary-string.md --- diff --git a/content/influxdb3/cloud-serverless/reference/sql/functions/conditional.md b/content/influxdb3/cloud-serverless/reference/sql/functions/conditional.md index f9e837b7e..d21540ccf 100644 --- a/content/influxdb3/cloud-serverless/reference/sql/functions/conditional.md +++ b/content/influxdb3/cloud-serverless/reference/sql/functions/conditional.md @@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/conditional.md --- diff --git a/content/influxdb3/cloud-serverless/reference/sql/functions/hashing.md b/content/influxdb3/cloud-serverless/reference/sql/functions/hashing.md index 5d4b4a201..94c4a3d4f 100644 --- a/content/influxdb3/cloud-serverless/reference/sql/functions/hashing.md +++ b/content/influxdb3/cloud-serverless/reference/sql/functions/hashing.md @@ -8,12 +8,11 @@ menu: influxdb3_cloud_serverless: name: Hashing parent: sql-functions -weight: 309 +weight: 313 source: /shared/sql-reference/functions/hashing.md --- diff --git a/content/influxdb3/cloud-serverless/reference/sql/functions/map.md b/content/influxdb3/cloud-serverless/reference/sql/functions/map.md new file mode 100644 index 000000000..8cf856efe --- /dev/null +++ b/content/influxdb3/cloud-serverless/reference/sql/functions/map.md @@ -0,0 +1,17 @@ +--- +title: SQL map functions +list_title: Map functions +description: > + Use map functions to create and operate on Arrow maps in SQL queries. 
+menu: + influxdb3_cloud_serverless: + name: Map + parent: sql-functions +weight: 310 + +source: /shared/sql-reference/functions/map.md +--- + + diff --git a/content/influxdb3/cloud-serverless/reference/sql/functions/math.md b/content/influxdb3/cloud-serverless/reference/sql/functions/math.md index cbb017d0c..aab06608e 100644 --- a/content/influxdb3/cloud-serverless/reference/sql/functions/math.md +++ b/content/influxdb3/cloud-serverless/reference/sql/functions/math.md @@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/math.md --- \ No newline at end of file +// SOURCE content/shared/sql-reference/functions/math.md +--> diff --git a/content/influxdb3/cloud-serverless/reference/sql/functions/misc.md b/content/influxdb3/cloud-serverless/reference/sql/functions/misc.md index 1daf798c7..9396ff2e1 100644 --- a/content/influxdb3/cloud-serverless/reference/sql/functions/misc.md +++ b/content/influxdb3/cloud-serverless/reference/sql/functions/misc.md @@ -7,11 +7,11 @@ menu: influxdb3_cloud_serverless: name: Miscellaneous parent: sql-functions -weight: 310 +weight: 314 source: /shared/sql-reference/functions/misc.md --- \ No newline at end of file +// SOURCE content/shared/sql-reference/functions/misc.md +--> diff --git a/content/influxdb3/cloud-serverless/reference/sql/functions/regular-expression.md b/content/influxdb3/cloud-serverless/reference/sql/functions/regular-expression.md index fc4a6b150..299df8792 100644 --- a/content/influxdb3/cloud-serverless/reference/sql/functions/regular-expression.md +++ b/content/influxdb3/cloud-serverless/reference/sql/functions/regular-expression.md @@ -7,12 +7,12 @@ menu: influxdb3_cloud_serverless: name: Regular expression parent: sql-functions -weight: 308 +weight: 312 influxdb3/cloud-serverless/tags: [regular expressions, sql] source: /shared/sql-reference/functions/regular-expression.md --- \ No newline at end of file +// SOURCE content/shared/sql-reference/functions/regular-expression.md +--> diff --git 
a/content/influxdb3/cloud-serverless/reference/sql/functions/selector.md b/content/influxdb3/cloud-serverless/reference/sql/functions/selector.md index acc3b9394..428906ac4 100644 --- a/content/influxdb3/cloud-serverless/reference/sql/functions/selector.md +++ b/content/influxdb3/cloud-serverless/reference/sql/functions/selector.md @@ -15,5 +15,5 @@ source: /shared/sql-reference/functions/selector.md --- diff --git a/content/influxdb3/cloud-serverless/reference/sql/functions/string.md b/content/influxdb3/cloud-serverless/reference/sql/functions/string.md index 861d13b2a..202abb70b 100644 --- a/content/influxdb3/cloud-serverless/reference/sql/functions/string.md +++ b/content/influxdb3/cloud-serverless/reference/sql/functions/string.md @@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/string.md --- diff --git a/content/influxdb3/cloud-serverless/reference/sql/functions/struct.md b/content/influxdb3/cloud-serverless/reference/sql/functions/struct.md new file mode 100644 index 000000000..951abe974 --- /dev/null +++ b/content/influxdb3/cloud-serverless/reference/sql/functions/struct.md @@ -0,0 +1,17 @@ +--- +title: SQL struct functions +list_title: Struct functions +description: > + Use struct functions to create Arrow structs in SQL queries. 
+menu: + influxdb3_cloud_serverless: + name: Struct + parent: sql-functions +weight: 311 + +source: /shared/sql-reference/functions/struct.md +--- + + diff --git a/content/influxdb3/cloud-serverless/reference/sql/functions/time-and-date.md b/content/influxdb3/cloud-serverless/reference/sql/functions/time-and-date.md index 322f30c5d..5008831ea 100644 --- a/content/influxdb3/cloud-serverless/reference/sql/functions/time-and-date.md +++ b/content/influxdb3/cloud-serverless/reference/sql/functions/time-and-date.md @@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/time-and-date.md --- diff --git a/content/influxdb3/cloud-serverless/reference/sql/functions/window.md b/content/influxdb3/cloud-serverless/reference/sql/functions/window.md index 882722f57..eeb839ccb 100644 --- a/content/influxdb3/cloud-serverless/reference/sql/functions/window.md +++ b/content/influxdb3/cloud-serverless/reference/sql/functions/window.md @@ -8,11 +8,11 @@ menu: influxdb3_cloud_serverless: name: Window parent: sql-functions -weight: 309 +weight: 315 source: /shared/sql-reference/functions/window.md --- diff --git a/content/influxdb3/clustered/reference/sql/functions/_index.md b/content/influxdb3/clustered/reference/sql/functions/_index.md index d0dd020e7..5fab8b9ff 100644 --- a/content/influxdb3/clustered/reference/sql/functions/_index.md +++ b/content/influxdb3/clustered/reference/sql/functions/_index.md @@ -14,5 +14,5 @@ source: /shared/sql-reference/functions/_index.md --- \ No newline at end of file +// SOURCE content/shared/sql-reference/functions/_index.md +--> diff --git a/content/influxdb3/clustered/reference/sql/functions/aggregate.md b/content/influxdb3/clustered/reference/sql/functions/aggregate.md index 627276ff3..891c4742b 100644 --- a/content/influxdb3/clustered/reference/sql/functions/aggregate.md +++ b/content/influxdb3/clustered/reference/sql/functions/aggregate.md @@ -15,5 +15,5 @@ source: /shared/sql-reference/functions/aggregate.md --- diff --git 
a/content/influxdb3/clustered/reference/sql/functions/array.md b/content/influxdb3/clustered/reference/sql/functions/array.md new file mode 100644 index 000000000..e36fddfa2 --- /dev/null +++ b/content/influxdb3/clustered/reference/sql/functions/array.md @@ -0,0 +1,17 @@ +--- +title: SQL array functions +list_title: Array functions +description: > + Use array functions to create and operate on Arrow arrays or lists in SQL queries. +menu: + influxdb3_clustered: + name: Array + parent: sql-functions +weight: 309 + +source: /shared/sql-reference/functions/array.md +--- + + diff --git a/content/influxdb3/clustered/reference/sql/functions/binary-string.md b/content/influxdb3/clustered/reference/sql/functions/binary-string.md index d6150ffef..d32411f36 100644 --- a/content/influxdb3/clustered/reference/sql/functions/binary-string.md +++ b/content/influxdb3/clustered/reference/sql/functions/binary-string.md @@ -14,6 +14,5 @@ source: /shared/sql-reference/functions/binary-string.md --- diff --git a/content/influxdb3/clustered/reference/sql/functions/conditional.md b/content/influxdb3/clustered/reference/sql/functions/conditional.md index db930a108..84c197d46 100644 --- a/content/influxdb3/clustered/reference/sql/functions/conditional.md +++ b/content/influxdb3/clustered/reference/sql/functions/conditional.md @@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/conditional.md --- diff --git a/content/influxdb3/clustered/reference/sql/functions/hashing.md b/content/influxdb3/clustered/reference/sql/functions/hashing.md index 38a2a7482..46ecd7aca 100644 --- a/content/influxdb3/clustered/reference/sql/functions/hashing.md +++ b/content/influxdb3/clustered/reference/sql/functions/hashing.md @@ -8,12 +8,11 @@ menu: influxdb3_clustered: name: Hashing parent: sql-functions -weight: 309 +weight: 313 source: /shared/sql-reference/functions/hashing.md --- diff --git a/content/influxdb3/clustered/reference/sql/functions/map.md 
b/content/influxdb3/clustered/reference/sql/functions/map.md new file mode 100644 index 000000000..97d21217b --- /dev/null +++ b/content/influxdb3/clustered/reference/sql/functions/map.md @@ -0,0 +1,17 @@ +--- +title: SQL map functions +list_title: Map functions +description: > + Use map functions to create and operate on Arrow maps in SQL queries. +menu: + influxdb3_clustered: + name: Map + parent: sql-functions +weight: 310 + +source: /shared/sql-reference/functions/map.md +--- + + diff --git a/content/influxdb3/clustered/reference/sql/functions/math.md b/content/influxdb3/clustered/reference/sql/functions/math.md index 0e032d479..436b2e9c5 100644 --- a/content/influxdb3/clustered/reference/sql/functions/math.md +++ b/content/influxdb3/clustered/reference/sql/functions/math.md @@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/math.md --- \ No newline at end of file +// SOURCE content/shared/sql-reference/functions/math.md +--> diff --git a/content/influxdb3/clustered/reference/sql/functions/misc.md b/content/influxdb3/clustered/reference/sql/functions/misc.md index 3965055c1..4b947169d 100644 --- a/content/influxdb3/clustered/reference/sql/functions/misc.md +++ b/content/influxdb3/clustered/reference/sql/functions/misc.md @@ -7,11 +7,11 @@ menu: influxdb3_clustered: name: Miscellaneous parent: sql-functions -weight: 310 +weight: 314 source: /shared/sql-reference/functions/misc.md --- \ No newline at end of file +// SOURCE content/shared/sql-reference/functions/misc.md +--> diff --git a/content/influxdb3/clustered/reference/sql/functions/regular-expression.md b/content/influxdb3/clustered/reference/sql/functions/regular-expression.md index c07b0eab5..203fe483e 100644 --- a/content/influxdb3/clustered/reference/sql/functions/regular-expression.md +++ b/content/influxdb3/clustered/reference/sql/functions/regular-expression.md @@ -7,12 +7,12 @@ menu: influxdb3_clustered: name: Regular expression parent: sql-functions -weight: 308 +weight: 312 
influxdb3/clustered/tags: [regular expressions, sql] source: /shared/sql-reference/functions/regular-expression.md --- \ No newline at end of file +// SOURCE content/shared/sql-reference/functions/regular-expression.md +--> diff --git a/content/influxdb3/clustered/reference/sql/functions/selector.md b/content/influxdb3/clustered/reference/sql/functions/selector.md index ff8a121bc..7f907fe91 100644 --- a/content/influxdb3/clustered/reference/sql/functions/selector.md +++ b/content/influxdb3/clustered/reference/sql/functions/selector.md @@ -15,5 +15,5 @@ source: /shared/sql-reference/functions/selector.md --- diff --git a/content/influxdb3/clustered/reference/sql/functions/string.md b/content/influxdb3/clustered/reference/sql/functions/string.md index 4b072bd5c..5b93e8985 100644 --- a/content/influxdb3/clustered/reference/sql/functions/string.md +++ b/content/influxdb3/clustered/reference/sql/functions/string.md @@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/string.md --- diff --git a/content/influxdb3/clustered/reference/sql/functions/struct.md b/content/influxdb3/clustered/reference/sql/functions/struct.md new file mode 100644 index 000000000..7bc0d38ff --- /dev/null +++ b/content/influxdb3/clustered/reference/sql/functions/struct.md @@ -0,0 +1,17 @@ +--- +title: SQL struct functions +list_title: Struct functions +description: > + Use struct functions to create Arrow structs in SQL queries. 
+menu: + influxdb3_clustered: + name: Struct + parent: sql-functions +weight: 311 + +source: /shared/sql-reference/functions/struct.md +--- + + diff --git a/content/influxdb3/clustered/reference/sql/functions/time-and-date.md b/content/influxdb3/clustered/reference/sql/functions/time-and-date.md index 1acac8a3d..85fd23231 100644 --- a/content/influxdb3/clustered/reference/sql/functions/time-and-date.md +++ b/content/influxdb3/clustered/reference/sql/functions/time-and-date.md @@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/time-and-date.md --- diff --git a/content/influxdb3/clustered/reference/sql/functions/window.md b/content/influxdb3/clustered/reference/sql/functions/window.md index 4b4c0052a..35c7d5a6d 100644 --- a/content/influxdb3/clustered/reference/sql/functions/window.md +++ b/content/influxdb3/clustered/reference/sql/functions/window.md @@ -8,11 +8,11 @@ menu: influxdb3_clustered: name: Window parent: sql-functions -weight: 309 +weight: 315 source: /shared/sql-reference/functions/window.md --- diff --git a/content/influxdb3/core/reference/sql/functions/_index.md b/content/influxdb3/core/reference/sql/functions/_index.md index fa8e5be81..5fb175955 100644 --- a/content/influxdb3/core/reference/sql/functions/_index.md +++ b/content/influxdb3/core/reference/sql/functions/_index.md @@ -14,5 +14,5 @@ source: /shared/sql-reference/functions/_index.md --- \ No newline at end of file +// SOURCE content/shared/sql-reference/functions/_index.md +--> diff --git a/content/influxdb3/core/reference/sql/functions/aggregate.md b/content/influxdb3/core/reference/sql/functions/aggregate.md index 3b4a2b8ac..2bdcec17d 100644 --- a/content/influxdb3/core/reference/sql/functions/aggregate.md +++ b/content/influxdb3/core/reference/sql/functions/aggregate.md @@ -15,5 +15,5 @@ source: /shared/sql-reference/functions/aggregate.md --- diff --git a/content/influxdb3/core/reference/sql/functions/array.md b/content/influxdb3/core/reference/sql/functions/array.md new file mode 
100644 index 000000000..a413d5a1a --- /dev/null +++ b/content/influxdb3/core/reference/sql/functions/array.md @@ -0,0 +1,17 @@ +--- +title: SQL array functions +list_title: Array functions +description: > + Use array functions to create and operate on Arrow arrays or lists in SQL queries. +menu: + influxdb3_core: + name: Array + parent: sql-functions +weight: 309 + +source: /shared/sql-reference/functions/array.md +--- + + diff --git a/content/influxdb3/core/reference/sql/functions/binary-string.md b/content/influxdb3/core/reference/sql/functions/binary-string.md index b12e5e8d5..da92409c0 100644 --- a/content/influxdb3/core/reference/sql/functions/binary-string.md +++ b/content/influxdb3/core/reference/sql/functions/binary-string.md @@ -14,6 +14,5 @@ source: /shared/sql-reference/functions/binary-string.md --- diff --git a/content/influxdb3/core/reference/sql/functions/cache.md b/content/influxdb3/core/reference/sql/functions/cache.md index c56aa4cfd..9b2f4444d 100644 --- a/content/influxdb3/core/reference/sql/functions/cache.md +++ b/content/influxdb3/core/reference/sql/functions/cache.md @@ -7,7 +7,7 @@ menu: influxdb3_core: name: Cache parent: sql-functions -weight: 311 +weight: 314 source: /shared/sql-reference/functions/cache.md --- diff --git a/content/influxdb3/core/reference/sql/functions/conditional.md b/content/influxdb3/core/reference/sql/functions/conditional.md index 1a4ed38ae..8bf49e4e9 100644 --- a/content/influxdb3/core/reference/sql/functions/conditional.md +++ b/content/influxdb3/core/reference/sql/functions/conditional.md @@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/conditional.md --- diff --git a/content/influxdb3/core/reference/sql/functions/hashing.md b/content/influxdb3/core/reference/sql/functions/hashing.md index 5e0248f84..d03ab9e27 100644 --- a/content/influxdb3/core/reference/sql/functions/hashing.md +++ b/content/influxdb3/core/reference/sql/functions/hashing.md @@ -8,12 +8,11 @@ menu: influxdb3_core: name: Hashing parent: 
sql-functions -weight: 309 +weight: 313 source: /shared/sql-reference/functions/hashing.md --- diff --git a/content/influxdb3/core/reference/sql/functions/map.md b/content/influxdb3/core/reference/sql/functions/map.md new file mode 100644 index 000000000..b399242c7 --- /dev/null +++ b/content/influxdb3/core/reference/sql/functions/map.md @@ -0,0 +1,17 @@ +--- +title: SQL map functions +list_title: Map functions +description: > + Use map functions to create and operate on Arrow maps in SQL queries. +menu: + influxdb3_core: + name: Map + parent: sql-functions +weight: 310 + +source: /shared/sql-reference/functions/map.md +--- + + diff --git a/content/influxdb3/core/reference/sql/functions/math.md b/content/influxdb3/core/reference/sql/functions/math.md index 572b47353..e8ab5fe5b 100644 --- a/content/influxdb3/core/reference/sql/functions/math.md +++ b/content/influxdb3/core/reference/sql/functions/math.md @@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/math.md --- \ No newline at end of file +// SOURCE content/shared/sql-reference/functions/math.md +--> diff --git a/content/influxdb3/core/reference/sql/functions/misc.md b/content/influxdb3/core/reference/sql/functions/misc.md index 3235bf654..971d80bb2 100644 --- a/content/influxdb3/core/reference/sql/functions/misc.md +++ b/content/influxdb3/core/reference/sql/functions/misc.md @@ -7,11 +7,11 @@ menu: influxdb3_core: name: Miscellaneous parent: sql-functions -weight: 310 +weight: 314 source: /shared/sql-reference/functions/misc.md --- \ No newline at end of file +// SOURCE content/shared/sql-reference/functions/misc.md +--> diff --git a/content/influxdb3/core/reference/sql/functions/regular-expression.md b/content/influxdb3/core/reference/sql/functions/regular-expression.md index 8d5f0a29b..fdb305c36 100644 --- a/content/influxdb3/core/reference/sql/functions/regular-expression.md +++ b/content/influxdb3/core/reference/sql/functions/regular-expression.md @@ -7,12 +7,12 @@ menu: influxdb3_core: name: 
Regular expression parent: sql-functions -weight: 308 +weight: 312 influxdb3/core/tags: [regular expressions, sql] source: /shared/sql-reference/functions/regular-expression.md --- \ No newline at end of file +// SOURCE content/shared/sql-reference/functions/regular-expression.md +--> diff --git a/content/influxdb3/core/reference/sql/functions/selector.md b/content/influxdb3/core/reference/sql/functions/selector.md index d74760493..ff14ff96b 100644 --- a/content/influxdb3/core/reference/sql/functions/selector.md +++ b/content/influxdb3/core/reference/sql/functions/selector.md @@ -15,5 +15,5 @@ source: /shared/sql-reference/functions/selector.md --- diff --git a/content/influxdb3/core/reference/sql/functions/string.md b/content/influxdb3/core/reference/sql/functions/string.md index 07f02e941..bef0515e6 100644 --- a/content/influxdb3/core/reference/sql/functions/string.md +++ b/content/influxdb3/core/reference/sql/functions/string.md @@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/string.md --- diff --git a/content/influxdb3/core/reference/sql/functions/struct.md b/content/influxdb3/core/reference/sql/functions/struct.md new file mode 100644 index 000000000..1db0dd9eb --- /dev/null +++ b/content/influxdb3/core/reference/sql/functions/struct.md @@ -0,0 +1,17 @@ +--- +title: SQL struct functions +list_title: Struct functions +description: > + Use struct functions to create Arrow structs in SQL queries. 
+menu: + influxdb3_core: + name: Struct + parent: sql-functions +weight: 311 + +source: /shared/sql-reference/functions/struct.md +--- + + diff --git a/content/influxdb3/core/reference/sql/functions/time-and-date.md b/content/influxdb3/core/reference/sql/functions/time-and-date.md index 1d58575f5..60cf7f76d 100644 --- a/content/influxdb3/core/reference/sql/functions/time-and-date.md +++ b/content/influxdb3/core/reference/sql/functions/time-and-date.md @@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/time-and-date.md --- diff --git a/content/influxdb3/core/reference/sql/functions/window.md b/content/influxdb3/core/reference/sql/functions/window.md index e964f30ed..4c000077d 100644 --- a/content/influxdb3/core/reference/sql/functions/window.md +++ b/content/influxdb3/core/reference/sql/functions/window.md @@ -8,11 +8,11 @@ menu: influxdb3_core: name: Window parent: sql-functions -weight: 309 +weight: 315 source: /shared/sql-reference/functions/window.md --- diff --git a/content/influxdb3/enterprise/reference/sql/functions/_index.md b/content/influxdb3/enterprise/reference/sql/functions/_index.md index eb99e2246..9c3da8d4c 100644 --- a/content/influxdb3/enterprise/reference/sql/functions/_index.md +++ b/content/influxdb3/enterprise/reference/sql/functions/_index.md @@ -14,5 +14,5 @@ source: /shared/sql-reference/functions/_index.md --- \ No newline at end of file +// SOURCE content/shared/sql-reference/functions/_index.md +--> diff --git a/content/influxdb3/enterprise/reference/sql/functions/aggregate.md b/content/influxdb3/enterprise/reference/sql/functions/aggregate.md index affec8cfa..2e533d9a2 100644 --- a/content/influxdb3/enterprise/reference/sql/functions/aggregate.md +++ b/content/influxdb3/enterprise/reference/sql/functions/aggregate.md @@ -15,5 +15,5 @@ source: /shared/sql-reference/functions/aggregate.md --- diff --git a/content/influxdb3/enterprise/reference/sql/functions/array.md b/content/influxdb3/enterprise/reference/sql/functions/array.md new 
file mode 100644 index 000000000..fa9006c47 --- /dev/null +++ b/content/influxdb3/enterprise/reference/sql/functions/array.md @@ -0,0 +1,17 @@ +--- +title: SQL array functions +list_title: Array functions +description: > + Use array functions to create and operate on Arrow arrays or lists in SQL queries. +menu: + influxdb3_enterprise: + name: Array + parent: sql-functions +weight: 309 + +source: /shared/sql-reference/functions/array.md +--- + + diff --git a/content/influxdb3/enterprise/reference/sql/functions/binary-string.md b/content/influxdb3/enterprise/reference/sql/functions/binary-string.md index 1f3f33f0d..c9d44878b 100644 --- a/content/influxdb3/enterprise/reference/sql/functions/binary-string.md +++ b/content/influxdb3/enterprise/reference/sql/functions/binary-string.md @@ -14,6 +14,5 @@ source: /shared/sql-reference/functions/binary-string.md --- diff --git a/content/influxdb3/enterprise/reference/sql/functions/cache.md b/content/influxdb3/enterprise/reference/sql/functions/cache.md index e1656ee44..505346fbc 100644 --- a/content/influxdb3/enterprise/reference/sql/functions/cache.md +++ b/content/influxdb3/enterprise/reference/sql/functions/cache.md @@ -7,7 +7,7 @@ menu: influxdb3_enterprise: name: Cache parent: sql-functions -weight: 311 +weight: 314 source: /shared/sql-reference/functions/cache.md --- diff --git a/content/influxdb3/enterprise/reference/sql/functions/conditional.md b/content/influxdb3/enterprise/reference/sql/functions/conditional.md index 07bb62cbc..5edc59782 100644 --- a/content/influxdb3/enterprise/reference/sql/functions/conditional.md +++ b/content/influxdb3/enterprise/reference/sql/functions/conditional.md @@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/conditional.md --- diff --git a/content/influxdb3/enterprise/reference/sql/functions/hashing.md b/content/influxdb3/enterprise/reference/sql/functions/hashing.md index 4770d5336..509eac3e4 100644 --- a/content/influxdb3/enterprise/reference/sql/functions/hashing.md +++ 
b/content/influxdb3/enterprise/reference/sql/functions/hashing.md @@ -8,12 +8,11 @@ menu: influxdb3_enterprise: name: Hashing parent: sql-functions -weight: 309 +weight: 313 source: /shared/sql-reference/functions/hashing.md --- diff --git a/content/influxdb3/enterprise/reference/sql/functions/map.md b/content/influxdb3/enterprise/reference/sql/functions/map.md new file mode 100644 index 000000000..4fca52803 --- /dev/null +++ b/content/influxdb3/enterprise/reference/sql/functions/map.md @@ -0,0 +1,17 @@ +--- +title: SQL map functions +list_title: Map functions +description: > + Use map functions to create and operate on Arrow maps in SQL queries. +menu: + influxdb3_enterprise: + name: Map + parent: sql-functions +weight: 310 + +source: /shared/sql-reference/functions/map.md +--- + + diff --git a/content/influxdb3/enterprise/reference/sql/functions/math.md b/content/influxdb3/enterprise/reference/sql/functions/math.md index 934980b30..5998892e3 100644 --- a/content/influxdb3/enterprise/reference/sql/functions/math.md +++ b/content/influxdb3/enterprise/reference/sql/functions/math.md @@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/math.md --- \ No newline at end of file +// SOURCE content/shared/sql-reference/functions/math.md +--> diff --git a/content/influxdb3/enterprise/reference/sql/functions/misc.md b/content/influxdb3/enterprise/reference/sql/functions/misc.md index 7bc7f4803..e91ed96e1 100644 --- a/content/influxdb3/enterprise/reference/sql/functions/misc.md +++ b/content/influxdb3/enterprise/reference/sql/functions/misc.md @@ -7,11 +7,11 @@ menu: influxdb3_enterprise: name: Miscellaneous parent: sql-functions -weight: 310 +weight: 314 source: /shared/sql-reference/functions/misc.md --- \ No newline at end of file +// SOURCE content/shared/sql-reference/functions/misc.md +--> diff --git a/content/influxdb3/enterprise/reference/sql/functions/regular-expression.md b/content/influxdb3/enterprise/reference/sql/functions/regular-expression.md index 
5d118bbc7..f1671e039 100644 --- a/content/influxdb3/enterprise/reference/sql/functions/regular-expression.md +++ b/content/influxdb3/enterprise/reference/sql/functions/regular-expression.md @@ -7,12 +7,12 @@ menu: influxdb3_enterprise: name: Regular expression parent: sql-functions -weight: 308 +weight: 312 influxdb3/enterprise/tags: [regular expressions, sql] source: /shared/sql-reference/functions/regular-expression.md --- \ No newline at end of file +// SOURCE content/shared/sql-reference/functions/regular-expression.md +--> diff --git a/content/influxdb3/enterprise/reference/sql/functions/selector.md b/content/influxdb3/enterprise/reference/sql/functions/selector.md index 4f974a754..95d562e4a 100644 --- a/content/influxdb3/enterprise/reference/sql/functions/selector.md +++ b/content/influxdb3/enterprise/reference/sql/functions/selector.md @@ -15,5 +15,5 @@ source: /shared/sql-reference/functions/selector.md --- diff --git a/content/influxdb3/enterprise/reference/sql/functions/string.md b/content/influxdb3/enterprise/reference/sql/functions/string.md index 7a292892f..7e81f1512 100644 --- a/content/influxdb3/enterprise/reference/sql/functions/string.md +++ b/content/influxdb3/enterprise/reference/sql/functions/string.md @@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/string.md --- diff --git a/content/influxdb3/enterprise/reference/sql/functions/struct.md b/content/influxdb3/enterprise/reference/sql/functions/struct.md new file mode 100644 index 000000000..769f1107a --- /dev/null +++ b/content/influxdb3/enterprise/reference/sql/functions/struct.md @@ -0,0 +1,17 @@ +--- +title: SQL struct functions +list_title: Struct functions +description: > + Use struct functions to create Arrow structs in SQL queries. 
+menu: + influxdb3_enterprise: + name: Struct + parent: sql-functions +weight: 311 + +source: /shared/sql-reference/functions/struct.md +--- + + diff --git a/content/influxdb3/enterprise/reference/sql/functions/time-and-date.md b/content/influxdb3/enterprise/reference/sql/functions/time-and-date.md index 7776a453b..5dbb1ff67 100644 --- a/content/influxdb3/enterprise/reference/sql/functions/time-and-date.md +++ b/content/influxdb3/enterprise/reference/sql/functions/time-and-date.md @@ -13,5 +13,5 @@ source: /shared/sql-reference/functions/time-and-date.md --- diff --git a/content/influxdb3/enterprise/reference/sql/functions/window.md b/content/influxdb3/enterprise/reference/sql/functions/window.md index 934c647be..19175317d 100644 --- a/content/influxdb3/enterprise/reference/sql/functions/window.md +++ b/content/influxdb3/enterprise/reference/sql/functions/window.md @@ -8,11 +8,11 @@ menu: influxdb3_enterprise: name: Window parent: sql-functions -weight: 309 +weight: 315 source: /shared/sql-reference/functions/window.md --- diff --git a/content/shared/sql-reference/functions/aggregate.md b/content/shared/sql-reference/functions/aggregate.md index 1cf76088c..45630de2f 100644 --- a/content/shared/sql-reference/functions/aggregate.md +++ b/content/shared/sql-reference/functions/aggregate.md @@ -77,7 +77,7 @@ aggregate value. Returns an array created from the expression elements. > [!Note] -> `array_agg` returns a `LIST` arrow type. Use bracket notation to reference the +> `array_agg` returns a `LIST` Arrow type. Use bracket notation to reference the > index of an element in the returned array. Arrays are 1-indexed. 
```sql @@ -524,7 +524,7 @@ GROUP BY location ### mean -_Alias of [avg](#avg)._ +_Alias of [`avg`](#avg)._ ### median @@ -1403,7 +1403,7 @@ GROUP BY room ### var_population -_Alias of [var_pop](#var_pop)._ +_Alias of [`var_pop`](#var_pop)._ ### var_samp diff --git a/content/shared/sql-reference/functions/array.md b/content/shared/sql-reference/functions/array.md new file mode 100644 index 000000000..5efa2249e --- /dev/null +++ b/content/shared/sql-reference/functions/array.md @@ -0,0 +1,1860 @@ + +Use array functions to create and operate on Arrow arrays or lists in SQL queries. + +- [array_any_value](#array_any_value) +- [array_append](#array_append) +- [array_cat](#array_cat) +- [array_concat](#array_concat) +- [array_contains](#array_contains) +- [array_dims](#array_dims) +- [array_distance](#array_distance) +- [array_distinct](#array_distinct) +- [array_element](#array_element) +- [array_empty](#array_empty) +- [array_except](#array_except) +- [array_extract](#array_extract) +- [array_has](#array_has) +- [array_has_all](#array_has_all) +- [array_has_any](#array_has_any) +- [array_indexof](#array_indexof) +- [array_intersect](#array_intersect) +- [array_join](#array_join) +- [array_length](#array_length) +- [array_max](#array_max) +- [array_min](#array_min) +- [array_ndims](#array_ndims) +- [array_pop_back](#array_pop_back) +- [array_pop_front](#array_pop_front) +- [array_position](#array_position) +- [array_positions](#array_positions) +- [array_prepend](#array_prepend) +- [array_push_back](#array_push_back) +- [array_push_front](#array_push_front) +- [array_remove](#array_remove) +- [array_remove_all](#array_remove_all) +- [array_remove_n](#array_remove_n) +- [array_repeat](#array_repeat) +- [array_replace](#array_replace) +- [array_replace_all](#array_replace_all) +- [array_replace_n](#array_replace_n) +- [array_resize](#array_resize) +- [array_reverse](#array_reverse) +- [array_slice](#array_slice) +- [array_sort](#array_sort) +- 
[array_to_string](#array_to_string) +- [array_union](#array_union) +- [arrays_overlap](#arrays_overlap) +- [cardinality](#cardinality) +- [empty](#empty) +- [flatten](#flatten) +- [generate_series](#generate_series) +- [list_any_value](#list_any_value) +- [list_append](#list_append) +- [list_cat](#list_cat) +- [list_concat](#list_concat) +- [list_contains](#list_contains) +- [list_dims](#list_dims) +- [list_distance](#list_distance) +- [list_distinct](#list_distinct) +- [list_element](#list_element) +- [list_empty](#list_empty) +- [list_except](#list_except) +- [list_extract](#list_extract) +- [list_has](#list_has) +- [list_has_all](#list_has_all) +- [list_has_any](#list_has_any) +- [list_indexof](#list_indexof) +- [list_intersect](#list_intersect) +- [list_join](#list_join) +- [list_length](#list_length) +- [list_max](#list_max) +- [list_ndims](#list_ndims) +- [list_pop_back](#list_pop_back) +- [list_pop_front](#list_pop_front) +- [list_position](#list_position) +- [list_positions](#list_positions) +- [list_prepend](#list_prepend) +- [list_push_back](#list_push_back) +- [list_push_front](#list_push_front) +- [list_remove](#list_remove) +- [list_remove_all](#list_remove_all) +- [list_remove_n](#list_remove_n) +- [list_repeat](#list_repeat) +- [list_replace](#list_replace) +- [list_replace_all](#list_replace_all) +- [list_replace_n](#list_replace_n) +- [list_resize](#list_resize) +- [list_reverse](#list_reverse) +- [list_slice](#list_slice) +- [list_sort](#list_sort) +- [list_to_string](#list_to_string) +- [list_union](#list_union) +- [make_array](#make_array) +- [make_list](#make_list) +- [range](#range) +- [string_to_array](#string_to_array) +- [string_to_list](#string_to_list) + +## array_any_value + +Returns the first non-null element in the array. + +```sql +array_any_value(array) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. 
+ +#### Aliases + +- `list_any_value` + +{{< expand-wrapper >}} +{{% expand "View `array_any_value` example" %}} + +```sql +SELECT array_any_value([NULL, 1, 2, 3]) AS array_any_value +``` + +| array_any_value | +| :-------------- | +| 1 | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_append + +Appends an element to the end of an array. + +```sql +array_append(array, element) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. +- **element**: Element to append to the array. + +#### Aliases + +- `list_append` +- `array_push_back` +- `list_push_back` + +{{< expand-wrapper >}} +{{% expand "View `array_append` example" %}} + +```sql +SELECT array_append([1, 2, 3], 4) AS array_append +``` + +| array_append | +| :----------- | +| [1, 2, 3, 4] | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_cat + +_Alias of [array_concat](#array_concat)._ + +## array_concat + +Concatenates multiple arrays into a single array. + +```sql +array_concat(array[, ..., array_n]) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. +- **array_n**: Subsequent array column or literal array to concatenate. + +#### Aliases + +- `array_cat` +- `list_concat` +- `list_cat` + +{{< expand-wrapper >}} +{{% expand "View `array_concat` example" %}} + +```sql +SELECT array_concat([1, 2], [3, 4], [5, 6]) AS array_concat +``` +| array_concat | +| :----------------- | +| [1, 2, 3, 4, 5, 6] | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_contains + +_Alias of [array_has](#array_has)._ + +## array_dims + +Returns an array of the array's dimensions. + +```sql +array_dims(array) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. 
+
+#### Aliases
+
+- `list_dims`
+
+{{< expand-wrapper >}}
+{{% expand "View `array_dims` example" %}}
+
+```sql
+SELECT array_dims([[1, 2, 3], [4, 5, 6]]) AS array_dims
+```
+
+| array_dims |
+| :--------- |
+| [2, 3]     |
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+## array_distance
+
+Returns the Euclidean distance between two input arrays of equal length.
+
+```sql
+array_distance(array1, array2)
+```
+
+### Arguments
+
+- **array1**: Array expression. Can be a constant, column, or function, and any
+  combination of array operators.
+- **array2**: Array expression. Can be a constant, column, or function, and any
+  combination of array operators.
+
+#### Aliases
+
+- `list_distance`
+
+{{< expand-wrapper >}}
+{{% expand "View `array_distance` example" %}}
+
+```sql
+SELECT array_distance([1, 2], [1, 4]) AS array_distance
+```
+
+| array_distance |
+| -------------: |
+| 2.0            |
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+## array_distinct
+
+Returns distinct values from the array after removing duplicates.
+
+```sql
+array_distinct(array)
+```
+
+### Arguments
+
+- **array**: Array expression. Can be a constant, column, or function, and any
+  combination of array operators.
+
+#### Aliases
+
+- `list_distinct`
+
+{{< expand-wrapper >}}
+{{% expand "View `array_distinct` example" %}}
+
+```sql
+SELECT array_distinct([1, 3, 2, 3, 1, 2, 4]) AS array_distinct
+```
+
+| array_distinct |
+| :------------- |
+| [1, 2, 3, 4]   |
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+## array_element
+
+Extracts the element with the index n from the array.
+
+```sql
+array_element(array, index)
+```
+
+### Arguments
+
+- **array**: Array expression. Can be a constant, column, or function, and any
+  combination of array operators.
+- **index**: Index to use to extract the element from the array.
+ +#### Aliases + +- `array_extract` +- `list_element` +- `list_extract` + +{{< expand-wrapper >}} +{{% expand "View `array_element` example" %}} + +```sql +SELECT array_element([1, 2, 3, 4], 3) AS array_element +``` + +| array_element | +| ------------: | +| 3 | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_empty + +_Alias of [empty](#empty)._ + +## array_except + +Returns an array containing elements from the first array that are not present in the second array. + +```sql +array_except(array1, array2) +``` + +### Arguments + +- **array1**: Array expression. Can be a constant, column, or function, and any + combination of array operators. +- **array2**: Array expression. Can be a constant, column, or function, and any + combination of array operators. + +#### Aliases + +- `list_except` + +{{< expand-wrapper >}} +{{% expand "View `array_except` example" %}} + +```sql +SELECT array_except([1, 2, 3, 4], [5, 6, 3, 4]) AS array_except +``` + +| array_except | +| :----------- | +| [1, 2] | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_extract + +_Alias of [array_element](#array_element)._ + +## array_has + +Returns `true` if the array contains the element. + +```sql +array_has(array, element) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. +- **element**: Scalar or Array expression. Can be a constant, column, or + function, and any combination of array operators. + +#### Aliases + +- `list_has` +- `array_contains` +- `list_contains` + +{{< expand-wrapper >}} +{{% expand "View `array_has` example" %}} + +```sql +SELECT array_has([1, 2, 3], 2) AS array_has +``` + +| array_has | +| :-------- | +| true | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_has_all + +Returns `true` if all elements of sub-array exist in array. + +```sql +array_has_all(array, sub-array) +``` + +### Arguments + +- **array**: Array expression. 
Can be a constant, column, or function, and any + combination of array operators. +- **sub-array**: Array expression. Can be a constant, column, or function, and + any combination of array operators. + +#### Aliases + +- `list_has_all` + +{{< expand-wrapper >}} +{{% expand "View `array_has_all` example" %}} + +```sql +SELECT array_has_all([1, 2, 3, 4], [2, 3]) AS array_has_all +``` + +| array_has_all | +| :------------ | +| true | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_has_any + +Returns `true` if at least one element appears in both arrays. + +```sql +array_has_any(array, sub-array) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. +- **sub-array**: Array expression. Can be a constant, column, or function, and + any combination of array operators. + +#### Aliases + +- `list_has_any` +- `arrays_overlap` + +{{< expand-wrapper >}} +{{% expand "View `array_has_any` example" %}} + +```sql +SELECT array_has_any([1, 2, 3], [3, 4]) AS array_has_any +``` + +| array_has_any | +| :------------ | +| true | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_indexof + +_Alias of [array_position](#array_position)._ + +## array_intersect + +Returns an array containing only the elements that appear in both **array1** and **array2**. + +```sql +array_intersect(array1, array2) +``` + +### Arguments + +- **array1**: Array expression. Can be a constant, column, or function, and any + combination of array operators. +- **array2**: Array expression. Can be a constant, column, or function, and any + combination of array operators. 
+ +#### Aliases + +- `list_intersect` + +{{< expand-wrapper >}} +{{% expand "View `array_intersect` example with intersecting arrays" %}} + +```sql +SELECT array_intersect([1, 2, 3, 4], [5, 6, 3, 4]) AS array_intersect +``` + +| array_intersect | +| :-------------- | +| [3, 4] | + +{{% /expand %}} +{{% expand "View `array_intersect` example with non-intersecting arrays" %}} + +```sql +SELECT array_intersect([1, 2, 3, 4], [5, 6, 7, 8]) AS array_intersect +``` + +| array_intersect | +| :-------------- | +| [] | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_join + +_Alias of [array_to_string](#array_to_string)._ + +## array_length + +Returns the length of the array dimension. + +```sql +array_length(array, dimension) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. +- **dimension**: Array dimension. Default is `1`. + +#### Aliases + +- `list_length` + +{{< expand-wrapper >}} +{{% expand "View `array_length` example with single-dimension array" %}} + +```sql +SELECT array_length([1, 2, 3, 4, 5]) AS array_length +``` + +| array_length | +| -----------: | +| 5 | + +{{% /expand %}} +{{% expand "View `array_length` example with multi-dimension array" %}} + +```sql +WITH vars AS ( + SELECT [ + [1, 2, 3, 4, 5], + [5, 6, 7, 8, 9] + ] AS example_array +) + +SELECT + array_length(example_array, 1) AS 'dim1_length', + array_length(example_array, 2) AS 'dim2_length' +FROM vars +``` + +| dim1_length | dim2_length | +| ----------: | ----------: | +| 2 | 5 | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_max + +Returns the maximum value in the array. + +```sql +array_max(array) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. 
+ +#### Aliases + +- `list_max` + +{{< expand-wrapper >}} +{{% expand "View `array_max` example" %}} + +```sql +SELECT array_max([3,1,4,2]) AS array_max +``` + +| array_max | +| --------: | +| 4 | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_min + +Returns the minimum value in the array. + +```sql +array_min(array) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. + +{{< expand-wrapper >}} +{{% expand "View `array_min` example" %}} + +```sql +SELECT array_min([3,1,4,2]) AS array_min +``` + +| array_min | +| --------: | +| 1 | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_ndims + +Returns the number of dimensions of the array. + +```sql +array_ndims(array) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. + +#### Aliases + +- `list_ndims` + +{{< expand-wrapper >}} +{{% expand "View `array_ndims` example" %}} + +```sql +SELECT array_ndims([[1, 2, 3], [4, 5, 6]]) AS array_ndims +``` + +| array_ndims | +| ----------: | +| 2 | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_pop_back + +Returns the array without the last element. + +```sql +array_pop_back(array) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. + +#### Aliases + +- `list_pop_back` + +{{< expand-wrapper >}} +{{% expand "View `array_pop_back` example" %}} + +```sql +SELECT array_pop_back([1, 2, 3]) AS array_pop_back +``` + +| array_pop_back | +| :------------- | +| [1, 2] | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_pop_front + +Returns the array without the first element. + +```sql +array_pop_front(array) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. 
+ +#### Aliases + +- `list_pop_front` + +{{< expand-wrapper >}} +{{% expand "View `array_pop_front` example" %}} + +```sql +SELECT array_pop_front([1, 2, 3]) AS array_pop_front +``` + +| array_pop_front | +| :-------------- | +| [2, 3] | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_position + +Returns the position of the first occurrence of the specified element in the +array, or _NULL_ if not found. + +```sql +array_position(array, element, index) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. +- **element**: Element to search for position in the array. +- **index**: Index at which to start searching (1-indexed). Default is `1`. + +#### Aliases + +- `list_position` +- `array_indexof` +- `list_indexof` + +{{< expand-wrapper >}} +{{% expand "View `array_position` example" %}} + +```sql +SELECT array_position([1, 2, 2, 3, 1, 4], 2) AS array_position +``` + +| array_position | +| -------------: | +| 2 | + +{{% /expand %}} +{{% expand "View `array_position` example with index offset" %}} + +```sql +SELECT array_position([1, 2, 2, 3, 1, 4], 2, 3) AS array_position +``` + +| array_position | +| -------------: | +| 3 | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_positions + +Searches for an element in the array and returns the position or index of each +occurrence. + +```sql +array_positions(array, element) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. +- **element**: Element to search for position in the array. 
+ +#### Aliases + +- `list_positions` + +{{< expand-wrapper >}} +{{% expand "View `array_positions` example" %}} + +```sql +SELECT array_positions(['John', 'Jane', 'James', 'John'], 'John') AS array_positions +``` + +| array_positions | +| :-------------- | +| [1, 4] | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_prepend + +Prepends an element to the beginning of an array. + +```sql +array_prepend(element, array) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. +- **element**: Element to prepend to the array. + +#### Aliases + +- `list_prepend` +- `array_push_front` +- `list_push_front` + +{{< expand-wrapper >}} +{{% expand "View `array_prepend` example" %}} + +```sql +SELECT array_prepend(1, [2, 3, 4]) AS array_prepend +``` + +| array_prepend | +| :------------ | +| [1, 2, 3, 4] | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_push_back + +_Alias of [array_append](#array_append)._ + +## array_push_front + +_Alias of [array_prepend](#array_prepend)._ + +## array_remove + +Removes the first element from the array equal to the given value. + +```sql +array_remove(array, element) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. +- **element**: Element to remove from the array. + +#### Aliases + +- `list_remove` + +{{< expand-wrapper >}} +{{% expand "View `array_remove` example" %}} + +```sql +SELECT array_remove([1, 2, 2, 3, 2, 1, 4], 2) AS array_remove +``` + +| array_remove | +| :----------------- | +| [1, 2, 3, 2, 1, 4] | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_remove_all + +Removes all elements from the array equal to the specified value. + +```sql +array_remove_all(array, element) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. 
+- **element**: Element to be removed from the array. + +#### Aliases + +- `list_remove_all` + +{{< expand-wrapper >}} +{{% expand "View `array_remove_all` example" %}} + +```sql +SELECT array_remove_all([1, 2, 2, 3, 2, 1, 4], 2) AS array_remove_all +``` + +| array_remove_all | +| :--------------- | +| [1, 3, 1, 4] | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_remove_n + +Removes the first `max` elements from the array equal to the specified value. + +```sql +array_remove_n(array, element, max) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. +- **element**: Element to remove from the array. +- **max**: Maximum number of occurrences to remove. + +#### Aliases + +- `list_remove_n` + +{{< expand-wrapper >}} +{{% expand "View `array_remove_n` example" %}} + +```sql +SELECT array_remove_n([1, 2, 2, 3, 2, 1, 4], 2, 2) AS array_remove_n +``` + +| array_remove_n | +| :-------------- | +| [1, 3, 2, 1, 4] | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_repeat + +Returns an array containing element `count` times. + +```sql +array_repeat(element, count) +``` + +### Arguments + +- **element**: Element expression. Can be a constant, column, or function, and + any combination of array operators. +- **count**: Number of times to repeat the element. 
+ +#### Aliases + +- `list_repeat` + +{{< expand-wrapper >}} +{{% expand "View `array_repeat` example with numeric values" %}} + +```sql +SELECT array_repeat(1, 3) AS array_repeat +``` + +| array_repeat | +| :----------- | +| [1, 1, 1] | + +{{% /expand %}} +{{% expand "View `array_repeat` example with string values" %}} + +```sql +SELECT array_repeat('John', 3) AS array_repeat +``` + +| array_repeat | +| :----------- | +| [John, John, John] | + + +{{% /expand %}} +{{% expand "View `array_repeat` example with array values" %}} + +```sql +SELECT array_repeat([1, 2], 2) AS array_repeat +``` + +| array_repeat | +| :--------------- | +| [[1, 2], [1, 2]] | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_replace + +Replaces the first occurrence of the specified element with another specified element. + +```sql +array_replace(array, from, to) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. +- **from**: Element to replace. +- **to**: Replacement element. + +#### Aliases + +- `list_replace` + +{{< expand-wrapper >}} +{{% expand "View `array_replace` example" %}} + +```sql +SELECT array_replace(['John', 'Jane', 'James', 'John'], 'John', 'Joe') AS array_replace +``` + +| array_replace | +| :----------------------- | +| [Joe, Jane, James, John] | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_replace_all + +Replaces all occurrences of the specified element with another specified element. + +```sql +array_replace_all(array, from, to) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. +- **from**: Element to replace. +- **to**: Replacement element. 
+
+#### Aliases
+
+- `list_replace_all`
+
+{{< expand-wrapper >}}
+{{% expand "View `array_replace_all` example" %}}
+
+```sql
+SELECT array_replace_all(['John', 'Jane', 'James', 'John'], 'John', 'Joe') AS array_replace_all
+```
+
+| array_replace_all       |
+| :---------------------- |
+| [Joe, Jane, James, Joe] |
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+## array_replace_n
+
+Replaces the first `max` occurrences of the specified element with another
+specified element.
+
+```sql
+array_replace_n(array, from, to, max)
+```
+
+### Arguments
+
+- **array**: Array expression. Can be a constant, column, or function, and any
+  combination of array operators.
+- **from**: Element to replace.
+- **to**: Replacement element.
+- **max**: Maximum number of occurrences to replace.
+
+#### Aliases
+
+- `list_replace_n`
+
+{{< expand-wrapper >}}
+{{% expand "View `array_replace_n` example" %}}
+
+```sql
+SELECT array_replace_n(['John', 'Jane', 'James', 'John', 'John'], 'John', 'Joe', 2) AS array_replace_n
+```
+
+| array_replace_n               |
+| :---------------------------- |
+| [Joe, Jane, James, Joe, John] |
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+## array_resize
+
+Resizes the array to the specified size. If expanding, fills new elements with
+the specified value (or _NULL_ if not provided). If shrinking, truncates excess
+elements.
+
+```sql
+array_resize(array, size, value)
+```
+
+### Arguments
+
+- **array**: Array expression. Can be a constant, column, or function, and any
+  combination of array operators.
+- **size**: New size of the array.
+- **value**: Value to use for new elements. Default is _NULL_.
+ +#### Aliases + +- `list_resize` + +{{< expand-wrapper >}} +{{% expand "View `array_resize` example" %}} + +```sql +SELECT array_resize([1, 2, 3], 5, 0) AS array_resize +``` + +| array_resize(List([1,2,3],5,0)) | +| :------------------------------ | +| [1, 2, 3, 0, 0] | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_reverse + +Returns the array with the order of the elements reversed. + +```sql +array_reverse(array) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. + +#### Aliases + +- `list_reverse` + +{{< expand-wrapper >}} +{{% expand "View `array_reverse` example" %}} + +```sql +SELECT array_reverse([1, 2, 3, 4]) AS array_reverse +``` + +| array_reverse | +| :------------ | +| [4, 3, 2, 1] | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_slice + +Returns a slice of the array based on 1-indexed start and end positions. + +```sql +array_slice(array, begin, end) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. +- **begin**: Index of the first element. If negative, it counts backward from + the end of the array. +- **end**: Index of the last element. If negative, it counts backward from the + end of the array. +- **stride**: Stride of the array slice. The default is `1`. + +#### Aliases + +- `list_slice` + +{{< expand-wrapper >}} +{{% expand "View `array_slice` example" %}} + +```sql +SELECT array_slice([1, 2, 3, 4, 5, 6, 7, 8], 3, 6) AS array_slice +``` + +| array_slice | +| :----------- | +| [3, 4, 5, 6] | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_sort + +Sorts elements in an array. +If elements are numeric, it sorts elements in numerical order. +If elements are strings, it sorts elements in lexicographical order. + +```sql +array_sort(array, sort_order, sort_nulls) +``` + +### Arguments + +- **array**: Array expression. 
Can be a constant, column, or function, and any + combination of array operators. +- **sort_order**: Sort order (`'ASC'` _(default)_ or `'DESC'`). +- **sort_nulls**: Sort nulls first or last (`'NULLS FIRST'` _(default)_ or `'NULLS LAST'`). + +#### Aliases + +- `list_sort` + +{{< expand-wrapper >}} +{{% expand "View `array_sort` example with numeric elements" %}} + +```sql +SELECT array_sort([3, 1, 2]) AS array_sort +``` + +| array_sort | +| :--------- | +| [1, 2, 3] | + +{{% /expand %}} +{{% expand "View `array_sort` example with string elements" %}} + +```sql +SELECT array_sort(['banana', 'apple', 'cherry'], 'DESC') AS array_sort +``` + +| array_sort | +| :---------------------- | +| [cherry, banana, apple] | + +{{% /expand %}} +{{% expand "View `array_sort` example with _NULL_ elements" %}} + +```sql +SELECT + array_sort( + ['banana', 'apple', NULL, 'cherry', NULL], + 'ASC', + 'NULLS LAST' + ) AS array_sort +``` + +| array_sort | +| :-------------------------- | +| [apple, banana, cherry, , ] | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## array_to_string + +Converts an array to a string by joining all elements with the specified delimiter. + +```sql +array_to_string(array, delimiter[, null_string]) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. +- **delimiter**: Array element separator. +- **null_string**: Optional. String to replace _NULL_ values in the array. + If not provided, _NULL_ elements are ignored. 
+
+#### Aliases
+
+- `list_to_string`
+- `array_join`
+- `list_join`
+
+{{< expand-wrapper >}}
+{{% expand "View `array_to_string` example" %}}
+
+```sql
+SELECT array_to_string([1,2,3,4,5,6,7,8], ',') AS array_to_string
+```
+
+| array_to_string |
+| :-------------- |
+| 1,2,3,4,5,6,7,8 |
+
+{{% /expand %}}
+{{% expand "View `array_to_string` example with _NULL_ replacements" %}}
+
+```sql
+SELECT array_to_string([[1,2,3,4,5,NULL,7,8,NULL]], '-', '?') AS array_to_string
+```
+
+| array_to_string   |
+| :---------------- |
+| 1-2-3-4-5-?-7-8-? |
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+
+## array_union
+
+Returns the set union of the two input arrays: an array containing all
+unique elements from both input arrays, with
+duplicates removed.
+
+```sql
+array_union(array1, array2)
+```
+
+### Arguments
+
+- **array1**: Array expression. Can be a constant, column, or function, and any
+  combination of array operators.
+- **array2**: Array expression. Can be a constant, column, or function, and any
+  combination of array operators.
+
+#### Aliases
+
+- `list_union`
+
+{{< expand-wrapper >}}
+{{% expand "View `array_union` example" %}}
+
+```sql
+SELECT array_union([1, 2, 3, 4], [5, 6, 3, 4]) AS array_union
+```
+
+| array_union        |
+| :----------------- |
+| [1, 2, 3, 4, 5, 6] |
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+## arrays_overlap
+
+_Alias of [array_has_any](#array_has_any)._
+
+## cardinality
+
+Returns the total number of elements in the array.
+
+```sql
+cardinality(array)
+```
+
+### Arguments
+
+- **array**: Array expression. Can be a constant, column, or function, and any
+  combination of array operators.
+ +{{< expand-wrapper >}} +{{% expand "View `cardinality` example" %}} + +```sql +SELECT cardinality([[1, 2, 3, 4], [5, 6, 7, 8]]) AS cardinality +``` + +| cardinality | +| ----------: | +| 8 | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## empty + +Returns `true` for an empty array or `false` for a non-empty array. + +```sql +empty(array) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. + +#### Aliases + +- `array_empty` +- `list_empty` + +{{< expand-wrapper >}} +{{% expand "View `empty` example" %}} + +```sql +SELECT empty(['apple']) AS empty +``` + +| empty | +| :---- | +| false | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## flatten + +Flattens nested arrays into a single-level array. + +- Recursively flattens arrays at any depth of nesting +- Returns unchanged if the array is already flat + +The result contains all elements from all nested arrays in a single flat array. + +```sql +flatten(array) +``` + +### Arguments + +- **array**: Array expression. Can be a constant, column, or function, and any + combination of array operators. + +{{< expand-wrapper >}} +{{% expand "View `flatten` example" %}} + +```sql +SELECT flatten([[1, 2], [3, 4]]) AS flattened +``` + +| flattened | +| :----------- | +| [1, 2, 3, 4] | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## generate_series + +Returns an array with values between the specified **start** and **stop** values +generated at the specified **step**. + +The range `start..stop` contains all values greater than or equal to **start** +and less than or equal to **stop** (`start <= x <= stop`). +If **start** is greater than or equal to **stop** (`start >= stop`), the +function returns an empty array. + +_`generate_series` is similar to [range](#range), but includes the upper bound +(**stop**) in the output array._ + +```sql +generate_series(start, stop, step) +``` + +### Arguments + +- **start**: Start of the series. 
Supports integers, timestamps, dates, or + string types that can be coerced to `Date32`. +- **stop**: Upper bound of the series. Supports integers, timestamps, + dates, or string types that can be coerced to `Date32`. The type must be the + same as **start**. +- **step**: Increase by step (cannot be `0`). Steps less than a day are + only supported for ranges with the `TIMESTAMP` type. + +##### Related functions + +[range](#range) + +{{< expand-wrapper >}} +{{% expand "View `generate_series` example" %}} + +```sql +SELECT generate_series(1,5) AS generate_series +``` + +| generate_series | +| :-------------- | +| [1, 2, 3, 4, 5] | + +{{% /expand %}} +{{% expand "View `range` example with dates" %}} + +```sql +SELECT + generate_series( + DATE '2025-03-01', + DATE '2025-08-01', + INTERVAL '1 month' + ) AS generate_series +``` + +| generate_series | +| :----------------------------------------------------------------------- | +| [2025-03-01, 2025-04-01, 2025-05-01, 2025-06-01, 2025-07-01, 2025-08-01] | + +{{% /expand %}} +{{% expand "View `generate_series` example using timestamps" %}} + +```sql +SELECT + generate_series( + '2025-01-01T00:00:00Z'::timestamp, + '2025-01-01T06:00:00Z'::timestamp, + INTERVAL '2 hours' + ) AS generate_series +``` + +| generate_series | +| :----------------------------------------------------------------------------------- | +| [2025-01-01T00:00:00, 2025-01-01T02:00:00, 2025-01-01T04:00:00, 2025-01-01T06:00:00] | + +{{% /expand %}} +{{< /expand-wrapper >}} + + +## list_any_value + +_Alias of [array_any_value](#array_any_value)._ + +## list_append + +_Alias of [array_append](#array_append)._ + +## list_cat + +_Alias of [array_concat](#array_concat)._ + +## list_concat + +_Alias of [array_concat](#array_concat)._ + +## list_contains + +_Alias of [array_has](#array_has)._ + +## list_dims + +_Alias of [array_dims](#array_dims)._ + +## list_distance + +_Alias of [array_distance](#array_distance)._ + +## list_distinct + +_Alias of 
[array_distinct](#array_distinct)._ + +## list_element + +_Alias of [array_element](#array_element)._ + +## list_empty + +_Alias of [empty](#empty)._ + +## list_except + +_Alias of [array_except](#array_except)._ + +## list_extract + +_Alias of [array_element](#array_element)._ + +## list_has + +_Alias of [array_has](#array_has)._ + +## list_has_all + +_Alias of [array_has_all](#array_has_all)._ + +## list_has_any + +_Alias of [array_has_any](#array_has_any)._ + +## list_indexof + +_Alias of [array_position](#array_position)._ + +## list_intersect + +_Alias of [array_intersect](#array_intersect)._ + +## list_join + +_Alias of [array_to_string](#array_to_string)._ + +## list_length + +_Alias of [array_length](#array_length)._ + +## list_max + +_Alias of [array_max](#array_max)._ + +## list_ndims + +_Alias of [array_ndims](#array_ndims)._ + +## list_pop_back + +_Alias of [array_pop_back](#array_pop_back)._ + +## list_pop_front + +_Alias of [array_pop_front](#array_pop_front)._ + +## list_position + +_Alias of [array_position](#array_position)._ + +## list_positions + +_Alias of [array_positions](#array_positions)._ + +## list_prepend + +_Alias of [array_prepend](#array_prepend)._ + +## list_push_back + +_Alias of [array_append](#array_append)._ + +## list_push_front + +_Alias of [array_prepend](#array_prepend)._ + +## list_remove + +_Alias of [array_remove](#array_remove)._ + +## list_remove_all + +_Alias of [array_remove_all](#array_remove_all)._ + +## list_remove_n + +_Alias of [array_remove_n](#array_remove_n)._ + +## list_repeat + +_Alias of [array_repeat](#array_repeat)._ + +## list_replace + +_Alias of [array_replace](#array_replace)._ + +## list_replace_all + +_Alias of [array_replace_all](#array_replace_all)._ + +## list_replace_n + +_Alias of [array_replace_n](#array_replace_n)._ + +## list_resize + +_Alias of [array_resize](#array_resize)._ + +## list_reverse + +_Alias of [array_reverse](#array_reverse)._ + +## list_slice + +_Alias of 
[array_slice](#array_slice)._ + +## list_sort + +_Alias of [array_sort](#array_sort)._ + +## list_to_string + +_Alias of [array_to_string](#array_to_string)._ + +## list_union + +_Alias of [array_union](#array_union)._ + +## make_array + +Returns an array using the specified input expressions. + +```sql +make_array(expression1[, ..., expression_n]) +``` + +### Arguments + +- **expression_n**: Expression to include in the output array. + Can be a constant, column, or function, and any combination of arithmetic or + string operators. + +#### Aliases + +- `make_list` + +{{< expand-wrapper >}} +{{% expand "View `make_array` example" %}} + +```sql +SELECT make_array(1, 2, 3, 4, 5) AS make_array +``` + +| make_array | +| :-------------- | +| [1, 2, 3, 4, 5] | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## make_list + +_Alias of [make_array](#make_array)._ + +## range + +Returns an array with values between the specified **start** and **stop** values +generated at the specified **step**. + +The range `start..stop` contains all values greater than or equal to **start** +and less than **stop** (`start <= x < stop`). +If **start** is greater than or equal to **stop** (`start >= stop`), the +function returns an empty array. + +_`range` is similar to [generate_series](#generate_series), but does not include +the upper bound (**stop**) in the output array._ + +```sql +range(start, stop, step) +``` + +### Arguments + +- **start**: Start of the series. Supports integers, timestamps, dates, or + string types that can be coerced to `Date32`. +- **stop**: Upper bound of the series. Supports integers, timestamps, + dates, or string types that can be coerced to `Date32`. The type must be the + same as **start**. +- **step**: Increase by step (cannot be `0`). Steps less than a day are + only supported for ranges with the `TIMESTAMP` type. 
+
+##### Related functions
+
+[generate_series](#generate_series)
+
+{{< expand-wrapper >}}
+{{% expand "View `range` example" %}}
+
+```sql
+SELECT range(1, 5, 1) AS range
+```
+| range        |
+|:-------------|
+| [1, 2, 3, 4] |
+
+{{% /expand %}}
+{{% expand "View `range` example with dates" %}}
+
+```sql
+SELECT
+  range(
+    DATE '2025-03-01',
+    DATE '2025-08-01',
+    INTERVAL '1 month'
+  ) AS range
+```
+
+| range                                                        |
+| :----------------------------------------------------------- |
+| [2025-03-01, 2025-04-01, 2025-05-01, 2025-06-01, 2025-07-01] |
+
+{{% /expand %}}
+{{% expand "View `range` example with timestamps" %}}
+
+```sql
+SELECT
+  range(
+    '2025-01-01T00:00:00Z'::timestamp,
+    '2025-01-01T06:00:00Z'::timestamp,
+    INTERVAL '2 hours'
+  ) AS range
+```
+
+| range                                                           |
+| :-------------------------------------------------------------- |
+| [2025-01-01T00:00:00, 2025-01-01T02:00:00, 2025-01-01T04:00:00] |
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+## string_to_array
+
+Splits a string into an array of substrings based on a delimiter. Any substrings
+matching the optional `null_str` argument are replaced with `NULL`.
+
+```sql
+string_to_array(str, delimiter[, null_str])
+```
+
+### Arguments
+
+- **str**: String expression to split.
+- **delimiter**: Delimiter string to split on.
+- **null_str**: _(Optional)_ Substring values to replace with `NULL`.
+ +#### Aliases + +- `string_to_list` + +{{< expand-wrapper >}} +{{% expand "View `string_to_array` example with comma-delimited list" %}} + +```sql +SELECT string_to_array('abc, def, ghi', ', ') AS string_to_array +``` + +| string_to_array | +| :-------------- | +| [abc, def, ghi] | + +{{% /expand %}} +{{% expand "View `string_to_array` example with a non-standard delimiter" %}} + +```sql +SELECT string_to_array('abc##def', '##') AS string_to_array +``` + +| string_to_array | +| :-------------- | +| ['abc', 'def'] | + +{{% /expand %}} +{{% expand "View `string_to_array` example with _NULL_ replacements" %}} + +```sql +SELECT string_to_array('abc def', ' ', 'def') AS string_to_array +``` + +| string_to_array | +| :-------------- | +| ['abc', NULL] | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## string_to_list + +_Alias of [string_to_array](#string_to_array)._ diff --git a/content/shared/sql-reference/functions/binary-string.md b/content/shared/sql-reference/functions/binary-string.md index 42521e3f1..1b81909d5 100644 --- a/content/shared/sql-reference/functions/binary-string.md +++ b/content/shared/sql-reference/functions/binary-string.md @@ -13,7 +13,7 @@ Decode binary data from textual representation in string. decode(expression, format) ``` -##### Arguments +### Arguments - **expression**: Expression containing encoded string data. Can be a constant, column, or function, and any combination of string operators. @@ -33,7 +33,7 @@ Encode binary data into a textual representation. encode(expression, format) ``` -##### Arguments +### Arguments - **expression**: Expression containing string or binary data. Can be a constant, column, or function, and any combination of string operators. 
diff --git a/content/shared/sql-reference/functions/cache.md b/content/shared/sql-reference/functions/cache.md index f4e33b914..2179be780 100644 --- a/content/shared/sql-reference/functions/cache.md +++ b/content/shared/sql-reference/functions/cache.md @@ -13,11 +13,11 @@ Returns data from an {{< product-name >}} distinct value cache. distinct_cache(table_name, cache_name) ``` -#### Arguments +### Arguments - **table_name**: Name of the table associated with the distinct value cache _(formatted as a string literal)_. -- **datatype**: Name of the the distinct value cache to query +- **cache_name**: Name of the distinct value cache to query _(formatted as a string literal)_. {{< expand-wrapper >}} @@ -38,11 +38,11 @@ Returns data from an {{< product-name >}} last value cache. last_cache(table_name, cache_name) ``` -#### Arguments +### Arguments - **table_name**: Name of the table associated with the last value cache _(formatted as a string literal)_. -- **datatype**: Name of the the last value cache to query +- **cache_name**: Name of the last value cache to query _(formatted as a string literal)_. {{< expand-wrapper >}} diff --git a/content/shared/sql-reference/functions/conditional.md b/content/shared/sql-reference/functions/conditional.md index b3258ef81..a264ba057 100644 --- a/content/shared/sql-reference/functions/conditional.md +++ b/content/shared/sql-reference/functions/conditional.md @@ -2,16 +2,13 @@ The {{< product-name >}} SQL implementation supports the following conditional functions for conditionally handling _null_ values: - [coalesce](#coalesce) +- [greatest](#greatest) - [ifnull](#ifnull) +- [least](#least) - [nullif](#nullif) - [nvl](#nvl) - [nvl2](#nvl2) - - ## coalesce Returns the first of its arguments that is not _null_. 
@@ -56,7 +53,7 @@ FROM {{% /expand %}} {{< /expand-wrapper >}} - ## ifnull _Alias of [nvl](#nvl)._ - ## nullif diff --git a/content/shared/sql-reference/functions/hashing.md b/content/shared/sql-reference/functions/hashing.md index 54a3b5d64..7a014537a 100644 --- a/content/shared/sql-reference/functions/hashing.md +++ b/content/shared/sql-reference/functions/hashing.md @@ -17,7 +17,7 @@ Computes the binary hash of an expression using the specified algorithm. digest(expression, algorithm) ``` -##### Arguments +### Arguments - **expression**: String expression to operate on. Can be a constant, column, or function, and any combination of operators. @@ -63,7 +63,7 @@ Computes an MD5 128-bit checksum for a string expression. md5(expression) ``` -##### Arguments +### Arguments - **expression**: String expression to operate on. Can be a constant, column, or function, and any combination of operators. @@ -98,7 +98,7 @@ Computes the SHA-224 hash of a binary string. sha224(expression) ``` -##### Arguments +### Arguments - **expression**: String expression to operate on. Can be a constant, column, or function, and any combination of operators. @@ -133,7 +133,7 @@ Computes the SHA-256 hash of a binary string. sha256(expression) ``` -##### Arguments +### Arguments - **expression**: String expression to operate on. Can be a constant, column, or function, and any combination of operators. @@ -168,7 +168,7 @@ Computes the SHA-384 hash of a binary string. sha384(expression) ``` -##### Arguments +### Arguments - **expression**: String expression to operate on. Can be a constant, column, or function, and any combination of operators. @@ -203,7 +203,7 @@ Computes the SHA-512 hash of a binary string. sha512(expression) ``` -##### Arguments +### Arguments - **expression**: String expression to operate on. Can be a constant, column, or function, and any combination of operators. 
diff --git a/content/shared/sql-reference/functions/map.md b/content/shared/sql-reference/functions/map.md new file mode 100644 index 000000000..b6d4dba27 --- /dev/null +++ b/content/shared/sql-reference/functions/map.md @@ -0,0 +1,272 @@ + +Use map functions to create and operate on Arrow maps in SQL queries. + +- [element_at](#element_at) +- [make_map](#make_map) +- [map](#map) +- [map_extract](#map_extract) +- [map_keys](#map_keys) +- [map_values](#map_values) + + +## element_at + +_Alias of [map_extract](#map_extract)._ + +## make_map + +Returns an Arrow map with the specified key and value. + +```sql +make_map(key, value) +``` + +### Arguments + +- **key**: Expression to use for the key. + Can be a constant, column, function, or any combination of arithmetic or + string operators. +- **value**: Expression to use for the value. + Can be a constant, column, function, or any combination of arithmetic or + string operators. + +{{< expand-wrapper >}} +{{% expand "View `make_map` query example" %}} + +_The following example uses the +{{% influxdb3/home-sample-link %}}._ + +```sql +SELECT + make_map(room, temp) AS make_map +FROM + home +LIMIT 4 +``` + +| make_map | +| :------------------ | +| {Kitchen: 22.4} | +| {Living Room: 22.2} | +| {Kitchen: 22.7} | +| {Living Room: 22.2} | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## map + +Returns an Arrow map with the specified key-value pairs. +Keys are mapped to values by their positions in each respective list. +Each _key_ must be unique and non-null. + +```sql +map(key_list, value_list) +-- or +map { key: value, ... } +``` + +### Arguments + +- **key_list**: List of keys to use in the map. + Each key must be unique and non-null. +- **value_list**: List of values to map to the corresponding keys. 
+
+{{< expand-wrapper >}}
+{{% expand "View `map` query example" %}}
+
+```sql
+SELECT
+  map(
+    [400, 401, 402, 403, 404],
+    ['Bad Request', 'Unauthorized', 'Payment Required', 'Forbidden', 'Not Found']
+  ) AS map
+```
+
+| map                                                                                          |
+| :------------------------------------------------------------------------------------------- |
+| {400: Bad Request, 401: Unauthorized, 402: Payment Required, 403: Forbidden, 404: Not Found} |
+
+{{% /expand %}}
+{{% expand "View `map` query example with alternate syntax" %}}
+
+```sql
+SELECT
+  map {
+    400: 'Bad Request',
+    401: 'Unauthorized',
+    402: 'Payment Required',
+    403: 'Forbidden',
+    404: 'Not Found'
+  } AS map
+```
+
+| map                                                                                          |
+| :------------------------------------------------------------------------------------------- |
+| {400: Bad Request, 401: Unauthorized, 402: Payment Required, 403: Forbidden, 404: Not Found} |
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+
+
+## map_extract
+
+Returns a list containing the value for the given key, or an empty list if the
+key is not present in the map. The returned list will contain exactly one element
+(the value) when the key is found.
+
+```sql
+map_extract(map, key)
+```
+
+### Arguments
+
+- **map**: Map expression. Can be a constant, column, or function, and any
+  combination of map operators.
+- **key**: Key to extract from the map. Can be a constant, column, or function,
+  any combination of arithmetic or string operators, or a named expression of
+  the previously listed.
+
+#### Aliases
+
+- `element_at`
+
+##### Related functions
+
+[get_field](/influxdb3/version/reference/sql/functions/misc/#get_field)
+
+{{< expand-wrapper >}}
+{{% expand "View `map_extract` query example" %}}
+
+The following example uses the
+[NOAA Bay Area weather sample data](/influxdb3/version/reference/sample-data/#noaa-bay-area-weather-data)
+to perform a query that:
+
+- Defines a set of constants that includes a map that assigns integers to days
+  of the week.
+- Queries the weather sample data and uses `date_part` to extract an integer
+  representing the day of the week of the row's `time` value.
+- Uses `map_extract` and the output of `date_part` to return an array containing
+  the name of the day of the week.
+- Uses bracket notation (`[i]`) to reference an element by index in the returned
+  list (SQL arrays are 1-indexed, so `[1]` retrieves the first element).
+
+```sql
+WITH constants AS (
+  SELECT map(
+    [0, 1, 2, 3, 4, 5, 6],
+    ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']
+  ) AS days_of_week
+)
+SELECT
+  weather.time,
+  map_extract(c.days_of_week, date_part('dow', time))[1] AS day_of_week
+FROM
+  weather,
+  constants AS c
+ORDER BY
+  weather.time
+LIMIT 6
+```
+
+| time                | day_of_week |
+| :------------------ | :---------- |
+| 2020-01-01T00:00:00 | Wednesday   |
+| 2020-01-01T00:00:00 | Wednesday   |
+| 2020-01-01T00:00:00 | Wednesday   |
+| 2020-01-02T00:00:00 | Thursday    |
+| 2020-01-02T00:00:00 | Thursday    |
+| 2020-01-02T00:00:00 | Thursday    |
+
+{{% /expand %}}
+{{< /expand-wrapper >}}
+
+## map_keys
+
+Returns a list of all keys in the map.
+
+```sql
+map_keys(map)
+```
+
+### Arguments
+
+- **map**: Map expression. Can be a constant, column, or function, and any
+combination of map operators.
+ +##### Related functions + +[get_field](/influxdb3/version/reference/sql/functions/misc/#get_field) + +{{< expand-wrapper >}} +{{% expand "View `map_keys` query example" %}} + +```sql +SELECT map_keys(map {'a': 1, 'b': NULL, 'c': 3}) AS map_keys +``` + +| map_keys | +| :-------- | +| [a, b, c] | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## map_values + +Returns a list of all values in the map. + +```sql +map_values(map) +``` + +### Arguments + +- **map**: Map expression. Can be a constant, column, or function, and any combination of map operators. + +{{< expand-wrapper >}} +{{% expand "View `map_values` query example" %}} + +```sql +SELECT map_values(map {'a': 1, 'b': NULL, 'c': 3}) AS map_values +``` + +| map_values | +| :--------- | +| [1, , 3] | + +{{% /expand %}} +{{< /expand-wrapper >}} diff --git a/content/shared/sql-reference/functions/math.md b/content/shared/sql-reference/functions/math.md index 13896ec76..23f32b097 100644 --- a/content/shared/sql-reference/functions/math.md +++ b/content/shared/sql-reference/functions/math.md @@ -49,7 +49,7 @@ Returns the absolute value of a number. abs(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -80,7 +80,7 @@ Returns the arc cosine or inverse cosine of a number. acos(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -111,7 +111,7 @@ Returns the area hyperbolic cosine or inverse hyperbolic cosine of a number. acosh(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -142,7 +142,7 @@ Returns the arc sine or inverse sine of a number. 
asin(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -173,7 +173,7 @@ Returns the area hyperbolic sine or inverse hyperbolic sine of a number. asinh(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -204,7 +204,7 @@ Returns the arc tangent or inverse tangent of a number. atan(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -235,7 +235,7 @@ Returns the area hyperbolic tangent or inverse hyperbolic tangent of a number. atanh(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -266,7 +266,7 @@ Returns the arc tangent or inverse tangent of `expression_y / expression_x`. atan2(expression_y, expression_x) ``` -##### Arguments +### Arguments - **expression_y**: First numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -299,7 +299,7 @@ Returns the cube root of a number. cbrt(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -330,7 +330,7 @@ Returns the nearest integer greater than or equal to a number. ceil(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. 
@@ -361,7 +361,7 @@ Returns the cosine of a number. cos(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -392,7 +392,7 @@ Returns the hyperbolic cosine of a number. cosh(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -423,7 +423,7 @@ Returns the cotangent of a number. cot(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of operators. @@ -455,7 +455,7 @@ Converts radians to degrees. degrees(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -487,7 +487,7 @@ Returns the base-e exponential of a number. exp(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to use as the exponent. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -518,7 +518,7 @@ Returns 1 if value is less than 2. factorial(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Must be an integer (`BIGINT`). @@ -551,7 +551,7 @@ Returns the nearest integer less than or equal to a number. floor(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -583,7 +583,7 @@ Returns `0` if both inputs are zero. gcd(expression_x, expression_y) ``` -##### Arguments +### Arguments - **expression_x**: First numeric expression to operate on. 
Must be an integer (`BIGINT`). @@ -619,7 +619,7 @@ Returns `true` if a given number is ±NaN, otherwise returns `false`. isnan(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Must be a float (`DOUBLE`). @@ -658,7 +658,7 @@ Returns `true` if the given number is ±0.0, otherwise returns `false`. iszero(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -697,7 +697,7 @@ Returns `0` if either input is zero. lcm(expression_x, expression_y) ``` -##### Arguments +### Arguments - **expression_x**: First numeric expression to operate on. Must be an integer (`BIGINT`). @@ -733,7 +733,7 @@ Returns the natural logarithm of a number. ln(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -805,7 +805,7 @@ Returns the base-10 logarithm of a number. log10(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -836,7 +836,7 @@ Returns the base-2 logarithm of a number. log2(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -868,7 +868,7 @@ Otherwise returns the second argument. nanvl(expression_x, expression_y) ``` -##### Arguments +### Arguments - **expression_x**: Numeric expression to return if it’s not `NaN`. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -934,7 +934,7 @@ power(base, exponent) - `pow` -##### Arguments +### Arguments - **base**: Numeric expression to operate on. 
Can be a constant, column, or function, and any combination of arithmetic operators. @@ -971,7 +971,7 @@ Converts degrees to radians. radians(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -1033,7 +1033,7 @@ Rounds a number to the nearest integer. round(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -1066,7 +1066,7 @@ Zero and positive numbers return `1`. signum(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -1097,7 +1097,7 @@ Returns the sine of a number. sin(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -1128,7 +1128,7 @@ Returns the hyperbolic sine of a number. sinh(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -1159,7 +1159,7 @@ Returns the square root of a number. sqrt(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -1190,7 +1190,7 @@ Returns the tangent of a number. tan(numeric_expression) ``` -##### Arguments +### Arguments - **numeric_expression**: Numeric expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -1221,7 +1221,7 @@ Returns the hyperbolic tangent of a number. 
tanh(numeric_expression)
```

-##### Arguments
+### Arguments

- **numeric_expression**: Numeric expression to operate on.
  Can be a constant, column, or function, and any combination of arithmetic operators.
@@ -1253,7 +1253,7 @@ Truncates a number toward zero (at the decimal point).

trunc(numeric_expression)
```

-##### Arguments
+### Arguments

- **numeric_expression**: Numeric expression to operate on.
  Can be a constant, column, or function, and any combination of arithmetic operators.
diff --git a/content/shared/sql-reference/functions/misc.md b/content/shared/sql-reference/functions/misc.md
index e1ef37ba0..2623e432a 100644
--- a/content/shared/sql-reference/functions/misc.md
+++ b/content/shared/sql-reference/functions/misc.md
@@ -3,10 +3,10 @@ for performing a variety of operations:

- [arrow_cast](#arrow_cast)
- [arrow_typeof](#arrow_typeof)
+- [get_field](#get_field)
- [interpolate](#interpolate)
- [locf](#locf)
- [version](#version)
-

## arrow_cast

Casts a value to a specific Arrow data type.

arrow_cast(expression, datatype)
```

-#### Arguments
+### Arguments

- **expression**: Expression to cast.
  Can be a constant, column, or function, and any combination of arithmetic or
@@ -55,7 +55,7 @@ of the expression:

arrow_typeof(expression)
```

-##### Arguments
+### Arguments

- **expression**: Expression to evaluate.
  Can be a constant, column, or function, and any combination of arithmetic or
@@ -84,6 +84,66 @@ LIMIT 1

{{% /expand %}}
{{< /expand-wrapper >}}

+## get_field
+
+Returns a field from a map or a struct with the specified key.
+
+> [!Note]
+> Typically, `get_field` is indirectly invoked via field access syntax such as
+> `my_struct['field_name']` which results in the call:
+> `get_field(my_struct, 'field_name')`.
+
+```sql
+get_field(collection, field)
+```
+
+### Arguments
+
+- **collection**: The map or struct to retrieve a field from.
+- **field**: The name of the field to retrieve from the map or struct.
+ Must evaluate to a string. + +{{< expand-wrapper >}} +{{% expand "View `get_field` example with a struct column" %}} + +```sql +SELECT + get_field(influxdb_struct, 'version') AS influxdb_version +FROM + (VALUES (struct('influxdb' AS product, 'v1' AS version)), + (struct('influxdb' AS product, 'v2' AS version)), + (struct('influxdb' AS product, 'v3' AS version)) + ) AS data(influxdb_struct) +``` + +| influxdb_version | +| :--------------- | +| v1 | +| v2 | +| v3 | + +{{% /expand %}} +{{% expand "View `get_field` example with a map column" %}} + +```sql +SELECT + get_field(influxdb_map, 'version') AS influxdb_version +FROM + (VALUES (map {'product': 'influxdb', 'version': 'v1'}), + (map {'product': 'influxdb', 'version': 'v2'}), + (map {'product': 'influxdb', 'version': 'v3'}) + ) AS data(influxdb_map) +``` + +| influxdb_version | +| :--------------- | +| v1 | +| v2 | +| v3 | + +{{% /expand %}} +{{< /expand-wrapper >}} + ## interpolate Fills null values in a specified aggregated column by interpolating values @@ -94,7 +154,7 @@ Must be used with [`date_bin_gapfill`](/influxdb/version/reference/sql/functions interpolate(aggregate_expression) ``` -##### Arguments +### Arguments - **aggregate_expression**: Aggregate operation on a specified expression. The operation can use any [aggregate function](/influxdb/version/reference/sql/functions/aggregate/). @@ -156,7 +216,7 @@ _LOCF is an initialism of "last observation carried forward."_ locf(aggregate_expression) ``` -##### Arguments +### Arguments - **aggregate_expression**: Aggregate operation on a specified expression. The operation can use any [aggregate function](/influxdb/version/reference/sql/functions/aggregate/). @@ -206,34 +266,6 @@ GROUP BY _time, room {{% /expand %}} {{< /expand-wrapper >}} - - ## version Returns the version of DataFusion. 
diff --git a/content/shared/sql-reference/functions/regular-expression.md b/content/shared/sql-reference/functions/regular-expression.md index c32e6bea2..629cb0f7c 100644 --- a/content/shared/sql-reference/functions/regular-expression.md +++ b/content/shared/sql-reference/functions/regular-expression.md @@ -17,7 +17,7 @@ Returns the number of matches that a regular expression has in a string. regexp_count(str, regexp[, start, flags]) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of operators. @@ -62,7 +62,7 @@ false otherwise. regexp_like(str, regexp[, flags]) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of string operators. @@ -104,7 +104,7 @@ Returns a list of regular expression matches in a string. regexp_match(str, regexp, flags) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of string operators. @@ -147,7 +147,7 @@ Replaces substrings in a string that match a regular expression. regexp_replace(str, regexp, replacement, flags) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of string operators. diff --git a/content/shared/sql-reference/functions/selector.md b/content/shared/sql-reference/functions/selector.md index dbcaf81e0..fd8b4dc96 100644 --- a/content/shared/sql-reference/functions/selector.md +++ b/content/shared/sql-reference/functions/selector.md @@ -61,7 +61,7 @@ Returns the smallest value of a selected column and a timestamp. selector_min(expression, timestamp) ``` -##### Arguments +#### Arguments - **expression**: Expression to operate on. 
Can be a constant, column, or function, and any combination of string or @@ -103,7 +103,7 @@ Returns the largest value of a selected column and a timestamp. selector_max(expression, timestamp) ``` -##### Arguments +#### Arguments - **expression**: Expression to operate on. Can be a constant, column, or function, and any combination of string or @@ -145,7 +145,7 @@ Returns the first value ordered by time ascending. selector_first(expression, timestamp) ``` -##### Arguments +#### Arguments - **expression**: Expression to operate on. Can be a constant, column, or function, and any combination of string or @@ -187,7 +187,7 @@ Returns the last value ordered by time ascending. selector_last(expression, timestamp) ``` -##### Arguments +#### Arguments - **expression**: Expression to operate on. Can be a constant, column, or function, and any combination of string or diff --git a/content/shared/sql-reference/functions/string.md b/content/shared/sql-reference/functions/string.md index fc2d39076..318620019 100644 --- a/content/shared/sql-reference/functions/string.md +++ b/content/shared/sql-reference/functions/string.md @@ -54,7 +54,7 @@ Returns the ASCII value of the first character in a string. ascii(str) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of string operators. @@ -95,7 +95,7 @@ Returns the bit length of a string. bit_length(str) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of string operators. @@ -134,7 +134,7 @@ of the input string. btrim(str[, trim_str]) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of string operators. @@ -225,7 +225,7 @@ Concatenates multiple strings together. 
concat(str[, ..., str_n]) ``` -##### Arguments +### Arguments - **str**: String expression to concatenate. Can be a constant, column, or function, and any combination of string operators. @@ -233,7 +233,7 @@ concat(str[, ..., str_n]) ##### Related functions -[contcat_ws](#contcat_ws) +[concat_ws](#concat_ws) {{< expand-wrapper >}} {{% expand "View `concat` query example" %}} @@ -268,7 +268,7 @@ Concatenates multiple strings together with a specified separator. concat_ws(separator, str[, ..., str_n]) ``` -##### Arguments +### Arguments - **separator**: Separator to insert between concatenated strings. - **str**: String expression to concatenate. @@ -313,7 +313,7 @@ Returns true if a string contains a search string (case-sensitive). contains(str, search_str) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of operators. @@ -327,7 +327,7 @@ Tests if a string ends with a substring. ends_with(str, substr) ``` -##### Arguments +### Arguments - **str**: String expression to test. Can be a constant, column, or function, and any combination of string operators. @@ -365,7 +365,7 @@ Returns 0 if the string is not in the list of substrings. find_in_set(str, strlist) ``` -##### Arguments +### Arguments - **str**: String expression to find in `strlist`. - **strlist**: A string containing a comma-delimited list of substrings. @@ -402,7 +402,7 @@ Words are delimited by non-alphanumeric characters. initcap(str) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of string operators. @@ -444,7 +444,7 @@ If the substring is not in the string, the function returns 0. instr(str, substr) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of string operators. 
@@ -484,7 +484,7 @@ Returns a specified number of characters from the left side of a string. left(str, n) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of string operators. @@ -526,7 +526,7 @@ Returns the number of characters in a string. length(str) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of string operators. @@ -570,7 +570,7 @@ between two strings. levenshtein(str1, str2) ``` -##### Arguments +### Arguments - **str1**: First string expression to operate on. Can be a constant, column, or function, and any combination of string operators. - **str2**: Second string expression to operate on. @@ -608,7 +608,7 @@ Converts a string to lower-case. lower(str) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of string operators. @@ -646,7 +646,7 @@ Pads the left side of a string with another string to a specified string length. lpad(str, n[, padding_str]) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of string operators. @@ -687,7 +687,7 @@ Removes leading spaces from a string. ltrim(str) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of string operators. @@ -733,7 +733,7 @@ Returns the length of a string in bytes. octet_length(str) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of string operators. @@ -772,7 +772,7 @@ position and number of characters to replace. overlay(str PLACING substr FROM pos [FOR count]) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. 
Can be a constant, column, or function, and any combination of string operators. @@ -814,7 +814,7 @@ Returns the position of a substring in a string. position(substr IN str) ``` -##### Arguments +### Arguments - **substr**: Substring expression to search for. Can be a constant, column, or function, and any combination of string operators. @@ -852,7 +852,7 @@ Returns a string with an input string repeated a specified number of times. repeat(str, n) ``` -##### Arguments +### Arguments - **str**: String expression to repeat. Can be a constant, column, or function, and any combination of string operators. @@ -889,7 +889,7 @@ Replaces all occurrences of a specified substring in a string with a new substri replace(str, substr, replacement) ``` -##### Arguments +### Arguments - **str**: String expression to repeat. Can be a constant, column, or function, and any combination of string operators. @@ -926,7 +926,7 @@ Reverses the character order of a string. reverse(str) ``` -##### Arguments +### Arguments - **str**: String expression to repeat. Can be a constant, column, or function, and any combination of string operators. @@ -959,7 +959,7 @@ Returns a specified number of characters from the right side of a string. right(str, n) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of string operators. @@ -997,7 +997,7 @@ Pads the right side of a string with another string to a specified string length rpad(str, n[, padding_str]) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of string operators. @@ -1038,7 +1038,7 @@ Removes trailing spaces from a string. rtrim(str) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of string operators. @@ -1081,7 +1081,7 @@ specified position. 
split_part(str, delimiter, pos) ``` -##### Arguments +### Arguments - **str**: String expression to spit. Can be a constant, column, or function, and any combination of string operators. @@ -1119,7 +1119,7 @@ Tests if a string starts with a substring. starts_with(str, substr) ``` -##### Arguments +### Arguments - **str**: String expression to test. Can be a constant, column, or function, and any combination of string operators. @@ -1159,7 +1159,7 @@ If the substring does not exist in the string, the function returns 0. strpos(str, substr) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of string operators. @@ -1195,7 +1195,7 @@ starting position in a string. substr(str, start_pos[, length]) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of string operators. @@ -1237,7 +1237,7 @@ final delimiter (counting from the right). substr_index(str, delimiter, count) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of string operators. @@ -1321,7 +1321,7 @@ Converts an integer to a hexadecimal string. to_hex(int) ``` -##### Arguments +### Arguments - **int**: Integer expression to convert. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -1357,7 +1357,7 @@ Removes leading and trailing spaces from a string. trim(str) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of string operators. @@ -1399,7 +1399,7 @@ Converts a string to upper-case. upper(str) ``` -##### Arguments +### Arguments - **str**: String expression to operate on. Can be a constant, column, or function, and any combination of string operators. 
diff --git a/content/shared/sql-reference/functions/struct.md b/content/shared/sql-reference/functions/struct.md new file mode 100644 index 000000000..ec19ecfa9 --- /dev/null +++ b/content/shared/sql-reference/functions/struct.md @@ -0,0 +1,140 @@ + +Use struct functions to create Arrow structs in SQL queries. + +- [named_struct](#named_struct) +- [row](#row) +- [struct](#struct) + +## named_struct + +Returns an _Arrow struct_ using the specified name and input expressions pairs. + +```sql +named_struct(expression1_name, expression1_input[, ..., expression_n_name, expression_n_input]) +``` + +### Arguments + +- **expression_n_name**: Name of the column field. Must be a constant string. +- **expression_n_input**: Expression to include in the output struct. + Can be a constant, column, or function, and any combination of arithmetic or + string operators. + +##### Related functions + +[get_field](/influxdb3/version/reference/sql/functions/misc/#get_field) + +{{< expand-wrapper >}} +{{% expand "View `named_struct` query example" %}} + +_The following example uses the +{{% influxdb3/home-sample-link %}}._ + +```sql +SELECT + named_struct('time', time, 'temperature', temp, 'humidity', hum) AS named_struct +FROM + home +WHERE + room = 'Kitchen' +LIMIT 4 +``` + +| named_struct | +| :------------------------------------------------------------- | +| {time: 2022-01-01T13:00:00, temperature: 22.8, humidity: 36.5} | +| {time: 2022-01-01T12:00:00, temperature: 22.5, humidity: 36.0} | +| {time: 2022-01-01T15:00:00, temperature: 22.7, humidity: 36.2} | +| {time: 2022-01-01T18:00:00, temperature: 23.3, humidity: 36.9} | + +{{% /expand %}} +{{< /expand-wrapper >}} + +## row + +_Alias of [`struct`](#struct)._ + +## struct + +Returns an _Arrow struct_ using the specified input expressions optionally named. +Fields in the returned struct use the optional name or the `cN` naming convention. 
+Fields in the returned struct use the `cN` naming convention (for example: `c0`, `c1`, `c2`, etc.) +unless you specify custom names using the `AS` operator within individual expressions. + +```sql +struct(expression1[, ..., expression_n]) +``` + +### Arguments + +- **expression1, expression_n**: Expression to include in the output struct. + Can be a constant, column, or function, and any combination of arithmetic or + string operators. + +### Aliases + +- row + +##### Related functions + +[get_field](/influxdb3/version/reference/sql/functions/misc/#get_field) + +{{< expand-wrapper >}} +{{% expand "View `struct` query example" %}} + +_The following example uses the +{{% influxdb3/home-sample-link %}}._ + +```sql +SELECT + struct(time, temp, hum) AS struct +FROM + home +WHERE + room = 'Kitchen' +LIMIT 4 +``` + +{{% influxdb/custom-timestamps %}} + +| struct | +| :-------------------------------------------- | +| {c0: 2022-01-01T13:00:00, c1: 22.8, c2: 36.5} | +| {c0: 2022-01-01T12:00:00, c1: 22.5, c2: 36.0} | +| {c0: 2022-01-01T15:00:00, c1: 22.7, c2: 36.2} | +| {c0: 2022-01-01T18:00:00, c1: 23.3, c2: 36.9} | + +{{% /influxdb/custom-timestamps %}} + +{{% /expand %}} +{{% expand "View `struct` query example with named fields" %}} + +Use the `AS` operator in a `struct` expression argument to assign a name to the +struct field. 
+ +_The following example uses the +{{% influxdb3/home-sample-link %}}._ + +```sql +SELECT + struct(time AS 'time', temp AS 'temperature', hum) AS struct +FROM + home +WHERE + room = 'Kitchen' +LIMIT 4 +``` + +{{% influxdb/custom-timestamps %}} + +| struct | +| :------------------------------------------------------- | +| {time: 2022-01-01T13:00:00, temperature: 22.8, c2: 36.5} | +| {time: 2022-01-01T12:00:00, temperature: 22.5, c2: 36.0} | +| {time: 2022-01-01T15:00:00, temperature: 22.7, c2: 36.2} | +| {time: 2022-01-01T18:00:00, temperature: 23.3, c2: 36.9} | + +{{% /influxdb/custom-timestamps %}} + +{{% /expand %}} +{{< /expand-wrapper >}} diff --git a/content/shared/sql-reference/functions/time-and-date.md b/content/shared/sql-reference/functions/time-and-date.md index 82738435a..d116e112f 100644 --- a/content/shared/sql-reference/functions/time-and-date.md +++ b/content/shared/sql-reference/functions/time-and-date.md @@ -125,7 +125,7 @@ For example, if you "bin" or "window" data into 15-minute intervals, an input ti date_bin(interval, expression[, origin_timestamp]) ``` -##### Arguments: +### Arguments - **interval**: Bin interval. Supports the following interval units: @@ -197,7 +197,7 @@ date_bin_gapfill(interval, expression[, origin_timestamp]) > `date_bin_gapfill` requires [time bounds](/influxdb/version/query-data/sql/basic-query/#query-data-within-time-boundaries) > in the `WHERE` clause. -##### Arguments: +### Arguments - **interval**: Bin interval. Supports the following interval units: @@ -375,7 +375,7 @@ UTC offset of the input timestamp. date_bin_wallclock(interval, expression[, origin_timestamp]) ``` -##### Arguments: +### Arguments - **interval**: Bin interval. Supports the following interval units: @@ -514,7 +514,7 @@ date_bin_wallclock_gapfill(interval, expression[, origin_timestamp]) > `date_bin_wallclock_gapfill` requires [time bounds](/influxdb/version/query-data/sql/basic-query/#query-data-within-time-boundaries) > in the `WHERE` clause. 
-##### Arguments: +### Arguments - **interval**: Bin interval. Supports the following interval units: @@ -690,7 +690,7 @@ Truncates a timestamp value to a specified precision. date_trunc(precision, expression) ``` -##### Arguments: +### Arguments - **precision**: Time precision to truncate to. The following precisions are supported: @@ -778,7 +778,7 @@ Returns the specified part of the date as an integer. date_part(part, expression) ``` -##### Arguments: +### Arguments - **part**: Part of the date to return. The following date parts are supported: @@ -844,7 +844,7 @@ Similar to `date_part`, but with different arguments. extract(field FROM source) ``` -##### Arguments +### Arguments - **field**: Part or field of the date to return. The following date fields are supported: @@ -894,7 +894,7 @@ When output to Parquet, the raw integer value (for example, `1641042000`) is pre from_unixtime(expression) ``` -##### Arguments: +### Arguments - **expression**: Integer expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -930,7 +930,7 @@ Returns a date using the component parts (year, month, day). make_date(year, month, day) ``` -##### Arguments +### Arguments - **year**: Year to use when making the date. Can be a constant, column or function, and any combination of arithmetic operators. @@ -1000,7 +1000,7 @@ a [Rust Chrono format string](https://docs.rs/chrono/latest/chrono/format/strfti to_char(expression, format) ``` -##### Arguments +### Arguments - **expression**: Expression to operate on. Can be a constant, column, or function that results in a date, time, timestamp or duration. @@ -1042,7 +1042,7 @@ Numeric values are interpreted as days since the to_date(expression[, ..., format_n]) ``` -###### Arguments +### Arguments - **expression**: Expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -1082,7 +1082,7 @@ like daylight saving time (DST). 
to_local_time(expression) ``` -##### Arguments +### Arguments - **expression**: Time expression to operate on. Can be a constant, column, or function. @@ -1146,7 +1146,7 @@ and return the corresponding RFC3339 timestamp. to_timestamp(expression) ``` -##### Arguments: +### Arguments - **expression**: Expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -1177,7 +1177,7 @@ and return the corresponding RFC3339 timestamp. to_timestamp_micros(expression[, ..., format_n]) ``` -##### Arguments: +### Arguments - **expression**: Expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -1223,7 +1223,7 @@ and return the corresponding RFC3339 timestamp. to_timestamp_millis(expression[, ..., format_n]) ``` -##### Arguments: +### Arguments - **expression**: Expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -1271,7 +1271,7 @@ and return the corresponding RFC3339 timestamp. to_timestamp_nanos(expression[, ..., format_n]) ``` -##### Arguments: +### Arguments - **expression**: Expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -1317,7 +1317,7 @@ and return the corresponding RFC3339 timestamp. to_timestamp_seconds(expression[, ..., format_n]) ``` -##### Arguments: +### Arguments - **expression**: Expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -1364,7 +1364,7 @@ are provided. to_unixtime(expression[, ..., format_n]) ``` -##### Arguments +### Arguments - **expression**: Expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -1413,7 +1413,7 @@ Converts a timestamp to a provided timezone. 
If the second argument is not provi tz(time_expression[, timezone]) ``` -##### Arguments +### Arguments - **time_expression**: time to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. diff --git a/content/shared/sql-reference/functions/window.md b/content/shared/sql-reference/functions/window.md index 698f46787..17693c8c0 100644 --- a/content/shared/sql-reference/functions/window.md +++ b/content/shared/sql-reference/functions/window.md @@ -36,7 +36,6 @@ ORDER BY {{% /influxdb/custom-timestamps %}} - [Window frames](#window-frames) -- [Window function syntax](#window-function-syntax) - [OVER clause](#over-clause) - [PARTITION BY clause](#partition-by-clause) - [ORDER BY clause](#order-by-clause) @@ -566,7 +565,7 @@ ranking order. ntile(expression) ``` -##### Arguments +#### Arguments - **expression**: An integer. The number of groups to split the partition into. @@ -823,7 +822,7 @@ Returns the value from the first row of the window frame. first_value(expression) ``` -##### Arguments +#### Arguments - **expression**: Expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -879,7 +878,7 @@ the function returns the specified default. lag(expression, offset, default) ``` -##### Arguments +#### Arguments - **expression**: Expression to operate on. Can be a constant, column, or function, and any combination of arithmetic or @@ -938,7 +937,7 @@ Returns the value from the last row of the window frame. last_value(expression) ``` -##### Arguments +#### Arguments - **expression**: Expression to operate on. Can be a constant, column, or function, and any combination of arithmetic operators. @@ -995,7 +994,7 @@ the function returns the specified default. lead(expression, offset, default) ``` -##### Arguments +#### Arguments - **expression**: Expression to operate on. 
Can be a constant, column, or function, and any combination of arithmetic or @@ -1055,7 +1054,7 @@ Returns the value from the row that is the nth row of the window frame nth_value(expression, n) ``` -##### Arguments +#### Arguments - **expression**: The expression to operator on. Can be a constant, column, or function, and any combination of arithmetic or From d9b2e9aea0306350c48b685b1177243c83f38d6b Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Fri, 5 Sep 2025 14:35:05 -0500 Subject: [PATCH 154/179] test: Map a directory for influxdata plugins (from influxdb3_plugins), map ent- and core-specific plugins to plugins/custom --- compose.yaml | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/compose.yaml b/compose.yaml index ea11e03cb..612900f6a 100644 --- a/compose.yaml +++ b/compose.yaml @@ -307,6 +307,7 @@ services: influxdb3-core: container_name: influxdb3-core image: influxdb:3-core + pull_policy: always # Set variables (except your auth token) for Core in the .env.3core file. env_file: - .env.3core @@ -316,17 +317,21 @@ services: - influxdb3 - serve - --node-id=node0 - - --log-filter=debug - --object-store=file - --data-dir=/var/lib/influxdb3/data - --plugin-dir=/var/lib/influxdb3/plugins + - --log-filter=debug + - --verbose volumes: - type: bind source: test/.influxdb3/core/data target: /var/lib/influxdb3/data - type: bind - source: test/.influxdb3/core/plugins + source: test/.influxdb3/plugins/influxdata target: /var/lib/influxdb3/plugins + - type: bind + source: test/.influxdb3/core/plugins + target: /var/lib/influxdb3/plugins/custom environment: - INFLUXDB3_AUTH_TOKEN=/run/secrets/influxdb3-core-admin-token secrets: @@ -334,6 +339,7 @@ services: influxdb3-enterprise: container_name: influxdb3-enterprise image: influxdb:3-enterprise + pull_policy: always # Set license email and other variables (except your auth token) for Enterprise in the .env.3ent file. 
env_file: - .env.3ent @@ -344,10 +350,11 @@ services: - serve - --node-id=node0 - --cluster-id=cluster0 - - --log-filter=debug - --object-store=file - --data-dir=/var/lib/influxdb3/data - --plugin-dir=/var/lib/influxdb3/plugins + - --log-filter=debug + - --verbose environment: - INFLUXDB3_AUTH_TOKEN=/run/secrets/influxdb3-enterprise-admin-token volumes: @@ -355,8 +362,11 @@ services: source: test/.influxdb3/enterprise/data target: /var/lib/influxdb3/data - type: bind - source: test/.influxdb3/enterprise/plugins + source: test/.influxdb3/plugins/influxdata target: /var/lib/influxdb3/plugins + - type: bind + source: test/.influxdb3/enterprise/plugins + target: /var/lib/influxdb3/plugins/custom secrets: - influxdb3-enterprise-admin-token telegraf-pytest: From 632b99fafcab5043c9fe9f2edc630bf7274f4458 Mon Sep 17 00:00:00 2001 From: karel rehor Date: Mon, 8 Sep 2025 15:10:40 +0200 Subject: [PATCH 155/179] chore: update release notes and data for kapacitor-1.8.1 --- .../v1/reference/about_the_project/release-notes.md | 7 +++++++ data/products.yml | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/content/kapacitor/v1/reference/about_the_project/release-notes.md b/content/kapacitor/v1/reference/about_the_project/release-notes.md index 2ae546029..f9ca94aa4 100644 --- a/content/kapacitor/v1/reference/about_the_project/release-notes.md +++ b/content/kapacitor/v1/reference/about_the_project/release-notes.md @@ -9,6 +9,13 @@ aliases: - /kapacitor/v1/about_the_project/releasenotes-changelog/ --- +## v1.8.1 {date="2025-09-08} + +### Dependency updates + +1. Upgrade golang.org/x/oauth2 from 0.23.0 to 0.27.0 +1. 
Upgrade Go to 1.24.6 + ## v1.8.0 {date="2025-06-26"} > [!Warning] diff --git a/data/products.yml b/data/products.yml index 9d9403904..307aea9cc 100644 --- a/data/products.yml +++ b/data/products.yml @@ -171,7 +171,7 @@ kapacitor: versions: [v1] latest: v1.8 latest_patches: - v1: 1.8.0 + v1: 1.8.1 ai_sample_questions: - How do I configure Kapacitor for InfluxDB v1? - How do I write a custom Kapacitor task? From 93eb70d3777f48dd29628123edc7f5fac025a12b Mon Sep 17 00:00:00 2001 From: karel rehor Date: Mon, 8 Sep 2025 16:26:48 +0200 Subject: [PATCH 156/179] chore: switch list from ordered to unordered. --- .../kapacitor/v1/reference/about_the_project/release-notes.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/content/kapacitor/v1/reference/about_the_project/release-notes.md b/content/kapacitor/v1/reference/about_the_project/release-notes.md index f9ca94aa4..ac71c9586 100644 --- a/content/kapacitor/v1/reference/about_the_project/release-notes.md +++ b/content/kapacitor/v1/reference/about_the_project/release-notes.md @@ -13,8 +13,8 @@ aliases: ### Dependency updates -1. Upgrade golang.org/x/oauth2 from 0.23.0 to 0.27.0 -1. 
Upgrade Go to 1.24.6 +- Upgrade golang.org/x/oauth2 from 0.23.0 to 0.27.0 +- Upgrade Go to 1.24.6 ## v1.8.0 {date="2025-06-26"} From be2974cea2749d45fbae92f18d03cb65d6d4e02f Mon Sep 17 00:00:00 2001 From: Sven Rebhan Date: Mon, 8 Sep 2025 23:53:16 +0200 Subject: [PATCH 157/179] Add Telegraf v1.35.4 release notes --- content/telegraf/v1/release-notes.md | 67 ++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/content/telegraf/v1/release-notes.md b/content/telegraf/v1/release-notes.md index b387c589c..2b4da71a1 100644 --- a/content/telegraf/v1/release-notes.md +++ b/content/telegraf/v1/release-notes.md @@ -11,6 +11,73 @@ menu: weight: 60 --- +## v1.35.4 {date="2025-08-18"} + +### Bugfixes + +- [#17451](https://github.com/influxdata/telegraf/pull/17451) `agent` Update help message for CLI flag --test +- [#17413](https://github.com/influxdata/telegraf/pull/17413) `inputs.gnmi` Handle empty updates in gnmi notification response +- [#17445](https://github.com/influxdata/telegraf/pull/17445) `inputs.redfish` Log correct address on HTTP error + +### Dependency Updates + +- [#17454](https://github.com/influxdata/telegraf/pull/17454) `deps` Bump actions/checkout from 4 to 5 +- [#17404](https://github.com/influxdata/telegraf/pull/17404) `deps` Bump cloud.google.com/go/storage from 1.55.0 to 1.56.0 +- [#17428](https://github.com/influxdata/telegraf/pull/17428) `deps` Bump github.com/Azure/azure-sdk-for-go/sdk/azcore from 1.18.1 to 1.18.2 +- [#17455](https://github.com/influxdata/telegraf/pull/17455) `deps` Bump github.com/Azure/azure-sdk-for-go/sdk/azidentity from 1.10.1 to 1.11.0 +- [#17383](https://github.com/influxdata/telegraf/pull/17383) `deps` Bump github.com/ClickHouse/clickhouse-go/v2 from 2.37.2 to 2.39.0 +- [#17435](https://github.com/influxdata/telegraf/pull/17435) `deps` Bump github.com/ClickHouse/clickhouse-go/v2 from 2.39.0 to 2.40.1 +- [#17393](https://github.com/influxdata/telegraf/pull/17393) `deps` Bump github.com/apache/arrow-go/v18 
from 18.3.1 to 18.4.0 +- [#17439](https://github.com/influxdata/telegraf/pull/17439) `deps` Bump github.com/apache/inlong/inlong-sdk/dataproxy-sdk-twins/dataproxy-sdk-golang from 1.0.3 to 1.0.5 +- [#17437](https://github.com/influxdata/telegraf/pull/17437) `deps` Bump github.com/aws/aws-sdk-go-v2 from 1.37.0 to 1.37.2 +- [#17402](https://github.com/influxdata/telegraf/pull/17402) `deps` Bump github.com/aws/aws-sdk-go-v2/config from 1.29.17 to 1.30.0 +- [#17458](https://github.com/influxdata/telegraf/pull/17458) `deps` Bump github.com/aws/aws-sdk-go-v2/config from 1.30.1 to 1.31.0 +- [#17391](https://github.com/influxdata/telegraf/pull/17391) `deps` Bump github.com/aws/aws-sdk-go-v2/credentials from 1.17.70 to 1.18.0 +- [#17436](https://github.com/influxdata/telegraf/pull/17436) `deps` Bump github.com/aws/aws-sdk-go-v2/credentials from 1.18.1 to 1.18.3 +- [#17434](https://github.com/influxdata/telegraf/pull/17434) `deps` Bump github.com/aws/aws-sdk-go-v2/feature/ec2/imds from 1.18.0 to 1.18.2 +- [#17461](https://github.com/influxdata/telegraf/pull/17461) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.45.3 to 1.48.0 +- [#17392](https://github.com/influxdata/telegraf/pull/17392) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs from 1.51.0 to 1.54.0 +- [#17440](https://github.com/influxdata/telegraf/pull/17440) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs from 1.54.0 to 1.55.0 +- [#17473](https://github.com/influxdata/telegraf/pull/17473) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs from 1.55.0 to 1.56.0 +- [#17431](https://github.com/influxdata/telegraf/pull/17431) `deps` Bump github.com/aws/aws-sdk-go-v2/service/dynamodb from 1.44.0 to 1.46.0 +- [#17470](https://github.com/influxdata/telegraf/pull/17470) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.231.0 to 1.242.0 +- [#17397](https://github.com/influxdata/telegraf/pull/17397) `deps` Bump 
github.com/aws/aws-sdk-go-v2/service/kinesis from 1.35.3 to 1.36.0 +- [#17430](https://github.com/influxdata/telegraf/pull/17430) `deps` Bump github.com/aws/aws-sdk-go-v2/service/kinesis from 1.36.0 to 1.37.0 +- [#17469](https://github.com/influxdata/telegraf/pull/17469) `deps` Bump github.com/aws/aws-sdk-go-v2/service/kinesis from 1.37.0 to 1.38.0 +- [#17432](https://github.com/influxdata/telegraf/pull/17432) `deps` Bump github.com/aws/aws-sdk-go-v2/service/sts from 1.35.0 to 1.36.0 +- [#17401](https://github.com/influxdata/telegraf/pull/17401) `deps` Bump github.com/aws/aws-sdk-go-v2/service/timestreamwrite from 1.31.2 to 1.32.0 +- [#17421](https://github.com/influxdata/telegraf/pull/17421) `deps` Bump github.com/aws/aws-sdk-go-v2/service/timestreamwrite from 1.32.0 to 1.33.0 +- [#17464](https://github.com/influxdata/telegraf/pull/17464) `deps` Bump github.com/aws/aws-sdk-go-v2/service/timestreamwrite from 1.33.0 to 1.34.0 +- [#17457](https://github.com/influxdata/telegraf/pull/17457) `deps` Bump github.com/clarify/clarify-go from 0.4.0 to 0.4.1 +- [#17407](https://github.com/influxdata/telegraf/pull/17407) `deps` Bump github.com/docker/docker from 28.3.2+incompatible to 28.3.3+incompatible +- [#17463](https://github.com/influxdata/telegraf/pull/17463) `deps` Bump github.com/docker/go-connections from 0.5.0 to 0.6.0 +- [#17394](https://github.com/influxdata/telegraf/pull/17394) `deps` Bump github.com/golang-jwt/jwt/v5 from 5.2.2 to 5.2.3 +- [#17423](https://github.com/influxdata/telegraf/pull/17423) `deps` Bump github.com/gopacket/gopacket from 1.3.1 to 1.4.0 +- [#17399](https://github.com/influxdata/telegraf/pull/17399) `deps` Bump github.com/jedib0t/go-pretty/v6 from 6.6.7 to 6.6.8 +- [#17422](https://github.com/influxdata/telegraf/pull/17422) `deps` Bump github.com/lxc/incus/v6 from 6.14.0 to 6.15.0 +- [#17429](https://github.com/influxdata/telegraf/pull/17429) `deps` Bump github.com/miekg/dns from 1.1.67 to 1.1.68 +- 
[#17433](https://github.com/influxdata/telegraf/pull/17433) `deps` Bump github.com/nats-io/nats-server/v2 from 2.11.6 to 2.11.7 +- [#17426](https://github.com/influxdata/telegraf/pull/17426) `deps` Bump github.com/nats-io/nats.go from 1.43.0 to 1.44.0 +- [#17456](https://github.com/influxdata/telegraf/pull/17456) `deps` Bump github.com/redis/go-redis/v9 from 9.11.0 to 9.12.1 +- [#17420](https://github.com/influxdata/telegraf/pull/17420) `deps` Bump github.com/shirou/gopsutil/v4 from 4.25.6 to 4.25.7 +- [#17388](https://github.com/influxdata/telegraf/pull/17388) `deps` Bump github.com/testcontainers/testcontainers-go/modules/azure from 0.37.0 to 0.38.0 +- [#17382](https://github.com/influxdata/telegraf/pull/17382) `deps` Bump github.com/testcontainers/testcontainers-go/modules/kafka from 0.37.0 to 0.38.0 +- [#17427](https://github.com/influxdata/telegraf/pull/17427) `deps` Bump github.com/yuin/goldmark from 1.7.12 to 1.7.13 +- [#17386](https://github.com/influxdata/telegraf/pull/17386) `deps` Bump go.opentelemetry.io/collector/pdata from 1.36.0 to 1.36.1 +- [#17425](https://github.com/influxdata/telegraf/pull/17425) `deps` Bump go.step.sm/crypto from 0.67.0 to 0.68.0 +- [#17462](https://github.com/influxdata/telegraf/pull/17462) `deps` Bump go.step.sm/crypto from 0.68.0 to 0.69.0 +- [#17460](https://github.com/influxdata/telegraf/pull/17460) `deps` Bump golang.org/x/crypto from 0.40.0 to 0.41.0 +- [#17424](https://github.com/influxdata/telegraf/pull/17424) `deps` Bump google.golang.org/api from 0.243.0 to 0.244.0 +- [#17459](https://github.com/influxdata/telegraf/pull/17459) `deps` Bump google.golang.org/api from 0.244.0 to 0.246.0 +- [#17465](https://github.com/influxdata/telegraf/pull/17465) `deps` Bump google.golang.org/protobuf from 1.36.6 to 1.36.7 +- [#17384](https://github.com/influxdata/telegraf/pull/17384) `deps` Bump k8s.io/apimachinery from 0.33.2 to 0.33.3 +- [#17389](https://github.com/influxdata/telegraf/pull/17389) `deps` Bump k8s.io/client-go from 
0.33.2 to 0.33.3 +- [#17396](https://github.com/influxdata/telegraf/pull/17396) `deps` Bump modernc.org/sqlite from 1.38.0 to 1.38.1 +- [#17385](https://github.com/influxdata/telegraf/pull/17385) `deps` Bump software.sslmate.com/src/go-pkcs12 from 0.5.0 to 0.6.0 +- [#17390](https://github.com/influxdata/telegraf/pull/17390) `deps` Bump super-linter/super-linter from 7.4.0 to 8.0.0 +- [#17448](https://github.com/influxdata/telegraf/pull/17448) `deps` Fix collectd dependency not resolving +- [#17410](https://github.com/influxdata/telegraf/pull/17410) `deps` Migrate from cloud.google.com/go/pubsub to v2 + ## v1.35.3 {date="2025-07-28"} ### Bug fixes From e94bd563fd50b72a6581cb1bc66e98591407cc2b Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 8 Sep 2025 23:42:16 -0500 Subject: [PATCH 158/179] Update content/telegraf/v1/release-notes.md --- content/telegraf/v1/release-notes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/telegraf/v1/release-notes.md b/content/telegraf/v1/release-notes.md index 2b4da71a1..0fd5dba61 100644 --- a/content/telegraf/v1/release-notes.md +++ b/content/telegraf/v1/release-notes.md @@ -15,7 +15,7 @@ menu: ### Bugfixes -- [#17451](https://github.com/influxdata/telegraf/pull/17451) `agent` Update help message for CLI flag --test +- [#17451](https://github.com/influxdata/telegraf/pull/17451) `agent` Update help message for `--test` CLI flag - [#17413](https://github.com/influxdata/telegraf/pull/17413) `inputs.gnmi` Handle empty updates in gnmi notification response - [#17445](https://github.com/influxdata/telegraf/pull/17445) `inputs.redfish` Log correct address on HTTP error From 767dcaeafbbfae4bdd5ea03dd54d1a3a3015d6cc Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 8 Sep 2025 23:45:52 -0500 Subject: [PATCH 159/179] Update products.yml for Telegraph 1.35.4 --- data/products.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/products.yml b/data/products.yml index 
307aea9cc..ec0014361 100644 --- a/data/products.yml +++ b/data/products.yml @@ -143,7 +143,7 @@ telegraf: versions: [v1] latest: v1.35 latest_patches: - v1: 1.35.3 + v1: 1.35.4 ai_sample_questions: - How do I install and configure Telegraf? - How do I write a custom Telegraf plugin? From 74900a0cc995cb3ae455639f6b798c0d4b32f068 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 9 Sep 2025 11:16:39 -0500 Subject: [PATCH 160/179] docs: Add .mcp.json config file for Docs MCP --- .mcp.json | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 .mcp.json diff --git a/.mcp.json b/.mcp.json new file mode 100644 index 000000000..f600dfa67 --- /dev/null +++ b/.mcp.json @@ -0,0 +1,20 @@ +{ + "$schema": "https://raw.githubusercontent.com/modelcontextprotocol/modelcontextprotocol/refs/heads/main/schema/2025-06-18/schema.json", + "description": "InfluxData documentation assistance via MCP server - Node.js execution", + "mcpServers": { + "influxdata": { + "comment": "Use Node to run Docs MCP. To install and setup, see https://github.com/influxdata/docs-mcp-server", + "type": "stdio", + "command": "node", + "args": [ + "${DOCS_MCP_SERVER_PATH}/dist/index.js" + ], + "env": { + "DOCS_API_KEY_FILE": "${DOCS_API_KEY_FILE:-$HOME/.env.docs-kapa-api-key}", + "DOCS_MODE": "external-only", + "MCP_LOG_LEVEL": "${MCP_LOG_LEVEL:-info}", + "NODE_ENV": "${NODE_ENV:-production}" + } + } + } +} \ No newline at end of file From ddb36d1a39a2eff469021447f51398f5095257a2 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 8 Sep 2025 11:30:58 -0500 Subject: [PATCH 161/179] Closes DAR #535 - Adds Clustered reference/internals/durability/\ - Migrates Cloud Dedicated durability page to shared for Dedicated and Clustered.\ - Adds diagram (also used in storage-engine) to illustrate data flow. 
- Fixes typo in Serverless --- .../reference/internals/durability.md | 73 +------------- .../reference/internals/durability.md | 2 +- .../reference/internals/durability.md | 17 ++++ .../durability.md | 98 +++++++++++++++++++ 4 files changed, 120 insertions(+), 70 deletions(-) create mode 100644 content/influxdb3/clustered/reference/internals/durability.md create mode 100644 content/shared/v3-distributed-internals-reference/durability.md diff --git a/content/influxdb3/cloud-dedicated/reference/internals/durability.md b/content/influxdb3/cloud-dedicated/reference/internals/durability.md index f90b0de44..7c50ef019 100644 --- a/content/influxdb3/cloud-dedicated/reference/internals/durability.md +++ b/content/influxdb3/cloud-dedicated/reference/internals/durability.md @@ -1,7 +1,8 @@ --- title: InfluxDB Cloud Dedicated data durability description: > - InfluxDB Cloud Dedicated replicates all time series data in the storage tier across + Data written to {{% product-name %}} progresses through multiple stages to ensure durability, optimized performance and storage, and efficient querying. Configuration options at each stage affect system behavior, balancing reliability and resource usage. + {{% product-name %}} replicates all time series data in the storage tier across multiple availability zones within a cloud region and automatically creates backups that can be used to restore data in the event of a node failure or data corruption. weight: 102 @@ -13,73 +14,7 @@ influxdb3/cloud-dedicated/tags: [backups, internals] related: - https://docs.aws.amazon.com/AmazonS3/latest/userguide/DataDurability.html, AWS S3 Data Durabililty - /influxdb3/cloud-dedicated/reference/internals/storage-engine/ +source: /shared/v3-distributed-internals-reference/durability.md --- -{{< product-name >}} writes data to multiple Write-Ahead-Log (WAL) files on local -storage and retains WALs until the data is persisted to Parquet files in object storage. 
-Parquet data files in object storage are redundantly stored on multiple devices -across a minimum of three availability zones in a cloud region. - -## Data storage - -In {{< product-name >}}, all measurements are stored in -[Apache Parquet](https://parquet.apache.org/) files that represent a -point-in-time snapshot of the data. The Parquet files are immutable and are -never replaced nor modified. Parquet files are stored in object storage and -referenced in the [Catalog](/influxdb3/cloud-dedicated/reference/internals/storage-engine/#catalog), which InfluxDB uses to find the appropriate Parquet files for a particular set of data. - -### Data deletion - -When data is deleted or expires (reaches the database's [retention period](/influxdb3/cloud-dedicated/reference/internals/data-retention/#database-retention-period)), InfluxDB performs the following steps: - -1. Marks the associated Parquet files as deleted in the catalog. -2. Filters out data marked for deletion from all queries. -3. Retains Parquet files marked for deletion in object storage for approximately 30 days after the youngest data in the file ages out of retention. - -## Data ingest - -When data is written to {{< product-name >}}, InfluxDB first writes the data to a -Write-Ahead-Log (WAL) on locally attached storage on the [Ingester](/influxdb3/cloud-dedicated/reference/internals/storage-engine/#ingester) node before -acknowledging the write request. After acknowledging the write request, the -Ingester holds the data in memory temporarily and then writes the contents of -the WAL to Parquet files in object storage and updates the [Catalog](/influxdb3/cloud-dedicated/reference/internals/storage-engine/#catalog) to -reference the newly created Parquet files. If an Ingester node is gracefully shut -down (for example, during a new software deployment), it flushes the contents of -the WAL to the Parquet files before shutting down. 
- -## Backups - -{{< product-name >}} implements the following data backup strategies: - -- **Backup of WAL file**: The WAL file is written on locally attached storage. - If an ingester process fails, the new ingester simply reads the WAL file on - startup and continues normal operation. WAL files are maintained until their - contents have been written to the Parquet files in object storage. - For added protection, ingesters can be configured for write replication, where - each measurement is written to two different WAL files before acknowledging - the write. - -- **Backup of Parquet files**: Parquet files are stored in object storage where - they are redundantly stored on multiple devices across a minimum of three - availability zones in a cloud region. Parquet files associated with each - database are kept in object storage for the duration of database retention period - plus an additional time period (approximately 30 days). - -- **Backup of catalog**: InfluxData keeps a transaction log of all recent updates - to the [InfluxDB catalog](/influxdb3/cloud-dedicated/reference/internals/storage-engine/#catalog) and generates a daily backup of - the catalog. Backups are preserved for at least 30 days in object storage across a minimum - of three availability zones. - -## Recovery - -InfluxData can perform the following recovery operations: - -- **Recovery after ingester failure**: If an ingester fails, a new ingester is - started up and reads from the WAL file for the recently ingested data. - -- **Recovery of Parquet files**: {{< product-name >}} uses the provided object - storage data durability to recover Parquet files. - -- **Recovery of the catalog**: InfluxData can restore the [Catalog](/influxdb3/cloud-dedicated/reference/internals/storage-engine/#catalog) to - the most recent daily backup and then reapply any transactions - that occurred since the interruption. 
+ diff --git a/content/influxdb3/cloud-serverless/reference/internals/durability.md b/content/influxdb3/cloud-serverless/reference/internals/durability.md index d43d4bb8a..903fa3132 100644 --- a/content/influxdb3/cloud-serverless/reference/internals/durability.md +++ b/content/influxdb3/cloud-serverless/reference/internals/durability.md @@ -27,7 +27,7 @@ point-in-time snapshot of the data. The Parquet files are immutable and are never replaced nor modified. Parquet files are stored in object storage. -The _InfluxDB catalog_ is a relational, PostreSQL-compatible database that +The _InfluxDB catalog_ is a relational, PostgreSQL-compatible database that contains references to all Parquet files in object storage and is used as an index to find the appropriate Parquet files for a particular set of data. diff --git a/content/influxdb3/clustered/reference/internals/durability.md b/content/influxdb3/clustered/reference/internals/durability.md new file mode 100644 index 000000000..d9e674451 --- /dev/null +++ b/content/influxdb3/clustered/reference/internals/durability.md @@ -0,0 +1,17 @@ +--- +title: InfluxDB Clustered data durability +description: > + Data written to {{% product-name %}} progresses through multiple stages to ensure durability, optimized performance and storage, and efficient querying. Configuration options at each stage affect system behavior, balancing reliability and resource usage. 
+weight: 102 +menu: + influxdb3_clustered: + name: Data durability + parent: InfluxDB internals +influxdb3/clustered/tags: [backups, internals] +related: + - https://docs.aws.amazon.com/AmazonS3/latest/userguide/DataDurability.html, AWS S3 Data Durabililty + - /influxdb3/clustered/reference/internals/storage-engine/ +source: /shared/v3-distributed-internals-reference/durability.md +--- + + \ No newline at end of file diff --git a/content/shared/v3-distributed-internals-reference/durability.md b/content/shared/v3-distributed-internals-reference/durability.md new file mode 100644 index 000000000..70bd3c7c2 --- /dev/null +++ b/content/shared/v3-distributed-internals-reference/durability.md @@ -0,0 +1,98 @@ +## How data flows through {{% product-name %}} + +When data is written to {{% product-name %}}, it progresses through multiple stages to ensure durability, optimized performance and storage, and efficient querying. Configuration options at each stage affect system behavior, balancing reliability and resource usage. + +{{< svg "/static/svgs/v3-storage-architecture.svg" >}} + +Figure: Write request, response, and ingest flow for {{% product-name %}} + +- [How data flows through {{% product-name %}}](#how-data-flows-through--product-name-) +- [Data ingest](#data-ingest) + 1. [Write validation](#write-validation) + 2. [Write-ahead log (WAL) persistence](#write-ahead-log-wal-persistence) +- [Data storage](#data-storage) +- [Data deletion](#data-deletion) +- [Backups](#backups) +- [Recovery](#recovery) + +## Data ingest + +1. [Write validation and memory buffer](#write-validation-and-memory-buffer) +2. [Write-ahead log (WAL) persistence](#write-ahead-log-wal-persistence) + +### Write validation + +The [Router](/influxdb3/version/reference/internals/storage-engine/#router) validates incoming data to prevent malformed or unsupported data from entering the system. 
+{{% product-name %}} writes accepted data to multiple write-ahead-log (WAL) files on local +storage on the [Ingester](/influxdb3/version/reference/internals/storage-engine/#ingester) node before acknowledging the write request. +The Ingester holds the data in memory to ensure leading edge data is available for querying. + +### Write-ahead log (WAL) persistence + +InfluxDB writes yet-to-be persisted data to multiple Write-Ahead-Log (WAL) files on local +storage on the [Ingester](/influxdb3/version/reference/internals/storage-engine/#ingester) node before acknowledging the write request. +{{% hide-in "clustered" %}} +Parquet data files in object storage are redundantly stored on multiple devices +across a minimum of three availability zones in a cloud region. +{{% /hide-in %}} + +The Ingester then writes the contents of +the WAL to Parquet files in object storage and updates the [Catalog](/influxdb3/version/reference/internals/storage-engine/#catalog) to +reference the newly created Parquet files. + +If an Ingester node is gracefully shut down (for example, during a new software deployment), it flushes the contents of the WAL to the Parquet files before shutting down. +{{% product-name %}} retains WALs until the data is persisted to Parquet files in object storage. + +## Data storage + +In {{< product-name >}}, all measurements are stored in +[Apache Parquet](https://parquet.apache.org/) files that represent a +point-in-time snapshot of the data. The Parquet files are immutable and are +never replaced nor modified. Parquet files are stored in object storage and +referenced in the [Catalog](/influxdb3/version/reference/internals/storage-engine/#catalog), which InfluxDB uses to find the appropriate Parquet files for a particular set of data. + +## Data deletion + +When data is deleted or expires (reaches the database's [retention period](/influxdb3/version/reference/internals/data-retention/#database-retention-period)), InfluxDB performs the following steps: + +1. 
Marks the associated Parquet files as deleted in the catalog. +2. Filters out data marked for deletion from all queries. +{{% hide-in "clustered" %}}3. Retains Parquet files marked for deletion in object storage for approximately 30 days after the youngest data in the file ages out of retention.{{% /hide-in %}} + +## Backups + +{{< product-name >}} implements the following data backup strategies: + +- **Backup of WAL file**: The WAL file is written on locally attached storage. + If an ingester process fails, the new ingester simply reads the WAL file on + startup and continues normal operation. WAL files are maintained until their + contents have been written to the Parquet files in object storage. + For added protection, ingesters can be configured for write replication, where + each measurement is written to two different WAL files before acknowledging + the write. + +- **Backup of Parquet files**: Parquet files are stored in object storage {{% hide-in "clustered" %}}where + they are redundantly stored on multiple devices across a minimum of three + availability zones in a cloud region. Parquet files associated with each + database are kept in object storage for the duration of database retention period + plus an additional time period (approximately 30 days).{{% /hide-in %}} + +- **Backup of catalog**: InfluxData keeps a transaction log of all recent updates + to the [InfluxDB catalog](/influxdb3/version/reference/internals/storage-engine/#catalog) and generates a daily backup of + the catalog. {{% hide-in "clustered" %}}Backups are preserved for at least 30 days in object storage across a minimum of three availability zones.{{% /hide-in %}} + +{{% hide-in "clustered" %}} +## Recovery + +InfluxData can perform the following recovery operations: + +- **Recovery after ingester failure**: If an ingester fails, a new ingester is + started up and reads from the WAL file for the recently ingested data. 
+ +- **Recovery of Parquet files**: {{< product-name >}} uses the provided object + storage data durability to recover Parquet files. + +- **Recovery of the catalog**: InfluxData can restore the [Catalog](/influxdb3/version/reference/internals/storage-engine/#catalog) to + the most recent daily backup and then reapply any transactions + that occurred since the interruption. +{{% /hide-in %}} From c4974d4a3d6072ed0384d09f0d96c613a27c9389 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 8 Sep 2025 12:01:55 -0500 Subject: [PATCH 162/179] fix(v3): DAR-535 resolve duplication --- .../v3-distributed-internals-reference/durability.md | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/content/shared/v3-distributed-internals-reference/durability.md b/content/shared/v3-distributed-internals-reference/durability.md index 70bd3c7c2..3d8fbaf9a 100644 --- a/content/shared/v3-distributed-internals-reference/durability.md +++ b/content/shared/v3-distributed-internals-reference/durability.md @@ -29,17 +29,14 @@ The Ingester holds the data in memory to ensure leading edge data is available f ### Write-ahead log (WAL) persistence -InfluxDB writes yet-to-be persisted data to multiple Write-Ahead-Log (WAL) files on local -storage on the [Ingester](/influxdb3/version/reference/internals/storage-engine/#ingester) node before acknowledging the write request. +The Ingester persists the contents of +the WAL to Parquet files in object storage and updates the [Catalog](/influxdb3/version/reference/internals/storage-engine/#catalog) to +reference the newly created Parquet files. {{% hide-in "clustered" %}} Parquet data files in object storage are redundantly stored on multiple devices across a minimum of three availability zones in a cloud region. 
{{% /hide-in %}} -The Ingester then writes the contents of -the WAL to Parquet files in object storage and updates the [Catalog](/influxdb3/version/reference/internals/storage-engine/#catalog) to -reference the newly created Parquet files. - If an Ingester node is gracefully shut down (for example, during a new software deployment), it flushes the contents of the WAL to the Parquet files before shutting down. {{% product-name %}} retains WALs until the data is persisted to Parquet files in object storage. From 85b89e353e219f54d4432e6fac609a686fc64dbf Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 8 Sep 2025 12:03:20 -0500 Subject: [PATCH 163/179] fix(v3): remove top-level TOC link, hide recovery in Clustered --- .../v3-distributed-internals-reference/durability.md | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/content/shared/v3-distributed-internals-reference/durability.md b/content/shared/v3-distributed-internals-reference/durability.md index 3d8fbaf9a..dd0bf903b 100644 --- a/content/shared/v3-distributed-internals-reference/durability.md +++ b/content/shared/v3-distributed-internals-reference/durability.md @@ -6,21 +6,18 @@ When data is written to {{% product-name %}}, it progresses through multiple sta Figure: Write request, response, and ingest flow for {{% product-name %}} -- [How data flows through {{% product-name %}}](#how-data-flows-through--product-name-) - [Data ingest](#data-ingest) - 1. [Write validation](#write-validation) - 2. [Write-ahead log (WAL) persistence](#write-ahead-log-wal-persistence) - [Data storage](#data-storage) - [Data deletion](#data-deletion) - [Backups](#backups) -- [Recovery](#recovery) +{{% hide-in "clustered" %}}- [Recovery](#recovery){{% /hide-in %}} ## Data ingest 1. [Write validation and memory buffer](#write-validation-and-memory-buffer) 2. 
[Write-ahead log (WAL) persistence](#write-ahead-log-wal-persistence) -### Write validation +### Write validation and memory buffer The [Router](/influxdb3/version/reference/internals/storage-engine/#router) validates incoming data to prevent malformed or unsupported data from entering the system. {{% product-name %}} writes accepted data to multiple write-ahead-log (WAL) files on local From 2bc9e1736d92b01022239d5457c5fcc42efbcba6 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 9 Sep 2025 12:15:29 -0500 Subject: [PATCH 164/179] fix(v3): Apply code review suggestions\ Co-authored-by: reidkauffman@users.noreply.github.com --- .../durability.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/content/shared/v3-distributed-internals-reference/durability.md b/content/shared/v3-distributed-internals-reference/durability.md index dd0bf903b..4036dcc07 100644 --- a/content/shared/v3-distributed-internals-reference/durability.md +++ b/content/shared/v3-distributed-internals-reference/durability.md @@ -20,22 +20,17 @@ When data is written to {{% product-name %}}, it progresses through multiple sta ### Write validation and memory buffer The [Router](/influxdb3/version/reference/internals/storage-engine/#router) validates incoming data to prevent malformed or unsupported data from entering the system. -{{% product-name %}} writes accepted data to multiple write-ahead-log (WAL) files on local -storage on the [Ingester](/influxdb3/version/reference/internals/storage-engine/#ingester) node before acknowledging the write request. -The Ingester holds the data in memory to ensure leading edge data is available for querying. +{{% product-name %}} writes accepted data to multiple write-ahead log (WAL) files on [Ingester](/influxdb3/version/reference/internals/storage-engine/#ingester) pods' local storage (default is 2 for redundancy) before acknowledging the write request. 
+The Ingester holds the data in memory to ensure leading-edge data is available for querying. ### Write-ahead log (WAL) persistence -The Ingester persists the contents of +Ingesters persist the contents of the WAL to Parquet files in object storage and updates the [Catalog](/influxdb3/version/reference/internals/storage-engine/#catalog) to reference the newly created Parquet files. -{{% hide-in "clustered" %}} -Parquet data files in object storage are redundantly stored on multiple devices -across a minimum of three availability zones in a cloud region. -{{% /hide-in %}} +{{% product-name %}} retains WALs until the data is persisted. If an Ingester node is gracefully shut down (for example, during a new software deployment), it flushes the contents of the WAL to the Parquet files before shutting down. -{{% product-name %}} retains WALs until the data is persisted to Parquet files in object storage. ## Data storage @@ -45,6 +40,11 @@ point-in-time snapshot of the data. The Parquet files are immutable and are never replaced nor modified. Parquet files are stored in object storage and referenced in the [Catalog](/influxdb3/version/reference/internals/storage-engine/#catalog), which InfluxDB uses to find the appropriate Parquet files for a particular set of data. +{{% hide-in "clustered" %}} +Parquet data files in object storage are redundantly stored on multiple devices +across a minimum of three availability zones in a cloud region. 
+{{% /hide-in %}} + ## Data deletion When data is deleted or expires (reaches the database's [retention period](/influxdb3/version/reference/internals/data-retention/#database-retention-period)), InfluxDB performs the following steps: From bba78ea40b24be778236a84150db2be51a62c605 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Mon, 8 Sep 2025 17:04:56 -0500 Subject: [PATCH 165/179] chore(v1): Cautions, risks, and mitigations for using truncate-shards with future data - Closes influxdata/DAR/issues/534 - Contact Support for assistance - Add risks and technical details to truncate-shard command - Add cautions to rebalance guide - Add planning guidance for future data in schema_and_data_layout --- .../manage/clusters/rebalance.md | 35 ++++++++++++++--- .../v1/concepts/schema_and_data_layout.md | 33 ++++++++++++++++ .../v1/tools/influxd-ctl/truncate-shards.md | 39 +++++++++++++++++++ 3 files changed, 102 insertions(+), 5 deletions(-) diff --git a/content/enterprise_influxdb/v1/administration/manage/clusters/rebalance.md b/content/enterprise_influxdb/v1/administration/manage/clusters/rebalance.md index db0b48532..b6652e831 100644 --- a/content/enterprise_influxdb/v1/administration/manage/clusters/rebalance.md +++ b/content/enterprise_influxdb/v1/administration/manage/clusters/rebalance.md @@ -40,11 +40,20 @@ cluster, and they use the [`influxd-ctl` tool](/enterprise_influxdb/v1/tools/influxd-ctl/) available on all meta nodes. -{{% warn %}} -Before you begin, stop writing historical data to InfluxDB. -Historical data have timestamps that occur at anytime in the past. -Performing a rebalance while writing historical data can lead to data loss. -{{% /warn %}} +> [!Warning] +> #### Stop writing data before rebalancing +> +> Before you begin, stop writing historical data to InfluxDB. +> Historical data have timestamps that occur at anytime in the past. +> Performing a rebalance while writing historical data can lead to data loss. 
+ +> [!Caution] +> #### Risks of rebalancing with future data +> +> Truncating shards that contain data with future timestamps (such as forecast or prediction data) +> can lead to overlapping shards and data duplication. +> For more information, see [`truncate-shards` and future data](/enterprise_influxdb/v1/tools/influxd-ctl/truncate-shards/#understand-the-risks-with-future-data) +> or [contact InfluxData support](https://support.influxdata.com). ## Rebalance Procedure 1: Rebalance a cluster to create space @@ -67,6 +76,14 @@ Hot shards are shards that are currently receiving writes. Performing any action on a hot shard can lead to data inconsistency within the cluster which requires manual intervention from the user. +> [!Caution] +> #### Risks of rebalancing with future data +> +> Truncating shards that contain data with future timestamps (such as forecast or prediction data) +> can lead to overlapping shards and data duplication. +> For more information, see [`truncate-shards` and future data](/enterprise_influxdb/v1/tools/influxd-ctl/truncate-shards/#understand-the-risks-with-future-data) +> or [contact InfluxData support](https://support.influxdata.com). + To prevent data inconsistency, truncate hot shards before moving any shards across data nodes. The command below creates a new hot shard which is automatically distributed @@ -298,6 +315,14 @@ Hot shards are shards that are currently receiving writes. Performing any action on a hot shard can lead to data inconsistency within the cluster which requires manual intervention from the user. +> [!Caution] +> #### Risks of rebalancing with future data +> +> Truncating shards that contain data with future timestamps (such as forecast or prediction data) +> can lead to overlapping shards and data duplication. 
+> For more information, see [`truncate-shards` and future data](/enterprise_influxdb/v1/tools/influxd-ctl/truncate-shards/#understand-the-risks-with-future-data) +> or [contact InfluxData support](https://support.influxdata.com). + To prevent data inconsistency, truncate hot shards before copying any shards to the new data node. The command below creates a new hot shard which is automatically distributed diff --git a/content/enterprise_influxdb/v1/concepts/schema_and_data_layout.md b/content/enterprise_influxdb/v1/concepts/schema_and_data_layout.md index c60e8ccef..febf8c2dc 100644 --- a/content/enterprise_influxdb/v1/concepts/schema_and_data_layout.md +++ b/content/enterprise_influxdb/v1/concepts/schema_and_data_layout.md @@ -16,6 +16,7 @@ We recommend the following design guidelines for most use cases: - [Where to store data (tag or field)](#where-to-store-data-tag-or-field) - [Avoid too many series](#avoid-too-many-series) - [Use recommended naming conventions](#use-recommended-naming-conventions) + - [Writing data with future timestamps](#writing-data-with-future-timestamps) - [Shard Group Duration Management](#shard-group-duration-management) ## Where to store data (tag or field) @@ -209,6 +210,38 @@ from(bucket:"/") > SELECT mean("temp") FROM "weather_sensor" WHERE region = 'north' ``` +## Writing data with future timestamps + +When designing schemas for applications that write data with future timestamps--such as forecast data from machine learning models, predictions, or scheduled events--consider the following implications for InfluxDB Enterprise v1 cluster operations and data integrity. + +### Understanding future data behavior + +InfluxDB Enterprise v1 creates shards based on time ranges. +When you write data with future timestamps, InfluxDB creates shards that cover future time periods. 
+ +> [!Caution] +> #### Risks of rebalancing with future data +> +> Truncating shards that contain data with future timestamps (such as forecast or prediction data) +> can lead to overlapping shards and data duplication. +> For more information, see [`truncate-shards` and future data](/enterprise_influxdb/v1/tools/influxd-ctl/truncate-shards/#understand-the-risks-with-future-data) +> or [contact InfluxData support](https://support.influxdata.com). + +### Use separate databases for future data + +When planning for data that contains future timestamps, consider isolating it in dedicated databases to: + +- Minimize impact on real-time data operations +- Allow targeted maintenance operations on current vs. future data +- Simplify backup and recovery strategies for different data types + +```sql +# Example: Separate databases for different data types +CREATE DATABASE "realtime_metrics" +CREATE DATABASE "ml_forecasts" +CREATE DATABASE "scheduled_predictions" +``` + ## Shard group duration management ### Shard group duration overview diff --git a/content/enterprise_influxdb/v1/tools/influxd-ctl/truncate-shards.md b/content/enterprise_influxdb/v1/tools/influxd-ctl/truncate-shards.md index f7dffef50..fce401ac2 100644 --- a/content/enterprise_influxdb/v1/tools/influxd-ctl/truncate-shards.md +++ b/content/enterprise_influxdb/v1/tools/influxd-ctl/truncate-shards.md @@ -17,6 +17,14 @@ The `influxd-ctl truncate-shards` command truncates all shards that are currentl being written to (also known as "hot" shards) and creates new shards to write new data to. +> [!Caution] +> #### Overlapping shards with forecast and future data +> +> Running `truncate-shards` on shards containing future timestamps can create +> overlapping shards with duplicate data points. +> +> [Understand the risks with future data](#understand-the-risks-with-future-data). 
+ ## Usage ```sh @@ -40,3 +48,34 @@ _Also see [`influxd-ctl` global flags](/enterprise_influxdb/v1/tools/influxd-ctl ```bash influxd-ctl truncate-shards -delay 3m ``` + +## Understand the risks with future data + +> [!Important] +> If you need to rebalance shards that contain future data, contact [InfluxData support](https://www.influxdata.com/contact/) for assistance. + +When you write data points with timestamps in the future (for example, forecast data from machine learning models), +the `truncate-shards` command behaves differently and can cause data duplication issues. + +### How truncate-shards normally works + +For shards containing current data: +1. The command creates an artificial stop point in the shard at the truncation timestamp +2. Creates a new shard starting from the truncation point +3. Example: A one-week shard (Sunday to Saturday) becomes: + - Shard A: Sunday to truncation point (Wednesday 2pm) + - Shard B: Truncation point (Wednesday 2pm) to Saturday + +This works correctly because the meta nodes understand the boundaries and route queries appropriately. + +### The problem with future data + +For shards containing future timestamps: +1. The truncation doesn't cleanly split the shard at a point in time +2. Instead, it creates overlapping shards that cover the same time period +3. 
Example: If you're writing September forecast data in August: + - Original shard: September 1-7 + - After truncation: + - Shard A: September 1-7 (with data up to truncation) + - Shard B: September 1-7 (for new data after truncation) + - **Result**: Duplicate data points for the same timestamps From de115edf8978fb33a064f95798f7150aff1ae08f Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 9 Sep 2025 15:58:43 -0500 Subject: [PATCH 166/179] fix(v1): clarify truncate-shards operates on hot shards --- .../manage/clusters/rebalance.md | 40 +++++++++---------- 1 file changed, 18 insertions(+), 22 deletions(-) diff --git a/content/enterprise_influxdb/v1/administration/manage/clusters/rebalance.md b/content/enterprise_influxdb/v1/administration/manage/clusters/rebalance.md index b6652e831..9715aed7d 100644 --- a/content/enterprise_influxdb/v1/administration/manage/clusters/rebalance.md +++ b/content/enterprise_influxdb/v1/administration/manage/clusters/rebalance.md @@ -70,9 +70,9 @@ data node to expand the total disk capacity of the cluster. In the next steps, you will safely move shards from one of the two original data nodes to the new data node. -### Step 1: Truncate Hot Shards +### Step 1: Truncate hot shards -Hot shards are shards that are currently receiving writes. +Hot shards are shards that currently receive writes. Performing any action on a hot shard can lead to data inconsistency within the cluster which requires manual intervention from the user. @@ -84,12 +84,9 @@ cluster which requires manual intervention from the user. > For more information, see [`truncate-shards` and future data](/enterprise_influxdb/v1/tools/influxd-ctl/truncate-shards/#understand-the-risks-with-future-data) > or [contact InfluxData support](https://support.influxdata.com). -To prevent data inconsistency, truncate hot shards before moving any shards +To prevent data inconsistency, truncate shards before moving any shards across data nodes. 
-The command below creates a new hot shard which is automatically distributed -across all data nodes in the cluster, and the system writes all new points to -that shard. -All previous writes are now stored in cold shards. +The following command truncates all hot shards and creates new shards to write data to: ``` influxd-ctl truncate-shards @@ -101,10 +98,11 @@ The expected output of this command is: Truncated shards. ``` -Once you truncate the shards, you can work on redistributing the cold shards -without the threat of data inconsistency in the cluster. -Any hot or new shards are now evenly distributed across the cluster and require -no further intervention. +New shards are automatically distributed across all data nodes, and InfluxDB writes new points to them. +Previous writes are stored in cold shards. + +After truncating shards, you can redistribute cold shards without data inconsistency. +Hot and new shards are evenly distributed and require no further intervention. ### Step 2: Identify Cold Shards @@ -309,9 +307,9 @@ name duration shardGroupDuration replicaN default autogen 0s 1h0m0s 3 #👍 true ``` -### Step 2: Truncate Hot Shards +### Step 2: Truncate hot shards -Hot shards are shards that are currently receiving writes. +Hot shards are shards that currently receive writes. Performing any action on a hot shard can lead to data inconsistency within the cluster which requires manual intervention from the user. @@ -323,12 +321,9 @@ cluster which requires manual intervention from the user. > For more information, see [`truncate-shards` and future data](/enterprise_influxdb/v1/tools/influxd-ctl/truncate-shards/#understand-the-risks-with-future-data) > or [contact InfluxData support](https://support.influxdata.com). -To prevent data inconsistency, truncate hot shards before copying any shards +To prevent data inconsistency, truncate shards before copying any shards to the new data node. 
-The command below creates a new hot shard which is automatically distributed -across the three data nodes in the cluster, and the system writes all new points -to that shard. -All previous writes are now stored in cold shards. +The following command truncates all hot shards and creates new shards to write data to: ``` influxd-ctl truncate-shards @@ -340,10 +335,11 @@ The expected output of this command is: Truncated shards. ``` -Once you truncate the shards, you can work on distributing the cold shards -without the threat of data inconsistency in the cluster. -Any hot or new shards are now automatically distributed across the cluster and -require no further intervention. +New shards are automatically distributed across all data nodes, and InfluxDB writes new points to them. +Previous writes are stored in cold shards. + +After truncating shards, you can redistribute cold shards without data inconsistency. +Hot and new shards are evenly distributed and require no further intervention. ### Step 3: Identify Cold Shards From 6fcd870555fad43b76aa3fbfb9fd303b3aefcaef Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 9 Sep 2025 14:12:52 -0500 Subject: [PATCH 167/179] fix(clustered): Add known bug and clustered-auth override to release notes --- .../reference/release-notes/clustered.md | 37 +++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/content/influxdb3/clustered/reference/release-notes/clustered.md b/content/influxdb3/clustered/reference/release-notes/clustered.md index 3b382e8c2..54629f761 100644 --- a/content/influxdb3/clustered/reference/release-notes/clustered.md +++ b/content/influxdb3/clustered/reference/release-notes/clustered.md @@ -390,6 +390,43 @@ spec: # ...[remaining configuration] ``` +### `clustered-auth` service routes to removed `gateway` service instead of `core` service + +If you have the `clusteredAuth` feature flag enabled, the `clustered-auth` service will be deployed. 
+The service currently routes to the recently removed `gateway` service instead of the new `core` service. + +#### Temporary workaround for service routing + +Until you upgrade to release `20250805-1812019`, you will need to override the `clustered-auth` +service to point to the new `core` service by adding the following `env` overrides to your `AppInstance`: + +```yaml +apiVersion: kubecfg.dev/v1alpha1 +kind: AppInstance +metadata: + name: influxdb + namespace: influxdb +spec: + package: + image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20241024-1354148 + apiVersion: influxdata.com/v1alpha1 + spec: + components: + querier: + template: + containers: + clustered-auth: + env: + AUTHZ_TOKEN_SVC_ADDRESS: 'http://core:8091/' + router: + template: + containers: + clustered-auth: + env: + AUTHZ_TOKEN_SVC_ADDRESS: 'http://core:8091/' +# ...remaining configuration... +``` + ### Highlights #### AppInstance image override bug fix From f0117ed3996797dbcfc44049b3e8e7f82a68b9d0 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 9 Sep 2025 14:22:15 -0500 Subject: [PATCH 168/179] Update content/influxdb3/clustered/reference/release-notes/clustered.md Co-authored-by: Scott Anderson --- .../influxdb3/clustered/reference/release-notes/clustered.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb3/clustered/reference/release-notes/clustered.md b/content/influxdb3/clustered/reference/release-notes/clustered.md index 54629f761..dc4a7d405 100644 --- a/content/influxdb3/clustered/reference/release-notes/clustered.md +++ b/content/influxdb3/clustered/reference/release-notes/clustered.md @@ -397,7 +397,7 @@ The service currently routes to the recently removed `gateway` service instead o #### Temporary workaround for service routing -Until you upgrade to release `20250805-1812019`, you will need to override the `clustered-auth` +Until you upgrade to release `20250805-1812019`, you need to override the `clustered-auth` service to point 
to the new `core` service by adding the following `env` overrides to your `AppInstance`: ```yaml From c748819f6db2344127802049a6c2bfa62ea14c4b Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Tue, 9 Sep 2025 16:03:52 -0500 Subject: [PATCH 169/179] fix(clustered): Google Cloud IAM link --- .../influxdb3/clustered/reference/release-notes/clustered.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb3/clustered/reference/release-notes/clustered.md b/content/influxdb3/clustered/reference/release-notes/clustered.md index dc4a7d405..0899575f0 100644 --- a/content/influxdb3/clustered/reference/release-notes/clustered.md +++ b/content/influxdb3/clustered/reference/release-notes/clustered.md @@ -1278,7 +1278,7 @@ We now expose a `google` object within the `objectStore` configuration, which enables support for using Google Cloud's GCS as a backing object store for IOx components. This supports both [GKE workload identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) -and [IAM Service Account](https://cloud.google.com/kubernetes-engine/docs/tutorials/authenticating-to-cloud-platform#step_3_create_service_account_credentials) +and [IAM Service Account](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#kubernetes-sa-to-iam) authentication methods. 
#### Support for bypassing identity provider configuration for database/token management From fed9e49f045558aee2173ae3690ebd0441b359e4 Mon Sep 17 00:00:00 2001 From: mdevy-influxdata <53542066+mdevy-influxdata@users.noreply.github.com> Date: Tue, 9 Sep 2025 17:21:38 -0700 Subject: [PATCH 170/179] Update grafana.md typo --- .../cloud-serverless/process-data/visualize/grafana.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/influxdb3/cloud-serverless/process-data/visualize/grafana.md b/content/influxdb3/cloud-serverless/process-data/visualize/grafana.md index 959570f23..afeed288d 100644 --- a/content/influxdb3/cloud-serverless/process-data/visualize/grafana.md +++ b/content/influxdb3/cloud-serverless/process-data/visualize/grafana.md @@ -131,7 +131,7 @@ When creating an InfluxDB data source that uses InfluxQL to query data: 2. Under **InfluxDB Details**: - **Database**: Provide a database name to query. - Use the database name that is mapped to your InfluxBD bucket. + Use the database name that is mapped to your InfluxDB bucket. - **User**: Provide an arbitrary string. 
_This credential is ignored when querying {{% product-name %}}, but it cannot be empty._ - **Password**: Provide an [API token](/influxdb3/cloud-serverless/admin/tokens/) From e8350a39950a6258267ff35e65ede1f9d2674074 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Wed, 10 Sep 2025 15:52:54 -0500 Subject: [PATCH 171/179] chore(v3-dist): Consolidate to shared Grafana guide - Fix broken link to fragment --- .../process-data/visualize/grafana.md | 198 +---------------- .../process-data/visualize/grafana.md | 208 +---------------- .../process-data/visualize/grafana.md | 194 +--------------- .../v3-process-data/visualize/grafana.md | 209 ++++++++++++++++++ 4 files changed, 217 insertions(+), 592 deletions(-) create mode 100644 content/shared/v3-process-data/visualize/grafana.md diff --git a/content/influxdb3/cloud-dedicated/process-data/visualize/grafana.md b/content/influxdb3/cloud-dedicated/process-data/visualize/grafana.md index e5b168e5e..4fc0cb47e 100644 --- a/content/influxdb3/cloud-dedicated/process-data/visualize/grafana.md +++ b/content/influxdb3/cloud-dedicated/process-data/visualize/grafana.md @@ -9,7 +9,7 @@ menu: influxdb3_cloud_dedicated: name: Use Grafana parent: Visualize data -influxdb3/cloud-dedicated/tags: [Flight client, query, visualization] +influxdb3/cloud-dedicated/tags: [query, visualization, Grafana] aliases: - /influxdb3/cloud-dedicated/query-data/tools/grafana/ - /influxdb3/cloud-dedicated/query-data/sql/execute-queries/grafana/ @@ -20,199 +20,7 @@ alt_links: cloud: /influxdb/cloud/tools/grafana/ core: /influxdb3/core/visualize-data/grafana/ enterprise: /influxdb3/enterprise/visualize-data/grafana/ +source: /content/shared/v3-process-data/visualize/grafana.md --- -Use [Grafana](https://grafana.com/) to query and visualize data stored in -{{% product-name %}}. - -> [Grafana] enables you to query, visualize, alert on, and explore your metrics, -> logs, and traces wherever they are stored. 
-> [Grafana] provides you with tools to turn your time-series database (TSDB) -> data into insightful graphs and visualizations. -> -> {{% cite %}}-- [Grafana documentation](https://grafana.com/docs/grafana/latest/introduction/){{% /cite %}} - - - -- [Install Grafana or login to Grafana Cloud](#install-grafana-or-login-to-grafana-cloud) -- [InfluxDB data source](#influxdb-data-source) -- [Create an InfluxDB data source](#create-an-influxdb-data-source) -- [Query InfluxDB with Grafana](#query-influxdb-with-grafana) -- [Build visualizations with Grafana](#build-visualizations-with-grafana) - - - -## Install Grafana or login to Grafana Cloud - -If using the open source version of **Grafana**, follow the -[Grafana installation instructions](https://grafana.com/docs/grafana/latest/setup-grafana/installation/) -to install Grafana for your operating system. -If using **Grafana Cloud**, login to your Grafana Cloud instance. - -## InfluxDB data source - -The InfluxDB data source plugin is included in the Grafana core distribution. -Use the plugin to query and visualize data stored in {{< product-name >}} with -both InfluxQL and SQL. - -> [!Note] -> #### Grafana 10.3+ -> -> The instructions below are for **Grafana 10.3+** which introduced the newest -> version of the InfluxDB core plugin. -> The updated plugin includes **SQL support** for InfluxDB 3-based products such -> as {{< product-name >}}. - -## Create an InfluxDB data source - -1. In your Grafana user interface (UI), navigate to **Data Sources**. -2. Click **Add new data source**. -3. Search for and select the **InfluxDB** plugin. -4. Provide a name for your data source. -5. Under **Query Language**, select either **SQL** or **InfluxQL**: - -{{< tabs-wrapper >}} -{{% tabs %}} -[SQL](#) -[InfluxQL](#) -{{% /tabs %}} -{{% tab-content %}} - - -When creating an InfluxDB data source that uses SQL to query data: - -1. 
Under **HTTP**: - - - **URL**: Provide your {{% product-name omit=" Clustered" %}} cluster URL - using the HTTPS protocol: - - ``` - https://{{< influxdb/host >}} - ``` - -2. Under **InfluxDB Details**: - - - **Database**: Provide a default database name to query. - - **Token**: Provide a [database token](/influxdb3/cloud-dedicated/admin/tokens/#database-tokens) - with read access to the databases you want to query. - -3. Click **Save & test**. - - {{< img-hd src="/img/influxdb3/cloud-dedicated-grafana-influxdb-data-source-sql.png" alt="Grafana InfluxDB data source for InfluxDB Cloud Serverless that uses SQL" />}} - - -{{% /tab-content %}} -{{% tab-content %}} - - -When creating an InfluxDB data source that uses InfluxQL to query data: - -1. Under **HTTP**: - - - **URL**: Provide your {{% product-name %}} cluster URL - using the HTTPS protocol: - - ``` - https://{{< influxdb/host >}} - ``` - -2. Under **InfluxDB Details**: - - - **Database**: Provide a default database name to query. - - **User**: Provide an arbitrary string. - _This credential is ignored when querying {{% product-name %}}, but it cannot be empty._ - - **Password**: Provide a [database token](/influxdb3/cloud-dedicated/admin/tokens/#database-tokens) - with read access to the databases you want to query. - - **HTTP Method**: Choose one of the available HTTP request methods to use when querying data: - - - **POST** ({{< req text="Recommended" >}}) - - **GET** - -3. Click **Save & test**. - - {{< img-hd src="/img/influxdb3/cloud-dedicated-grafana-influxdb-data-source-influxql.png" alt="Grafana InfluxDB data source for InfluxDB Cloud Serverless using InfluxQL" />}} - - -{{% /tab-content %}} -{{< /tabs-wrapper >}} - -## Query InfluxDB with Grafana - -After you [configure and save an InfluxDB datasource](#create-a-datasource), -use Grafana to build, run, and inspect queries against your InfluxDB database. 
- -{{< tabs-wrapper >}} -{{% tabs %}} -[SQL](#) -[InfluxQL](#) -{{% /tabs %}} -{{% tab-content %}} - - -> [!Note] -> {{% sql/sql-schema-intro %}} -> To learn more, see [Query Data](/influxdb3/cloud-dedicated/query-data/sql/). - -1. Click **Explore**. -2. In the dropdown, select the saved InfluxDB data source to query. -3. Use the SQL query form to build your query: - - **Table**: Select the measurement to query. - - **Column**: Select one or more fields and tags to return as columns in query results. - - With SQL, select the `time` column to include timestamps with the data. - Grafana relies on the `time` column to correctly graph time series data. - - - _**Optional:**_ Toggle **filter** to generate **WHERE** clause statements. - - **WHERE**: Configure condition expressions to include in the `WHERE` clause. - - - _**Optional:**_ Toggle **group** to generate **GROUP BY** clause statements. - - - **GROUP BY**: Select columns to group by. - If you include an aggregation function in the **SELECT** list, - you must group by one or more of the queried columns. - SQL returns the aggregation for each group. - - - {{< req text="Recommended" color="green" >}}: - Toggle **order** to generate **ORDER BY** clause statements. - - - **ORDER BY**: Select columns to sort by. - You can sort by time and multiple fields or tags. - To sort in descending order, select **DESC**. - -4. {{< req text="Recommended" color="green" >}}: Change format to **Time series**. - - Use the **Format** dropdown to change the format of the query results. - For example, to visualize the query results as a time series, select **Time series**. - -5. Click **Run query** to execute the query. - - -{{% /tab-content %}} -{{% tab-content %}} - - -1. Click **Explore**. -2. In the dropdown, select the **InfluxDB** data source that you want to query. -3. Use the InfluxQL query form to build your query: - - **FROM**: Select the measurement that you want to query. 
- - **WHERE**: To filter the query results, enter a conditional expression. - - **SELECT**: Select fields to query and an aggregate function to apply to each. - The aggregate function is applied to each time interval defined in the - `GROUP BY` clause. - - **GROUP BY**: By default, Grafana groups data by time to downsample results - and improve query performance. - You can also add other tags to group by. -4. Click **Run query** to execute the query. - - -{{% /tab-content %}} -{{< /tabs-wrapper >}} - -{{< youtube "rSsouoNsNDs" >}} - -To learn about query management and inspection in Grafana, see the -[Grafana Explore documentation](https://grafana.com/docs/grafana/latest/explore/). - -## Build visualizations with Grafana - -For a comprehensive walk-through of creating visualizations with -Grafana, see the [Grafana documentation](https://grafana.com/docs/grafana/latest/). + diff --git a/content/influxdb3/cloud-serverless/process-data/visualize/grafana.md b/content/influxdb3/cloud-serverless/process-data/visualize/grafana.md index afeed288d..b2ca5c372 100644 --- a/content/influxdb3/cloud-serverless/process-data/visualize/grafana.md +++ b/content/influxdb3/cloud-serverless/process-data/visualize/grafana.md @@ -21,211 +21,7 @@ alt_links: cloud: /influxdb/cloud/tools/grafana/ core: /influxdb3/core/visualize-data/grafana/ enterprise: /influxdb3/enterprise/visualize-data/grafana/ +source: /content/shared/v3-process-data/visualize/grafana.md --- -Use [Grafana](https://grafana.com/) to query and visualize data stored in -{{% product-name %}}. - -> [Grafana] enables you to query, visualize, alert on, and explore your metrics, -> logs, and traces wherever they are stored. -> [Grafana] provides you with tools to turn your time-series database (TSDB) -> data into insightful graphs and visualizations. 
-> -> {{% cite %}}-- [Grafana documentation](https://grafana.com/docs/grafana/latest/introduction/){{% /cite %}} - - - -- [Install Grafana or login to Grafana Cloud](#install-grafana-or-login-to-grafana-cloud) -- [InfluxDB data source](#influxdb-data-source) -- [Create an InfluxDB data source](#create-an-influxdb-data-source) -- [Query InfluxDB with Grafana](#query-influxdb-with-grafana) -- [Build visualizations with Grafana](#build-visualizations-with-grafana) - - - -## Install Grafana or login to Grafana Cloud - -If using the open source version of **Grafana**, follow the -[Grafana installation instructions](https://grafana.com/docs/grafana/latest/setup-grafana/installation/) -to install Grafana for your operating system. -If using **Grafana Cloud**, login to your Grafana Cloud instance. - -## InfluxDB data source - -The InfluxDB data source plugin is included in the Grafana core distribution. -Use the plugin to query and visualize data stored in {{< product-name >}} with -both InfluxQL and SQL. - -> [!Note] -> #### Grafana 10.3+ -> -> The instructions below are for **Grafana 10.3+** which introduced the newest -> version of the InfluxDB core plugin. -> The updated plugin includes **SQL support** for InfluxDB 3-based products such -> as {{< product-name >}}. - -## Create an InfluxDB data source - -Which data source you create depends on which query language you want to use to -query {{% product-name %}}: - -1. In your Grafana user interface (UI), navigate to **Data Sources**. -2. Click **Add new data source**. -3. Search for and select the **InfluxDB** plugin. -4. Provide a name for your data source. -5. Under **Query Language**, select either **SQL** or **InfluxQL**: - -{{< tabs-wrapper >}} -{{% tabs %}} -[SQL](#) -[InfluxQL](#) -{{% /tabs %}} -{{% tab-content %}} - - -When creating an InfluxDB data source that uses SQL to query data: - -1. 
Under **HTTP**: - - - **URL**: Provide your [{{% product-name %}} region URL](/influxdb3/cloud-serverless/reference/regions/) - using the HTTPS protocol: - - ``` - https://{{< influxdb/host >}} - ``` - -2. Under **InfluxDB Details**: - - - **Database**: Provide a default bucket name to query. - In {{< product-name >}}, a bucket functions as a database. - - **Token**: Provide an [API token](/influxdb3/cloud-serverless/admin/tokens/) - with read access to the buckets you want to query. - -3. Click **Save & test**. - - {{< img-hd src="/img/influxdb3/cloud-serverless-grafana-influxdb-data-source-sql.png" alt="Grafana InfluxDB data source for InfluxDB Cloud Serverless that uses SQL" />}} - - -{{% /tab-content %}} -{{% tab-content %}} - - -When creating an InfluxDB data source that uses InfluxQL to query data: - -> [!Note] -> #### Map databases and retention policies to buckets -> -> To query {{% product-name %}} with InfluxQL, first map database and retention policy -> (DBRP) combinations to your InfluxDB Cloud buckets. For more information, see -> [Map databases and retention policies to buckets](/influxdb3/cloud-serverless/query-data/influxql/dbrp/). - -1. Under **HTTP**: - - - **URL**: Provide your [{{% product-name %}} region URL](/influxdb3/cloud-serverless/reference/regions/) - using the HTTPS protocol: - - ``` - https://{{< influxdb/host >}} - ``` - -2. Under **InfluxDB Details**: - - - **Database**: Provide a database name to query. - Use the database name that is mapped to your InfluxDB bucket. - - **User**: Provide an arbitrary string. - _This credential is ignored when querying {{% product-name %}}, but it cannot be empty._ - - **Password**: Provide an [API token](/influxdb3/cloud-serverless/admin/tokens/) - with read access to the buckets you want to query. - - **HTTP Method**: Choose one of the available HTTP request methods to use when querying data: - - - **POST** ({{< req text="Recommended" >}}) - - **GET** - -3. Click **Save & test**. 
- - {{< img-hd src="/img/influxdb3/cloud-serverless-grafana-influxdb-data-source-influxql.png" alt="Grafana InfluxDB data source for InfluxDB Cloud Serverless using InfluxQL" />}} - - -{{% /tab-content %}} -{{< /tabs-wrapper >}} - -## Query InfluxDB with Grafana - -After you [configure and save a FlightSQL or InfluxDB datasource](#create-a-datasource), -use Grafana to build, run, and inspect queries against your InfluxDB bucket. - -{{< tabs-wrapper >}} -{{% tabs %}} -[SQL](#) -[InfluxQL](#) -{{% /tabs %}} -{{% tab-content %}} - - -> [!Note] -> {{% sql/sql-schema-intro %}} -> To learn more, see [Query Data](/influxdb3/cloud-serverless/query-data/sql/). - -1. Click **Explore**. -2. In the dropdown, select the saved InfluxDB data source to query. -3. Use the SQL query form to build your query: - - **Table**: Select the measurement to query. - - **Column**: Select one or more fields and tags to return as columns in query results. - - With SQL, select the `time` column to include timestamps with the data. - Grafana relies on the `time` column to correctly graph time series data. - - - _**Optional:**_ Toggle **filter** to generate **WHERE** clause statements. - - **WHERE**: Configure condition expressions to include in the `WHERE` clause. - - - _**Optional:**_ Toggle **group** to generate **GROUP BY** clause statements. - - - **GROUP BY**: Select columns to group by. - If you include an aggregation function in the **SELECT** list, - you must group by one or more of the queried columns. - SQL returns the aggregation for each group. - - - {{< req text="Recommended" color="green" >}}: - Toggle **order** to generate **ORDER BY** clause statements. - - - **ORDER BY**: Select columns to sort by. - You can sort by time and multiple fields or tags. - To sort in descending order, select **DESC**. - -4. {{< req text="Recommended" color="green" >}}: Change format to **Time series**. - - Use the **Format** dropdown to change the format of the query results. 
- For example, to visualize the query results as a time series, select **Time series**. - -5. Click **Run query** to execute the query. - - -{{% /tab-content %}} -{{% tab-content %}} - - -1. Click **Explore**. -2. In the dropdown, select the **InfluxDB** data source that you want to query. -3. Use the InfluxQL query form to build your query: - - **FROM**: Select the measurement that you want to query. - - **WHERE**: To filter the query results, enter a conditional expression. - - **SELECT**: Select fields to query and an aggregate function to apply to each. - The aggregate function is applied to each time interval defined in the - `GROUP BY` clause. - - **GROUP BY**: By default, Grafana groups data by time to downsample results - and improve query performance. - You can also add other tags to group by. -4. Click **Run query** to execute the query. - - -{{% /tab-content %}} -{{< /tabs-wrapper >}} - -{{< youtube "rSsouoNsNDs" >}} - -To learn about query management and inspection in Grafana, see the -[Grafana Explore documentation](https://grafana.com/docs/grafana/latest/explore/). - -## Build visualizations with Grafana - -For a comprehensive walk-through of creating visualizations with -Grafana, see the [Grafana documentation](https://grafana.com/docs/grafana/latest/). 
+ diff --git a/content/influxdb3/clustered/process-data/visualize/grafana.md b/content/influxdb3/clustered/process-data/visualize/grafana.md index 3bf6a952e..4818070bf 100644 --- a/content/influxdb3/clustered/process-data/visualize/grafana.md +++ b/content/influxdb3/clustered/process-data/visualize/grafana.md @@ -9,7 +9,7 @@ menu: influxdb3_clustered: name: Use Grafana parent: Visualize data -influxdb3/clustered/tags: [query, visualization] +influxdb3/clustered/tags: [query, visualization, Grafana] aliases: - /influxdb3/clustered/query-data/tools/grafana/ - /influxdb3/clustered/query-data/sql/execute-queries/grafana/ @@ -20,195 +20,7 @@ alt_links: cloud: /influxdb/cloud/tools/grafana/ core: /influxdb3/core/visualize-data/grafana/ enterprise: /influxdb3/enterprise/visualize-data/grafana/ +source: /content/shared/v3-process-data/visualize/grafana.md --- -Use [Grafana](https://grafana.com/) to query and visualize data stored in -{{% product-name %}}. - -> [Grafana] enables you to query, visualize, alert on, and explore your metrics, -> logs, and traces wherever they are stored. -> [Grafana] provides you with tools to turn your time-series database (TSDB) -> data into insightful graphs and visualizations. -> -> {{% cite %}}-- [Grafana documentation](https://grafana.com/docs/grafana/latest/introduction/){{% /cite %}} - -- [Install Grafana or login to Grafana Cloud](#install-grafana-or-login-to-grafana-cloud) -- [InfluxDB data source](#influxdb-data-source) -- [Create an InfluxDB data source](#create-an-influxdb-data-source) -- [Query InfluxDB with Grafana](#query-influxdb-with-grafana) -- [Build visualizations with Grafana](#build-visualizations-with-grafana) - -## Install Grafana or login to Grafana Cloud - -If using the open source version of **Grafana**, follow the -[Grafana installation instructions](https://grafana.com/docs/grafana/latest/setup-grafana/installation/) -to install Grafana for your operating system. 
-If using **Grafana Cloud**, login to your Grafana Cloud instance. - -## InfluxDB data source - -The InfluxDB data source plugin is included in the Grafana core distribution. -Use the plugin to query and visualize data stored in {{< product-name >}} with -both InfluxQL and SQL. - -> [!Note] -> #### Grafana 10.3+ -> -> The instructions below are for **Grafana 10.3+** which introduced the newest -> version of the InfluxDB core plugin. -> The updated plugin includes **SQL support** for InfluxDB 3-based products such -> as {{< product-name >}}. - -## Create an InfluxDB data source - -1. In your Grafana user interface (UI), navigate to **Data Sources**. -2. Click **Add new data source**. -3. Search for and select the **InfluxDB** plugin. -4. Provide a name for your data source. -5. Under **Query Language**, select either **SQL** or **InfluxQL**: - -{{< tabs-wrapper >}} -{{% tabs %}} -[SQL](#) -[InfluxQL](#) -{{% /tabs %}} -{{% tab-content %}} - - -When creating an InfluxDB data source that uses SQL to query data: - -1. Under **HTTP**: - - - **URL**: Provide your {{% product-name omit=" Clustered" %}} cluster URL - using the HTTPS protocol: - - ``` - https://{{< influxdb/host >}} - ``` - -2. Under **InfluxDB Details**: - - - **Database**: Provide a default [database](/influxdb3/clustered/admin/databases/) name to query. - - **Token**: Provide a [database token](/influxdb3/clustered/admin/tokens/#database-tokens) - with read access to the databases you want to query. - -3. Click **Save & test**. - - {{< img-hd src="/img/influxdb3/clustered-grafana-influxdb-data-source-sql.png" alt="Grafana InfluxDB data source for InfluxDB Cloud Serverless that uses SQL" />}} - - -{{% /tab-content %}} -{{% tab-content %}} - - -When creating an InfluxDB data source that uses InfluxQL to query data: - -1. 
Under **HTTP**: - - - **URL**: Provide your [{{% product-name %}} region URL](/influxdb3/clustered/reference/regions/) - using the HTTPS protocol: - - ``` - https://{{< influxdb/host >}} - ``` - -2. Under **InfluxDB Details**: - - - **Database**: Provide a default [database](/influxdb3/clustered/admin/databases/) name to query. - - **User**: Provide an arbitrary string. - _This credential is ignored when querying {{% product-name %}}, but it cannot be empty._ - - **Password**: Provide a [database token](/influxdb3/clustered/admin/tokens/#database-tokens) - with read access to the databases you want to query. - - **HTTP Method**: Choose one of the available HTTP request methods to use when querying data: - - - **POST** ({{< req text="Recommended" >}}) - - **GET** - -3. Click **Save & test**. - - {{< img-hd src="/img/influxdb3/clustered-grafana-influxdb-data-source-influxql.png" alt="Grafana InfluxDB data source for InfluxDB Cloud Serverless using InfluxQL" />}} - - -{{% /tab-content %}} -{{< /tabs-wrapper >}} - -## Query InfluxDB with Grafana - -After you [configure and save an InfluxDB datasource](#create-a-datasource), -use Grafana to build, run, and inspect queries against your InfluxDB database. - -{{< tabs-wrapper >}} -{{% tabs %}} -[SQL](#) -[InfluxQL](#) -{{% /tabs %}} -{{% tab-content %}} - - -> [!Note] -> {{% sql/sql-schema-intro %}} -> To learn more, see [Query Data](/influxdb3/clustered/query-data/sql/). - -1. Click **Explore**. -2. In the dropdown, select the saved InfluxDB data source to query. -3. Use the SQL query form to build your query: - - **Table**: Select the measurement to query. - - **Column**: Select one or more fields and tags to return as columns in query results. - - With SQL, select the `time` column to include timestamps with the data. - Grafana relies on the `time` column to correctly graph time series data. - - - _**Optional:**_ Toggle **filter** to generate **WHERE** clause statements. 
- - **WHERE**: Configure condition expressions to include in the `WHERE` clause. - - - _**Optional:**_ Toggle **group** to generate **GROUP BY** clause statements. - - - **GROUP BY**: Select columns to group by. - If you include an aggregation function in the **SELECT** list, - you must group by one or more of the queried columns. - SQL returns the aggregation for each group. - - - {{< req text="Recommended" color="green" >}}: - Toggle **order** to generate **ORDER BY** clause statements. - - - **ORDER BY**: Select columns to sort by. - You can sort by time and multiple fields or tags. - To sort in descending order, select **DESC**. - -4. {{< req text="Recommended" color="green" >}}: Change format to **Time series**. - - Use the **Format** dropdown to change the format of the query results. - For example, to visualize the query results as a time series, select **Time series**. - -5. Click **Run query** to execute the query. - - -{{% /tab-content %}} -{{% tab-content %}} - - -1. Click **Explore**. -2. In the dropdown, select the **InfluxDB** data source that you want to query. -3. Use the InfluxQL query form to build your query: - - **FROM**: Select the measurement that you want to query. - - **WHERE**: To filter the query results, enter a conditional expression. - - **SELECT**: Select fields to query and an aggregate function to apply to each. - The aggregate function is applied to each time interval defined in the - `GROUP BY` clause. - - **GROUP BY**: By default, Grafana groups data by time to downsample results - and improve query performance. - You can also add other tags to group by. -4. Click **Run query** to execute the query. - - -{{% /tab-content %}} -{{< /tabs-wrapper >}} - -{{< youtube "rSsouoNsNDs" >}} - -To learn about query management and inspection in Grafana, see the -[Grafana Explore documentation](https://grafana.com/docs/grafana/latest/explore/). 
- -## Build visualizations with Grafana - -For a comprehensive walk-through of creating visualizations with -Grafana, see the [Grafana documentation](https://grafana.com/docs/grafana/latest/). + diff --git a/content/shared/v3-process-data/visualize/grafana.md b/content/shared/v3-process-data/visualize/grafana.md new file mode 100644 index 000000000..baf2a753c --- /dev/null +++ b/content/shared/v3-process-data/visualize/grafana.md @@ -0,0 +1,209 @@ +Use [Grafana](https://grafana.com/) to query and visualize data stored in +{{% product-name %}}. + +> [Grafana] enables you to query, visualize, alert on, and explore your metrics, +> logs, and traces wherever they are stored. +> [Grafana] provides you with tools to turn your time-series database (TSDB) +> data into insightful graphs and visualizations. +> +> {{% cite %}}-- [Grafana documentation](https://grafana.com/docs/grafana/latest/introduction/){{% /cite %}} + +- [Install Grafana or login to Grafana Cloud](#install-grafana-or-login-to-grafana-cloud) +- [InfluxDB data source](#influxdb-data-source) +- [Create an InfluxDB data source](#create-an-influxdb-data-source) +- [Query InfluxDB with Grafana](#query-influxdb-with-grafana) +- [Build visualizations with Grafana](#build-visualizations-with-grafana) + +## Install Grafana or login to Grafana Cloud + +If using the open source version of **Grafana**, follow the +[Grafana installation instructions](https://grafana.com/docs/grafana/latest/setup-grafana/installation/) +to install Grafana for your operating system. +If using **Grafana Cloud**, login to your Grafana Cloud instance. + +## InfluxDB data source + +The InfluxDB data source plugin is included in the Grafana core distribution. +Use the plugin to query and visualize data stored in {{< product-name >}} with +both InfluxQL and SQL. + +> [!Note] +> #### Grafana 10.3+ +> +> The instructions below are for **Grafana 10.3+** which introduced the newest +> version of the InfluxDB core plugin. 
+> The updated plugin includes **SQL support** for InfluxDB 3-based products such +> as {{< product-name >}}. + +## Create an InfluxDB data source + +Which data source you create depends on which query language you want to use to +query {{% product-name %}}: + +1. In your Grafana user interface (UI), navigate to **Data Sources**. +2. Click **Add new data source**. +3. Search for and select the **InfluxDB** plugin. +4. Provide a name for your data source. +5. Under **Query Language**, select either **SQL** or **InfluxQL**: + +{{< tabs-wrapper >}} +{{% tabs %}} +[SQL](#) +[InfluxQL](#) +{{% /tabs %}} +{{% tab-content %}} + + +When creating an InfluxDB data source that uses SQL to query data: + +1. Under **HTTP**: + + - **URL**: Provide your {{% show-in "cloud-serverless" %}}[{{< product-name >}} region URL](/influxdb3/version/reference/regions/){{% /show-in %}} + {{% hide-in "cloud-serverless" %}}{{% product-name omit=" Clustered" %}} cluster URL{{% /hide-in %}} using the HTTPS protocol: + + ``` + https://{{< influxdb/host >}} + ``` +2. Under **InfluxDB Details**: + + - **Database**: Provide a default {{% show-in "cloud-serverless" %}}[bucket](/influxdb3/version/admin/buckets/) name to query. In {{< product-name >}}, a bucket functions as a database.{{% /show-in %}}{{% hide-in "cloud-serverless" %}}[database](/influxdb3/version/admin/databases/) name to query.{{% /hide-in %}} + - **Token**: Provide {{% show-in "cloud-serverless" %}}an [API token](/influxdb3/version/admin/tokens/) with read access to the buckets you want to query.{{% /show-in %}}{{% hide-in "cloud-serverless" %}}a [database token](/influxdb3/version/admin/tokens/#database-tokens) with read access to the databases you want to query.{{% /hide-in %}} +3. Click **Save & test**. 
+ +{{% show-in "cloud-serverless" %}}{{< img-hd src="/img/influxdb3/cloud-serverless-grafana-influxdb-data-source-sql.png" alt="Grafana InfluxDB data source for InfluxDB Cloud Serverless that uses SQL" />}}{{% /show-in %}} +{{% show-in "cloud-dedicated" %}}{{< img-hd src="/img/influxdb/cloud-dedicated-grafana-influxdb-data-source-sql.png" alt="Grafana InfluxDB data source for InfluxDB Cloud Dedicated that uses SQL" />}}{{% /show-in %}} +{{% show-in "clustered" %}}{{< img-hd src="/img/influxdb3/clustered-grafana-influxdb-data-source-sql.png" alt="Grafana InfluxDB data source for InfluxDB Clustered that uses SQL" />}}{{% /show-in %}} + + +{{% /tab-content %}} +{{% tab-content %}} + + +When creating an InfluxDB data source that uses InfluxQL to query data: + +{{% show-in "cloud-serverless" %}} +> [!Note] +> #### Map databases and retention policies to buckets +> +> To query {{% product-name %}} with InfluxQL, first map database and retention policy +> (DBRP) combinations to your InfluxDB Cloud buckets. For more information, see +> [Map databases and retention policies to buckets](/influxdb3/version/query-data/influxql/dbrp/). +{{% /show-in %}} + +1. Under **HTTP**: + + - **URL**: Provide your {{% show-in "cloud-serverless" %}}[{{< product-name >}} region URL](/influxdb3/version/reference/regions/){{% /show-in %}}{{% hide-in "cloud-serverless" %}}{{% product-name omit=" Clustered" %}} cluster URL{{% /hide-in %}} + using the HTTPS protocol: + + ``` + https://{{< influxdb/host >}} + ``` +2. Under **InfluxDB Details**: + + - **Database**: Provide a {{% show-in "cloud-serverless" %}}database name to query. + Use the database name that is mapped to your InfluxDB bucket{{% /show-in %}}{{% hide-in "cloud-serverless" %}}default [database](/influxdb3/version/admin/databases/) name to query{{% /hide-in %}}. + - **User**: Provide an arbitrary string. 
+ _This credential is ignored when querying {{% product-name %}}, but it cannot be empty._ + - **Password**: Provide {{% show-in "cloud-serverless" %}}an [API token](/influxdb3/version/admin/tokens/) with read access to the buckets you want to query{{% /show-in %}}{{% hide-in "cloud-serverless" %}}a [database token](/influxdb3/version/admin/tokens/#database-tokens) with read access to the databases you want to query{{% /hide-in %}}. + - **HTTP Method**: Choose one of the available HTTP request methods to use when querying data: + + - **POST** ({{< req text="Recommended" >}}) + - **GET** +3. Click **Save & test**. + +{{% show-in "cloud-dedicated" %}}{{< img-hd src="/img/influxdb/cloud-dedicated-grafana-influxdb-data-source-influxql.png" alt="Grafana InfluxDB data source for InfluxDB Cloud Dedicated using InfluxQL" />}}{{% /show-in %}} +{{% show-in "cloud-serverless" %}}{{< img-hd src="/img/influxdb3/cloud-serverless-grafana-influxdb-data-source-influxql.png" alt="Grafana InfluxDB data source for InfluxDB Cloud Serverless using InfluxQL" />}}{{% /show-in %}} +{{% show-in "clustered" %}}{{< img-hd src="/img/influxdb3/clustered-grafana-influxdb-data-source-influxql.png" alt="Grafana InfluxDB data source for InfluxDB Clustered using InfluxQL" />}}{{% /show-in %}} + + +{{% /tab-content %}} +{{< /tabs-wrapper >}} + +## Query InfluxDB with Grafana + +After you [configure and save an InfluxDB datasource](#create-an-influxdb-data-source), +use Grafana to build, run, and inspect queries against your InfluxDB {{% show-in "cloud-serverless" %}}bucket{{% /show-in %}}{{% hide-in "cloud-serverless" %}}database{{% /hide-in %}}. + +{{< tabs-wrapper >}} +{{% tabs %}} +[SQL](#) +[InfluxQL](#) +{{% /tabs %}} +{{% tab-content %}} + + +> [!Note] +> {{% sql/sql-schema-intro %}} +{{% show-in "cloud-serverless" %}} +> To learn more, see [Query Data](/influxdb3/version/query-data/sql/). 
+{{% /show-in %}} +{{% show-in "cloud-dedicated" %}} +> To learn more, see [Query Data](/influxdb3/version/query-data/sql/). +{{% /show-in %}} +{{% show-in "clustered" %}} +> To learn more, see [Query Data](/influxdb3/version/query-data/sql/). +{{% /show-in %}} + +1. Click **Explore**. +2. In the dropdown, select the saved InfluxDB data source to query. +3. Use the SQL query form to build your query: + - **Table**: Select the measurement to query. + - **Column**: Select one or more fields and tags to return as columns in query results. + + With SQL, select the `time` column to include timestamps with the data. + Grafana relies on the `time` column to correctly graph time series data. + + - _**Optional:**_ Toggle **filter** to generate **WHERE** clause statements. + - **WHERE**: Configure condition expressions to include in the `WHERE` clause. + + - _**Optional:**_ Toggle **group** to generate **GROUP BY** clause statements. + + - **GROUP BY**: Select columns to group by. + If you include an aggregation function in the **SELECT** list, + you must group by one or more of the queried columns. + SQL returns the aggregation for each group. + + - {{< req text="Recommended" color="green" >}}: + Toggle **order** to generate **ORDER BY** clause statements. + + - **ORDER BY**: Select columns to sort by. + You can sort by time and multiple fields or tags. + To sort in descending order, select **DESC**. + +4. {{< req text="Recommended" color="green" >}}: Change format to **Time series**. + - Use the **Format** dropdown to change the format of the query results. + For example, to visualize the query results as a time series, select **Time series**. + +5. Click **Run query** to execute the query. + + +{{% /tab-content %}} +{{% tab-content %}} + + +1. Click **Explore**. +2. In the dropdown, select the **InfluxDB** data source that you want to query. +3. Use the InfluxQL query form to build your query: + - **FROM**: Select the measurement that you want to query. 
+ - **WHERE**: To filter the query results, enter a conditional expression. + - **SELECT**: Select fields to query and an aggregate function to apply to each. + The aggregate function is applied to each time interval defined in the + `GROUP BY` clause. + - **GROUP BY**: By default, Grafana groups data by time to downsample results + and improve query performance. + You can also add other tags to group by. +4. Click **Run query** to execute the query. + + +{{% /tab-content %}} +{{< /tabs-wrapper >}} + +{{< youtube "rSsouoNsNDs" >}} + +To learn about query management and inspection in Grafana, see the +[Grafana Explore documentation](https://grafana.com/docs/grafana/latest/explore/). + +## Build visualizations with Grafana + +For a comprehensive walk-through of creating visualizations with +Grafana, see the [Grafana documentation](https://grafana.com/docs/grafana/latest/). \ No newline at end of file From 903c16e50a6c02bc43776386de460f8ae3a2a3a2 Mon Sep 17 00:00:00 2001 From: jaal2001 Date: Thu, 11 Sep 2025 16:46:36 +0200 Subject: [PATCH 172/179] Update config-options.md Removed ")" in --exec-mem-pool-bytes as typo. Added format information for --wal-flush-interval and 100ms suggestion from @peterbarnett03 in discord chat. --- content/shared/influxdb3-cli/config-options.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/content/shared/influxdb3-cli/config-options.md b/content/shared/influxdb3-cli/config-options.md index 5f91d50a0..32c545778 100644 --- a/content/shared/influxdb3-cli/config-options.md +++ b/content/shared/influxdb3-cli/config-options.md @@ -1278,7 +1278,7 @@ Defines the address on which InfluxDB serves HTTP API requests. Specifies the size of memory pool used during query execution. Can be given as absolute value in bytes or as a percentage of the total available memory--for -example: `8000000000` or `10%`). +example: `8000000000` or `10%`. 
{{% show-in "core" %}}**Default:** `8589934592`{{% /show-in %}} {{% show-in "enterprise" %}}**Default:** `20%`{{% /show-in %}} @@ -1316,6 +1316,7 @@ percentage (portion of available memory) or absolute value in MB--for example: ` Specifies the interval to flush buffered data to a WAL file. Writes that wait for WAL confirmation take up to this interval to complete. +Can be `s` for seconds or `ms` for miliseconds. 100ms is suggested for local disks. **Default:** `1s` From 988aef7e071c2a1172c3c3804f9afbacdbddc8d0 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Thu, 11 Sep 2025 08:58:47 -0600 Subject: [PATCH 173/179] fix(sql): hotfix typos in sql window functions doc --- content/shared/sql-reference/functions/window.md | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/content/shared/sql-reference/functions/window.md b/content/shared/sql-reference/functions/window.md index 17693c8c0..ca980dbca 100644 --- a/content/shared/sql-reference/functions/window.md +++ b/content/shared/sql-reference/functions/window.md @@ -329,8 +329,8 @@ each frame that the window function operates on. - [UNBOUNDED PRECEDING](#unbounded-preceding) - [offset PRECEDING](#offset-preceding) -- CURRENT_ROW](#current-row) -- [offset> FOLLOWING](#offset-following) +- [CURRENT_ROW](#current-row) +- [offset FOLLOWING](#offset-following) - [UNBOUNDED FOLLOWING](#unbounded-following) ##### UNBOUNDED PRECEDING @@ -369,18 +369,6 @@ For example, `3 FOLLOWING` includes 3 rows after the current row. ##### UNBOUNDED FOLLOWING -Starts at the current row and ends at the last row of the partition. -##### offset FOLLOWING - -Use a specified offset of [frame units](#frame-units) _after_ the current row -as a frame boundary. - -```sql -offset FOLLOWING -``` - -##### UNBOUNDED FOLLOWING - Use the current row to the end of the current partition the frame boundary. 
```sql From 74a1cc45df881603abc0d454c71cdb91ecf0e9dd Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Thu, 11 Sep 2025 11:12:16 -0500 Subject: [PATCH 174/179] Apply suggestions from code review Co-authored-by: Scott Anderson --- content/shared/v3-process-data/visualize/grafana.md | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/content/shared/v3-process-data/visualize/grafana.md b/content/shared/v3-process-data/visualize/grafana.md index baf2a753c..c809b1c4d 100644 --- a/content/shared/v3-process-data/visualize/grafana.md +++ b/content/shared/v3-process-data/visualize/grafana.md @@ -19,7 +19,7 @@ Use [Grafana](https://grafana.com/) to query and visualize data stored in If using the open source version of **Grafana**, follow the [Grafana installation instructions](https://grafana.com/docs/grafana/latest/setup-grafana/installation/) to install Grafana for your operating system. -If using **Grafana Cloud**, login to your Grafana Cloud instance. +If using **Grafana Cloud**, log in to your Grafana Cloud instance. ## InfluxDB data source @@ -134,15 +134,7 @@ use Grafana to build, run, and inspect queries against your InfluxDB {{% show-in > [!Note] > {{% sql/sql-schema-intro %}} -{{% show-in "cloud-serverless" %}} > To learn more, see [Query Data](/influxdb3/version/query-data/sql/). -{{% /show-in %}} -{{% show-in "cloud-dedicated" %}} -> To learn more, see [Query Data](/influxdb3/version/query-data/sql/). -{{% /show-in %}} -{{% show-in "clustered" %}} -> To learn more, see [Query Data](/influxdb3/version/query-data/sql/). -{{% /show-in %}} 1. Click **Explore**. 2. In the dropdown, select the saved InfluxDB data source to query. 
From 78de3407a1a15ca2bb38795d1ffcfd763deb1283 Mon Sep 17 00:00:00 2001 From: Sven Rebhan Date: Thu, 11 Sep 2025 20:41:36 +0200 Subject: [PATCH 175/179] Updating changelog --- content/telegraf/v1/release-notes.md | 83 ++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) diff --git a/content/telegraf/v1/release-notes.md b/content/telegraf/v1/release-notes.md index 0fd5dba61..2c96b275d 100644 --- a/content/telegraf/v1/release-notes.md +++ b/content/telegraf/v1/release-notes.md @@ -11,6 +11,89 @@ menu: weight: 60 --- +## v1.36.0 {date="2025-09-08"} + +### Important Changes + +- PR [#17355](https://github.com/influxdata/telegraf/pull/17355) changes the `profiles` support + of `inputs.opentelemetry` from the `v1 experimental` to the `v1 development` as this experimental API + is updated upstream. This will change the metric by for example removing the no-longer reported + `frame_type`, `stack_trace_id`, `build_id`, and `build_id_type` fields. Also, the value format of other fields + or tags might have changed. Please refer to the + [OpenTelemetry documentation](https://opentelemetry.io/docs/) for more details. 
+ +### New Plugins + +- [#17368](https://github.com/influxdata/telegraf/pull/17368) `inputs.turbostat` Add plugin +- [#17078](https://github.com/influxdata/telegraf/pull/17078) `processors.round` Add plugin + +### Features + +- [#16705](https://github.com/influxdata/telegraf/pull/16705) `agent` Introduce labels and selectors to enable and disable plugins +- [#17547](https://github.com/influxdata/telegraf/pull/17547) `inputs.influxdb_v2_listener` Add `/health` route +- [#17312](https://github.com/influxdata/telegraf/pull/17312) `inputs.internal` Allow to collect statistics per plugin instance +- [#17024](https://github.com/influxdata/telegraf/pull/17024) `inputs.lvm` Add sync_percent for lvm_logical_vol +- [#17355](https://github.com/influxdata/telegraf/pull/17355) `inputs.opentelemetry` Upgrade otlp proto module +- [#17156](https://github.com/influxdata/telegraf/pull/17156) `inputs.syslog` Add support for RFC3164 over TCP +- [#17543](https://github.com/influxdata/telegraf/pull/17543) `inputs.syslog` Allow limiting message size in octet counting mode +- [#17539](https://github.com/influxdata/telegraf/pull/17539) `inputs.x509_cert` Add support for Windows certificate stores +- [#17244](https://github.com/influxdata/telegraf/pull/17244) `output.nats` Allow disabling stream creation for externally managed streams +- [#17474](https://github.com/influxdata/telegraf/pull/17474) `outputs.elasticsearch` Support array headers and preserve commas in values +- [#17548](https://github.com/influxdata/telegraf/pull/17548) `outputs.influxdb` Add internal statistics for written bytes +- [#17213](https://github.com/influxdata/telegraf/pull/17213) `outputs.nats` Allow providing a subject layout +- [#17346](https://github.com/influxdata/telegraf/pull/17346) `outputs.nats` Enable batch serialization with use_batch_format +- [#17249](https://github.com/influxdata/telegraf/pull/17249) `outputs.sql` Allow sending batches of metrics in transactions +- 
[#17510](https://github.com/influxdata/telegraf/pull/17510) `parsers.avro` Support record arrays at root level +- [#17365](https://github.com/influxdata/telegraf/pull/17365) `plugins.snmp` Allow debug logging in gosnmp +- [#17345](https://github.com/influxdata/telegraf/pull/17345) `selfstat` Implement collection of plugin-internal statistics + +### Bugfixes + +- [#17411](https://github.com/influxdata/telegraf/pull/17411) `inputs.diskio` Handle counter wrapping in io fields +- [#17551](https://github.com/influxdata/telegraf/pull/17551) `inputs.s7comm` Use correct value for string length with 'extra' parameter +- [#17579](https://github.com/influxdata/telegraf/pull/17579) `internal` Extract go version more robustly +- [#17566](https://github.com/influxdata/telegraf/pull/17566) `outputs` Retrigger batch-available-events only if at least one metric was written successfully +- [#17381](https://github.com/influxdata/telegraf/pull/17381) `packaging` Rename rpm from loong64 to loongarch64 + +### Dependency Updates + +- [#17519](https://github.com/influxdata/telegraf/pull/17519) `deps` Bump cloud.google.com/go/storage from 1.56.0 to 1.56.1 +- [#17532](https://github.com/influxdata/telegraf/pull/17532) `deps` Bump github.com/Azure/azure-sdk-for-go/sdk/azcore from 1.18.2 to 1.19.0 +- [#17494](https://github.com/influxdata/telegraf/pull/17494) `deps` Bump github.com/SAP/go-hdb from 1.13.12 to 1.14.0 +- [#17488](https://github.com/influxdata/telegraf/pull/17488) `deps` Bump github.com/antchfx/xpath from 1.3.4 to 1.3.5 +- [#17540](https://github.com/influxdata/telegraf/pull/17540) `deps` Bump github.com/aws/aws-sdk-go-v2/config from 1.31.0 to 1.31.2 +- [#17538](https://github.com/influxdata/telegraf/pull/17538) `deps` Bump github.com/aws/aws-sdk-go-v2/credentials from 1.18.4 to 1.18.6 +- [#17517](https://github.com/influxdata/telegraf/pull/17517) `deps` Bump github.com/aws/aws-sdk-go-v2/feature/ec2/imds from 1.18.3 to 1.18.4 +- 
[#17528](https://github.com/influxdata/telegraf/pull/17528) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.48.0 to 1.48.2 +- [#17536](https://github.com/influxdata/telegraf/pull/17536) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs from 1.56.0 to 1.57.0 +- [#17524](https://github.com/influxdata/telegraf/pull/17524) `deps` Bump github.com/aws/aws-sdk-go-v2/service/dynamodb from 1.46.0 to 1.49.1 +- [#17493](https://github.com/influxdata/telegraf/pull/17493) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.242.0 to 1.244.0 +- [#17527](https://github.com/influxdata/telegraf/pull/17527) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.244.0 to 1.246.0 +- [#17530](https://github.com/influxdata/telegraf/pull/17530) `deps` Bump github.com/aws/aws-sdk-go-v2/service/kinesis from 1.38.0 to 1.39.1 +- [#17534](https://github.com/influxdata/telegraf/pull/17534) `deps` Bump github.com/aws/aws-sdk-go-v2/service/sts from 1.37.0 to 1.38.0 +- [#17513](https://github.com/influxdata/telegraf/pull/17513) `deps` Bump github.com/aws/aws-sdk-go-v2/service/timestreamwrite from 1.34.0 to 1.34.2 +- [#17514](https://github.com/influxdata/telegraf/pull/17514) `deps` Bump github.com/coreos/go-systemd/v22 from 22.5.0 to 22.6.0 +- [#17563](https://github.com/influxdata/telegraf/pull/17563) `deps` Bump github.com/facebook/time from 0.0.0-20240626113945-18207c5d8ddc to 0.0.0-20250903103710-a5911c32cdb9 +- [#17526](https://github.com/influxdata/telegraf/pull/17526) `deps` Bump github.com/gophercloud/gophercloud/v2 from 2.7.0 to 2.8.0 +- [#17537](https://github.com/influxdata/telegraf/pull/17537) `deps` Bump github.com/microsoft/go-mssqldb from 1.9.2 to 1.9.3 +- [#17490](https://github.com/influxdata/telegraf/pull/17490) `deps` Bump github.com/nats-io/nats-server/v2 from 2.11.7 to 2.11.8 +- [#17523](https://github.com/influxdata/telegraf/pull/17523) `deps` Bump github.com/nats-io/nats.go from 1.44.0 to 1.45.0 +- 
[#17492](https://github.com/influxdata/telegraf/pull/17492) `deps` Bump github.com/safchain/ethtool from 0.5.10 to 0.6.2 +- [#17486](https://github.com/influxdata/telegraf/pull/17486) `deps` Bump github.com/snowflakedb/gosnowflake from 1.15.0 to 1.16.0 +- [#17541](https://github.com/influxdata/telegraf/pull/17541) `deps` Bump github.com/tidwall/wal from 1.1.8 to 1.2.0 +- [#17529](https://github.com/influxdata/telegraf/pull/17529) `deps` Bump github.com/vmware/govmomi from 0.51.0 to 0.52.0 +- [#17496](https://github.com/influxdata/telegraf/pull/17496) `deps` Bump go.opentelemetry.io/collector/pdata from 1.36.1 to 1.38.0 +- [#17533](https://github.com/influxdata/telegraf/pull/17533) `deps` Bump go.opentelemetry.io/collector/pdata from 1.38.0 to 1.39.0 +- [#17516](https://github.com/influxdata/telegraf/pull/17516) `deps` Bump go.step.sm/crypto from 0.69.0 to 0.70.0 +- [#17499](https://github.com/influxdata/telegraf/pull/17499) `deps` Bump golang.org/x/mod from 0.26.0 to 0.27.0 +- [#17497](https://github.com/influxdata/telegraf/pull/17497) `deps` Bump golang.org/x/net from 0.42.0 to 0.43.0 +- [#17487](https://github.com/influxdata/telegraf/pull/17487) `deps` Bump google.golang.org/api from 0.246.0 to 0.247.0 +- [#17531](https://github.com/influxdata/telegraf/pull/17531) `deps` Bump google.golang.org/api from 0.247.0 to 0.248.0 +- [#17520](https://github.com/influxdata/telegraf/pull/17520) `deps` Bump google.golang.org/grpc from 1.74.2 to 1.75.0 +- [#17518](https://github.com/influxdata/telegraf/pull/17518) `deps` Bump google.golang.org/protobuf from 1.36.7 to 1.36.8 +- [#17498](https://github.com/influxdata/telegraf/pull/17498) `deps` Bump k8s.io/client-go from 0.33.3 to 0.33.4 +- [#17515](https://github.com/influxdata/telegraf/pull/17515) `deps` Bump super-linter/super-linter from 8.0.0 to 8.1.0 + ## v1.35.4 {date="2025-08-18"} ### Bugfixes From e0b58c3e4c486fcaa65ce3488fd622f96d536980 Mon Sep 17 00:00:00 2001 From: Sven Rebhan Date: Thu, 11 Sep 2025 20:41:36 +0200 
Subject: [PATCH 176/179] Updating plugin list --- data/telegraf_plugins.yml | 60 ++++++++++++++++++++++++++++++++------- 1 file changed, 49 insertions(+), 11 deletions(-) diff --git a/data/telegraf_plugins.yml b/data/telegraf_plugins.yml index ac89b5be7..369410e48 100644 --- a/data/telegraf_plugins.yml +++ b/data/telegraf_plugins.yml @@ -502,8 +502,8 @@ input: Docker containers. > [!NOTE] - > Make sure Telegraf has sufficient permissions to access the - > configured endpoint. + > Make sure Telegraf has sufficient permissions to access the configured + > endpoint. introduced: v0.1.9 os_support: [freebsd, linux, macos, solaris, windows] tags: [containers] @@ -516,8 +516,8 @@ input: > [!NOTE] > This plugin works only for containers with the `local` or `json-file` or - > `journald` logging driver. Please make sure Telegraf has sufficient - > permissions to access the configured endpoint. + > `journald` logging driver. Make sure Telegraf has sufficient permissions + > to access the configured endpoint. introduced: v1.12.0 os_support: [freebsd, linux, macos, solaris, windows] tags: [containers, logging] @@ -1970,6 +1970,11 @@ input: This service plugin receives traces, metrics, logs and profiles from [OpenTelemetry](https://opentelemetry.io) clients and compatible agents via gRPC. + + > [!NOTE] + > Telegraf v1.32 through v1.35 support the Profiles signal using the v1 + > experimental API. Telegraf v1.36+ supports the Profiles signal using the + > v1 development API. introduced: v1.19.0 os_support: [freebsd, linux, macos, solaris, windows] tags: [logging, messaging] @@ -2672,6 +2677,19 @@ input: introduced: v0.3.0 os_support: [freebsd, linux, macos, solaris, windows] tags: [testing] + - name: Turbostat + id: turbostat + description: | + This service plugin monitors system performance using the + [turbostat](https://github.com/torvalds/linux/tree/master/tools/power/x86/turbostat) + command. 
+ + > [!IMPORTANT] + > This plugin requires the `turbostat` executable to be installed on the + > system. + introduced: v1.36.0 + os_support: [linux] + tags: [hardware, system] - name: Twemproxy id: twemproxy description: | @@ -2835,7 +2853,8 @@ input: description: | This plugin provides information about [X.509](https://en.wikipedia.org/wiki/X.509) certificates accessible e.g. - via local file, tcp, udp, https or smtp protocols. + via local file, tcp, udp, https or smtp protocols and the Windows + Certificate Store. > [!NOTE] > When using a UDP address as a certificate source, the server must @@ -2940,8 +2959,8 @@ output: Explorer](https://docs.microsoft.com/en-us/azure/data-explorer), [Azure Synapse Data Explorer](https://docs.microsoft.com/en-us/azure/synapse-analytics/data-explorer/data-explorer-overview), - and [Real-Time Intelligence in - Fabric](https://learn.microsoft.com/fabric/real-time-intelligence/overview) + and [Real time analytics in + Fabric](https://learn.microsoft.com/en-us/fabric/real-time-analytics/overview) services. Azure Data Explorer is a distributed, columnar store, purpose built for @@ -3299,9 +3318,17 @@ output: - name: Microsoft Fabric id: microsoft_fabric description: | - This plugin writes metrics to [Real time analytics in - Fabric](https://learn.microsoft.com/en-us/fabric/real-time-analytics/overview) - services. + This plugin writes metrics to [Fabric + Eventhouse](https://learn.microsoft.com/fabric/real-time-intelligence/eventhouse) + and [Fabric + Eventstream](https://learn.microsoft.com/fabric/real-time-intelligence/event-streams/overview?tabs=enhancedcapabilities) + artifacts of [Real-Time Intelligence in Microsoft + Fabric](https://learn.microsoft.com/fabric/real-time-intelligence/overview). + + Real-Time Intelligence is a SaaS service in Microsoft Fabric that allows + you to extract insights and visualize data in motion. It offers an + end-to-end solution for event-driven scenarios, streaming data, and data + logs. 
introduced: v1.35.0 os_support: [freebsd, linux, macos, solaris, windows] tags: [datastore] @@ -4026,6 +4053,17 @@ processor: introduced: v1.15.0 os_support: [freebsd, linux, macos, solaris, windows] tags: [annotation] + - name: Round + id: round + description: | + This plugin allows to round numerical field values to the configured + precision. This is particularly useful in combination with the [dedup + processor](/telegraf/v1/plugins/#processor-dedup) to reduce the number of + metrics sent to the output if only a lower precision is required for the + values. + introduced: v1.36.0 + os_support: [freebsd, linux, macos, solaris, windows] + tags: [transformation] - name: S2 Geo id: s2geo description: | @@ -4122,7 +4160,7 @@ processor: - name: Template id: template description: | - This plugin applies templates to metrics for generatuing a new tag. The + This plugin applies templates to metrics for generating a new tag. The primary use case of this plugin is to create a tag that can be used for dynamic routing to multiple output plugins or using an output specific routing option. From d018cdedcb27c35f5aa7e80d1f4675fbfc507d3f Mon Sep 17 00:00:00 2001 From: Sven Rebhan Date: Thu, 11 Sep 2025 20:41:37 +0200 Subject: [PATCH 177/179] Updating product version --- data/products.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/data/products.yml b/data/products.yml index ec0014361..0be800a1b 100644 --- a/data/products.yml +++ b/data/products.yml @@ -141,9 +141,9 @@ telegraf: menu_category: other list_order: 6 versions: [v1] - latest: v1.35 + latest: v1.36 latest_patches: - v1: 1.35.4 + v1: 1.36.0 ai_sample_questions: - How do I install and configure Telegraf? - How do I write a custom Telegraf plugin? 
From 62880c9834e4267d05b6ba117d42acc309a4ff42 Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Thu, 11 Sep 2025 16:01:18 -0500 Subject: [PATCH 178/179] Update content/shared/influxdb3-cli/config-options.md --- content/shared/influxdb3-cli/config-options.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/shared/influxdb3-cli/config-options.md b/content/shared/influxdb3-cli/config-options.md index 32c545778..84af67883 100644 --- a/content/shared/influxdb3-cli/config-options.md +++ b/content/shared/influxdb3-cli/config-options.md @@ -1316,7 +1316,7 @@ percentage (portion of available memory) or absolute value in MB--for example: ` Specifies the interval to flush buffered data to a WAL file. Writes that wait for WAL confirmation take up to this interval to complete. -Can be `s` for seconds or `ms` for miliseconds. 100ms is suggested for local disks. +Use `s` for seconds or `ms` for milliseconds. For local disks, `100 ms` is recommended. **Default:** `1s` From e087bc5aaede0c99f1e78254f224d7047fe3693b Mon Sep 17 00:00:00 2001 From: Jason Stirnaman Date: Fri, 12 Sep 2025 10:00:44 -0500 Subject: [PATCH 179/179] Apply suggestions from code review --- content/telegraf/v1/release-notes.md | 7 +------ data/telegraf_plugins.yml | 12 ++++++------ 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/content/telegraf/v1/release-notes.md b/content/telegraf/v1/release-notes.md index 2c96b275d..fadfe0ef0 100644 --- a/content/telegraf/v1/release-notes.md +++ b/content/telegraf/v1/release-notes.md @@ -15,12 +15,7 @@ menu: ### Important Changes -- PR [#17355](https://github.com/influxdata/telegraf/pull/17355) changes the `profiles` support - of `inputs.opentelemetry` from the `v1 experimental` to the `v1 development` as this experimental API - is updated upstream. This will change the metric by for example removing the no-longer reported - `frame_type`, `stack_trace_id`, `build_id`, and `build_id_type` fields. 
Also, the value format of other fields - or tags might have changed. Please refer to the - [OpenTelemetry documentation](https://opentelemetry.io/docs/) for more details. +- Pull request [#17355](https://github.com/influxdata/telegraf/pull/17355) updates `profiles` support in `inputs.opentelemetry` from v1 experimental to v1 development, following upstream changes to the experimental API. This update modifies metric output. For example, the `frame_type`, `stack_trace_id`, `build_id`, and `build_id_type` fields are no longer reported. The value format of other fields or tags might also have changed. For more information, see the [OpenTelemetry documentation](https://opentelemetry.io/docs/). ### New Plugins diff --git a/data/telegraf_plugins.yml b/data/telegraf_plugins.yml index 369410e48..7d43bcdeb 100644 --- a/data/telegraf_plugins.yml +++ b/data/telegraf_plugins.yml @@ -515,7 +515,7 @@ input: Docker containers. > [!NOTE] - > This plugin works only for containers with the `local` or `json-file` or + > This plugin works only for containers with the `local`, `json-file`, or > `journald` logging driver. Make sure Telegraf has sufficient permissions > to access the configured endpoint. introduced: v1.12.0 @@ -1972,9 +1972,9 @@ input: via gRPC. > [!NOTE] - > Telegraf v1.32 through v1.35 support the Profiles signal using the v1 - > experimental API. Telegraf v1.36+ supports the Profiles signal using the - > v1 development API. + > Telegraf v1.32 through v1.35 support the Profiles signal using the **v1 + > experimental API**. Telegraf v1.36+ supports the Profiles signal using the + > **v1 development API**. introduced: v1.19.0 os_support: [freebsd, linux, macos, solaris, windows] tags: [logging, messaging] @@ -4056,10 +4056,10 @@ processor: - name: Round id: round description: | - This plugin allows to round numerical field values to the configured + This plugin rounds numerical field values to the configured precision. 
This is particularly useful in combination with the [dedup processor](/telegraf/v1/plugins/#processor-dedup) to reduce the number of - metrics sent to the output if only a lower precision is required for the + metrics sent to the output when a lower precision is required for the values. introduced: v1.36.0 os_support: [freebsd, linux, macos, solaris, windows]