diff --git a/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt b/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt index 1ebaf7d46..a56077021 100644 --- a/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt +++ b/.ci/vale/styles/config/vocabularies/InfluxDataDocs/accept.txt @@ -31,7 +31,7 @@ LogicalPlan [Mm]onitor MBs? PBs? -Parquet +Parquet|\b\w*-*parquet-\w*\b|\b--\w*parquet\w*\b|`[^`]*parquet[^`]*` Redoc SQLAlchemy SQLAlchemy diff --git a/.husky/_/pre-commit b/.husky/_/pre-commit index 710b28856..4855f6124 100755 --- a/.husky/_/pre-commit +++ b/.husky/_/pre-commit @@ -33,9 +33,6 @@ call_lefthook() then "$dir/node_modules/lefthook/bin/index.js" "$@" - elif go tool lefthook -h >/dev/null 2>&1 - then - go tool lefthook "$@" elif bundle exec lefthook -h >/dev/null 2>&1 then bundle exec lefthook "$@" @@ -45,21 +42,12 @@ call_lefthook() elif pnpm lefthook -h >/dev/null 2>&1 then pnpm lefthook "$@" - elif swift package lefthook >/dev/null 2>&1 + elif swift package plugin lefthook >/dev/null 2>&1 then - swift package --build-path .build/lefthook --disable-sandbox lefthook "$@" + swift package --disable-sandbox plugin lefthook "$@" elif command -v mint >/dev/null 2>&1 then mint run csjones/lefthook-plugin "$@" - elif uv run lefthook -h >/dev/null 2>&1 - then - uv run lefthook "$@" - elif mise exec -- lefthook -h >/dev/null 2>&1 - then - mise exec -- lefthook "$@" - elif devbox run lefthook -h >/dev/null 2>&1 - then - devbox run lefthook "$@" else echo "Can't find lefthook in PATH" fi diff --git a/.husky/_/pre-push b/.husky/_/pre-push index 17b532e00..a0d96ef93 100755 --- a/.husky/_/pre-push +++ b/.husky/_/pre-push @@ -33,9 +33,6 @@ call_lefthook() then "$dir/node_modules/lefthook/bin/index.js" "$@" - elif go tool lefthook -h >/dev/null 2>&1 - then - go tool lefthook "$@" elif bundle exec lefthook -h >/dev/null 2>&1 then bundle exec lefthook "$@" @@ -45,21 +42,12 @@ call_lefthook() elif pnpm lefthook -h >/dev/null 2>&1 then pnpm lefthook 
"$@" - elif swift package lefthook >/dev/null 2>&1 + elif swift package plugin lefthook >/dev/null 2>&1 then - swift package --build-path .build/lefthook --disable-sandbox lefthook "$@" + swift package --disable-sandbox plugin lefthook "$@" elif command -v mint >/dev/null 2>&1 then mint run csjones/lefthook-plugin "$@" - elif uv run lefthook -h >/dev/null 2>&1 - then - uv run lefthook "$@" - elif mise exec -- lefthook -h >/dev/null 2>&1 - then - mise exec -- lefthook "$@" - elif devbox run lefthook -h >/dev/null 2>&1 - then - devbox run lefthook "$@" else echo "Can't find lefthook in PATH" fi diff --git a/.husky/_/prepare-commit-msg b/.husky/_/prepare-commit-msg index 6efab23a3..2655902bc 100755 --- a/.husky/_/prepare-commit-msg +++ b/.husky/_/prepare-commit-msg @@ -33,9 +33,6 @@ call_lefthook() then "$dir/node_modules/lefthook/bin/index.js" "$@" - elif go tool lefthook -h >/dev/null 2>&1 - then - go tool lefthook "$@" elif bundle exec lefthook -h >/dev/null 2>&1 then bundle exec lefthook "$@" @@ -45,21 +42,12 @@ call_lefthook() elif pnpm lefthook -h >/dev/null 2>&1 then pnpm lefthook "$@" - elif swift package lefthook >/dev/null 2>&1 + elif swift package plugin lefthook >/dev/null 2>&1 then - swift package --build-path .build/lefthook --disable-sandbox lefthook "$@" + swift package --disable-sandbox plugin lefthook "$@" elif command -v mint >/dev/null 2>&1 then mint run csjones/lefthook-plugin "$@" - elif uv run lefthook -h >/dev/null 2>&1 - then - uv run lefthook "$@" - elif mise exec -- lefthook -h >/dev/null 2>&1 - then - mise exec -- lefthook "$@" - elif devbox run lefthook -h >/dev/null 2>&1 - then - devbox run lefthook "$@" else echo "Can't find lefthook in PATH" fi diff --git a/content/influxdb3/cloud-dedicated/admin/tables/rename.md b/content/influxdb3/cloud-dedicated/admin/tables/rename.md new file mode 100644 index 000000000..294f38478 --- /dev/null +++ b/content/influxdb3/cloud-dedicated/admin/tables/rename.md @@ -0,0 +1,44 @@ +--- +title: Rename a 
table +description: > + Use the [`influxctl table rename` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/table/rename/) + to rename a table in your {{< product-name omit=" Cluster" >}} cluster. +menu: + influxdb3_cloud_dedicated: + parent: Manage tables +weight: 202 +list_code_example: | + ##### CLI + ```sh + influxctl table rename + ``` +related: + - /influxdb3/cloud-dedicated/reference/cli/influxctl/table/rename/ +--- + +Use the [`influxctl table rename` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/table/rename/) +to rename a table in your {{< product-name omit=" Cluster" >}} cluster. + +> [!Note] +> After renaming a table, write and query requests using the old table name +> are routed to the same table. + +## Rename a table using the influxctl CLI + + + +```bash { placeholders="DATABASE_NAME|CURRENT_TABLE_NAME|NEW_TABLE_NAME" } +influxctl table rename DATABASE_NAME CURRENT_TABLE_NAME NEW_TABLE_NAME +``` + +Replace the following: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Name of the database the table is in +- {{% code-placeholder-key %}}`CURRENT_TABLE_NAME`{{% /code-placeholder-key %}}: Name of the table to change +- {{% code-placeholder-key %}}`NEW_TABLE_NAME`{{% /code-placeholder-key %}}: New name for the table + +> [!Note] +> #### Renamed table retains its ID +> +> The table ID remains the same after renaming. When you list tables, +> you'll see the new name associated with the original table ID. 
diff --git a/content/influxdb3/cloud-dedicated/query-data/execute-queries/client-libraries/python.md b/content/influxdb3/cloud-dedicated/query-data/execute-queries/client-libraries/python.md index ad173145e..0ae8e82cb 100644 --- a/content/influxdb3/cloud-dedicated/query-data/execute-queries/client-libraries/python.md +++ b/content/influxdb3/cloud-dedicated/query-data/execute-queries/client-libraries/python.md @@ -26,6 +26,7 @@ related: - /influxdb3/cloud-dedicated/reference/influxql/ - /influxdb3/cloud-dedicated/reference/sql/ - /influxdb3/cloud-dedicated/query-data/execute-queries/troubleshoot/ + - /influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices/ list_code_example: | ```py @@ -240,7 +241,8 @@ from influxdb_client_3 import InfluxDBClient3 client = InfluxDBClient3( host='{{< influxdb/host >}}', token='DATABASE_TOKEN', - database='DATABASE_NAME' + database='DATABASE_NAME', + timeout=60 # Set default timeout to 60 seconds ) ``` {{% /code-placeholders %}} @@ -275,6 +277,7 @@ client = InfluxDBClient3( host="{{< influxdb/host >}}", token='DATABASE_TOKEN', database='DATABASE_NAME', +timeout=60, # Set default timeout to 60 seconds flight_client_options=flight_client_options( tls_root_certs=cert)) ... 
@@ -332,7 +335,8 @@ client = InfluxDBClient3( # Execute the query and return an Arrow table table = client.query( query="SELECT * FROM home", - language="sql" + language="sql", + timeout=30 # Override default timeout for simple queries (30 seconds) ) print("\n#### View Schema information\n") @@ -377,7 +381,8 @@ client = InfluxDBClient3( # Execute the query and return an Arrow table table = client.query( query="SELECT * FROM home", - language="influxql" + language="influxql", + timeout=30 # Override default timeout for simple queries (30 seconds) ) print("\n#### View Schema information\n") diff --git a/content/influxdb3/cloud-dedicated/query-data/execute-queries/influxctl-cli.md b/content/influxdb3/cloud-dedicated/query-data/execute-queries/influxctl-cli.md index 92d886474..446f3d73e 100644 --- a/content/influxdb3/cloud-dedicated/query-data/execute-queries/influxctl-cli.md +++ b/content/influxdb3/cloud-dedicated/query-data/execute-queries/influxctl-cli.md @@ -13,6 +13,7 @@ influxdb3/cloud-dedicated/tags: [query, sql, influxql, influxctl, CLI] related: - /influxdb3/cloud-dedicated/reference/cli/influxctl/query/ - /influxdb3/cloud-dedicated/get-started/query/#execute-an-sql-query, Get started querying data + - /influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices/, Query timeout best practices - /influxdb3/cloud-dedicated/reference/sql/ - /influxdb3/cloud-dedicated/reference/influxql/ list_code_example: | @@ -142,6 +143,34 @@ Replace the following: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Name of the database to query +## Query timeouts + +The [`influxctl --timeout` global flag](/influxdb3/cloud-dedicated/reference/cli/influxctl/) sets the maximum duration for API calls, including query requests. +If a query takes longer than the specified timeout, the operation will be canceled. 
+ +### Timeout examples + +Use different timeout values based on your query type: + +{{% code-placeholders "DATABASE_(TOKEN|NAME)" %}} +```sh +# Shorter timeout for testing dashboard queries (10 seconds) +influxctl query \ + --timeout 10s \ + --token DATABASE_TOKEN \ + --database DATABASE_NAME \ + "SELECT AVG(temperature) FROM sensors WHERE time >= now() - INTERVAL '1 day'" + +# Longer timeout for analytical queries (5 minutes) +influxctl query \ + --timeout 5m \ + --token DATABASE_TOKEN \ + --database DATABASE_NAME \ + "SELECT room, AVG(temperature) FROM sensors WHERE time >= now() - INTERVAL '30 days' GROUP BY room" +``` +{{% /code-placeholders %}} + +For guidance on selecting appropriate timeout values, see [Query timeout best practices](/influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices/). ## Output format @@ -243,7 +272,7 @@ influxctl query \ {{% /influxdb/custom-timestamps %}} {{< expand-wrapper >}} -{{% expand "View example results with unix nanosecond timestamps" %}} +{{% expand "View example results with Unix nanosecond timestamps" %}} {{% influxdb/custom-timestamps %}} ``` +-------+--------+---------+------+---------------------+ diff --git a/content/influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md b/content/influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md new file mode 100644 index 000000000..fb4a1c875 --- /dev/null +++ b/content/influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md @@ -0,0 +1,17 @@ +--- +title: Query timeout best practices +description: Learn how to set appropriate query timeouts to balance performance and resource protection. 
+menu: + influxdb3_cloud_dedicated: + name: Query timeout best practices + parent: Troubleshoot and optimize queries +weight: 205 +related: + - /influxdb3/cloud-dedicated/reference/client-libraries/v3/ + - /influxdb3/cloud-dedicated/query-data/execute-queries/influxctl-cli/ +source: shared/influxdb3-query-guides/query-timeout-best-practices.md +--- + + diff --git a/content/influxdb3/cloud-dedicated/write-data/troubleshoot.md b/content/influxdb3/cloud-dedicated/write-data/troubleshoot.md index 16dc7ad69..5c835639a 100644 --- a/content/influxdb3/cloud-dedicated/write-data/troubleshoot.md +++ b/content/influxdb3/cloud-dedicated/write-data/troubleshoot.md @@ -10,101 +10,15 @@ menu: influxdb3_cloud_dedicated: name: Troubleshoot issues parent: Write data -influxdb3/cloud-dedicated/tags: [write, line protocol, errors] +influxdb3/cloud-dedicated/tags: [write, line protocol, errors, partial writes] related: + - /influxdb3/cloud-dedicated/get-started/write/ - /influxdb3/cloud-dedicated/reference/syntax/line-protocol/ - /influxdb3/cloud-dedicated/write-data/best-practices/ - /influxdb3/cloud-dedicated/reference/internals/durability/ +source: /shared/influxdb3-write-guides/troubleshoot-distributed.md --- -Learn how to avoid unexpected results and recover from errors when writing to {{% product-name %}}. - -- [Handle write responses](#handle-write-responses) - - [Review HTTP status codes](#review-http-status-codes) -- [Troubleshoot failures](#troubleshoot-failures) -- [Troubleshoot rejected points](#troubleshoot-rejected-points) - -## Handle write responses - -{{% product-name %}} does the following when you send a write request: - - 1. Validates the request. - 2. If successful, attempts to [ingest data](/influxdb3/cloud-dedicated/reference/internals/durability/#data-ingest) from the request body; otherwise, responds with an [error status](#review-http-status-codes). - 3. 
Ingests or rejects data in the batch and returns one of the following HTTP status codes: - - - `204 No Content`: All data in the batch is ingested. - - `400 Bad Request`: Some (_when **partial writes** are configured for the cluster_) or all of the data has been rejected. Data that has not been rejected is ingested and queryable. - - The response body contains error details about [rejected points](#troubleshoot-rejected-points), up to 100 points. - - Writes are synchronous--the response status indicates the final status of the write and all ingested data is queryable. - - To ensure that InfluxDB handles writes in the order you request them, - wait for the response before you send the next request. - -### Review HTTP status codes - -InfluxDB uses conventional HTTP status codes to indicate the success or failure of a request. -The `message` property of the response body may contain additional details about the error. -{{< product-name >}} returns one the following HTTP status codes for a write request: - -| HTTP response code | Response body | Description | -|:------------------------------|:------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `204 No Content"` | no response body | If InfluxDB ingested all of the data in the batch | -| `400 "Bad request"` | error details about rejected points, up to 100 points: `line` contains the first rejected line, `message` describes rejections | If some (_when **partial writes** are configured for the cluster_) or all request data isn't allowed (for example, if it is malformed or 
falls outside of the bucket's retention period)--the response body indicates whether a partial write has occurred or if all data has been rejected | -| `401 "Unauthorized"` | | If the `Authorization` header is missing or malformed or if the [token](/influxdb3/cloud-dedicated/admin/tokens/) doesn't have [permission](/influxdb3/cloud-dedicated/reference/cli/influxctl/token/create/#examples) to write to the database. See [examples using credentials](/influxdb3/cloud-dedicated/get-started/write/#write-line-protocol-to-influxdb) in write requests. | -| `404 "Not found"` | requested **resource type** (for example, "organization" or "database"), and **resource name** | If a requested resource (for example, organization or database) wasn't found | -| `422 "Unprocessable Entity"` | `message` contains details about the error | If the data isn't allowed (for example, falls outside of the database’s retention period). -| `500 "Internal server error"` | | Default status for an error | -| `503 "Service unavailable"` | | If the server is temporarily unavailable to accept writes. The `Retry-After` header contains the number of seconds to wait before trying the write again. - -The `message` property of the response body may contain additional details about the error. -If your data did not write to the database, see how to [troubleshoot rejected points](#troubleshoot-rejected-points). - -## Troubleshoot failures - -If you notice data is missing in your database, do the following: - -- Check the [HTTP status code](#review-http-status-codes) in the response. -- Check the `message` property in the response body for details about the error. -- If the `message` describes a field error, [troubleshoot rejected points](#troubleshoot-rejected-points). -- Verify all lines contain valid syntax ([line protocol](/influxdb3/cloud-dedicated/reference/syntax/line-protocol/)). 
-- Verify the timestamps in your data match the [precision parameter](/influxdb3/cloud-dedicated/reference/glossary/#precision) in your request. -- Minimize payload size and network errors by [optimizing writes](/influxdb3/cloud-dedicated/write-data/best-practices/optimize-writes/). - -## Troubleshoot rejected points - -When writing points from a batch, InfluxDB rejects points that have syntax errors or schema conflicts. -If InfluxDB processes the data in your batch and then rejects points, the [HTTP response](#handle-write-responses) body contains the following properties that describe rejected points: - -- `code`: `"invalid"` -- `line`: the line number of the _first_ rejected point in the batch. -- `message`: a string that contains line-separated error messages, one message for each rejected point in the batch, up to 100 rejected points. - -InfluxDB rejects points for the following reasons: - -- a line protocol parsing error -- an invalid timestamp -- a schema conflict - -Schema conflicts occur when you try to write data that contains any of the following: - -- a wrong data type: the point falls within the same partition (default partitioning is measurement and day) as existing bucket data and contains a different data type for an existing field -- a tag and a field that use the same key - -### Example - -The following example shows a response body for a write request that contains two rejected points: - -```json -{ - "code": "invalid", - "line": 2, - "message": "failed to parse line protocol: - errors encountered on line(s): - error parsing line 2 (1-based): Invalid measurement was provided - error parsing line 4 (1-based): Unable to parse timestamp value '123461000000000000000000000000'" -} -``` - -Check for [field data type](/influxdb3/cloud-dedicated/reference/syntax/line-protocol/#data-types-and-format) differences between the rejected data point and points within the same database and partition--for example, did you attempt to write `string` data to an 
`int` field? + \ No newline at end of file diff --git a/content/influxdb3/cloud-serverless/query-data/execute-queries/client-libraries/python.md b/content/influxdb3/cloud-serverless/query-data/execute-queries/client-libraries/python.md index cd2545135..da203588d 100644 --- a/content/influxdb3/cloud-serverless/query-data/execute-queries/client-libraries/python.md +++ b/content/influxdb3/cloud-serverless/query-data/execute-queries/client-libraries/python.md @@ -27,6 +27,7 @@ related: - /influxdb3/cloud-serverless/reference/influxql/ - /influxdb3/cloud-serverless/reference/sql/ - /influxdb3/cloud-serverless/query-data/execute-queries/troubleshoot/ + - /influxdb3/cloud-serverless/query-data/troubleshoot-and-optimize/query-timeout-best-practices/ list_code_example: | ```py @@ -241,7 +242,8 @@ from influxdb_client_3 import InfluxDBClient3 client = InfluxDBClient3( host='{{< influxdb/host >}}', token='API_TOKEN', - database='BUCKET_NAME' + database='BUCKET_NAME', + timeout=30 # Set default timeout to 30 seconds for serverless ) ``` {{% /code-placeholders %}} @@ -332,7 +334,8 @@ client = InfluxDBClient3( # Execute the query and return an Arrow table table = client.query( query="SELECT * FROM home", - language="sql" + language="sql", + timeout=10 # Override default timeout for simple queries (10 seconds) ) print("\n#### View Schema information\n") @@ -377,7 +380,8 @@ client = InfluxDBClient3( # Execute the query and return an Arrow table table = client.query( query="SELECT * FROM home", - language="influxql" + language="influxql", + timeout=10 # Override default timeout for simple queries (10 seconds) ) print("\n#### View Schema information\n") diff --git a/content/influxdb3/cloud-serverless/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md b/content/influxdb3/cloud-serverless/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md new file mode 100644 index 000000000..e337f6664 --- /dev/null +++ 
b/content/influxdb3/cloud-serverless/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md @@ -0,0 +1,17 @@ +--- +title: Query timeout best practices +description: Learn how to set appropriate query timeouts to balance performance and resource protection. +menu: + influxdb3_cloud_serverless: + name: Query timeout best practices + parent: Troubleshoot and optimize queries + identifier: query-timeout-best-practices +weight: 201 +related: + - /influxdb3/cloud-serverless/reference/client-libraries/v3/ +source: shared/influxdb3-query-guides/query-timeout-best-practices.md +--- + + \ No newline at end of file diff --git a/content/influxdb3/clustered/admin/tables/rename.md b/content/influxdb3/clustered/admin/tables/rename.md new file mode 100644 index 000000000..419e61703 --- /dev/null +++ b/content/influxdb3/clustered/admin/tables/rename.md @@ -0,0 +1,44 @@ +--- +title: Rename a table +description: > + Use the [`influxctl table rename` command](/influxdb3/clustered/reference/cli/influxctl/table/rename/) + to rename a table in your {{< product-name omit=" Cluster" >}} cluster. +menu: + influxdb3_clustered: + parent: Manage tables +weight: 202 +list_code_example: | + ##### CLI + ```sh + influxctl table rename + ``` +related: + - /influxdb3/clustered/reference/cli/influxctl/table/rename/ +--- + +Use the [`influxctl table rename` command](/influxdb3/clustered/reference/cli/influxctl/table/rename/) +to rename a table in your {{< product-name omit=" Clustered" >}} cluster. + +> [!Note] +> After renaming a table, write and query requests using the old table name +> are routed to the same table. 
+ +## Rename a table using the influxctl CLI + + + +```bash { placeholders="DATABASE_NAME|CURRENT_TABLE_NAME|NEW_TABLE_NAME" } +influxctl table rename DATABASE_NAME CURRENT_TABLE_NAME NEW_TABLE_NAME +``` + +Replace the following: + +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Name of the database the table is in +- {{% code-placeholder-key %}}`CURRENT_TABLE_NAME`{{% /code-placeholder-key %}}: Name of the table to change +- {{% code-placeholder-key %}}`NEW_TABLE_NAME`{{% /code-placeholder-key %}}: New name for the table + +> [!Note] +> #### Renamed table retains its ID +> +> The table ID remains the same after renaming. When you list tables, +> you'll see the new name associated with the original table ID. diff --git a/content/influxdb3/clustered/query-data/execute-queries/client-libraries/python.md b/content/influxdb3/clustered/query-data/execute-queries/client-libraries/python.md index 444e109d2..c64e2f681 100644 --- a/content/influxdb3/clustered/query-data/execute-queries/client-libraries/python.md +++ b/content/influxdb3/clustered/query-data/execute-queries/client-libraries/python.md @@ -20,6 +20,7 @@ related: - /influxdb3/clustered/query-data/sql/ - /influxdb3/clustered/reference/influxql/ - /influxdb3/clustered/reference/sql/ + - /influxdb3/clustered/query-data/troubleshoot-and-optimize/query-timeout-best-practices/ list_code_example: | ```py @@ -234,7 +235,8 @@ from influxdb_client_3 import InfluxDBClient3 client = InfluxDBClient3( host='{{< influxdb/host >}}', token='DATABASE_TOKEN', - database='DATABASE_NAME' + database='DATABASE_NAME', + timeout=60 # Set default timeout to 60 seconds ) ``` {{% /code-placeholders %}} @@ -325,7 +327,8 @@ client = InfluxDBClient3( # Execute the query and return an Arrow table table = client.query( query="SELECT * FROM home", - language="sql" + language="sql", + timeout=30 # Override default timeout for simple queries (30 seconds) ) print("\n#### View Schema information\n") @@ -370,7 +373,8 
@@ client = InfluxDBClient3( # Execute the query and return an Arrow table table = client.query( query="SELECT * FROM home", - language="influxql" + language="influxql", + timeout=30 # Override default timeout for simple queries (30 seconds) ) print("\n#### View Schema information\n") diff --git a/content/influxdb3/clustered/query-data/execute-queries/influxctl-cli.md b/content/influxdb3/clustered/query-data/execute-queries/influxctl-cli.md index d218f03a1..f3f19e2aa 100644 --- a/content/influxdb3/clustered/query-data/execute-queries/influxctl-cli.md +++ b/content/influxdb3/clustered/query-data/execute-queries/influxctl-cli.md @@ -12,6 +12,7 @@ influxdb3/clustered/tags: [query, sql, influxql, influxctl, CLI] related: - /influxdb3/clustered/reference/cli/influxctl/query/ - /influxdb3/clustered/get-started/query/#execute-an-sql-query, Get started querying data + - /influxdb3/clustered/query-data/troubleshoot-and-optimize/query-timeout-best-practices/, Query timeout best practices - /influxdb3/clustered/reference/sql/ - /influxdb3/clustered/reference/influxql/ list_code_example: | @@ -141,6 +142,35 @@ Replace the following: - {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Name of the database to query +## Query timeouts + +The [`influxctl --timeout` global flag](/influxdb3/clustered/reference/cli/influxctl/) sets the maximum duration for API calls, including query requests. +If a query takes longer than the specified timeout, the operation will be canceled. 
+ +### Timeout examples + +Use different timeout values based on your query type: + +{{% code-placeholders "DATABASE_(TOKEN|NAME)" %}} +```sh +# Shorter timeout for testing dashboard queries (10 seconds) +influxctl query \ + --timeout 10s \ + --token DATABASE_TOKEN \ + --database DATABASE_NAME \ + "SELECT * FROM sensors WHERE time >= now() - INTERVAL '1 hour' LIMIT 100" + +# Longer timeout for analytical queries (5 minutes) +influxctl query \ + --timeout 300s \ + --token DATABASE_TOKEN \ + --database DATABASE_NAME \ + "SELECT room, AVG(temperature) FROM sensors WHERE time >= now() - INTERVAL '30 days' GROUP BY room" +``` +{{% /code-placeholders %}} + +For guidance on selecting appropriate timeout values, see [Query timeout best practices](/influxdb3/clustered/query-data/troubleshoot-and-optimize/query-timeout-best-practices/). + ## Output format The `influxctl query` command supports the following output formats: @@ -241,7 +271,7 @@ influxctl query \ {{% /influxdb/custom-timestamps %}} {{< expand-wrapper >}} -{{% expand "View example results with unix nanosecond timestamps" %}} +{{% expand "View example results with Unix nanosecond timestamps" %}} {{% influxdb/custom-timestamps %}} ``` +-------+--------+---------+------+---------------------+ diff --git a/content/influxdb3/clustered/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md b/content/influxdb3/clustered/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md new file mode 100644 index 000000000..532f2f976 --- /dev/null +++ b/content/influxdb3/clustered/query-data/troubleshoot-and-optimize/query-timeout-best-practices.md @@ -0,0 +1,18 @@ +--- +title: Query timeout best practices +description: Learn how to set appropriate query timeouts to balance performance and resource protection. 
+menu: + influxdb3_clustered: + name: Query timeout best practices + parent: Troubleshoot and optimize queries + identifier: query-timeout-best-practices +weight: 201 +related: + - /influxdb3/clustered/reference/client-libraries/v3/ + - /influxdb3/clustered/query-data/execute-queries/influxctl-cli/ +source: shared/influxdb3-query-guides/query-timeout-best-practices.md +--- + + diff --git a/content/influxdb3/clustered/reference/release-notes/clustered.md b/content/influxdb3/clustered/reference/release-notes/clustered.md index 9547b3546..a82fa5c9b 100644 --- a/content/influxdb3/clustered/reference/release-notes/clustered.md +++ b/content/influxdb3/clustered/reference/release-notes/clustered.md @@ -61,6 +61,34 @@ directory. This new directory contains artifacts associated with the specified r --- +## 20250721-1796368 {date="2025-07-21"} + +### Quickstart + +```yaml +spec: + package: + image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20250721-1796368 +``` + +#### Release artifacts +- [app-instance-schema.json](/downloads/clustered-release-artifacts/20250721-1796368/app-instance-schema.json) +- [example-customer.yml](/downloads/clustered-release-artifacts/20250721-1796368/example-customer.yml) +- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt) + + +### Highlights + +#### Support for InfluxQL INTEGRAL() + +InfluxQL `INTEGRAL()` function is now supported in the InfluxDB 3.0 database engine. + +### Bug Fixes + +- Fix `SHOW TABLES` timeout when a database has a large number of tables. 
+ +--- + ## 20250707-1777929 {date="2025-07-07"} ### Quickstart diff --git a/content/influxdb3/clustered/write-data/troubleshoot.md b/content/influxdb3/clustered/write-data/troubleshoot.md index 1dc7b94d0..8520ee59e 100644 --- a/content/influxdb3/clustered/write-data/troubleshoot.md +++ b/content/influxdb3/clustered/write-data/troubleshoot.md @@ -11,77 +11,15 @@ menu: influxdb3_clustered: name: Troubleshoot issues parent: Write data -influxdb3/clustered/tags: [write, line protocol, errors] +influxdb3/clustered/tags: [write, line protocol, errors, partial writes] related: + - /influxdb3/clustered/get-started/write/ - /influxdb3/clustered/reference/syntax/line-protocol/ - /influxdb3/clustered/write-data/best-practices/ - /influxdb3/clustered/reference/internals/durability/ +source: /shared/influxdb3-write-guides/troubleshoot-distributed.md --- -Learn how to avoid unexpected results and recover from errors when writing to -{{% product-name %}}. - -- [Handle write responses](#handle-write-responses) - - [Review HTTP status codes](#review-http-status-codes) -- [Troubleshoot failures](#troubleshoot-failures) -- [Troubleshoot rejected points](#troubleshoot-rejected-points) - -## Handle write responses - -{{% product-name %}} does the following when you send a write request: - -1. Validates the request. -2. If successful, attempts to ingest data from the request body; otherwise, - responds with an [error status](#review-http-status-codes). -3. Ingests or rejects data in the batch and returns one of the following HTTP - status codes: - - - `204 No Content`: All data in the batch is ingested. - - `400 Bad Request`: Some or all of the data has been rejected. - Data that has not been rejected is ingested and queryable. - -The response body contains error details about -[rejected points](#troubleshoot-rejected-points), up to 100 points. - -Writes are synchronous--the response status indicates the final status of the -write and all ingested data is queryable. 
- -To ensure that InfluxDB handles writes in the order you request them, -wait for the response before you send the next request. - -### Review HTTP status codes - -InfluxDB uses conventional HTTP status codes to indicate the success or failure of a request. -The `message` property of the response body may contain additional details about the error. -Write requests return the following status codes: - -| HTTP response code | Message | Description | -| :-------------------------------| :--------------------------------------------------------------- | :------------- | -| `204 "Success"` | | If InfluxDB ingested the data | -| `400 "Bad request"` | error details about rejected points, up to 100 points: `line` contains the first rejected line, `message` describes rejections | If some or all request data isn't allowed (for example, if it is malformed or falls outside of the bucket's retention period)--the response body indicates whether a partial write has occurred or if all data has been rejected | -| `401 "Unauthorized"` | | If the `Authorization` header is missing or malformed or if the [token](/influxdb3/clustered/admin/tokens/) doesn't have [permission](/influxdb3/clustered/reference/cli/influxctl/token/create/#examples) to write to the database. See [examples using credentials](/influxdb3/clustered/get-started/write/#write-line-protocol-to-influxdb) in write requests. | -| `404 "Not found"` | requested **resource type** (for example, "organization" or "database"), and **resource name** | If a requested resource (for example, organization or database) wasn't found | -| `500 "Internal server error"` | | Default status for an error | -| `503` "Service unavailable" | | If the server is temporarily unavailable to accept writes. The `Retry-After` header describes when to try the write again. - -If your data did not write to the database, see how to [troubleshoot rejected points](#troubleshoot-rejected-points). 
- -## Troubleshoot failures - -If you notice data is missing in your database, do the following: - -- Check the `message` property in the response body for details about the error. -- If the `message` describes a field error, [troubleshoot rejected points](#troubleshoot-rejected-points). -- Verify all lines contain valid syntax ([line protocol](/influxdb3/clustered/reference/syntax/line-protocol/)). -- Verify the timestamps in your data match the [precision parameter](/influxdb3/clustered/reference/glossary/#precision) in your request. -- Minimize payload size and network errors by [optimizing writes](/influxdb3/clustered/write-data/best-practices/optimize-writes/). - -## Troubleshoot rejected points - -InfluxDB rejects points that fall within the same partition (default partitioning -is by measurement and day) as existing bucket data and have a different data type -for an existing field. - -Check for [field data type](/influxdb3/clustered/reference/syntax/line-protocol/#data-types-and-format) -differences between the rejected data point and points within the same database -and partition--for example, did you attempt to write `string` data to an `int` field? + \ No newline at end of file diff --git a/content/influxdb3/core/reference/cli/influxdb3/delete/_index.md b/content/influxdb3/core/reference/cli/influxdb3/delete/_index.md index d15e985a9..954678c45 100644 --- a/content/influxdb3/core/reference/cli/influxdb3/delete/_index.md +++ b/content/influxdb3/core/reference/cli/influxdb3/delete/_index.md @@ -1,7 +1,7 @@ --- title: influxdb3 delete description: > - The `influxdb3 delete` command deletes a resource such as a database or a table. + The `influxdb3 delete` command deletes a resource such as a cache, database, or table. 
menu: influxdb3_core: parent: influxdb3 @@ -10,6 +10,6 @@ weight: 300 source: /shared/influxdb3-cli/delete/_index.md --- - diff --git a/content/influxdb3/core/reference/cli/influxdb3/delete/token.md b/content/influxdb3/core/reference/cli/influxdb3/delete/token.md new file mode 100644 index 000000000..5a7caf3c0 --- /dev/null +++ b/content/influxdb3/core/reference/cli/influxdb3/delete/token.md @@ -0,0 +1,18 @@ +--- +title: influxdb3 delete token +description: > + The `influxdb3 delete token` command deletes an authorization token from the {{% product-name %}} server. +influxdb3/core/tags: [cli] +menu: + influxdb3_core: + parent: influxdb3 delete +weight: 201 +related: + - /influxdb3/core/admin/tokens/ + - /influxdb3/core/api/v3/#tag/Token, InfluxDB /api/v3 Token API reference +source: /shared/influxdb3-cli/delete/token.md +--- + + diff --git a/content/influxdb3/core/reference/cli/influxdb3/serve.md b/content/influxdb3/core/reference/cli/influxdb3/serve.md index ffc7fe5af..debeb8ddc 100644 --- a/content/influxdb3/core/reference/cli/influxdb3/serve.md +++ b/content/influxdb3/core/reference/cli/influxdb3/serve.md @@ -36,41 +36,23 @@ influxdb3 serve [OPTIONS] --node-id | :--------------- | :--------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------ | | {{< req "\*" >}} | `--node-id` | _See [configuration options](/influxdb3/core/reference/config-options/#node-id)_ | | | `--object-store` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store)_ | -| | `--bucket` | _See [configuration options](/influxdb3/core/reference/config-options/#bucket)_ | -| | `--data-dir` | _See [configuration options](/influxdb3/core/reference/config-options/#data-dir)_ | | | `--admin-token-recovery-http-bind` | _See [configuration options](/influxdb3/core/reference/config-options/#admin-token-recovery-http-bind)_ | +| | 
`--admin-token-recovery-tcp-listener-file-path` | _See [configuration options](/influxdb3/core/reference/config-options/#admin-token-recovery-tcp-listener-file-path)_ | | | `--aws-access-key-id` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-access-key-id)_ | -| | `--aws-secret-access-key` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-secret-access-key)_ | +| | `--aws-allow-http` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-allow-http)_ | | | `--aws-default-region` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-default-region)_ | | | `--aws-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-endpoint)_ | +| | `--aws-secret-access-key` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-secret-access-key)_ | | | `--aws-session-token` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-session-token)_ | -| | `--aws-allow-http` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-allow-http)_ | | | `--aws-skip-signature` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-skip-signature)_ | -| | `--google-service-account` | _See [configuration options](/influxdb3/core/reference/config-options/#google-service-account)_ | -| | `--azure-storage-account` | _See [configuration options](/influxdb3/core/reference/config-options/#azure-storage-account)_ | | | `--azure-storage-access-key` | _See [configuration options](/influxdb3/core/reference/config-options/#azure-storage-access-key)_ | -| | `--object-store-connection-limit` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-connection-limit)_ | -| | `--object-store-http2-only` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-http2-only)_ | -| | `--object-store-http2-max-frame-size` | _See 
[configuration options](/influxdb3/core/reference/config-options/#object-store-http2-max-frame-size)_ | -| | `--object-store-max-retries` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-max-retries)_ | -| | `--object-store-retry-timeout` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-retry-timeout)_ | -| | `--object-store-cache-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-cache-endpoint)_ | -| `-h` | `--help` | Print help information | -| | `--help-all` | Print detailed help information | -| | `--log-filter` | _See [configuration options](/influxdb3/core/reference/config-options/#log-filter)_ | -| `-v` | `--verbose` | Enable verbose output | -| | `--log-destination` | _See [configuration options](/influxdb3/core/reference/config-options/#log-destination)_ | -| | `--log-format` | _See [configuration options](/influxdb3/core/reference/config-options/#log-format)_ | -| | `--traces-exporter` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter)_ | -| | `--traces-exporter-jaeger-agent-host` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-agent-host)_ | -| | `--traces-exporter-jaeger-agent-port` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-agent-port)_ | -| | `--traces-exporter-jaeger-service-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-service-name)_ | -| | `--traces-exporter-jaeger-trace-context-header-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-trace-context-header-name)_ | -| | `--traces-jaeger-debug-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-debug-name)_ | -| | `--traces-jaeger-tags` | _See [configuration 
options](/influxdb3/core/reference/config-options/#traces-jaeger-tags)_ | -| | `--traces-jaeger-max-msgs-per-second` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-max-msgs-per-second)_ | +| | `--azure-storage-account` | _See [configuration options](/influxdb3/core/reference/config-options/#azure-storage-account)_ | +| | `--bucket` | _See [configuration options](/influxdb3/core/reference/config-options/#bucket)_ | +| | `--buffer-mem-limit-mb` | _See [configuration options](/influxdb3/core/reference/config-options/#buffer-mem-limit-mb)_ | +| | `--data-dir` | _See [configuration options](/influxdb3/core/reference/config-options/#data-dir)_ | +| | `--datafusion-config` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-config)_ | +| | `--datafusion-max-parquet-fanout` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-max-parquet-fanout)_ | | | `--datafusion-num-threads` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-num-threads)_ | -| | `--datafusion-runtime-type` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-type)_ | | | `--datafusion-runtime-disable-lifo-slot` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-disable-lifo-slot)_ | | | `--datafusion-runtime-event-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-event-interval)_ | | | `--datafusion-runtime-global-queue-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-global-queue-interval)_ | @@ -78,29 +60,67 @@ influxdb3 serve [OPTIONS] --node-id | | `--datafusion-runtime-max-io-events-per-tick` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-max-io-events-per-tick)_ | | | `--datafusion-runtime-thread-keep-alive` | _See [configuration 
options](/influxdb3/core/reference/config-options/#datafusion-runtime-thread-keep-alive)_ | | | `--datafusion-runtime-thread-priority` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-thread-priority)_ | -| | `--datafusion-max-parquet-fanout` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-max-parquet-fanout)_ | +| | `--datafusion-runtime-type` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-type)_ | | | `--datafusion-use-cached-parquet-loader` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-use-cached-parquet-loader)_ | -| | `--datafusion-config` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-config)_ | -| | `--max-http-request-size` | _See [configuration options](/influxdb3/core/reference/config-options/#max-http-request-size)_ | -| | `--http-bind` | _See [configuration options](/influxdb3/core/reference/config-options/#http-bind)_ | -| | `--exec-mem-pool-bytes` | _See [configuration options](/influxdb3/core/reference/config-options/#exec-mem-pool-bytes)_ | -| | `--gen1-duration` | _See [configuration options](/influxdb3/core/reference/config-options/#gen1-duration)_ | -| | `--wal-flush-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-flush-interval)_ | -| | `--wal-snapshot-size` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-snapshot-size)_ | -| | `--wal-max-write-buffer-size` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-max-write-buffer-size)_ | -| | `--snapshotted-wal-files-to-keep` | _See [configuration options](/influxdb3/core/reference/config-options/#snapshotted-wal-files-to-keep)_ | -| | `--query-log-size` | _See [configuration options](/influxdb3/core/reference/config-options/#query-log-size)_ | -| | `--parquet-mem-cache-size` | _See [configuration 
options](/influxdb3/core/reference/config-options/#parquet-mem-cache-size)_ | -| | `--parquet-mem-cache-prune-percentage` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-prune-percentage)_ | -| | `--parquet-mem-cache-prune-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-prune-interval)_ | +| | `--delete-grace-period` | _See [configuration options](/influxdb3/core/reference/config-options/#delete-grace-period)_ | +| | `--disable-authz` | _See [configuration options](/influxdb3/core/reference/config-options/#disable-authz)_ | | | `--disable-parquet-mem-cache` | _See [configuration options](/influxdb3/core/reference/config-options/#disable-parquet-mem-cache)_ | -| | `--last-cache-eviction-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#last-cache-eviction-interval)_ | | | `--distinct-cache-eviction-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#distinct-cache-eviction-interval)_ | -| | `--plugin-dir` | _See [configuration options](/influxdb3/core/reference/config-options/#plugin-dir)_ | +| | `--exec-mem-pool-bytes` | _See [configuration options](/influxdb3/core/reference/config-options/#exec-mem-pool-bytes)_ | | | `--force-snapshot-mem-threshold` | _See [configuration options](/influxdb3/core/reference/config-options/#force-snapshot-mem-threshold)_ | -| | `--virtual-env-location` | _See [configuration options](/influxdb3/core/reference/config-options/#virtual-env-location)_ | +| | `--gen1-duration` | _See [configuration options](/influxdb3/core/reference/config-options/#gen1-duration)_ | +| | `--gen1-lookback-duration` | _See [configuration options](/influxdb3/core/reference/config-options/#gen1-lookback-duration)_ | +| | `--google-service-account` | _See [configuration options](/influxdb3/core/reference/config-options/#google-service-account)_ | +| | `--hard-delete-default-duration` | _See 
[configuration options](/influxdb3/core/reference/config-options/#hard-delete-default-duration)_ | +| `-h` | `--help` | Print help information | +| | `--help-all` | Print detailed help information | +| | `--http-bind` | _See [configuration options](/influxdb3/core/reference/config-options/#http-bind)_ | +| | `--last-cache-eviction-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#last-cache-eviction-interval)_ | +| | `--log-destination` | _See [configuration options](/influxdb3/core/reference/config-options/#log-destination)_ | +| | `--log-filter` | _See [configuration options](/influxdb3/core/reference/config-options/#log-filter)_ | +| | `--log-format` | _See [configuration options](/influxdb3/core/reference/config-options/#log-format)_ | +| | `--max-http-request-size` | _See [configuration options](/influxdb3/core/reference/config-options/#max-http-request-size)_ | +| | `--object-store-cache-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-cache-endpoint)_ | +| | `--object-store-connection-limit` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-connection-limit)_ | +| | `--object-store-http2-max-frame-size` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-http2-max-frame-size)_ | +| | `--object-store-http2-only` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-http2-only)_ | +| | `--object-store-max-retries` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-max-retries)_ | +| | `--object-store-retry-timeout` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-retry-timeout)_ | | | `--package-manager` | _See [configuration options](/influxdb3/core/reference/config-options/#package-manager)_ | +| | `--parquet-mem-cache-prune-interval` | _See [configuration 
options](/influxdb3/core/reference/config-options/#parquet-mem-cache-prune-interval)_ | +| | `--parquet-mem-cache-prune-percentage` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-prune-percentage)_ | +| | `--parquet-mem-cache-query-path-duration` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-query-path-duration)_ | +| | `--parquet-mem-cache-size` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-size)_ | +| | `--plugin-dir` | _See [configuration options](/influxdb3/core/reference/config-options/#plugin-dir)_ | +| | `--preemptive-cache-age` | _See [configuration options](/influxdb3/core/reference/config-options/#preemptive-cache-age)_ | | | `--query-file-limit` | _See [configuration options](/influxdb3/core/reference/config-options/#query-file-limit)_ | +| | `--query-log-size` | _See [configuration options](/influxdb3/core/reference/config-options/#query-log-size)_ | +| | `--retention-check-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#retention-check-interval)_ | +| | `--snapshotted-wal-files-to-keep` | _See [configuration options](/influxdb3/core/reference/config-options/#snapshotted-wal-files-to-keep)_ | +| | `--table-index-cache-concurrency-limit` | _See [configuration options](/influxdb3/core/reference/config-options/#table-index-cache-concurrency-limit)_ | +| | `--table-index-cache-max-entries` | _See [configuration options](/influxdb3/core/reference/config-options/#table-index-cache-max-entries)_ | +| | `--tcp-listener-file-path` | _See [configuration options](/influxdb3/core/reference/config-options/#tcp-listener-file-path)_ | +| | `--telemetry-disable-upload` | _See [configuration options](/influxdb3/core/reference/config-options/#telemetry-disable-upload)_ | +| | `--telemetry-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#telemetry-endpoint)_ | +| | 
`--tls-cert` | _See [configuration options](/influxdb3/core/reference/config-options/#tls-cert)_ | +| | `--tls-key` | _See [configuration options](/influxdb3/core/reference/config-options/#tls-key)_ | +| | `--tls-minimum-version` | _See [configuration options](/influxdb3/core/reference/config-options/#tls-minimum-version)_ | +| | `--traces-exporter` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter)_ | +| | `--traces-exporter-jaeger-agent-host` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-agent-host)_ | +| | `--traces-exporter-jaeger-agent-port` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-agent-port)_ | +| | `--traces-exporter-jaeger-service-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-service-name)_ | +| | `--traces-exporter-jaeger-trace-context-header-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-trace-context-header-name)_ | +| | `--traces-jaeger-debug-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-debug-name)_ | +| | `--traces-jaeger-max-msgs-per-second` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-max-msgs-per-second)_ | +| | `--traces-jaeger-tags` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-tags)_ | +| `-v` | `--verbose` | Enable verbose output | +| | `--virtual-env-location` | _See [configuration options](/influxdb3/core/reference/config-options/#virtual-env-location)_ | +| | `--wal-flush-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-flush-interval)_ | +| | `--wal-max-write-buffer-size` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-max-write-buffer-size)_ | +| | `--wal-replay-concurrency-limit` | _See 
[configuration options](/influxdb3/core/reference/config-options/#wal-replay-concurrency-limit)_ | +| | `--wal-replay-fail-on-error` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-replay-fail-on-error)_ | +| | `--wal-snapshot-size` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-snapshot-size)_ | +| | `--without-auth` | _See [configuration options](/influxdb3/core/reference/config-options/#without-auth)_ | {{< caption >}} {{< req text="\* Required options" >}} @@ -110,7 +130,7 @@ influxdb3 serve [OPTIONS] --node-id You can use environment variables to define most `influxdb3 serve` options. For more information, see -[Configuration options](/influxdb3/enterprise/reference/config-options/). +[Configuration options](/influxdb3/core/reference/config-options/). ## Examples diff --git a/content/influxdb3/core/reference/config-options.md b/content/influxdb3/core/reference/config-options.md index 6914536eb..11e29f06c 100644 --- a/content/influxdb3/core/reference/config-options.md +++ b/content/influxdb3/core/reference/config-options.md @@ -8,1052 +8,9 @@ menu: parent: Reference name: Configuration options weight: 100 +source: /shared/influxdb3-cli/config-options.md --- -{{< product-name >}} lets you customize your server configuration by using -`influxdb3 serve` command options or by setting environment variables. - -## Configure your server - -Pass configuration options to the `influxdb serve` server using either command -options or environment variables. Command options take precedence over -environment variables. 
- -##### Example influxdb3 serve command options - - - -```sh -influxdb3 serve \ - --object-store file \ - --data-dir ~/.influxdb3 \ - --node-id NODE_ID \ - --log-filter info \ - --max-http-request-size 20971520 \ - --aws-allow-http -``` - -##### Example environment variables - - - -```sh -export INFLUXDB3_OBJECT_STORE=file -export INFLUXDB3_DB_DIR=~/.influxdb3 -export INFLUXDB3_WRITER_IDENTIFIER_PREFIX=my-host -export LOG_FILTER=info -export INFLUXDB3_MAX_HTTP_REQUEST_SIZE=20971520 -export AWS_ALLOW_HTTP=true - -influxdb3 serve -``` - -## Server configuration options - -- [General](#general) - - [object-store](#object-store) - - [data-dir](#data-dir) - - [node-id](#node-id) - - [query-file-limit](#query-file-limit) -- [AWS](#aws) - - [aws-access-key-id](#aws-access-key-id) - - [aws-secret-access-key](#aws-secret-access-key) - - [aws-default-region](#aws-default-region) - - [aws-endpoint](#aws-endpoint) - - [aws-session-token](#aws-session-token) - - [aws-allow-http](#aws-allow-http) - - [aws-skip-signature](#aws-skip-signature) -- [Google Cloud Service](#google-cloud-service) - - [google-service-account](#google-service-account) -- [Microsoft Azure](#microsoft-azure) - - [azure-storage-account](#azure-storage-account) - - [azure-storage-access-key](#azure-storage-access-key) -- [Object Storage](#object-storage) - - [bucket](#bucket) - - [object-store-connection-limit](#object-store-connection-limit) - - [object-store-http2-only](#object-store-http2-only) - - [object-store-http2-max-frame-size](#object-store-http2-max-frame-size) - - [object-store-max-retries](#object-store-max-retries) - - [object-store-retry-timeout](#object-store-retry-timeout) - - [object-store-cache-endpoint](#object-store-cache-endpoint) -- [Logs](#logs) - - [log-filter](#log-filter) - - [log-destination](#log-destination) - - [log-format](#log-format) - - [query-log-size](#query-log-size) -- [Traces](#traces) - - [traces-exporter](#traces-exporter) - - 
[traces-exporter-jaeger-agent-host](#traces-exporter-jaeger-agent-host) - - [traces-exporter-jaeger-agent-port](#traces-exporter-jaeger-agent-port) - - [traces-exporter-jaeger-service-name](#traces-exporter-jaeger-service-name) - - [traces-exporter-jaeger-trace-context-header-name](#traces-exporter-jaeger-trace-context-header-name) - - [traces-jaeger-debug-name](#traces-jaeger-debug-name) - - [traces-jaeger-tags](#traces-jaeger-tags) - - [traces-jaeger-max-msgs-per-second](#traces-jaeger-max-msgs-per-second) -- [DataFusion](#datafusion) - - [datafusion-num-threads](#datafusion-num-threads) - - [datafusion-runtime-type](#datafusion-runtime-type) - - [datafusion-runtime-disable-lifo-slot](#datafusion-runtime-disable-lifo-slot) - - [datafusion-runtime-event-interval](#datafusion-runtime-event-interval) - - [datafusion-runtime-global-queue-interval](#datafusion-runtime-global-queue-interval) - - [datafusion-runtime-max-blocking-threads](#datafusion-runtime-max-blocking-threads) - - [datafusion-runtime-max-io-events-per-tick](#datafusion-runtime-max-io-events-per-tick) - - [datafusion-runtime-thread-keep-alive](#datafusion-runtime-thread-keep-alive) - - [datafusion-runtime-thread-priority](#datafusion-runtime-thread-priority) - - [datafusion-max-parquet-fanout](#datafusion-max-parquet-fanout) - - [datafusion-use-cached-parquet-loader](#datafusion-use-cached-parquet-loader) - - [datafusion-config](#datafusion-config) -- [HTTP](#http) - - [max-http-request-size](#max-http-request-size) - - [http-bind](#http-bind) - - [admin-token-recovery-http-bind](#admin-token-recovery-http-bind) -- [Memory](#memory) - - [exec-mem-pool-bytes](#exec-mem-pool-bytes) - - [buffer-mem-limit-mb](#buffer-mem-limit-mb) - - [force-snapshot-mem-threshold](#force-snapshot-mem-threshold) -- [Write-Ahead Log (WAL)](#write-ahead-log-wal) - - [wal-flush-interval](#wal-flush-interval) - - [wal-snapshot-size](#wal-snapshot-size) - - [wal-max-write-buffer-size](#wal-max-write-buffer-size) - - 
[snapshotted-wal-files-to-keep](#snapshotted-wal-files-to-keep) -- [Compaction](#compaction) - - [gen1-duration](#gen1-duration) -- [Caching](#caching) - - [preemptive-cache-age](#preemptive-cache-age) - - [parquet-mem-cache-size-mb](#parquet-mem-cache-size-mb) - - [parquet-mem-cache-prune-percentage](#parquet-mem-cache-prune-percentage) - - [parquet-mem-cache-prune-interval](#parquet-mem-cache-prune-interval) - - [disable-parquet-mem-cache](#disable-parquet-mem-cache) - - [last-cache-eviction-interval](#last-cache-eviction-interval) - - [distinct-cache-eviction-interval](#distinct-cache-eviction-interval) -- [Processing engine](#processing-engine) - - [plugin-dir](#plugin-dir) - - [virtual-env-location](#virtual-env-location) - - [package-manager](#package-manager) - ---- - -### General - -- [object-store](#object-store) -- [data-dir](#data-dir) -- [node-id](#node-id) -- [query-file-limit](#query-file-limit) - -#### object-store - -Specifies which object storage to use to store Parquet files. -This option supports the following values: - -- `memory` -- `memory-throttled` -- `file` -- `s3` -- `google` -- `azure` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :----------------------- | -| `--object-store` | `INFLUXDB3_OBJECT_STORE` | - ---- - -#### data-dir - -For the `file` object store, defines the location {{< product-name >}} uses to store files locally. -Required when using the `file` [object store](#object-store). - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--data-dir` | `INFLUXDB3_DB_DIR` | - ---- - -#### node-id - -Specifies the node identifier used as a prefix in all object store file paths. -Use a unique node identifier for each host sharing the same object store -configuration--for example, the same bucket. 
- -| influxdb3 serve option | Environment variable | -| :--------------------- | :--------------------------------- | -| `--node-id` | `INFLUXDB3_NODE_IDENTIFIER_PREFIX` | - ---- - -#### query-file-limit - -Limits the number of Parquet files a query can access. - -**Default:** `432` - -With the default `432` setting and the default [`gen1-duration`](#gen1-duration) -setting of 10 minutes, queries can access up to a 72 hours of data, but -potentially less depending on whether all data for a given 10 minute block of -time was ingested during the same period. - -You can increase this limit to allow more files to be queried, but be aware of -the following side-effects: - -- Degraded query performance for queries that read more Parquet files -- Increased memory usage -- Your system potentially killing the `influxdb3` process due to Out-of-Memory - (OOM) errors -- If using object storage to store data, many GET requests to access the data - (as many as 2 per file) - -> [!Note] -> We recommend keeping the default setting and querying smaller time ranges. -> If you need to query longer time ranges or faster query performance on any query -> that accesses an hour or more of data, [InfluxDB 3 Enterprise](/influxdb3/enterprise/) -> optimizes data storage by compacting and rearranging Parquet files to achieve -> faster query performance. 
- -| influxdb3 serve option | Environment variable | -| :--------------------- | :--------------------------- | -| `--query-file-limit` | `INFLUXDB3_QUERY_FILE_LIMIT` | - ---- - -### AWS - -- [aws-access-key-id](#aws-access-key-id) -- [aws-secret-access-key](#aws-secret-access-key) -- [aws-default-region](#aws-default-region) -- [aws-endpoint](#aws-endpoint) -- [aws-session-token](#aws-session-token) -- [aws-allow-http](#aws-allow-http) -- [aws-skip-signature](#aws-skip-signature) - -#### aws-access-key-id - -When using Amazon S3 as the object store, set this to an access key that has -permission to read from and write to the specified S3 bucket. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-access-key-id` | `AWS_ACCESS_KEY_ID` | - ---- - -#### aws-secret-access-key - -When using Amazon S3 as the object store, set this to the secret access key that -goes with the specified access key ID. - -| influxdb3 serve option | Environment variable | -| :------------------------ | :---------------------- | -| `--aws-secret-access-key` | `AWS_SECRET_ACCESS_KEY` | - ---- - -#### aws-default-region - -When using Amazon S3 as the object store, set this to the region that goes with -the specified bucket if different from the fallback value. - -**Default:** `us-east-1` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-default-region` | `AWS_DEFAULT_REGION` | - ---- - -#### aws-endpoint - -When using an Amazon S3 compatibility storage service, set this to the endpoint. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-endpoint` | `AWS_ENDPOINT` | - ---- - -#### aws-session-token - -When using Amazon S3 as an object store, set this to the session token. This is -handy when using a federated login or SSO and fetching credentials via the UI. 
- -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-session-token` | `AWS_SESSION_TOKEN` | - ---- - -#### aws-allow-http - -Allows unencrypted HTTP connections to AWS. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-allow-http` | `AWS_ALLOW_HTTP` | - ---- - -#### aws-skip-signature - -If enabled, S3 object stores do not fetch credentials and do not sign requests. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-skip-signature` | `AWS_SKIP_SIGNATURE` | - ---- - -### Google Cloud Service - -- [google-service-account](#google-service-account) - -#### google-service-account - -When using Google Cloud Storage as the object store, set this to the path to the -JSON file that contains the Google credentials. - -| influxdb3 serve option | Environment variable | -| :------------------------- | :----------------------- | -| `--google-service-account` | `GOOGLE_SERVICE_ACCOUNT` | - ---- - -### Microsoft Azure - -- [azure-storage-account](#azure-storage-account) -- [azure-storage-access-key](#azure-storage-access-key) - -#### azure-storage-account - -When using Microsoft Azure as the object store, set this to the name you see -when navigating to **All Services > Storage accounts > `[name]`**. - -| influxdb3 serve option | Environment variable | -| :------------------------ | :---------------------- | -| `--azure-storage-account` | `AZURE_STORAGE_ACCOUNT` | - ---- - -#### azure-storage-access-key - -When using Microsoft Azure as the object store, set this to one of the Key -values in the Storage account's **Settings > Access keys**. 
- -| influxdb3 serve option | Environment variable | -| :--------------------------- | :------------------------- | -| `--azure-storage-access-key` | `AZURE_STORAGE_ACCESS_KEY` | - ---- - -### Object Storage - -- [bucket](#bucket) -- [object-store-connection-limit](#object-store-connection-limit) -- [object-store-http2-only](#object-store-http2-only) -- [object-store-http2-max-frame-size](#object-store-http2-max-frame-size) -- [object-store-max-retries](#object-store-max-retries) -- [object-store-retry-timeout](#object-store-retry-timeout) -- [object-store-cache-endpoint](#object-store-cache-endpoint) - -#### bucket - -Sets the name of the object storage bucket to use. Must also set -`--object-store` to a cloud object storage for this option to take effect. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--bucket` | `INFLUXDB3_BUCKET` | - ---- - -#### object-store-connection-limit - -When using a network-based object store, limits the number of connections to -this value. - -**Default:** `16` - -| influxdb3 serve option | Environment variable | -| :-------------------------------- | :------------------------------ | -| `--object-store-connection-limit` | `OBJECT_STORE_CONNECTION_LIMIT` | - ---- - -#### object-store-http2-only - -Forces HTTP/2 connections to network-based object stores. - -| influxdb3 serve option | Environment variable | -| :-------------------------- | :------------------------ | -| `--object-store-http2-only` | `OBJECT_STORE_HTTP2_ONLY` | - ---- - -#### object-store-http2-max-frame-size - -Sets the maximum frame size (in bytes/octets) for HTTP/2 connections. - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :---------------------------------- | -| `--object-store-http2-max-frame-size` | `OBJECT_STORE_HTTP2_MAX_FRAME_SIZE` | - ---- - -#### object-store-max-retries - -Defines the maximum number of times to retry a request. 
- -| influxdb3 serve option | Environment variable | -| :--------------------------- | :------------------------- | -| `--object-store-max-retries` | `OBJECT_STORE_MAX_RETRIES` | - ---- - -#### object-store-retry-timeout - -Specifies the maximum length of time from the initial request after which no -further retries are attempted. - -| influxdb3 serve option | Environment variable | -| :----------------------------- | :--------------------------- | -| `--object-store-retry-timeout` | `OBJECT_STORE_RETRY_TIMEOUT` | - ---- - -#### object-store-cache-endpoint - -Sets the endpoint of an S3-compatible, HTTP/2-enabled object store cache. - -| influxdb3 serve option | Environment variable | -| :------------------------------ | :---------------------------- | -| `--object-store-cache-endpoint` | `OBJECT_STORE_CACHE_ENDPOINT` | - ---- - -### Logs - -- [log-filter](#log-filter) -- [log-destination](#log-destination) -- [log-format](#log-format) -- [query-log-size](#query-log-size) - -#### log-filter - -Sets the filter directive for logs. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--log-filter` | `LOG_FILTER` | - ---- - -#### log-destination - -Specifies the destination for logs. - -**Default:** `stdout` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--log-destination` | `LOG_DESTINATION` | - ---- - -#### log-format - -Defines the message format for logs. - -This option supports the following values: - -- `full` _(default)_ - -**Default:** `full` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--log-format` | `LOG_FORMAT` | - ---- - -#### query-log-size - -Defines the size of the query log. Up to this many queries remain in the -log before older queries are evicted to make room for new ones. 
- -**Default:** `1000` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------------- | -| `--query-log-size` | `INFLUXDB3_QUERY_LOG_SIZE` | - ---- - -### Traces - -- [traces-exporter](#traces-exporter) -- [traces-exporter-jaeger-agent-host](#traces-exporter-jaeger-agent-host) -- [traces-exporter-jaeger-agent-port](#traces-exporter-jaeger-agent-port) -- [traces-exporter-jaeger-service-name](#traces-exporter-jaeger-service-name) -- [traces-exporter-jaeger-trace-context-header-name](#traces-exporter-jaeger-trace-context-header-name) -- [traces-jaeger-debug-name](#traces-jaeger-debug-name) -- [traces-jaeger-tags](#traces-jaeger-tags) -- [traces-jaeger-max-msgs-per-second](#traces-jaeger-max-msgs-per-second) - -#### traces-exporter - -Sets the type of tracing exporter. - -**Default:** `none` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--traces-exporter` | `TRACES_EXPORTER` | - ---- - -#### traces-exporter-jaeger-agent-host - -Specifies the Jaeger agent network hostname for tracing. - -**Default:** `0.0.0.0` - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :---------------------------------- | -| `--traces-exporter-jaeger-agent-host` | `TRACES_EXPORTER_JAEGER_AGENT_HOST` | - ---- - -#### traces-exporter-jaeger-agent-port - -Defines the Jaeger agent network port for tracing. - -**Default:** `6831` - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :---------------------------------- | -| `--traces-exporter-jaeger-agent-port` | `TRACES_EXPORTER_JAEGER_AGENT_PORT` | - ---- - -#### traces-exporter-jaeger-service-name - -Sets the Jaeger service name for tracing. 
- -**Default:** `iox-conductor` - -| influxdb3 serve option | Environment variable | -| :-------------------------------------- | :------------------------------------ | -| `--traces-exporter-jaeger-service-name` | `TRACES_EXPORTER_JAEGER_SERVICE_NAME` | - ---- - -#### traces-exporter-jaeger-trace-context-header-name - -Specifies the header name used for passing trace context. - -**Default:** `uber-trace-id` - -| influxdb3 serve option | Environment variable | -| :--------------------------------------------------- | :------------------------------------------------- | -| `--traces-exporter-jaeger-trace-context-header-name` | `TRACES_EXPORTER_JAEGER_TRACE_CONTEXT_HEADER_NAME` | - ---- - -#### traces-jaeger-debug-name - -Specifies the header name used for force sampling in tracing. - -**Default:** `jaeger-debug-id` - -| influxdb3 serve option | Environment variable | -| :--------------------------- | :---------------------------------- | -| `--traces-jaeger-debug-name` | `TRACES_EXPORTER_JAEGER_DEBUG_NAME` | - ---- - -#### traces-jaeger-tags - -Defines a set of `key=value` pairs to annotate tracing spans with. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :---------------------------- | -| `--traces-jaeger-tags` | `TRACES_EXPORTER_JAEGER_TAGS` | - ---- - -#### traces-jaeger-max-msgs-per-second - -Specifies the maximum number of messages sent to a Jaeger service per second. 
- -**Default:** `1000` - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :---------------------------------- | -| `--traces-jaeger-max-msgs-per-second` | `TRACES_JAEGER_MAX_MSGS_PER_SECOND` | - ---- - -### DataFusion - -- [datafusion-num-threads](#datafusion-num-threads) -- [datafusion-runtime-type](#datafusion-runtime-type) -- [datafusion-runtime-disable-lifo-slot](#datafusion-runtime-disable-lifo-slot) -- [datafusion-runtime-event-interval](#datafusion-runtime-event-interval) -- [datafusion-runtime-global-queue-interval](#datafusion-runtime-global-queue-interval) -- [datafusion-runtime-max-blocking-threads](#datafusion-runtime-max-blocking-threads) -- [datafusion-runtime-max-io-events-per-tick](#datafusion-runtime-max-io-events-per-tick) -- [datafusion-runtime-thread-keep-alive](#datafusion-runtime-thread-keep-alive) -- [datafusion-runtime-thread-priority](#datafusion-runtime-thread-priority) -- [datafusion-max-parquet-fanout](#datafusion-max-parquet-fanout) -- [datafusion-use-cached-parquet-loader](#datafusion-use-cached-parquet-loader) -- [datafusion-config](#datafusion-config) - -#### datafusion-num-threads - -Sets the maximum number of DataFusion runtime threads to use. - -| influxdb3 serve option | Environment variable | -| :------------------------- | :--------------------------------- | -| `--datafusion-num-threads` | `INFLUXDB3_DATAFUSION_NUM_THREADS` | - ---- - -#### datafusion-runtime-type - -Specifies the DataFusion tokio runtime type. - -This option supports the following values: - -- `current-thread` -- `multi-thread` _(default)_ -- `multi-thread-alt` - -**Default:** `multi-thread` - -| influxdb3 serve option | Environment variable | -| :-------------------------- | :---------------------------------- | -| `--datafusion-runtime-type` | `INFLUXDB3_DATAFUSION_RUNTIME_TYPE` | - ---- - -#### datafusion-runtime-disable-lifo-slot - -Disables the LIFO slot of the DataFusion runtime. 
- -This option supports the following values: - -- `true` -- `false` - -| influxdb3 serve option | Environment variable | -| :--------------------------------------- | :----------------------------------------------- | -| `--datafusion-runtime-disable-lifo-slot` | `INFLUXDB3_DATAFUSION_RUNTIME_DISABLE_LIFO_SLOT` | - ---- - -#### datafusion-runtime-event-interval - -Sets the number of scheduler ticks after which the scheduler of the DataFusion -tokio runtime polls for external events--for example: timers, I/O. - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :-------------------------------------------- | -| `--datafusion-runtime-event-interval` | `INFLUXDB3_DATAFUSION_RUNTIME_EVENT_INTERVAL` | - ---- - -#### datafusion-runtime-global-queue-interval - -Sets the number of scheduler ticks after which the scheduler of the DataFusion -runtime polls the global task queue. - -| influxdb3 serve option | Environment variable | -| :------------------------------------------- | :--------------------------------------------------- | -| `--datafusion-runtime-global-queue-interval` | `INFLUXDB3_DATAFUSION_RUNTIME_GLOBAL_QUEUE_INTERVAL` | - ---- - -#### datafusion-runtime-max-blocking-threads - -Specifies the limit for additional threads spawned by the DataFusion runtime. - -| influxdb3 serve option | Environment variable | -| :------------------------------------------ | :-------------------------------------------------- | -| `--datafusion-runtime-max-blocking-threads` | `INFLUXDB3_DATAFUSION_RUNTIME_MAX_BLOCKING_THREADS` | - ---- - -#### datafusion-runtime-max-io-events-per-tick - -Configures the maximum number of events processed per tick by the tokio -DataFusion runtime. 
- -| influxdb3 serve option | Environment variable | -| :-------------------------------------------- | :---------------------------------------------------- | -| `--datafusion-runtime-max-io-events-per-tick` | `INFLUXDB3_DATAFUSION_RUNTIME_MAX_IO_EVENTS_PER_TICK` | - ---- - -#### datafusion-runtime-thread-keep-alive - -Sets a custom timeout for a thread in the blocking pool of the tokio DataFusion -runtime. - -| influxdb3 serve option | Environment variable | -| :--------------------------------------- | :----------------------------------------------- | -| `--datafusion-runtime-thread-keep-alive` | `INFLUXDB3_DATAFUSION_RUNTIME_THREAD_KEEP_ALIVE` | - ---- - -#### datafusion-runtime-thread-priority - -Sets the thread priority for tokio DataFusion runtime workers. - -**Default:** `10` - -| influxdb3 serve option | Environment variable | -| :------------------------------------- | :--------------------------------------------- | -| `--datafusion-runtime-thread-priority` | `INFLUXDB3_DATAFUSION_RUNTIME_THREAD_PRIORITY` | - ---- - -#### datafusion-max-parquet-fanout - -When multiple parquet files are required in a sorted way -(deduplication for example), specifies the maximum fanout. - -**Default:** `1000` - -| influxdb3 serve option | Environment variable | -| :-------------------------------- | :---------------------------------------- | -| `--datafusion-max-parquet-fanout` | `INFLUXDB3_DATAFUSION_MAX_PARQUET_FANOUT` | - ---- - -#### datafusion-use-cached-parquet-loader - -Uses a cached parquet loader when reading parquet files from the object store. - -| influxdb3 serve option | Environment variable | -| :--------------------------------------- | :----------------------------------------------- | -| `--datafusion-use-cached-parquet-loader` | `INFLUXDB3_DATAFUSION_USE_CACHED_PARQUET_LOADER` | - ---- - -#### datafusion-config - -Provides custom configuration to DataFusion as a comma-separated list of -`key:value` pairs. 
- -| influxdb3 serve option | Environment variable | -| :--------------------- | :---------------------------- | -| `--datafusion-config` | `INFLUXDB3_DATAFUSION_CONFIG` | - ---- - -### HTTP - -- [max-http-request-size](#max-http-request-size) -- [http-bind](#http-bind) -- [admin-token-recovery-http-bind](#admin-token-recovery-http-bind) - -#### max-http-request-size - -Specifies the maximum size of HTTP requests. - -**Default:** `10485760` - -| influxdb3 serve option | Environment variable | -| :------------------------ | :-------------------------------- | -| `--max-http-request-size` | `INFLUXDB3_MAX_HTTP_REQUEST_SIZE` | - ---- - -#### http-bind - -Defines the address on which InfluxDB serves HTTP API requests. - -**Default:** `0.0.0.0:8181` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------------- | -| `--http-bind` | `INFLUXDB3_HTTP_BIND_ADDR` | - ---- - -#### admin-token-recovery-http-bind - -Enables an admin token recovery HTTP server on a separate port. This server allows regenerating lost admin tokens without existing authentication. The server automatically shuts down after a successful token regeneration. - -> [!Warning] -> This option creates an unauthenticated endpoint that can regenerate admin tokens. Only use this when you have lost access to your admin token and ensure the server is only accessible from trusted networks. 
- -**Default:** `127.0.0.1:8182` (when enabled) - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--admin-token-recovery-http-bind` | `INFLUXDB3_ADMIN_TOKEN_RECOVERY_HTTP_BIND` | - -##### Example usage - -```bash -# Start server with recovery endpoint -influxdb3 serve --admin-token-recovery-http-bind - -# In another terminal, regenerate the admin token -influxdb3 create token --admin --regenerate --host http://127.0.0.1:8182 -``` - ---- - -### Memory - -- [exec-mem-pool-bytes](#exec-mem-pool-bytes) -- [buffer-mem-limit-mb](#buffer-mem-limit-mb) -- [force-snapshot-mem-threshold](#force-snapshot-mem-threshold) - -#### exec-mem-pool-bytes - -Specifies the size of the memory pool used during query execution, in bytes. - -**Default:** `8589934592` - -| influxdb3 serve option | Environment variable | -| :---------------------- | :------------------------------ | -| `--exec-mem-pool-bytes` | `INFLUXDB3_EXEC_MEM_POOL_BYTES` | - ---- - -#### buffer-mem-limit-mb - -Specifies the size limit of the buffered data in MB. If this limit is exceeded, -the server forces a snapshot. - -**Default:** `5000` - -| influxdb3 serve option | Environment variable | -| :---------------------- | :------------------------------ | -| `--buffer-mem-limit-mb` | `INFLUXDB3_BUFFER_MEM_LIMIT_MB` | - ---- - -#### force-snapshot-mem-threshold - -Specifies the threshold for the internal memory buffer. Supports either a -percentage (portion of available memory) or an absolute value -(total bytes)--for example: `70%` or `100000`. 
- -**Default:** `70%` - -| influxdb3 serve option | Environment variable | -| :------------------------------- | :--------------------------------------- | -| `--force-snapshot-mem-threshold` | `INFLUXDB3_FORCE_SNAPSHOT_MEM_THRESHOLD` | - ---- - -### Write-Ahead Log (WAL) - -- [wal-flush-interval](#wal-flush-interval) -- [wal-snapshot-size](#wal-snapshot-size) -- [wal-max-write-buffer-size](#wal-max-write-buffer-size) -- [snapshotted-wal-files-to-keep](#snapshotted-wal-files-to-keep) - -#### wal-flush-interval - -Specifies the interval to flush buffered data to a WAL file. Writes that wait -for WAL confirmation take up to this interval to complete. - -**Default:** `1s` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :----------------------------- | -| `--wal-flush-interval` | `INFLUXDB3_WAL_FLUSH_INTERVAL` | - ---- - -#### wal-snapshot-size - -Defines the number of WAL files to attempt to remove in a snapshot. This, -multiplied by the interval, determines how often snapshots are taken. - -**Default:** `600` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :---------------------------- | -| `--wal-snapshot-size` | `INFLUXDB3_WAL_SNAPSHOT_SIZE` | - ---- - -#### wal-max-write-buffer-size - -Specifies the maximum number of write requests that can be buffered before a -flush must be executed and succeed. - -**Default:** `100000` - -| influxdb3 serve option | Environment variable | -| :---------------------------- | :------------------------------------ | -| `--wal-max-write-buffer-size` | `INFLUXDB3_WAL_MAX_WRITE_BUFFER_SIZE` | - ---- - -#### snapshotted-wal-files-to-keep - -Specifies the number of snapshotted WAL files to retain in the object store. -Flushing the WAL files does not clear the WAL files immediately; -they are deleted when the number of snapshotted WAL files exceeds this number. 
- -**Default:** `300` - -| influxdb3 serve option | Environment variable | -| :-------------------------------- | :-------------------------------- | -| `--snapshotted-wal-files-to-keep` | `INFLUXDB3_NUM_WAL_FILES_TO_KEEP` | - ---- - -### Compaction - -#### gen1-duration - -Specifies the duration that Parquet files are arranged into. Data timestamps -land each row into a file of this duration. Supported durations are `1m`, -`5m`, and `10m`. These files are known as "generation 1" files, which the -compactor in InfluxDB 3 Enterprise can merge into larger generations. - -**Default:** `10m` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------------ | -| `--gen1-duration` | `INFLUXDB3_GEN1_DURATION` | - ---- - -### Caching - -- [preemptive-cache-age](#preemptive-cache-age) -- [parquet-mem-cache-size-mb](#parquet-mem-cache-size-mb) -- [parquet-mem-cache-prune-percentage](#parquet-mem-cache-prune-percentage) -- [parquet-mem-cache-prune-interval](#parquet-mem-cache-prune-interval) -- [disable-parquet-mem-cache](#disable-parquet-mem-cache) -- [last-cache-eviction-interval](#last-cache-eviction-interval) -- [distinct-cache-eviction-interval](#distinct-cache-eviction-interval) - -#### preemptive-cache-age - -Specifies the interval to prefetch into the Parquet cache during compaction. - -**Default:** `3d` - -| influxdb3 serve option | Environment variable | -| :----------------------- | :------------------------------- | -| `--preemptive-cache-age` | `INFLUXDB3_PREEMPTIVE_CACHE_AGE` | - ---- - -#### parquet-mem-cache-size-mb - -Defines the size of the in-memory Parquet cache in megabytes (MB). 
- -**Default:** `1000` - -| influxdb3 serve option | Environment variable | -| :---------------------------- | :------------------------------------ | -| `--parquet-mem-cache-size-mb` | `INFLUXDB3_PARQUET_MEM_CACHE_SIZE_MB` | - ---- - -#### parquet-mem-cache-prune-percentage - -Specifies the percentage of entries to prune during a prune operation on the -in-memory Parquet cache. - -**Default:** `0.1` - -| influxdb3 serve option | Environment variable | -| :------------------------------------- | :--------------------------------------------- | -| `--parquet-mem-cache-prune-percentage` | `INFLUXDB3_PARQUET_MEM_CACHE_PRUNE_PERCENTAGE` | - ---- - -#### parquet-mem-cache-prune-interval - -Sets the interval to check if the in-memory Parquet cache needs to be pruned. - -**Default:** `1s` - -| influxdb3 serve option | Environment variable | -| :----------------------------------- | :------------------------------------------- | -| `--parquet-mem-cache-prune-interval` | `INFLUXDB3_PARQUET_MEM_CACHE_PRUNE_INTERVAL` | - ---- - -#### disable-parquet-mem-cache - -Disables the in-memory Parquet cache. By default, the cache is enabled. - -| influxdb3 serve option | Environment variable | -| :---------------------------- | :------------------------------------ | -| `--disable-parquet-mem-cache` | `INFLUXDB3_DISABLE_PARQUET_MEM_CACHE` | - ---- - -#### last-cache-eviction-interval - -Specifies the interval to evict expired entries from the Last-N-Value cache, -expressed as a human-readable time--for example: `20s`, `1m`, `1h`. - -**Default:** `10s` - -| influxdb3 serve option | Environment variable | -| :------------------------------- | :--------------------------------------- | -| `--last-cache-eviction-interval` | `INFLUXDB3_LAST_CACHE_EVICTION_INTERVAL` | - ---- - -#### distinct-cache-eviction-interval - -Specifies the interval to evict expired entries from the distinct value cache, -expressed as a human-readable time--for example: `20s`, `1m`, `1h`. 
- -**Default:** `10s` - -| influxdb3 serve option | Environment variable | -| :----------------------------------- | :------------------------------------------- | -| `--distinct-cache-eviction-interval` | `INFLUXDB3_DISTINCT_CACHE_EVICTION_INTERVAL` | - ---- - -### Processing engine - -- [plugin-dir](#plugin-dir) -- [virtual-env-location](#virtual-env-location) -- [package-manager](#package-manager) - -#### plugin-dir - -Specifies the local directory that contains Python plugins and their test files. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :--------------------- | -| `--plugin-dir` | `INFLUXDB3_PLUGIN_DIR` | - ---- - -#### virtual-env-location - -Specifies the location of the Python virtual environment that the processing -engine uses. - -| influxdb3 serve option | Environment variable | -| :----------------------- | :--------------------- | -| `--virtual-env-location` | `VIRTUAL_ENV_LOCATION` | - ---- - -#### package-manager - -Specifies the Python package manager that the processing engine uses. - -**Default:** `10s` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--package-manager` | `PACKAGE_MANAGER` | + \ No newline at end of file diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/delete/_index.md b/content/influxdb3/enterprise/reference/cli/influxdb3/delete/_index.md index 99fa0418e..05aa0b877 100644 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/delete/_index.md +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/delete/_index.md @@ -1,7 +1,7 @@ --- title: influxdb3 delete description: > - The `influxdb3 delete` command deletes a resource such as a database or a table. + The `influxdb3 delete` command deletes a resource such as a cache, database, or table. 
menu: influxdb3_enterprise: parent: influxdb3 @@ -10,6 +10,6 @@ weight: 300 source: /shared/influxdb3-cli/delete/_index.md --- - diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/delete/token.md b/content/influxdb3/enterprise/reference/cli/influxdb3/delete/token.md new file mode 100644 index 000000000..da936f12c --- /dev/null +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/delete/token.md @@ -0,0 +1,18 @@ +--- +title: influxdb3 delete token +description: > + The `influxdb3 delete token` command deletes an authorization token from the {{% product-name %}} server. +influxdb3/enterprise/tags: [cli] +menu: + influxdb3_enterprise: + parent: influxdb3 delete +weight: 201 +related: + - /influxdb3/enterprise/admin/tokens/ + - /influxdb3/enterprise/api/v3/#tag/Token, InfluxDB /api/v3 Token API reference +source: /shared/influxdb3-cli/delete/token.md +--- + + diff --git a/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md b/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md index f8e927d75..7fb25d5d9 100644 --- a/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md +++ b/content/influxdb3/enterprise/reference/cli/influxdb3/serve.md @@ -38,6 +38,7 @@ influxdb3 serve [OPTIONS] \ | Option | | Description | | :--------------- | :--------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------ | | | `--admin-token-recovery-http-bind` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#admin-token-recovery-http-bind)_ | +| | `--admin-token-recovery-tcp-listener-file-path` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#admin-token-recovery-tcp-listener-file-path)_ | | | `--aws-access-key-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#aws-access-key-id)_ | | | `--aws-allow-http` | _See [configuration 
options](/influxdb3/enterprise/reference/config-options/#aws-allow-http)_ | | | `--aws-default-region` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#aws-default-region)_ | @@ -48,7 +49,11 @@ influxdb3 serve [OPTIONS] \ | | `--azure-storage-access-key` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-storage-access-key)_ | | | `--azure-storage-account` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-storage-account)_ | | | `--bucket` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#bucket)_ | +| | `--buffer-mem-limit-mb` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#buffer-mem-limit-mb)_ | +| | `--catalog-sync-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#catalog-sync-interval)_ | | {{< req "\*" >}} | `--cluster-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#cluster-id)_ | +| | `--compaction-check-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-check-interval)_ | +| | `--compaction-cleanup-wait` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-cleanup-wait)_ | | | `--compaction-gen2-duration` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-gen2-duration)_ | | | `--compaction-max-num-files-per-plan` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-max-num-files-per-plan)_ | | | `--compaction-multipliers` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-multipliers)_ | @@ -66,16 +71,22 @@ influxdb3 serve [OPTIONS] \ | | `--datafusion-runtime-thread-priority` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-thread-priority)_ | | | `--datafusion-runtime-type` | _See 
[configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-type)_ | | | `--datafusion-use-cached-parquet-loader` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-use-cached-parquet-loader)_ | +| | `--delete-grace-period` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#delete-grace-period)_ | +| | `--disable-authz` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#disable-authz)_ | | | `--disable-parquet-mem-cache` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#disable-parquet-mem-cache)_ | | | `--distinct-cache-eviction-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#distinct-cache-eviction-interval)_ | +| | `--distinct-value-cache-disable-from-history` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#distinct-value-cache-disable-from-history)_ | | | `--exec-mem-pool-bytes` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#exec-mem-pool-bytes)_ | | | `--force-snapshot-mem-threshold` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#force-snapshot-mem-threshold)_ | | | `--gen1-duration` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#gen1-duration)_ | +| | `--gen1-lookback-duration` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#gen1-lookback-duration)_ | | | `--google-service-account` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#google-service-account)_ | +| | `--hard-delete-default-duration` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#hard-delete-default-duration)_ | | `-h` | `--help` | Print help information | | | `--help-all` | Print detailed help information | | | `--http-bind` | _See [configuration 
options](/influxdb3/enterprise/reference/config-options/#http-bind)_ | | | `--last-cache-eviction-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#last-cache-eviction-interval)_ | +| | `--last-value-cache-disable-from-history` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#last-value-cache-disable-from-history)_ | | | `--license-email` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#license-email)_ | | | `--license-file` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#license-file)_ | | | `--log-destination` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#log-destination)_ | @@ -84,6 +95,11 @@ influxdb3 serve [OPTIONS] \ | | `--max-http-request-size` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#max-http-request-size)_ | | | `--mode` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#mode)_ | | {{< req "\*" >}} | `--node-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#node-id)_ | +| | `--node-id-from-env` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#node-id-from-env)_ | +| | `--num-cores` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-cores)_ | +| | `--num-database-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-database-limit)_ | +| | `--num-table-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-table-limit)_ | +| | `--num-total-columns-per-table-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-total-columns-per-table-limit)_ | | | `--object-store` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#object-store)_ | | | `--object-store-cache-endpoint` | _See [configuration 
options](/influxdb3/enterprise/reference/config-options/#object-store-cache-endpoint)_ | | | `--object-store-connection-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#object-store-connection-limit)_ | @@ -101,7 +117,16 @@ influxdb3 serve [OPTIONS] \ | | `--query-file-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#query-file-limit)_ | | | `--query-log-size` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#query-log-size)_ | | | `--replication-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#replication-interval)_ | +| | `--retention-check-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#retention-check-interval)_ | | | `--snapshotted-wal-files-to-keep` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#snapshotted-wal-files-to-keep)_ | +| | `--table-index-cache-concurrency-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#table-index-cache-concurrency-limit)_ | +| | `--table-index-cache-max-entries` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#table-index-cache-max-entries)_ | +| | `--tcp-listener-file-path` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#tcp-listener-file-path)_ | +| | `--telemetry-disable-upload` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#telemetry-disable-upload)_ | +| | `--telemetry-endpoint` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#telemetry-endpoint)_ | +| | `--tls-cert` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#tls-cert)_ | +| | `--tls-key` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#tls-key)_ | +| | `--tls-minimum-version` | _See [configuration 
options](/influxdb3/enterprise/reference/config-options/#tls-minimum-version)_ | | | `--traces-exporter` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-exporter)_ | | | `--traces-exporter-jaeger-agent-host` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-exporter-jaeger-agent-host)_ | | | `--traces-exporter-jaeger-agent-port` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-exporter-jaeger-agent-port)_ | @@ -110,11 +135,16 @@ influxdb3 serve [OPTIONS] \ | | `--traces-jaeger-debug-name` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-jaeger-debug-name)_ | | | `--traces-jaeger-max-msgs-per-second` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-jaeger-max-msgs-per-second)_ | | | `--traces-jaeger-tags` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-jaeger-tags)_ | +| | `--use-pacha-tree` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#use-pacha-tree)_ | | `-v` | `--verbose` | Enable verbose output | | | `--virtual-env-location` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#virtual-env-location)_ | +| | `--wait-for-running-ingestor` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wait-for-running-ingestor)_ | | | `--wal-flush-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wal-flush-interval)_ | | | `--wal-max-write-buffer-size` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wal-max-write-buffer-size)_ | +| | `--wal-replay-concurrency-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wal-replay-concurrency-limit)_ | +| | `--wal-replay-fail-on-error` | _See [configuration 
options](/influxdb3/enterprise/reference/config-options/#wal-replay-fail-on-error)_ | | | `--wal-snapshot-size` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wal-snapshot-size)_ | +| | `--without-auth` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#without-auth)_ | {{< caption >}} {{< req text="\* Required options" >}} diff --git a/content/influxdb3/enterprise/reference/config-options.md b/content/influxdb3/enterprise/reference/config-options.md index ee00c6dd2..cab8a5a77 100644 --- a/content/influxdb3/enterprise/reference/config-options.md +++ b/content/influxdb3/enterprise/reference/config-options.md @@ -8,1283 +8,9 @@ menu: parent: Reference name: Configuration options weight: 100 +source: /shared/influxdb3-cli/config-options.md --- -{{< product-name >}} lets you customize your server configuration by using -`influxdb3 serve` command options or by setting environment variables. - -## Configure your server - -Pass configuration options to the `influxdb3 serve` server using either command -options or environment variables. Command options take precedence over -environment variables. 
- -##### Example `influxdb3 serve` command options - - - -```sh -influxdb3 serve \ - --node-id node0 \ - --cluster-id cluster0 \ - --license-email example@email.com \ - --object-store file \ - --data-dir ~/.influxdb3 \ - --log-filter info -``` - -##### Example environment variables - - - -```sh -export INFLUXDB3_ENTERPRISE_LICENSE_EMAIL=example@email.com -export INFLUXDB3_OBJECT_STORE=file -export INFLUXDB3_DB_DIR=~/.influxdb3 -export LOG_FILTER=info - -influxdb3 serve -``` - -## Server configuration options - -- [General](#general) - - [cluster-id](#cluster-id) - - [data-dir](#data-dir) - - [license-email](#license-email) - - [license-file](#license-file) - - [mode](#mode) - - [node-id](#node-id) - - [node-id-from-env](#node-id-from-env) - - [object-store](#object-store) - - [tls-key](#tls-key) - - [tls-cert](#tls-cert) - - [tls-minimum-version](#tls-minimum-version) - - [without-auth](#without-auth) - - [disable-authz](#disable-authz) -- [AWS](#aws) - - [aws-access-key-id](#aws-access-key-id) - - [aws-secret-access-key](#aws-secret-access-key) - - [aws-default-region](#aws-default-region) - - [aws-endpoint](#aws-endpoint) - - [aws-session-token](#aws-session-token) - - [aws-allow-http](#aws-allow-http) - - [aws-skip-signature](#aws-skip-signature) - -- [Google Cloud Service](#google-cloud-service) - - [google-service-account](#google-service-account) -- [Microsoft Azure](#microsoft-azure) - - [azure-storage-account](#azure-storage-account) - - [azure-storage-access-key](#azure-storage-access-key) -- [Object Storage](#object-storage) - - [bucket](#bucket) - - [object-store-connection-limit](#object-store-connection-limit) - - [object-store-http2-only](#object-store-http2-only) - - [object-store-http2-max-frame-size](#object-store-http2-max-frame-size) - - [object-store-max-retries](#object-store-max-retries) - - [object-store-retry-timeout](#object-store-retry-timeout) - - [object-store-cache-endpoint](#object-store-cache-endpoint) -- [Logs](#logs) - - 
[log-filter](#log-filter) - - [log-destination](#log-destination) - - [log-format](#log-format) - - [query-log-size](#query-log-size) -- [Traces](#traces) - - [traces-exporter](#traces-exporter) - - [traces-exporter-jaeger-agent-host](#traces-exporter-jaeger-agent-host) - - [traces-exporter-jaeger-agent-port](#traces-exporter-jaeger-agent-port) - - [traces-exporter-jaeger-service-name](#traces-exporter-jaeger-service-name) - - [traces-exporter-jaeger-trace-context-header-name](#traces-exporter-jaeger-trace-context-header-name) - - [traces-jaeger-debug-name](#traces-jaeger-debug-name) - - [traces-jaeger-tags](#traces-jaeger-tags) - - [traces-jaeger-max-msgs-per-second](#traces-jaeger-max-msgs-per-second) -- [DataFusion](#datafusion) - - [datafusion-num-threads](#datafusion-num-threads) - - [datafusion-runtime-type](#datafusion-runtime-type) - - [datafusion-runtime-disable-lifo-slot](#datafusion-runtime-disable-lifo-slot) - - [datafusion-runtime-event-interval](#datafusion-runtime-event-interval) - - [datafusion-runtime-global-queue-interval](#datafusion-runtime-global-queue-interval) - - [datafusion-runtime-max-blocking-threads](#datafusion-runtime-max-blocking-threads) - - [datafusion-runtime-max-io-events-per-tick](#datafusion-runtime-max-io-events-per-tick) - - [datafusion-runtime-thread-keep-alive](#datafusion-runtime-thread-keep-alive) - - [datafusion-runtime-thread-priority](#datafusion-runtime-thread-priority) - - [datafusion-max-parquet-fanout](#datafusion-max-parquet-fanout) - - [datafusion-use-cached-parquet-loader](#datafusion-use-cached-parquet-loader) - - [datafusion-config](#datafusion-config) -- [HTTP](#http) - - [max-http-request-size](#max-http-request-size) - - [http-bind](#http-bind) - - [admin-token-recovery-http-bind](#admin-token-recovery-http-bind) -- [Memory](#memory) - - [exec-mem-pool-bytes](#exec-mem-pool-bytes) - - [buffer-mem-limit-mb](#buffer-mem-limit-mb) - - [force-snapshot-mem-threshold](#force-snapshot-mem-threshold) -- [Write-Ahead 
Log (WAL)](#write-ahead-log-wal) - - [wal-flush-interval](#wal-flush-interval) - - [wal-snapshot-size](#wal-snapshot-size) - - [wal-max-write-buffer-size](#wal-max-write-buffer-size) - - [snapshotted-wal-files-to-keep](#snapshotted-wal-files-to-keep) -- [Compaction](#compaction) - - [compaction-row-limit](#compaction-row-limit) - - [compaction-max-num-files-per-plan](#compaction-max-num-files-per-plan) - - [compaction-gen2-duration](#compaction-gen2-duration) - - [compaction-multipliers](#compaction-multipliers) - - [gen1-duration](#gen1-duration) -- [Caching](#caching) - - [preemptive-cache-age](#preemptive-cache-age) - - [parquet-mem-cache-size](#parquet-mem-cache-size) - - [parquet-mem-cache-prune-percentage](#parquet-mem-cache-prune-percentage) - - [parquet-mem-cache-prune-interval](#parquet-mem-cache-prune-interval) - - [parquet-mem-cache-query-path-duration](#parquet-mem-cache-query-path-duration) - - [disable-parquet-mem-cache](#disable-parquet-mem-cache) - - [last-cache-eviction-interval](#last-cache-eviction-interval) - - [last-value-cache-disable-from-history](#last-value-cache-disable-from-history) - - [distinct-cache-eviction-interval](#distinct-cache-eviction-interval) - - [distinct-value-cache-disable-from-history](#distinct-value-cache-disable-from-history) - - [query-file-limit](#query-file-limit) -- [Processing Engine](#processing-engine) - - [plugin-dir](#plugin-dir) - - [virtual-env-location](#virtual-env-location) - - [package-manager](#package-manager) - ---- - -### General - -- [cluster-id](#cluster-id) -- [data-dir](#data-dir) -- [license-email](#license-email) -- [license-file](#license-file) -- [mode](#mode) -- [node-id](#node-id) -- [object-store](#object-store) -- [query-file-limit](#query-file-limit) - -#### cluster-id - -Specifies the cluster identifier that prefixes the object store path for the Enterprise Catalog. -This value must be different than the [`--node-id`](#node-id) value. 
- -| influxdb3 serve option | Environment variable | -| :--------------------- | :--------------------------------- | -| `--cluster-id` | `INFLUXDB3_ENTERPRISE_CLUSTER_ID` | - ---- - -#### data-dir - -For the `file` object store, defines the location {{< product-name >}} uses to store files locally. -Required when using the `file` [object store](#object-store). - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--data-dir` | `INFLUXDB3_DB_DIR` | - ---- - -#### license-email - -Specifies the email address to associate with your {{< product-name >}} license -and automatically responds to the interactive email prompt when the server starts. -This option is mutually exclusive with [license-file](#license-file). - -| influxdb3 serve option | Environment variable | -| :--------------------- | :----------------------------------- | -| `--license-email` | `INFLUXDB3_ENTERPRISE_LICENSE_EMAIL` | - ---- - -#### license-file - -Specifies the path to a license file for {{< product-name >}}. When provided, the license -file's contents are used instead of requesting a new license. -This option is mutually exclusive with [license-email](#license-email). - -| influxdb3 serve option | Environment variable | -| :--------------------- | :----------------------------------- | -| `--license-file` | `INFLUXDB3_ENTERPRISE_LICENSE_FILE` | - ---- - -#### mode - -Sets the mode to start the server in. - -This option supports the following values: - -- `all` _(default)_: Enables all server modes -- `ingest`: Enables only data ingest capabilities -- `query`: Enables only query capabilities -- `compact`: Enables only compaction processes -- `process`: Enables only data processing capabilities - -You can specify multiple modes using a comma-delimited list (for example, `ingest,query`). 
- -**Default:** `all` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :-------------------------- | -| `--mode` | `INFLUXDB3_ENTERPRISE_MODE` | - ---- - -#### node-id - -Specifies the node identifier used as a prefix in all object store file paths. -This should be unique for any hosts sharing the same object store -configuration--for example, the same bucket. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :--------------------------------- | -| `--node-id` | `INFLUXDB3_NODE_IDENTIFIER_PREFIX` | - - -#### node-id-from-env - -Specifies the node identifier used as a prefix in all object store file paths. -Takes the name of an environment variable as an argument and uses the value of that environment variable as the node identifier. -This option cannot be used with the `--node-id` option. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :----------------------------------- | -| `--node-id-from-env` | `INFLUXDB3_NODE_IDENTIFIER_FROM_ENV` | - -##### Example using --node-id-from-env - -```bash -export DATABASE_NODE=node0 && influxdb3 serve \ - --node-id-from-env DATABASE_NODE \ - --cluster-id cluster0 \ - --object-store file \ - --data-dir ~/.influxdb3/data -``` - ---- - -#### object-store - -Specifies which object storage to use to store Parquet files. 
-This option supports the following values: - -- `memory`: Effectively no object persistence -- `memory-throttled`: Like `memory` but with latency and throughput that somewhat resembles a cloud object store -- `file`: Stores objects in the local filesystem (must also set `--data-dir`) -- `s3`: Amazon S3 (must also set `--bucket`, `--aws-access-key-id`, `--aws-secret-access-key`, and possibly `--aws-default-region`) -- `google`: Google Cloud Storage (must also set `--bucket` and `--google-service-account`) -- `azure`: Microsoft Azure blob storage (must also set `--bucket`, `--azure-storage-account`, and `--azure-storage-access-key`) - -| influxdb3 serve option | Environment variable | -| :--------------------- | :----------------------- | -| `--object-store` | `INFLUXDB3_OBJECT_STORE` | - ---- - -#### tls-key - -The path to a key file for TLS to be enabled. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :--------------------- | -| `--tls-key` | `INFLUXDB3_TLS_KEY` | - ---- - -#### tls-cert - -The path to a cert file for TLS to be enabled. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :--------------------- | -| `--tls-cert` | `INFLUXDB3_TLS_CERT` | - ---- - -#### tls-minimum-version - -The minimum version for TLS. -Valid values are `tls-1.2` or `tls-1.3`. -Default is `tls-1.2`. - -| influxdb3 serve option | Environment variable | -| :---------------------- | :----------------------- | -| `--tls-minimum-version` | `INFLUXDB3_TLS_MINIMUM_VERSION` | - ---- - -#### without-auth - -Disables authentication for all server actions (CLI commands and API requests). -The server processes all requests without requiring tokens or authentication. - ---- - -#### disable-authz - -Optionally disable authz by passing in a comma separated list of resources. -Valid values are `health`, `ping`, and `metrics`. 
- ---- - -### AWS - -- [aws-access-key-id](#aws-access-key-id) -- [aws-secret-access-key](#aws-secret-access-key) -- [aws-default-region](#aws-default-region) -- [aws-endpoint](#aws-endpoint) -- [aws-session-token](#aws-session-token) -- [aws-allow-http](#aws-allow-http) -- [aws-skip-signature](#aws-skip-signature) - -#### aws-access-key-id - -When using Amazon S3 as the object store, set this to an access key that has -permission to read from and write to the specified S3 bucket. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-access-key-id` | `AWS_ACCESS_KEY_ID` | - ---- - -#### aws-secret-access-key - -When using Amazon S3 as the object store, set this to the secret access key that -goes with the specified access key ID. - -| influxdb3 serve option | Environment variable | -| :------------------------ | :---------------------- | -| `--aws-secret-access-key` | `AWS_SECRET_ACCESS_KEY` | - ---- - -#### aws-default-region - -When using Amazon S3 as the object store, set this to the region that goes with -the specified bucket if different from the fallback value. - -**Default:** `us-east-1` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-default-region` | `AWS_DEFAULT_REGION` | - ---- - -#### aws-endpoint - -When using an Amazon S3 compatibility storage service, set this to the endpoint. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-endpoint` | `AWS_ENDPOINT` | - ---- - -#### aws-session-token - -When using Amazon S3 as an object store, set this to the session token. This is -handy when using a federated login or SSO and fetching credentials via the UI. 
- -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-session-token` | `AWS_SESSION_TOKEN` | - ---- - -#### aws-allow-http - -Allows unencrypted HTTP connections to AWS. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-allow-http` | `AWS_ALLOW_HTTP` | - ---- - -#### aws-skip-signature - -If enabled, S3 object stores do not fetch credentials and do not sign requests. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--aws-skip-signature` | `AWS_SKIP_SIGNATURE` | - ---- - -### Google Cloud Service - -- [google-service-account](#google-service-account) - -#### google-service-account - -When using Google Cloud Storage as the object store, set this to the path to the -JSON file that contains the Google credentials. - -| influxdb3 serve option | Environment variable | -| :------------------------- | :----------------------- | -| `--google-service-account` | `GOOGLE_SERVICE_ACCOUNT` | - ---- - -### Microsoft Azure - -- [azure-storage-account](#azure-storage-account) -- [azure-storage-access-key](#azure-storage-access-key) - -#### azure-storage-account - -When using Microsoft Azure as the object store, set this to the name you see -when navigating to **All Services > Storage accounts > `[name]`**. - -| influxdb3 serve option | Environment variable | -| :------------------------ | :---------------------- | -| `--azure-storage-account` | `AZURE_STORAGE_ACCOUNT` | - ---- - -#### azure-storage-access-key - -When using Microsoft Azure as the object store, set this to one of the Key -values in the Storage account's **Settings > Access keys**. 
- -| influxdb3 serve option | Environment variable | -| :--------------------------- | :------------------------- | -| `--azure-storage-access-key` | `AZURE_STORAGE_ACCESS_KEY` | - ---- - -### Object Storage - -- [bucket](#bucket) -- [object-store-connection-limit](#object-store-connection-limit) -- [object-store-http2-only](#object-store-http2-only) -- [object-store-http2-max-frame-size](#object-store-http2-max-frame-size) -- [object-store-max-retries](#object-store-max-retries) -- [object-store-retry-timeout](#object-store-retry-timeout) -- [object-store-cache-endpoint](#object-store-cache-endpoint) - -#### bucket - -Sets the name of the object storage bucket to use. Must also set -`--object-store` to a cloud object storage for this option to take effect. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--bucket` | `INFLUXDB3_BUCKET` | - ---- - -#### object-store-connection-limit - -When using a network-based object store, limits the number of connections to -this value. - -**Default:** `16` - -| influxdb3 serve option | Environment variable | -| :-------------------------------- | :------------------------------ | -| `--object-store-connection-limit` | `OBJECT_STORE_CONNECTION_LIMIT` | - ---- - -#### object-store-http2-only - -Forces HTTP/2 connections to network-based object stores. - -| influxdb3 serve option | Environment variable | -| :-------------------------- | :------------------------ | -| `--object-store-http2-only` | `OBJECT_STORE_HTTP2_ONLY` | - ---- - -#### object-store-http2-max-frame-size - -Sets the maximum frame size (in bytes/octets) for HTTP/2 connections. - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :---------------------------------- | -| `--object-store-http2-max-frame-size` | `OBJECT_STORE_HTTP2_MAX_FRAME_SIZE` | - ---- - -#### object-store-max-retries - -Defines the maximum number of times to retry a request. 
- -| influxdb3 serve option | Environment variable | -| :--------------------------- | :------------------------- | -| `--object-store-max-retries` | `OBJECT_STORE_MAX_RETRIES` | - ---- - -#### object-store-retry-timeout - -Specifies the maximum length of time from the initial request after which no -further retries are attempted. - -| influxdb3 serve option | Environment variable | -| :----------------------------- | :--------------------------- | -| `--object-store-retry-timeout` | `OBJECT_STORE_RETRY_TIMEOUT` | - ---- - -#### object-store-cache-endpoint - -Sets the endpoint of an S3-compatible, HTTP/2-enabled object store cache. - -| influxdb3 serve option | Environment variable | -| :------------------------------ | :---------------------------- | -| `--object-store-cache-endpoint` | `OBJECT_STORE_CACHE_ENDPOINT` | - ---- - -### Logs - -- [log-filter](#log-filter) -- [log-destination](#log-destination) -- [log-format](#log-format) -- [query-log-size](#query-log-size) - -#### log-filter - -Sets the filter directive for logs. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--log-filter` | `LOG_FILTER` | - ---- - -#### log-destination - -Specifies the destination for logs. - -**Default:** `stdout` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--log-destination` | `LOG_DESTINATION` | - ---- - -#### log-format - -Defines the message format for logs. - -This option supports the following values: - -- `full` _(default)_ - -**Default:** `full` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--log-format` | `LOG_FORMAT` | - ---- - -#### query-log-size - -Defines the size of the query log. Up to this many queries remain in the -log before older queries are evicted to make room for new ones. 
- -**Default:** `1000` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------------- | -| `--query-log-size` | `INFLUXDB3_QUERY_LOG_SIZE` | - ---- - -### Traces - -- [traces-exporter](#traces-exporter) -- [traces-exporter-jaeger-agent-host](#traces-exporter-jaeger-agent-host) -- [traces-exporter-jaeger-agent-port](#traces-exporter-jaeger-agent-port) -- [traces-exporter-jaeger-service-name](#traces-exporter-jaeger-service-name) -- [traces-exporter-jaeger-trace-context-header-name](#traces-exporter-jaeger-trace-context-header-name) -- [traces-jaeger-debug-name](#traces-jaeger-debug-name) -- [traces-jaeger-tags](#traces-jaeger-tags) -- [traces-jaeger-max-msgs-per-second](#traces-jaeger-max-msgs-per-second) - -#### traces-exporter - -Sets the type of tracing exporter. - -**Default:** `none` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--traces-exporter` | `TRACES_EXPORTER` | - ---- - -#### traces-exporter-jaeger-agent-host - -Specifies the Jaeger agent network hostname for tracing. - -**Default:** `0.0.0.0` - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :---------------------------------- | -| `--traces-exporter-jaeger-agent-host` | `TRACES_EXPORTER_JAEGER_AGENT_HOST` | - ---- - -#### traces-exporter-jaeger-agent-port - -Defines the Jaeger agent network port for tracing. - -**Default:** `6831` - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :---------------------------------- | -| `--traces-exporter-jaeger-agent-port` | `TRACES_EXPORTER_JAEGER_AGENT_PORT` | - ---- - -#### traces-exporter-jaeger-service-name - -Sets the Jaeger service name for tracing. 
- -**Default:** `iox-conductor` - -| influxdb3 serve option | Environment variable | -| :-------------------------------------- | :------------------------------------ | -| `--traces-exporter-jaeger-service-name` | `TRACES_EXPORTER_JAEGER_SERVICE_NAME` | - ---- - -#### traces-exporter-jaeger-trace-context-header-name - -Specifies the header name used for passing trace context. - -**Default:** `uber-trace-id` - -| influxdb3 serve option | Environment variable | -| :--------------------------------------------------- | :------------------------------------------------- | -| `--traces-exporter-jaeger-trace-context-header-name` | `TRACES_EXPORTER_JAEGER_TRACE_CONTEXT_HEADER_NAME` | - ---- - -#### traces-jaeger-debug-name - -Specifies the header name used for force sampling in tracing. - -**Default:** `jaeger-debug-id` - -| influxdb3 serve option | Environment variable | -| :--------------------------- | :---------------------------------- | -| `--traces-jaeger-debug-name` | `TRACES_EXPORTER_JAEGER_DEBUG_NAME` | - ---- - -#### traces-jaeger-tags - -Defines a set of `key=value` pairs to annotate tracing spans with. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :---------------------------- | -| `--traces-jaeger-tags` | `TRACES_EXPORTER_JAEGER_TAGS` | - ---- - -#### traces-jaeger-max-msgs-per-second - -Specifies the maximum number of messages sent to a Jaeger service per second. 
- -**Default:** `1000` - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :---------------------------------- | -| `--traces-jaeger-max-msgs-per-second` | `TRACES_JAEGER_MAX_MSGS_PER_SECOND` | - ---- - -### DataFusion - -- [datafusion-num-threads](#datafusion-num-threads) -- [datafusion-runtime-type](#datafusion-runtime-type) -- [datafusion-runtime-disable-lifo-slot](#datafusion-runtime-disable-lifo-slot) -- [datafusion-runtime-event-interval](#datafusion-runtime-event-interval) -- [datafusion-runtime-global-queue-interval](#datafusion-runtime-global-queue-interval) -- [datafusion-runtime-max-blocking-threads](#datafusion-runtime-max-blocking-threads) -- [datafusion-runtime-max-io-events-per-tick](#datafusion-runtime-max-io-events-per-tick) -- [datafusion-runtime-thread-keep-alive](#datafusion-runtime-thread-keep-alive) -- [datafusion-runtime-thread-priority](#datafusion-runtime-thread-priority) -- [datafusion-max-parquet-fanout](#datafusion-max-parquet-fanout) -- [datafusion-use-cached-parquet-loader](#datafusion-use-cached-parquet-loader) -- [datafusion-config](#datafusion-config) - -#### datafusion-num-threads - -Sets the maximum number of DataFusion runtime threads to use. - -| influxdb3 serve option | Environment variable | -| :------------------------- | :--------------------------------- | -| `--datafusion-num-threads` | `INFLUXDB3_DATAFUSION_NUM_THREADS` | - ---- - -#### datafusion-runtime-type - -Specifies the DataFusion tokio runtime type. - -This option supports the following values: - -- `current-thread` -- `multi-thread` _(default)_ -- `multi-thread-alt` - -**Default:** `multi-thread` - -| influxdb3 serve option | Environment variable | -| :-------------------------- | :---------------------------------- | -| `--datafusion-runtime-type` | `INFLUXDB3_DATAFUSION_RUNTIME_TYPE` | - ---- - -#### datafusion-runtime-disable-lifo-slot - -Disables the LIFO slot of the DataFusion runtime. 
- -This option supports the following values: - -- `true` -- `false` - -| influxdb3 serve option | Environment variable | -| :--------------------------------------- | :----------------------------------------------- | -| `--datafusion-runtime-disable-lifo-slot` | `INFLUXDB3_DATAFUSION_RUNTIME_DISABLE_LIFO_SLOT` | - ---- - -#### datafusion-runtime-event-interval - -Sets the number of scheduler ticks after which the scheduler of the DataFusion -tokio runtime polls for external events--for example: timers, I/O. - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :-------------------------------------------- | -| `--datafusion-runtime-event-interval` | `INFLUXDB3_DATAFUSION_RUNTIME_EVENT_INTERVAL` | - ---- - -#### datafusion-runtime-global-queue-interval - -Sets the number of scheduler ticks after which the scheduler of the DataFusion -runtime polls the global task queue. - -| influxdb3 serve option | Environment variable | -| :------------------------------------------- | :--------------------------------------------------- | -| `--datafusion-runtime-global-queue-interval` | `INFLUXDB3_DATAFUSION_RUNTIME_GLOBAL_QUEUE_INTERVAL` | - ---- - -#### datafusion-runtime-max-blocking-threads - -Specifies the limit for additional threads spawned by the DataFusion runtime. - -| influxdb3 serve option | Environment variable | -| :------------------------------------------ | :-------------------------------------------------- | -| `--datafusion-runtime-max-blocking-threads` | `INFLUXDB3_DATAFUSION_RUNTIME_MAX_BLOCKING_THREADS` | - ---- - -#### datafusion-runtime-max-io-events-per-tick - -Configures the maximum number of events processed per tick by the tokio -DataFusion runtime. 
- -| influxdb3 serve option | Environment variable | -| :-------------------------------------------- | :---------------------------------------------------- | -| `--datafusion-runtime-max-io-events-per-tick` | `INFLUXDB3_DATAFUSION_RUNTIME_MAX_IO_EVENTS_PER_TICK` | - ---- - -#### datafusion-runtime-thread-keep-alive - -Sets a custom timeout for a thread in the blocking pool of the tokio DataFusion -runtime. - -| influxdb3 serve option | Environment variable | -| :--------------------------------------- | :----------------------------------------------- | -| `--datafusion-runtime-thread-keep-alive` | `INFLUXDB3_DATAFUSION_RUNTIME_THREAD_KEEP_ALIVE` | - ---- - -#### datafusion-runtime-thread-priority - -Sets the thread priority for tokio DataFusion runtime workers. - -**Default:** `10` - -| influxdb3 serve option | Environment variable | -| :------------------------------------- | :--------------------------------------------- | -| `--datafusion-runtime-thread-priority` | `INFLUXDB3_DATAFUSION_RUNTIME_THREAD_PRIORITY` | - ---- - -#### datafusion-max-parquet-fanout - -When multiple parquet files are required in a sorted way -(deduplication for example), specifies the maximum fanout. - -**Default:** `1000` - -| influxdb3 serve option | Environment variable | -| :-------------------------------- | :---------------------------------------- | -| `--datafusion-max-parquet-fanout` | `INFLUXDB3_DATAFUSION_MAX_PARQUET_FANOUT` | - ---- - -#### datafusion-use-cached-parquet-loader - -Uses a cached parquet loader when reading parquet files from the object store. - -| influxdb3 serve option | Environment variable | -| :--------------------------------------- | :----------------------------------------------- | -| `--datafusion-use-cached-parquet-loader` | `INFLUXDB3_DATAFUSION_USE_CACHED_PARQUET_LOADER` | - ---- - -#### datafusion-config - -Provides custom configuration to DataFusion as a comma-separated list of -`key:value` pairs. 
- -| influxdb3 serve option | Environment variable | -| :--------------------- | :---------------------------- | -| `--datafusion-config` | `INFLUXDB3_DATAFUSION_CONFIG` | - ---- - -### HTTP - -- [max-http-request-size](#max-http-request-size) -- [http-bind](#http-bind) - -#### max-http-request-size - -Specifies the maximum size of HTTP requests. - -**Default:** `10485760` - -| influxdb3 serve option | Environment variable | -| :------------------------ | :-------------------------------- | -| `--max-http-request-size` | `INFLUXDB3_MAX_HTTP_REQUEST_SIZE` | - ---- - -#### http-bind - -Defines the address on which InfluxDB serves HTTP API requests. - -**Default:** `0.0.0.0:8181` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------------- | -| `--http-bind` | `INFLUXDB3_HTTP_BIND_ADDR` | - ---- - -#### admin-token-recovery-http-bind - -Enables an admin token recovery HTTP server on a separate port. This server allows regenerating lost admin tokens without existing authentication. The server automatically shuts down after a successful token regeneration. - -> [!Warning] -> This option creates an unauthenticated endpoint that can regenerate admin tokens. Only use this when you have lost access to your admin token and ensure the server is only accessible from trusted networks. 
- -**Default:** `127.0.0.1:8182` (when enabled) - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--admin-token-recovery-http-bind` | `INFLUXDB3_ADMIN_TOKEN_RECOVERY_HTTP_BIND` | - -##### Example usage - -```bash -# Start server with recovery endpoint -influxdb3 serve --admin-token-recovery-http-bind - -# In another terminal, regenerate the admin token -influxdb3 create token --admin --regenerate --host http://127.0.0.1:8182 -``` - ---- - -### Memory - -- [exec-mem-pool-bytes](#exec-mem-pool-bytes) -- [buffer-mem-limit-mb](#buffer-mem-limit-mb) -- [force-snapshot-mem-threshold](#force-snapshot-mem-threshold) - -#### exec-mem-pool-bytes - -Specifies the size of memory pool used during query execution. -Can be given as absolute value in bytes or as a percentage of the total available memory--for -example: `8000000000` or `10%`. - -**Default:** `20%` - -| influxdb3 serve option | Environment variable | -| :---------------------- | :------------------------------ | -| `--exec-mem-pool-bytes` | `INFLUXDB3_EXEC_MEM_POOL_BYTES` | - ---- - -#### force-snapshot-mem-threshold - - -Specifies the threshold for the internal memory buffer. Supports either a -percentage (portion of available memory) or absolute value in MB--for example: `70%` or `1000`. - -**Default:** `50%` - -| influxdb3 serve option | Environment variable | -| :------------------------------- | :--------------------------------------- | -| `--force-snapshot-mem-threshold` | `INFLUXDB3_FORCE_SNAPSHOT_MEM_THRESHOLD` | - ---- - -### Write-Ahead Log (WAL) - -- [wal-flush-interval](#wal-flush-interval) -- [wal-snapshot-size](#wal-snapshot-size) -- [wal-max-write-buffer-size](#wal-max-write-buffer-size) -- [snapshotted-wal-files-to-keep](#snapshotted-wal-files-to-keep) - -#### wal-flush-interval - -Specifies the interval to flush buffered data to a WAL file. Writes that wait -for WAL confirmation take up to this interval to complete. 
- -**Default:** `1s` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :----------------------------- | -| `--wal-flush-interval` | `INFLUXDB3_WAL_FLUSH_INTERVAL` | - ---- - -#### wal-snapshot-size - -Defines the number of WAL files to attempt to remove in a snapshot. This, -multiplied by the interval, determines how often snapshots are taken. - -**Default:** `600` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :---------------------------- | -| `--wal-snapshot-size` | `INFLUXDB3_WAL_SNAPSHOT_SIZE` | - ---- - -#### wal-max-write-buffer-size - -Specifies the maximum number of write requests that can be buffered before a -flush must be executed and succeed. - -**Default:** `100000` - -| influxdb3 serve option | Environment variable | -| :---------------------------- | :------------------------------------ | -| `--wal-max-write-buffer-size` | `INFLUXDB3_WAL_MAX_WRITE_BUFFER_SIZE` | - ---- - -#### snapshotted-wal-files-to-keep - -Specifies the number of snapshotted WAL files to retain in the object store. -Flushing the WAL files does not clear the WAL files immediately; -they are deleted when the number of snapshotted WAL files exceeds this number. - -**Default:** `300` - -| influxdb3 serve option | Environment variable | -| :-------------------------------- | :-------------------------------- | -| `--snapshotted-wal-files-to-keep` | `INFLUXDB3_NUM_WAL_FILES_TO_KEEP` | - ---- - -### Compaction - -- [compaction-row-limit](#compaction-row-limit) -- [compaction-max-num-files-per-plan](#compaction-max-num-files-per-plan) -- [compaction-gen2-duration](#compaction-gen2-duration) -- [compaction-multipliers](#compaction-multipliers) -- [compaction-cleanup-wait](#compaction-cleanup-wait) -- [gen1-duration](#gen1-duration) - -#### compaction-row-limit - -Specifies the soft limit for the number of rows per file that the compactor -writes. The compactor may write more rows than this limit. 
- -**Default:** `1000000` - -| influxdb3 serve option | Environment variable | -| :----------------------- | :------------------------------------------ | -| `--compaction-row-limit` | `INFLUXDB3_ENTERPRISE_COMPACTION_ROW_LIMIT` | - ---- - -#### compaction-max-num-files-per-plan - -Sets the maximum number of files included in any compaction plan. - -**Default:** `500` - -| influxdb3 serve option | Environment variable | -| :------------------------------------ | :------------------------------------------------------- | -| `--compaction-max-num-files-per-plan` | `INFLUXDB3_ENTERPRISE_COMPACTION_MAX_NUM_FILES_PER_PLAN` | - ---- - -#### compaction-gen2-duration - -Specifies the duration of the first level of compaction (gen2). Later levels of -compaction are multiples of this duration. This value should be equal to or -greater than the gen1 duration. - -**Default:** `20m` - -| influxdb3 serve option | Environment variable | -| :--------------------------- | :---------------------------------------------- | -| `--compaction-gen2-duration` | `INFLUXDB3_ENTERPRISE_COMPACTION_GEN2_DURATION` | - ---- - -#### compaction-multipliers - -Specifies a comma-separated list of multiples defining the duration of each -level of compaction. The number of elements in the list determines the number of -compaction levels. The first element specifies the duration of the first level -(gen3); subsequent levels are multiples of the previous level. - -**Default:** `3,4,6,5` - -| influxdb3 serve option | Environment variable | -| :------------------------- | :-------------------------------------------- | -| `--compaction-multipliers` | `INFLUXDB3_ENTERPRISE_COMPACTION_MULTIPLIERS` | - ---- - -#### compaction-cleanup-wait - -Specifies the amount of time that the compactor waits after finishing a compaction run -to delete files marked as needing deletion during that compaction run. 
- -**Default:** `10m` - -| influxdb3 serve option | Environment variable | -| :-------------------------- | :--------------------------------------------- | -| `--compaction-cleanup-wait` | `INFLUXDB3_ENTERPRISE_COMPACTION_CLEANUP_WAIT` | - ---- - -#### gen1-duration - -Specifies the duration that Parquet files are arranged into. Data timestamps -land each row into a file of this duration. Supported durations are `1m`, -`5m`, and `10m`. These files are known as "generation 1" files, which the -compactor can merge into larger generations. - -**Default:** `10m` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------------ | -| `--gen1-duration` | `INFLUXDB3_GEN1_DURATION` | - ---- - -### Caching - -- [preemptive-cache-age](#preemptive-cache-age) -- [parquet-mem-cache-size](#parquet-mem-cache-size) -- [parquet-mem-cache-prune-percentage](#parquet-mem-cache-prune-percentage) -- [parquet-mem-cache-prune-interval](#parquet-mem-cache-prune-interval) -- [disable-parquet-mem-cache](#disable-parquet-mem-cache) -- [parquet-mem-cache-query-path-duration](#parquet-mem-cache-query-path-duration) -- [last-cache-eviction-interval](#last-cache-eviction-interval) -- [distinct-cache-eviction-interval](#distinct-cache-eviction-interval) - -#### preemptive-cache-age - -Specifies the interval to prefetch into the Parquet cache during compaction. - -**Default:** `3d` - -| influxdb3 serve option | Environment variable | -| :----------------------- | :------------------------------- | -| `--preemptive-cache-age` | `INFLUXDB3_PREEMPTIVE_CACHE_AGE` | - ---- - -#### parquet-mem-cache-size - - -Specifies the size of the in-memory Parquet cache in megabytes or percentage of total available memory. 
- -**Default:** `20%` - -| influxdb3 serve option | Environment variable | -| :-------------------------- | :---------------------------------- | -| `--parquet-mem-cache-size` | `INFLUXDB3_PARQUET_MEM_CACHE_SIZE` | - -#### parquet-mem-cache-prune-percentage - -Specifies the percentage of entries to prune during a prune operation on the -in-memory Parquet cache. - -**Default:** `0.1` - -| influxdb3 serve option | Environment variable | -| :------------------------------------- | :--------------------------------------------- | -| `--parquet-mem-cache-prune-percentage` | `INFLUXDB3_PARQUET_MEM_CACHE_PRUNE_PERCENTAGE` | - ---- - -#### parquet-mem-cache-prune-interval - -Sets the interval to check if the in-memory Parquet cache needs to be pruned. - -**Default:** `1s` - -| influxdb3 serve option | Environment variable | -| :----------------------------------- | :------------------------------------------- | -| `--parquet-mem-cache-prune-interval` | `INFLUXDB3_PARQUET_MEM_CACHE_PRUNE_INTERVAL` | - ---- - -#### parquet-mem-cache-query-path-duration - -A [duration](/influxdb3/enterprise/reference/glossary/#duration) that specifies -the time window for caching recent Parquet files in memory. Default is `5h`. 
- -Only files containing data with a timestamp between `now` and `now - duration` -are cached when accessed during queries--for example, with the default `5h` setting: - -- Current time: `2024-06-10 15:00:00` -- Cache window: Last 5 hours (`2024-06-10 10:00:00` to now) - -If a query requests data from `2024-06-09` (old) and `2024-06-10 14:00` (recent): - -- **Cached**: Parquet files with data from `2024-06-10 14:00` (within 5-hour window) -- **Not cached**: Parquet files with data from `2024-06-09` (outside 5-hour window) - -| influxdb3 serve option | Environment variable | -| :---------------------------- | :------------------------------------ | -| `--parquet-mem-cache-query-path-duration` | `INFLUXDB3_PARQUET_MEM_CACHE_QUERY_PATH_DURATION` | - ---- - -#### disable-parquet-mem-cache - -Disables the in-memory Parquet cache. By default, the cache is enabled. - -| influxdb3 serve option | Environment variable | -| :---------------------------- | :------------------------------------ | -| `--disable-parquet-mem-cache` | `INFLUXDB3_DISABLE_PARQUET_MEM_CACHE` | - ---- - -#### last-cache-eviction-interval - -Specifies the interval to evict expired entries from the Last-N-Value cache, -expressed as a human-readable duration--for example: `20s`, `1m`, `1h`. - -**Default:** `10s` - -| influxdb3 serve option | Environment variable | -| :------------------------------- | :--------------------------------------- | -| `--last-cache-eviction-interval` | `INFLUXDB3_LAST_CACHE_EVICTION_INTERVAL` | - ---- - -#### last-value-cache-disable-from-history - -Disables populating the last-N-value cache from historical data. -If disabled, the cache is still populated with data from the write-ahead log (WAL). 
- -| influxdb3 serve option | Environment variable | -| :---------------------------------------- | :------------------------------------------------ | -| `--last-value-cache-disable-from-history` | `INFLUXDB3_LAST_VALUE_CACHE_DISABLE_FROM_HISTORY` | - ---- - -#### distinct-cache-eviction-interval - -Specifies the interval to evict expired entries from the distinct value cache, -expressed as a human-readable duration--for example: `20s`, `1m`, `1h`. - -**Default:** `10s` - -| influxdb3 serve option | Environment variable | -| :----------------------------------- | :------------------------------------------- | -| `--distinct-cache-eviction-interval` | `INFLUXDB3_DISTINCT_CACHE_EVICTION_INTERVAL` | - ---- - -#### distinct-value-cache-disable-from-history - -Disables populating the distinct value cache from historical data. -If disabled, the cache is still populated with data from the write-ahead log (WAL). - -| influxdb3 serve option | Environment variable | -| :-------------------------------------------- | :---------------------------------------------------- | -| `--distinct-value-cache-disable-from-history` | `INFLUXDB3_DISTINCT_VALUE_CACHE_DISABLE_FROM_HISTORY` | ---- - -#### query-file-limit - -Limits the number of Parquet files a query can access. -If a query attempts to read more than this limit, {{% product-name %}} returns an error. - -| influxdb3 serve option | Environment variable | -| :--------------------- | :--------------------------- | -| `--query-file-limit` | `INFLUXDB3_QUERY_FILE_LIMIT` | - ---- - -### Processing Engine - -- [plugin-dir](#plugin-dir) -- [virtual-env-location](#virtual-env-location) -- [package-manager](#package-manager) - -#### plugin-dir - -Specifies the local directory that contains Python plugins and their test files. 
- -| influxdb3 serve option | Environment variable | -| :--------------------- | :--------------------- | -| `--plugin-dir` | `INFLUXDB3_PLUGIN_DIR` | - ---- - -#### virtual-env-location - -Specifies the location of the Python virtual environment that the processing -engine uses. - -| influxdb3 serve option | Environment variable | -| :----------------------- | :--------------------- | -| `--virtual-env-location` | `VIRTUAL_ENV` | - ---- - -#### package-manager - -Specifies the Python package manager that the processing engine uses. - -This option supports the following values: - -- `discover` _(default)_: Automatically discover available package manager -- `pip`: Use pip package manager -- `uv`: Use uv package manager - -**Default:** `discover` - -| influxdb3 serve option | Environment variable | -| :--------------------- | :------------------- | -| `--package-manager` | `PACKAGE_MANAGER` | + \ No newline at end of file diff --git a/content/shared/influxctl/release-notes.md b/content/shared/influxctl/release-notes.md index 288f6b6b9..686a3823b 100644 --- a/content/shared/influxctl/release-notes.md +++ b/content/shared/influxctl/release-notes.md @@ -1,3 +1,31 @@ +## 2.10.3 {date="2025-07-30"} + +### Features + +- Add `id` column to the output of the + [`influxctl database list` command](/influxdb3/version/reference/cli/influxctl/database/list/). +- Add [`influxctl table rename` command](/influxdb3/version/reference/cli/influxctl/table/rename/). +- Add user-agent to Granite gRPC requests. + +### Bug Fixes + +- Require the `--template-timeformat` option when the `--template-tags` option + is included when creating a database or table with custom partitions. +- Fix table iceberg enable/disable description. + +### Dependency updates + +- Update `github.com/apache/arrow-go/v18` from 18.3.1 to 18.4.0. +- Update `github.com/docker/docker` from 28.2.2+incompatible to 28.3.3+incompatible. +- Update `github.com/golang-jwt/jwt/v5` from 5.2.2 to 5.2.3. 
+- Update `github.com/jedib0t/go-pretty/v6` from 6.6.7 to 6.6.8. +- Update `golang.org/x/mod` from 0.25.0 to 0.26.0. +- Update `google.golang.org/grpc` from 1.73.0 to 1.74.2. +- Update `helm.sh/helm/v3` from 3.17.3 to 3.18.4. +- Update Go 1.24.5. + +--- + ## v2.10.2 {date="2025-06-30"} ### Features diff --git a/content/shared/influxctl/table/rename.md b/content/shared/influxctl/table/rename.md new file mode 100644 index 000000000..1665deb32 --- /dev/null +++ b/content/shared/influxctl/table/rename.md @@ -0,0 +1,39 @@ + +The `influxctl table rename` command renames a table in the specified database in +an {{< product-name omit=" Clustered" >}} cluster. + +## Usage + + + +```bash +influxctl table rename [flags] +``` + +## Arguments + +| Argument | Description | +| :--------------------- | :----------------------------------- | +| **DATABASE_NAME** | Name of the database the table is in | +| **CURRENT_TABLE_NAME** | Current name of the table | +| **NEW_TABLE_NAME** | New name for the table | + +## Flags + +| Flag | | Description | +| :--- | :--------- | :-------------------------------------------- | +| | `--format` | Output format (`table` _(default)_ or `json`) | +| `-h` | `--help` | Output command help | + +{{% caption %}} +_Also see [`influxctl` global flags](/influxdb3/version/reference/cli/influxctl/#global-flags)._ +{{% /caption %}} + +## Examples + + + +```bash +# Rename the "example-tb" table to "example_tb" +influxctl table rename mydb example-tb example_tb +``` diff --git a/content/shared/influxdb3-admin/mcp-server.md b/content/shared/influxdb3-admin/mcp-server.md index edde7adc9..8da885aff 100644 --- a/content/shared/influxdb3-admin/mcp-server.md +++ b/content/shared/influxdb3-admin/mcp-server.md @@ -76,11 +76,6 @@ Set the following environment variables when you start the MCP server: {{% /show-in %}} -### Other MCP server configuration options - -- **MCP_SERVER_HOST**: Customize the host of the MCP server. The default is `127.0.0.1`. 
-- **MCP_SERVER_PORT**: Customize the port the MCP server uses. The default is `8080`. - ## Configure your LLM agent to run the MCP server To run the MCP, user either Node.js and `npm` or Docker to run the server. diff --git a/content/shared/influxdb3-cli/config-options.md b/content/shared/influxdb3-cli/config-options.md new file mode 100644 index 000000000..268b3ac93 --- /dev/null +++ b/content/shared/influxdb3-cli/config-options.md @@ -0,0 +1,1702 @@ + +{{< product-name >}} lets you customize your server configuration by using +`influxdb3 serve` command options or by setting environment variables. + +## Configure your server + +Pass configuration options to the `influxdb3 serve` server using either command +options or environment variables. Command options take precedence over +environment variables. + +##### Example `influxdb3 serve` command options + + + +```sh +influxdb3 serve \ + --node-id node0 \ +{{% show-in "enterprise" %}} --cluster-id cluster0 \ + --license-email example@email.com \{{% /show-in %}} + --object-store file \ + --data-dir ~/.influxdb3 \ + --log-filter info +``` + +##### Example environment variables + + + +```sh +{{% show-in "enterprise" %}}export INFLUXDB3_ENTERPRISE_LICENSE_EMAIL=example@email.com +export INFLUXDB3_ENTERPRISE_CLUSTER_ID=cluster0 +{{% /show-in %}}export INFLUXDB3_NODE_IDENTIFIER_PREFIX=my-node +export INFLUXDB3_OBJECT_STORE=file +export INFLUXDB3_DB_DIR=~/.influxdb3 +export LOG_FILTER=info + +influxdb3 serve +``` + +## Server configuration options + +- [General](#general) +{{% show-in "enterprise" %}} - [cluster-id](#cluster-id){{% /show-in %}} + - [data-dir](#data-dir) +{{% show-in "enterprise" %}} - [license-email](#license-email) + - [license-file](#license-file) + - [mode](#mode){{% /show-in %}} + - [node-id](#node-id) +{{% show-in "enterprise" %}} - [node-id-from-env](#node-id-from-env){{% /show-in %}} + - [object-store](#object-store) + - [tls-key](#tls-key) + - [tls-cert](#tls-cert) + - 
[tls-minimum-version](#tls-minimum-version) + - [without-auth](#without-auth) + - [disable-authz](#disable-authz) +- [AWS](#aws) + - [aws-access-key-id](#aws-access-key-id) + - [aws-secret-access-key](#aws-secret-access-key) + - [aws-default-region](#aws-default-region) + - [aws-endpoint](#aws-endpoint) + - [aws-session-token](#aws-session-token) + - [aws-allow-http](#aws-allow-http) + - [aws-skip-signature](#aws-skip-signature) +- [Google Cloud Service](#google-cloud-service) + - [google-service-account](#google-service-account) +- [Microsoft Azure](#microsoft-azure) + - [azure-storage-account](#azure-storage-account) + - [azure-storage-access-key](#azure-storage-access-key) +- [Object Storage](#object-storage) + - [bucket](#bucket) + - [object-store-connection-limit](#object-store-connection-limit) + - [object-store-http2-only](#object-store-http2-only) + - [object-store-http2-max-frame-size](#object-store-http2-max-frame-size) + - [object-store-max-retries](#object-store-max-retries) + - [object-store-retry-timeout](#object-store-retry-timeout) + - [object-store-cache-endpoint](#object-store-cache-endpoint) +- [Logs](#logs) + - [log-filter](#log-filter) + - [log-destination](#log-destination) + - [log-format](#log-format) + - [query-log-size](#query-log-size) +- [Traces](#traces) + - [traces-exporter](#traces-exporter) + - [traces-exporter-jaeger-agent-host](#traces-exporter-jaeger-agent-host) + - [traces-exporter-jaeger-agent-port](#traces-exporter-jaeger-agent-port) + - [traces-exporter-jaeger-service-name](#traces-exporter-jaeger-service-name) + - [traces-exporter-jaeger-trace-context-header-name](#traces-exporter-jaeger-trace-context-header-name) + - [traces-jaeger-debug-name](#traces-jaeger-debug-name) + - [traces-jaeger-tags](#traces-jaeger-tags) + - [traces-jaeger-max-msgs-per-second](#traces-jaeger-max-msgs-per-second) +- [DataFusion](#datafusion) + - [datafusion-num-threads](#datafusion-num-threads) + - 
[datafusion-runtime-type](#datafusion-runtime-type) + - [datafusion-runtime-disable-lifo-slot](#datafusion-runtime-disable-lifo-slot) + - [datafusion-runtime-event-interval](#datafusion-runtime-event-interval) + - [datafusion-runtime-global-queue-interval](#datafusion-runtime-global-queue-interval) + - [datafusion-runtime-max-blocking-threads](#datafusion-runtime-max-blocking-threads) + - [datafusion-runtime-max-io-events-per-tick](#datafusion-runtime-max-io-events-per-tick) + - [datafusion-runtime-thread-keep-alive](#datafusion-runtime-thread-keep-alive) + - [datafusion-runtime-thread-priority](#datafusion-runtime-thread-priority) + - [datafusion-max-parquet-fanout](#datafusion-max-parquet-fanout) + - [datafusion-use-cached-parquet-loader](#datafusion-use-cached-parquet-loader) + - [datafusion-config](#datafusion-config) +- [HTTP](#http) + - [max-http-request-size](#max-http-request-size) + - [http-bind](#http-bind) + - [admin-token-recovery-http-bind](#admin-token-recovery-http-bind) +- [Memory](#memory) + - [exec-mem-pool-bytes](#exec-mem-pool-bytes) + - [buffer-mem-limit-mb](#buffer-mem-limit-mb) + - [force-snapshot-mem-threshold](#force-snapshot-mem-threshold) +- [Write-Ahead Log (WAL)](#write-ahead-log-wal) + - [wal-flush-interval](#wal-flush-interval) + - [wal-snapshot-size](#wal-snapshot-size) + - [wal-max-write-buffer-size](#wal-max-write-buffer-size) + - [snapshotted-wal-files-to-keep](#snapshotted-wal-files-to-keep) + - [wal-replay-fail-on-error](#wal-replay-fail-on-error) + - [wal-replay-concurrency-limit](#wal-replay-concurrency-limit) +- [Compaction](#compaction) +{{% show-in "enterprise" %}} - [compaction-row-limit](#compaction-row-limit) + - [compaction-max-num-files-per-plan](#compaction-max-num-files-per-plan) + - [compaction-gen2-duration](#compaction-gen2-duration) + - [compaction-multipliers](#compaction-multipliers) + - [compaction-cleanup-wait](#compaction-cleanup-wait) + - [compaction-check-interval](#compaction-check-interval){{% /show-in 
%}} + - [gen1-duration](#gen1-duration) +- [Caching](#caching) + - [preemptive-cache-age](#preemptive-cache-age) + - [parquet-mem-cache-size](#parquet-mem-cache-size) + - [parquet-mem-cache-prune-percentage](#parquet-mem-cache-prune-percentage) + - [parquet-mem-cache-prune-interval](#parquet-mem-cache-prune-interval) + - [parquet-mem-cache-query-path-duration](#parquet-mem-cache-query-path-duration) + - [disable-parquet-mem-cache](#disable-parquet-mem-cache) + - [table-index-cache-max-entries](#table-index-cache-max-entries) + - [table-index-cache-concurrency-limit](#table-index-cache-concurrency-limit) +{{% show-in "enterprise" %}} - [last-value-cache-disable-from-history](#last-value-cache-disable-from-history){{% /show-in %}} + - [last-cache-eviction-interval](#last-cache-eviction-interval) +{{% show-in "enterprise" %}} - [distinct-value-cache-disable-from-history](#distinct-value-cache-disable-from-history){{% /show-in %}} + - [distinct-cache-eviction-interval](#distinct-cache-eviction-interval) + - [query-file-limit](#query-file-limit) +- [Processing Engine](#processing-engine) + - [plugin-dir](#plugin-dir) + - [virtual-env-location](#virtual-env-location) + - [package-manager](#package-manager) +{{% show-in "enterprise" %}} +- [Cluster Management](#cluster-management) + - [replication-interval](#replication-interval) + - [catalog-sync-interval](#catalog-sync-interval) + - [wait-for-running-ingestor](#wait-for-running-ingestor) +- [Resource Limits](#resource-limits) + - [num-cores](#num-cores) + - [num-database-limit](#num-database-limit) + - [num-table-limit](#num-table-limit) + - [num-total-columns-per-table-limit](#num-total-columns-per-table-limit) +{{% /show-in %}} +- [Data Lifecycle Management](#data-lifecycle-management) + - [gen1-lookback-duration](#gen1-lookback-duration) + - [retention-check-interval](#retention-check-interval) + - [delete-grace-period](#delete-grace-period) + - [hard-delete-default-duration](#hard-delete-default-duration) +- 
[Telemetry](#telemetry) + - [telemetry-disable-upload](#telemetry-disable-upload) + - [telemetry-endpoint](#telemetry-endpoint) +- [TCP Listeners](#tcp-listeners) + - [tcp-listener-file-path](#tcp-listener-file-path) + - [admin-token-recovery-tcp-listener-file-path](#admin-token-recovery-tcp-listener-file-path) +{{% show-in "enterprise" %}} +- [Experimental Features](#experimental-features) + - [use-pacha-tree](#use-pacha-tree) +{{% /show-in %}} + +--- + +### General + +{{% show-in "enterprise" %}} +- [cluster-id](#cluster-id) +{{% /show-in %}} +- [data-dir](#data-dir) +{{% show-in "enterprise" %}} +- [license-email](#license-email) +- [license-file](#license-file) +- [mode](#mode) +{{% /show-in %}} +- [node-id](#node-id) +{{% show-in "enterprise" %}} +- [node-id-from-env](#node-id-from-env) +{{% /show-in %}} +- [object-store](#object-store) +- [query-file-limit](#query-file-limit) + +{{% show-in "enterprise" %}} +#### cluster-id + +Specifies the cluster identifier that prefixes the object store path for the Enterprise Catalog. +This value must be different than the [`--node-id`](#node-id) value. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :--------------------------------- | +| `--cluster-id` | `INFLUXDB3_ENTERPRISE_CLUSTER_ID` | + +--- +{{% /show-in %}} + +#### data-dir + +For the `file` object store, defines the location InfluxDB 3 uses to store files locally. +Required when using the `file` [object store](#object-store). + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--data-dir` | `INFLUXDB3_DB_DIR` | + +--- + +{{% show-in "enterprise" %}} +#### license-email + +Specifies the email address to associate with your InfluxDB 3 Enterprise license +and automatically responds to the interactive email prompt when the server starts. +This option is mutually exclusive with [license-file](#license-file). 
+ +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------------------- | +| `--license-email` | `INFLUXDB3_ENTERPRISE_LICENSE_EMAIL` | + +--- + +#### license-file + +Specifies the path to a license file for InfluxDB 3 Enterprise. When provided, the license +file's contents are used instead of requesting a new license. +This option is mutually exclusive with [license-email](#license-email). + +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------------------- | +| `--license-file` | `INFLUXDB3_ENTERPRISE_LICENSE_FILE` | + +--- + +#### mode + +Sets the mode to start the server in. + +This option supports the following values: + +- `all` _(default)_: Enables all server modes +- `ingest`: Enables only data ingest capabilities +- `query`: Enables only query capabilities +- `compact`: Enables only compaction processes +- `process`: Enables only data processing capabilities + +You can specify multiple modes using a comma-delimited list (for example, `ingest,query`). + +**Default:** `all` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :-------------------------- | +| `--mode` | `INFLUXDB3_ENTERPRISE_MODE` | + +--- +{{% /show-in %}} + +#### node-id + +Specifies the node identifier used as a prefix in all object store file paths. +This should be unique for any hosts sharing the same object store +configuration--for example, the same bucket. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :--------------------------------- | +| `--node-id` | `INFLUXDB3_NODE_IDENTIFIER_PREFIX` | + +{{% show-in "enterprise" %}} +#### node-id-from-env + +Specifies the node identifier used as a prefix in all object store file paths. +Takes the name of an environment variable as an argument and uses the value of that environment variable as the node identifier. +This option cannot be used with the `--node-id` option. 
+ +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------------------- | +| `--node-id-from-env` | `INFLUXDB3_NODE_IDENTIFIER_FROM_ENV` | + +##### Example using --node-id-from-env + +```bash +export DATABASE_NODE=node0 && influxdb3 serve \ + --node-id-from-env DATABASE_NODE \ + --cluster-id cluster0 \ + --object-store file \ + --data-dir ~/.influxdb3/data +``` + +--- +{{% /show-in %}} + +#### object-store + +Specifies which object storage to use to store Parquet files. +This option supports the following values: + +- `memory`: Effectively no object persistence +- `memory-throttled`: Like `memory` but with latency and throughput that somewhat resembles a cloud object store +- `file`: Stores objects in the local filesystem (must also set `--data-dir`) +- `s3`: Amazon S3 (must also set `--bucket`, `--aws-access-key-id`, `--aws-secret-access-key`, and possibly `--aws-default-region`) +- `google`: Google Cloud Storage (must also set `--bucket` and `--google-service-account`) +- `azure`: Microsoft Azure blob storage (must also set `--bucket`, `--azure-storage-account`, and `--azure-storage-access-key`) + +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------- | +| `--object-store` | `INFLUXDB3_OBJECT_STORE` | + +--- + +#### tls-key + +The path to a key file for TLS to be enabled. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :--------------------- | +| `--tls-key` | `INFLUXDB3_TLS_KEY` | + +--- + +#### tls-cert + +The path to a cert file for TLS to be enabled. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :--------------------- | +| `--tls-cert` | `INFLUXDB3_TLS_CERT` | + +--- + +#### tls-minimum-version + +The minimum version for TLS. +Valid values are `tls-1.2` or `tls-1.3`. +Default is `tls-1.2`. 
+ +| influxdb3 serve option | Environment variable | +| :---------------------- | :----------------------- | +| `--tls-minimum-version` | `INFLUXDB3_TLS_MINIMUM_VERSION` | + +--- + +#### without-auth + +Disables authentication for all server actions (CLI commands and API requests). +The server processes all requests without requiring tokens or authentication. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :---------------------------- | +| `--without-auth` | `INFLUXDB3_START_WITHOUT_AUTH`| + +--- + +#### disable-authz + +Optionally disable authz by passing in a comma separated list of resources. +Valid values are `health`, `ping`, and `metrics`. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------- | +| `--disable-authz` | `INFLUXDB3_DISABLE_AUTHZ`| + +--- + +### AWS + +- [aws-access-key-id](#aws-access-key-id) +- [aws-secret-access-key](#aws-secret-access-key) +- [aws-default-region](#aws-default-region) +- [aws-endpoint](#aws-endpoint) +- [aws-session-token](#aws-session-token) +- [aws-allow-http](#aws-allow-http) +- [aws-skip-signature](#aws-skip-signature) + +#### aws-access-key-id + +When using Amazon S3 as the object store, set this to an access key that has +permission to read from and write to the specified S3 bucket. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--aws-access-key-id` | `AWS_ACCESS_KEY_ID` | + +--- + +#### aws-secret-access-key + +When using Amazon S3 as the object store, set this to the secret access key that +goes with the specified access key ID. + +| influxdb3 serve option | Environment variable | +| :------------------------ | :---------------------- | +| `--aws-secret-access-key` | `AWS_SECRET_ACCESS_KEY` | + +--- + +#### aws-default-region + +When using Amazon S3 as the object store, set this to the region that goes with +the specified bucket if different from the fallback value. 
+ +**Default:** `us-east-1` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--aws-default-region` | `AWS_DEFAULT_REGION` | + +--- + +#### aws-endpoint + +When using an Amazon S3 compatibility storage service, set this to the endpoint. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--aws-endpoint` | `AWS_ENDPOINT` | + +--- + +#### aws-session-token + +When using Amazon S3 as an object store, set this to the session token. This is +handy when using a federated login or SSO and fetching credentials via the UI. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--aws-session-token` | `AWS_SESSION_TOKEN` | + +--- + +#### aws-allow-http + +Allows unencrypted HTTP connections to AWS. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--aws-allow-http` | `AWS_ALLOW_HTTP` | + +--- + +#### aws-skip-signature + +If enabled, S3 object stores do not fetch credentials and do not sign requests. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--aws-skip-signature` | `AWS_SKIP_SIGNATURE` | + +--- + +### Google Cloud Service + +- [google-service-account](#google-service-account) + +#### google-service-account + +When using Google Cloud Storage as the object store, set this to the path to the +JSON file that contains the Google credentials. 
+ +| influxdb3 serve option | Environment variable | +| :------------------------- | :----------------------- | +| `--google-service-account` | `GOOGLE_SERVICE_ACCOUNT` | + +--- + +### Microsoft Azure + +- [azure-storage-account](#azure-storage-account) +- [azure-storage-access-key](#azure-storage-access-key) + +#### azure-storage-account + +When using Microsoft Azure as the object store, set this to the name you see +when navigating to **All Services > Storage accounts > `[name]`**. + +| influxdb3 serve option | Environment variable | +| :------------------------ | :---------------------- | +| `--azure-storage-account` | `AZURE_STORAGE_ACCOUNT` | + +--- + +#### azure-storage-access-key + +When using Microsoft Azure as the object store, set this to one of the Key +values in the Storage account's **Settings > Access keys**. + +| influxdb3 serve option | Environment variable | +| :--------------------------- | :------------------------- | +| `--azure-storage-access-key` | `AZURE_STORAGE_ACCESS_KEY` | + +--- + +### Object Storage + +- [bucket](#bucket) +- [object-store-connection-limit](#object-store-connection-limit) +- [object-store-http2-only](#object-store-http2-only) +- [object-store-http2-max-frame-size](#object-store-http2-max-frame-size) +- [object-store-max-retries](#object-store-max-retries) +- [object-store-retry-timeout](#object-store-retry-timeout) +- [object-store-cache-endpoint](#object-store-cache-endpoint) + +#### bucket + +Sets the name of the object storage bucket to use. Must also set +`--object-store` to a cloud object storage for this option to take effect. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--bucket` | `INFLUXDB3_BUCKET` | + +--- + +#### object-store-connection-limit + +When using a network-based object store, limits the number of connections to +this value. 
 + +**Default:** `16` + +| influxdb3 serve option | Environment variable | +| :-------------------------------- | :------------------------------ | +| `--object-store-connection-limit` | `OBJECT_STORE_CONNECTION_LIMIT` | + +--- + +#### object-store-http2-only + +Forces HTTP/2 connections to network-based object stores. + +| influxdb3 serve option | Environment variable | +| :-------------------------- | :------------------------ | +| `--object-store-http2-only` | `OBJECT_STORE_HTTP2_ONLY` | + +--- + +#### object-store-http2-max-frame-size + +Sets the maximum frame size (in bytes/octets) for HTTP/2 connections. + +| influxdb3 serve option | Environment variable | +| :------------------------------------ | :---------------------------------- | +| `--object-store-http2-max-frame-size` | `OBJECT_STORE_HTTP2_MAX_FRAME_SIZE` | + +--- + +#### object-store-max-retries + +Defines the maximum number of times to retry a request. + +| influxdb3 serve option | Environment variable | +| :--------------------------- | :------------------------- | +| `--object-store-max-retries` | `OBJECT_STORE_MAX_RETRIES` | + +--- + +#### object-store-retry-timeout + +Specifies the maximum length of time from the initial request after which no +further retries are attempted. + +| influxdb3 serve option | Environment variable | +| :----------------------------- | :--------------------------- | +| `--object-store-retry-timeout` | `OBJECT_STORE_RETRY_TIMEOUT` | + +--- + +#### object-store-cache-endpoint + +Sets the endpoint of an S3-compatible, HTTP/2-enabled object store cache. + +| influxdb3 serve option | Environment variable | +| :------------------------------ | :---------------------------- | +| `--object-store-cache-endpoint` | `OBJECT_STORE_CACHE_ENDPOINT` | + +--- + +### Logs + +- [log-filter](#log-filter) +- [log-destination](#log-destination) +- [log-format](#log-format) +- [query-log-size](#query-log-size) + +#### log-filter + +Sets the filter directive for logs. 
+ +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--log-filter` | `LOG_FILTER` | + +--- + +#### log-destination + +Specifies the destination for logs. + +**Default:** `stdout` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--log-destination` | `LOG_DESTINATION` | + +--- + +#### log-format + +Defines the message format for logs. + +This option supports the following values: + +- `full` _(default)_ + +**Default:** `full` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--log-format` | `LOG_FORMAT` | + +--- + +#### query-log-size + +Defines the size of the query log. Up to this many queries remain in the +log before older queries are evicted to make room for new ones. + +**Default:** `1000` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------------- | +| `--query-log-size` | `INFLUXDB3_QUERY_LOG_SIZE` | + +--- + +### Traces + +- [traces-exporter](#traces-exporter) +- [traces-exporter-jaeger-agent-host](#traces-exporter-jaeger-agent-host) +- [traces-exporter-jaeger-agent-port](#traces-exporter-jaeger-agent-port) +- [traces-exporter-jaeger-service-name](#traces-exporter-jaeger-service-name) +- [traces-exporter-jaeger-trace-context-header-name](#traces-exporter-jaeger-trace-context-header-name) +- [traces-jaeger-debug-name](#traces-jaeger-debug-name) +- [traces-jaeger-tags](#traces-jaeger-tags) +- [traces-jaeger-max-msgs-per-second](#traces-jaeger-max-msgs-per-second) + +#### traces-exporter + +Sets the type of tracing exporter. + +**Default:** `none` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--traces-exporter` | `TRACES_EXPORTER` | + +--- + +#### traces-exporter-jaeger-agent-host + +Specifies the Jaeger agent network hostname for tracing. 
+ +**Default:** `0.0.0.0` + +| influxdb3 serve option | Environment variable | +| :------------------------------------ | :---------------------------------- | +| `--traces-exporter-jaeger-agent-host` | `TRACES_EXPORTER_JAEGER_AGENT_HOST` | + +--- + +#### traces-exporter-jaeger-agent-port + +Defines the Jaeger agent network port for tracing. + +**Default:** `6831` + +| influxdb3 serve option | Environment variable | +| :------------------------------------ | :---------------------------------- | +| `--traces-exporter-jaeger-agent-port` | `TRACES_EXPORTER_JAEGER_AGENT_PORT` | + +--- + +#### traces-exporter-jaeger-service-name + +Sets the Jaeger service name for tracing. + +**Default:** `iox-conductor` + +| influxdb3 serve option | Environment variable | +| :-------------------------------------- | :------------------------------------ | +| `--traces-exporter-jaeger-service-name` | `TRACES_EXPORTER_JAEGER_SERVICE_NAME` | + +--- + +#### traces-exporter-jaeger-trace-context-header-name + +Specifies the header name used for passing trace context. + +**Default:** `uber-trace-id` + +| influxdb3 serve option | Environment variable | +| :--------------------------------------------------- | :------------------------------------------------- | +| `--traces-exporter-jaeger-trace-context-header-name` | `TRACES_EXPORTER_JAEGER_TRACE_CONTEXT_HEADER_NAME` | + +--- + +#### traces-jaeger-debug-name + +Specifies the header name used for force sampling in tracing. + +**Default:** `jaeger-debug-id` + +| influxdb3 serve option | Environment variable | +| :--------------------------- | :---------------------------------- | +| `--traces-jaeger-debug-name` | `TRACES_EXPORTER_JAEGER_DEBUG_NAME` | + +--- + +#### traces-jaeger-tags + +Defines a set of `key=value` pairs to annotate tracing spans with. 
+ +| influxdb3 serve option | Environment variable | +| :--------------------- | :---------------------------- | +| `--traces-jaeger-tags` | `TRACES_EXPORTER_JAEGER_TAGS` | + +--- + +#### traces-jaeger-max-msgs-per-second + +Specifies the maximum number of messages sent to a Jaeger service per second. + +**Default:** `1000` + +| influxdb3 serve option | Environment variable | +| :------------------------------------ | :---------------------------------- | +| `--traces-jaeger-max-msgs-per-second` | `TRACES_JAEGER_MAX_MSGS_PER_SECOND` | + +--- + +### DataFusion + +- [datafusion-num-threads](#datafusion-num-threads) +- [datafusion-runtime-type](#datafusion-runtime-type) +- [datafusion-runtime-disable-lifo-slot](#datafusion-runtime-disable-lifo-slot) +- [datafusion-runtime-event-interval](#datafusion-runtime-event-interval) +- [datafusion-runtime-global-queue-interval](#datafusion-runtime-global-queue-interval) +- [datafusion-runtime-max-blocking-threads](#datafusion-runtime-max-blocking-threads) +- [datafusion-runtime-max-io-events-per-tick](#datafusion-runtime-max-io-events-per-tick) +- [datafusion-runtime-thread-keep-alive](#datafusion-runtime-thread-keep-alive) +- [datafusion-runtime-thread-priority](#datafusion-runtime-thread-priority) +- [datafusion-max-parquet-fanout](#datafusion-max-parquet-fanout) +- [datafusion-use-cached-parquet-loader](#datafusion-use-cached-parquet-loader) +- [datafusion-config](#datafusion-config) + +#### datafusion-num-threads + +Sets the maximum number of DataFusion runtime threads to use. + +| influxdb3 serve option | Environment variable | +| :------------------------- | :--------------------------------- | +| `--datafusion-num-threads` | `INFLUXDB3_DATAFUSION_NUM_THREADS` | + +--- + +#### datafusion-runtime-type + +Specifies the DataFusion tokio runtime type. 
+ +This option supports the following values: + +- `current-thread` +- `multi-thread` _(default)_ +- `multi-thread-alt` + +**Default:** `multi-thread` + +| influxdb3 serve option | Environment variable | +| :-------------------------- | :---------------------------------- | +| `--datafusion-runtime-type` | `INFLUXDB3_DATAFUSION_RUNTIME_TYPE` | + +--- + +#### datafusion-runtime-disable-lifo-slot + +Disables the LIFO slot of the DataFusion runtime. + +This option supports the following values: + +- `true` +- `false` + +| influxdb3 serve option | Environment variable | +| :--------------------------------------- | :----------------------------------------------- | +| `--datafusion-runtime-disable-lifo-slot` | `INFLUXDB3_DATAFUSION_RUNTIME_DISABLE_LIFO_SLOT` | + +--- + +#### datafusion-runtime-event-interval + +Sets the number of scheduler ticks after which the scheduler of the DataFusion +tokio runtime polls for external events--for example: timers, I/O. + +| influxdb3 serve option | Environment variable | +| :------------------------------------ | :-------------------------------------------- | +| `--datafusion-runtime-event-interval` | `INFLUXDB3_DATAFUSION_RUNTIME_EVENT_INTERVAL` | + +--- + +#### datafusion-runtime-global-queue-interval + +Sets the number of scheduler ticks after which the scheduler of the DataFusion +runtime polls the global task queue. + +| influxdb3 serve option | Environment variable | +| :------------------------------------------- | :--------------------------------------------------- | +| `--datafusion-runtime-global-queue-interval` | `INFLUXDB3_DATAFUSION_RUNTIME_GLOBAL_QUEUE_INTERVAL` | + +--- + +#### datafusion-runtime-max-blocking-threads + +Specifies the limit for additional threads spawned by the DataFusion runtime. 
+ +| influxdb3 serve option | Environment variable | +| :------------------------------------------ | :-------------------------------------------------- | +| `--datafusion-runtime-max-blocking-threads` | `INFLUXDB3_DATAFUSION_RUNTIME_MAX_BLOCKING_THREADS` | + +--- + +#### datafusion-runtime-max-io-events-per-tick + +Configures the maximum number of events processed per tick by the tokio +DataFusion runtime. + +| influxdb3 serve option | Environment variable | +| :-------------------------------------------- | :---------------------------------------------------- | +| `--datafusion-runtime-max-io-events-per-tick` | `INFLUXDB3_DATAFUSION_RUNTIME_MAX_IO_EVENTS_PER_TICK` | + +--- + +#### datafusion-runtime-thread-keep-alive + +Sets a custom timeout for a thread in the blocking pool of the tokio DataFusion +runtime. + +| influxdb3 serve option | Environment variable | +| :--------------------------------------- | :----------------------------------------------- | +| `--datafusion-runtime-thread-keep-alive` | `INFLUXDB3_DATAFUSION_RUNTIME_THREAD_KEEP_ALIVE` | + +--- + +#### datafusion-runtime-thread-priority + +Sets the thread priority for tokio DataFusion runtime workers. + +**Default:** `10` + +| influxdb3 serve option | Environment variable | +| :------------------------------------- | :--------------------------------------------- | +| `--datafusion-runtime-thread-priority` | `INFLUXDB3_DATAFUSION_RUNTIME_THREAD_PRIORITY` | + +--- + +#### datafusion-max-parquet-fanout + +When multiple parquet files are required in a sorted way +(deduplication for example), specifies the maximum fanout. + +**Default:** `1000` + +| influxdb3 serve option | Environment variable | +| :-------------------------------- | :---------------------------------------- | +| `--datafusion-max-parquet-fanout` | `INFLUXDB3_DATAFUSION_MAX_PARQUET_FANOUT` | + +--- + +#### datafusion-use-cached-parquet-loader + +Uses a cached parquet loader when reading parquet files from the object store. 
+ +| influxdb3 serve option | Environment variable | +| :--------------------------------------- | :----------------------------------------------- | +| `--datafusion-use-cached-parquet-loader` | `INFLUXDB3_DATAFUSION_USE_CACHED_PARQUET_LOADER` | + +--- + +#### datafusion-config + +Provides custom configuration to DataFusion as a comma-separated list of +`key:value` pairs. + +| influxdb3 serve option | Environment variable | +| :--------------------- | :---------------------------- | +| `--datafusion-config` | `INFLUXDB3_DATAFUSION_CONFIG` | + +--- + +### HTTP + +- [max-http-request-size](#max-http-request-size) +- [http-bind](#http-bind) +- [admin-token-recovery-http-bind](#admin-token-recovery-http-bind) + +#### max-http-request-size + +Specifies the maximum size of HTTP requests. + +**Default:** `10485760` + +| influxdb3 serve option | Environment variable | +| :------------------------ | :-------------------------------- | +| `--max-http-request-size` | `INFLUXDB3_MAX_HTTP_REQUEST_SIZE` | + +--- + +#### http-bind + +Defines the address on which InfluxDB serves HTTP API requests. + +**Default:** `0.0.0.0:8181` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------------- | +| `--http-bind` | `INFLUXDB3_HTTP_BIND_ADDR` | + +--- + +#### admin-token-recovery-http-bind + +Enables an admin token recovery HTTP server on a separate port. This server allows regenerating lost admin tokens without existing authentication. The server automatically shuts down after a successful token regeneration. + +> [!Warning] +> This option creates an unauthenticated endpoint that can regenerate admin tokens. Only use this when you have lost access to your admin token and ensure the server is only accessible from trusted networks. 
+
+**Default:** `127.0.0.1:8182` (when enabled)
+
+| influxdb3 serve option | Environment variable |
+| :--------------------- | :------------------- |
+| `--admin-token-recovery-http-bind` | `INFLUXDB3_ADMIN_TOKEN_RECOVERY_HTTP_BIND` |
+
+##### Example usage
+
+```bash
+# Start server with recovery endpoint
+influxdb3 serve --admin-token-recovery-http-bind
+
+# In another terminal, regenerate the admin token
+influxdb3 create token --admin --regenerate --host http://127.0.0.1:8182
+```
+
+---
+
+### Memory
+
+- [exec-mem-pool-bytes](#exec-mem-pool-bytes)
+- [buffer-mem-limit-mb](#buffer-mem-limit-mb)
+- [force-snapshot-mem-threshold](#force-snapshot-mem-threshold)
+
+#### exec-mem-pool-bytes
+
+Specifies the size of memory pool used during query execution.
+Can be given as absolute value in bytes or as a percentage of the total available memory--for
+example: `8000000000` or `10%`.
+
+{{% show-in "core" %}}**Default:** `8589934592`{{% /show-in %}}
+{{% show-in "enterprise" %}}**Default:** `20%`{{% /show-in %}}
+
+| influxdb3 serve option | Environment variable |
+| :---------------------- | :------------------------------ |
+| `--exec-mem-pool-bytes` | `INFLUXDB3_EXEC_MEM_POOL_BYTES` |
+
+{{% show-in "core" %}}
+---
+
+#### buffer-mem-limit-mb
+
+Specifies the size limit of the buffered data in MB. If this limit is exceeded,
+the server forces a snapshot.
+
+**Default:** `5000`
+
+| influxdb3 serve option | Environment variable |
+| :---------------------- | :------------------------------ |
+| `--buffer-mem-limit-mb` | `INFLUXDB3_BUFFER_MEM_LIMIT_MB` |
+
+{{% /show-in %}}
+
+---
+
+#### force-snapshot-mem-threshold
+
+Specifies the threshold for the internal memory buffer. Supports either a
+percentage (portion of available memory) or absolute value in MB--for example: `70%` or `1000`. 
+ +{{% show-in "core" %}}**Default:** `70%`{{% /show-in %}} +{{% show-in "enterprise" %}}**Default:** `50%`{{% /show-in %}} + +| influxdb3 serve option | Environment variable | +| :------------------------------- | :--------------------------------------- | +| `--force-snapshot-mem-threshold` | `INFLUXDB3_FORCE_SNAPSHOT_MEM_THRESHOLD` | + +--- + +### Write-Ahead Log (WAL) + +- [wal-flush-interval](#wal-flush-interval) +- [wal-snapshot-size](#wal-snapshot-size) +- [wal-max-write-buffer-size](#wal-max-write-buffer-size) +- [snapshotted-wal-files-to-keep](#snapshotted-wal-files-to-keep) +- [wal-replay-fail-on-error](#wal-replay-fail-on-error) +- [wal-replay-concurrency-limit](#wal-replay-concurrency-limit) + +#### wal-flush-interval + +Specifies the interval to flush buffered data to a WAL file. Writes that wait +for WAL confirmation take up to this interval to complete. + +**Default:** `1s` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :----------------------------- | +| `--wal-flush-interval` | `INFLUXDB3_WAL_FLUSH_INTERVAL` | + +--- + +#### wal-snapshot-size + +Defines the number of WAL files to attempt to remove in a snapshot. This, +multiplied by the interval, determines how often snapshots are taken. + +**Default:** `600` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :---------------------------- | +| `--wal-snapshot-size` | `INFLUXDB3_WAL_SNAPSHOT_SIZE` | + +--- + +#### wal-max-write-buffer-size + +Specifies the maximum number of write requests that can be buffered before a +flush must be executed and succeed. + +**Default:** `100000` + +| influxdb3 serve option | Environment variable | +| :---------------------------- | :------------------------------------ | +| `--wal-max-write-buffer-size` | `INFLUXDB3_WAL_MAX_WRITE_BUFFER_SIZE` | + +--- + +#### snapshotted-wal-files-to-keep + +Specifies the number of snapshotted WAL files to retain in the object store. 
+Flushing the WAL files does not clear the WAL files immediately; +they are deleted when the number of snapshotted WAL files exceeds this number. + +**Default:** `300` + +| influxdb3 serve option | Environment variable | +| :-------------------------------- | :-------------------------------- | +| `--snapshotted-wal-files-to-keep` | `INFLUXDB3_NUM_WAL_FILES_TO_KEEP` | + +--- + +#### wal-replay-fail-on-error + +Determines whether WAL replay should fail when encountering errors. + +**Default:** `false` + +| influxdb3 serve option | Environment variable | +| :--------------------------- | :------------------------------------- | +| `--wal-replay-fail-on-error` | `INFLUXDB3_WAL_REPLAY_FAIL_ON_ERROR` | + +--- + +#### wal-replay-concurrency-limit + +Sets the maximum number of concurrent WAL replay operations. + +**Default:** `16` + +| influxdb3 serve option | Environment variable | +| :--------------------------------- | :------------------------------------------ | +| `--wal-replay-concurrency-limit` | `INFLUXDB3_WAL_REPLAY_CONCURRENCY_LIMIT` | + +--- + +### Compaction + +{{% show-in "enterprise" %}} +- [compaction-row-limit](#compaction-row-limit) +- [compaction-max-num-files-per-plan](#compaction-max-num-files-per-plan) +- [compaction-gen2-duration](#compaction-gen2-duration) +- [compaction-multipliers](#compaction-multipliers) +- [compaction-cleanup-wait](#compaction-cleanup-wait) +- [compaction-check-interval](#compaction-check-interval) +{{% /show-in %}} +- [gen1-duration](#gen1-duration) + +{{% show-in "enterprise" %}} +#### compaction-row-limit + +Specifies the soft limit for the number of rows per file that the compactor +writes. The compactor may write more rows than this limit. 
+ +**Default:** `1000000` + +| influxdb3 serve option | Environment variable | +| :----------------------- | :------------------------------------------ | +| `--compaction-row-limit` | `INFLUXDB3_ENTERPRISE_COMPACTION_ROW_LIMIT` | + +--- + +#### compaction-max-num-files-per-plan + +Sets the maximum number of files included in any compaction plan. + +**Default:** `500` + +| influxdb3 serve option | Environment variable | +| :------------------------------------ | :------------------------------------------------------- | +| `--compaction-max-num-files-per-plan` | `INFLUXDB3_ENTERPRISE_COMPACTION_MAX_NUM_FILES_PER_PLAN` | + +--- + +#### compaction-gen2-duration + +Specifies the duration of the first level of compaction (gen2). Later levels of +compaction are multiples of this duration. This value should be equal to or +greater than the gen1 duration. + +**Default:** `20m` + +| influxdb3 serve option | Environment variable | +| :--------------------------- | :---------------------------------------------- | +| `--compaction-gen2-duration` | `INFLUXDB3_ENTERPRISE_COMPACTION_GEN2_DURATION` | + +--- + +#### compaction-multipliers + +Specifies a comma-separated list of multiples defining the duration of each +level of compaction. The number of elements in the list determines the number of +compaction levels. The first element specifies the duration of the first level +(gen3); subsequent levels are multiples of the previous level. + +**Default:** `3,4,6,5` + +| influxdb3 serve option | Environment variable | +| :------------------------- | :-------------------------------------------- | +| `--compaction-multipliers` | `INFLUXDB3_ENTERPRISE_COMPACTION_MULTIPLIERS` | + +--- + +#### compaction-cleanup-wait + +Specifies the amount of time that the compactor waits after finishing a compaction run +to delete files marked as needing deletion during that compaction run. 
+ +**Default:** `10m` + +| influxdb3 serve option | Environment variable | +| :-------------------------- | :--------------------------------------------- | +| `--compaction-cleanup-wait` | `INFLUXDB3_ENTERPRISE_COMPACTION_CLEANUP_WAIT` | + +--- + +#### compaction-check-interval + +Specifies how often the compactor checks for new compaction work to perform. + +**Default:** `10s` + +| influxdb3 serve option | Environment variable | +| :----------------------------- | :------------------------------------------------ | +| `--compaction-check-interval` | `INFLUXDB3_ENTERPRISE_COMPACTION_CHECK_INTERVAL` | + +--- +{{% /show-in %}} + +#### gen1-duration + +Specifies the duration that Parquet files are arranged into. Data timestamps +land each row into a file of this duration. Supported durations are `1m`, +`5m`, and `10m`. These files are known as "generation 1" files{{% show-in "enterprise" %}}, which the +compactor can merge into larger generations{{% /show-in %}}{{% show-in "core" %}} that the +compactor in InfluxDB 3 Enterprise can merge into larger generations{{% /show-in %}}. 
+ +**Default:** `10m` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------------ | +| `--gen1-duration` | `INFLUXDB3_GEN1_DURATION` | + +--- + +### Caching + +- [preemptive-cache-age](#preemptive-cache-age) +- [parquet-mem-cache-size](#parquet-mem-cache-size) +- [parquet-mem-cache-prune-percentage](#parquet-mem-cache-prune-percentage) +- [parquet-mem-cache-prune-interval](#parquet-mem-cache-prune-interval) +- [parquet-mem-cache-query-path-duration](#parquet-mem-cache-query-path-duration) +- [disable-parquet-mem-cache](#disable-parquet-mem-cache) +- [table-index-cache-max-entries](#table-index-cache-max-entries) +- [table-index-cache-concurrency-limit](#table-index-cache-concurrency-limit) +{{% show-in "enterprise" %}} +- [last-value-cache-disable-from-history](#last-value-cache-disable-from-history) +{{% /show-in %}} +- [last-cache-eviction-interval](#last-cache-eviction-interval) +{{% show-in "enterprise" %}} +- [distinct-value-cache-disable-from-history](#distinct-value-cache-disable-from-history) +{{% /show-in %}} +- [distinct-cache-eviction-interval](#distinct-cache-eviction-interval) + +#### preemptive-cache-age + +Specifies the interval to prefetch into the Parquet cache during compaction. + +**Default:** `3d` + +| influxdb3 serve option | Environment variable | +| :----------------------- | :------------------------------- | +| `--preemptive-cache-age` | `INFLUXDB3_PREEMPTIVE_CACHE_AGE` | + +--- + +#### parquet-mem-cache-size + +Specifies the size of the in-memory Parquet cache{{% show-in "core" %}} in megabytes (MB){{% /show-in %}}{{% show-in "enterprise" %}} in megabytes or percentage of total available memory{{% /show-in %}}. 
+ +{{% show-in "core" %}}**Default:** `1000`{{% /show-in %}} +{{% show-in "enterprise" %}}**Default:** `20%`{{% /show-in %}} + +| influxdb3 serve option | Environment variable | +| :---------------------------- | :---------------------------------- | +{{% show-in "core" %}}| `--parquet-mem-cache-size-mb` | `INFLUXDB3_PARQUET_MEM_CACHE_SIZE_MB` |{{% /show-in %}} +{{% show-in "enterprise" %}}| `--parquet-mem-cache-size` | `INFLUXDB3_PARQUET_MEM_CACHE_SIZE` |{{% /show-in %}} + +#### parquet-mem-cache-prune-percentage + +Specifies the percentage of entries to prune during a prune operation on the +in-memory Parquet cache. + +**Default:** `0.1` + +| influxdb3 serve option | Environment variable | +| :------------------------------------- | :--------------------------------------------- | +| `--parquet-mem-cache-prune-percentage` | `INFLUXDB3_PARQUET_MEM_CACHE_PRUNE_PERCENTAGE` | + +--- + +#### parquet-mem-cache-prune-interval + +Sets the interval to check if the in-memory Parquet cache needs to be pruned. + +**Default:** `1s` + +| influxdb3 serve option | Environment variable | +| :----------------------------------- | :------------------------------------------- | +| `--parquet-mem-cache-prune-interval` | `INFLUXDB3_PARQUET_MEM_CACHE_PRUNE_INTERVAL` | + +--- + +#### parquet-mem-cache-query-path-duration + +{{% show-in "enterprise" %}} +A [duration](/influxdb3/enterprise/reference/glossary/#duration) that specifies +{{% /show-in %}}{{% show-in "core" %}} +Specifies +{{% /show-in %}} +the time window for caching recent Parquet files in memory. Default is `5h`. 
+ +Only files containing data with a timestamp between `now` and `now - duration` +are cached when accessed during queries--for example, with the default `5h` setting: + +- Current time: `2024-06-10 15:00:00` +- Cache window: Last 5 hours (`2024-06-10 10:00:00` to now) + +If a query requests data from `2024-06-09` (old) and `2024-06-10 14:00` (recent): + +- **Cached**: Parquet files with data from `2024-06-10 14:00` (within 5-hour window) +- **Not cached**: Parquet files with data from `2024-06-09` (outside 5-hour window) + +| influxdb3 serve option | Environment variable | +| :---------------------------- | :------------------------------------ | +| `--parquet-mem-cache-query-path-duration` | `INFLUXDB3_PARQUET_MEM_CACHE_QUERY_PATH_DURATION` | + +--- + +#### disable-parquet-mem-cache + +Disables the in-memory Parquet cache. By default, the cache is enabled. + +| influxdb3 serve option | Environment variable | +| :---------------------------- | :------------------------------------ | +| `--disable-parquet-mem-cache` | `INFLUXDB3_DISABLE_PARQUET_MEM_CACHE` | + +--- + +#### table-index-cache-max-entries + +Specifies the maximum number of entries in the table index cache. + +**Default:** `1000` + +| influxdb3 serve option | Environment variable | +| :--------------------------------- | :-------------------------------------------- | +| `--table-index-cache-max-entries` | `INFLUXDB3_TABLE_INDEX_CACHE_MAX_ENTRIES` | + +--- + +#### table-index-cache-concurrency-limit + +Limits the concurrency level for table index cache operations. + +**Default:** `8` + +| influxdb3 serve option | Environment variable | +| :---------------------------------------- | :------------------------------------------------- | +| `--table-index-cache-concurrency-limit` | `INFLUXDB3_TABLE_INDEX_CACHE_CONCURRENCY_LIMIT` | + +{{% show-in "enterprise" %}} + +--- + +#### last-value-cache-disable-from-history + +Disables populating the last-N-value cache from historical data. 
+If disabled, the cache is still populated with data from the write-ahead log (WAL). + +| influxdb3 serve option | Environment variable | +| :---------------------------------------- | :---------------------------------------------------------- | +| `--last-value-cache-disable-from-history` | `INFLUXDB3_ENTERPRISE_LAST_VALUE_CACHE_DISABLE_FROM_HISTORY`| + +{{% /show-in %}} + +--- + +#### last-cache-eviction-interval + +Specifies the interval to evict expired entries from the Last-N-Value cache, +expressed as a human-readable duration--for example: `20s`, `1m`, `1h`. + +**Default:** `10s` + +| influxdb3 serve option | Environment variable | +| :------------------------------- | :--------------------------------------- | +| `--last-cache-eviction-interval` | `INFLUXDB3_LAST_CACHE_EVICTION_INTERVAL` | + + +{{% show-in "enterprise" %}} +--- + +#### distinct-value-cache-disable-from-history + +Disables populating the distinct value cache from historical data. +If disabled, the cache is still populated with data from the write-ahead log (WAL). + +| influxdb3 serve option | Environment variable | +| :-------------------------------------------- | :-------------------------------------------------------------- | +| `--distinct-value-cache-disable-from-history` | `INFLUXDB3_ENTERPRISE_DISTINCT_VALUE_CACHE_DISABLE_FROM_HISTORY`| + +{{% /show-in %}} + +--- + +#### distinct-cache-eviction-interval + +Specifies the interval to evict expired entries from the distinct value cache, +expressed as a human-readable duration--for example: `20s`, `1m`, `1h`. + +**Default:** `10s` + +| influxdb3 serve option | Environment variable | +| :----------------------------------- | :------------------------------------------- | +| `--distinct-cache-eviction-interval` | `INFLUXDB3_DISTINCT_CACHE_EVICTION_INTERVAL` | + +--- + +#### query-file-limit + +Limits the number of Parquet files a query can access. +If a query attempts to read more than this limit, {{< product-name >}} returns an error. 
+
+{{% show-in "core" %}}
+**Default:** `432`
+
+With the default `432` setting and the default [`gen1-duration`](#gen1-duration)
+setting of 10 minutes, queries can access up to 72 hours of data, but
+potentially less depending on whether all data for a given 10-minute block of
+time was ingested during the same period.
+
+You can increase this limit to allow more files to be queried, but be aware of
+the following side-effects:
+
+- Degraded query performance for queries that read more Parquet files
+- Increased memory usage
+- Your system potentially killing the `influxdb3` process due to Out-of-Memory
+  (OOM) errors
+- If using object storage to store data, many GET requests to access the data
+  (as many as 2 per file)
+
+> [!Note]
+> We recommend keeping the default setting and querying smaller time ranges.
+> If you need to query longer time ranges or want faster query performance on
+> queries that access an hour or more of data, consider
+> [InfluxDB 3 Enterprise](/influxdb3/enterprise/), which optimizes data storage
+> by compacting and rearranging Parquet files to achieve faster query performance.
+{{% /show-in %}}
+
+| influxdb3 serve option | Environment variable |
+| :--------------------- | :--------------------------- |
+| `--query-file-limit` | `INFLUXDB3_QUERY_FILE_LIMIT` |
+
+---
+
+### Processing Engine
+
+- [plugin-dir](#plugin-dir)
+- [virtual-env-location](#virtual-env-location)
+- [package-manager](#package-manager)
+
+#### plugin-dir
+
+Specifies the local directory that contains Python plugins and their test files.
+
+| influxdb3 serve option | Environment variable |
+| :--------------------- | :--------------------- |
+| `--plugin-dir` | `INFLUXDB3_PLUGIN_DIR` |
+
+---
+
+#### virtual-env-location
+
+Specifies the location of the Python virtual environment that the processing
+engine uses. 
+ +| influxdb3 serve option | Environment variable | +| :----------------------- | :--------------------- | +| `--virtual-env-location` | `VIRTUAL_ENV` | + +--- + +#### package-manager + +Specifies the Python package manager that the processing engine uses. + +This option supports the following values: + +- `discover` _(default)_: Automatically discover available package manager +- `pip`: Use pip package manager +- `uv`: Use uv package manager + +**Default:** `discover` + +| influxdb3 serve option | Environment variable | +| :--------------------- | :------------------- | +| `--package-manager` | `PACKAGE_MANAGER` | + +{{% show-in "enterprise" %}} + +--- + +### Cluster Management + +- [replication-interval](#replication-interval) +- [catalog-sync-interval](#catalog-sync-interval) +- [wait-for-running-ingestor](#wait-for-running-ingestor) + +#### replication-interval + +Specifies the interval at which data replication occurs between cluster nodes. + +**Default:** `250ms` + +| influxdb3 serve option | Environment variable | +| :------------------------- | :------------------------------------------- | +| `--replication-interval` | `INFLUXDB3_ENTERPRISE_REPLICATION_INTERVAL` | + +--- + +#### catalog-sync-interval + +Defines how often the catalog synchronizes across cluster nodes. + +**Default:** `10s` + +| influxdb3 serve option | Environment variable | +| :--------------------------- | :------------------------------------------ | +| `--catalog-sync-interval` | `INFLUXDB3_ENTERPRISE_CATALOG_SYNC_INTERVAL`| + +--- + +#### wait-for-running-ingestor + +Specifies how long to wait for a running ingestor during startup. 
+ +**Default:** `10s` + +| influxdb3 serve option | Environment variable | +| :------------------------------- | :------------------------------------------------ | +| `--wait-for-running-ingestor` | `INFLUXDB3_ENTERPRISE_WAIT_FOR_RUNNING_INGESTOR` | + +--- + +### Resource Limits + + +- [num-cores](#num-cores) +- [num-database-limit](#num-database-limit) +- [num-table-limit](#num-table-limit) +- [num-total-columns-per-table-limit](#num-total-columns-per-table-limit) + +#### num-cores + +Limits the number of CPU cores that the InfluxDB 3 Enterprise process can use when running on systems where resources are shared. +When specified, InfluxDB automatically assigns the number of DataFusion threads and IO threads based on the core count. + +**Thread assignment logic:** +- **1-2 cores**: 1 IO thread, 1 DataFusion thread +- **3 cores**: 1 IO thread, 2 DataFusion threads +- **4+ cores**: 2 IO threads, (n-2) DataFusion threads + +**Constraints:** +- Must be at least 2 +- Cannot exceed the number of cores available on the system +- Total thread count from other thread options cannot exceed the `num-cores` value + +| influxdb3 serve option | Environment variable | +| :--------------------- | :-------------------------------- | +| `--num-cores` | `INFLUXDB3_ENTERPRISE_NUM_CORES` | + +--- + +#### num-database-limit + +Sets the maximum number of databases that can be created. + +| influxdb3 serve option | Environment variable | +| :------------------------ | :---------------------------------------- | +| `--num-database-limit` | `INFLUXDB3_ENTERPRISE_NUM_DATABASE_LIMIT` | + +--- + +#### num-table-limit + +Defines the maximum number of tables that can be created across all databases. + +| influxdb3 serve option | Environment variable | +| :---------------------- | :------------------------------------- | +| `--num-table-limit` | `INFLUXDB3_ENTERPRISE_NUM_TABLE_LIMIT` | + +--- + +#### num-total-columns-per-table-limit + +Sets the maximum number of columns allowed per table. 
+ +| influxdb3 serve option | Environment variable | +| :--------------------------------------- | :---------------------------------------------------------- | +| `--num-total-columns-per-table-limit` | `INFLUXDB3_ENTERPRISE_NUM_TOTAL_COLUMNS_PER_TABLE_LIMIT` | + +{{% /show-in %}} + +--- + +### Data Lifecycle Management + +- [gen1-lookback-duration](#gen1-lookback-duration) +- [retention-check-interval](#retention-check-interval) +- [delete-grace-period](#delete-grace-period) +- [hard-delete-default-duration](#hard-delete-default-duration) + +#### gen1-lookback-duration + +Specifies how far back to look when creating generation 1 Parquet files. + +**Default:** `24h` + +| influxdb3 serve option | Environment variable | +| :---------------------------- | :-------------------------------------- | +| `--gen1-lookback-duration` | `INFLUXDB3_GEN1_LOOKBACK_DURATION` | + +--- + +#### retention-check-interval + +Defines how often the system checks for data that should be deleted according to retention policies. + +**Default:** `1h` + +| influxdb3 serve option | Environment variable | +| :----------------------------- | :--------------------------------------- | +| `--retention-check-interval` | `INFLUXDB3_RETENTION_CHECK_INTERVAL` | + +--- + +#### delete-grace-period + +Specifies the grace period before permanently deleting data. + +**Default:** `24h` + +| influxdb3 serve option | Environment variable | +| :------------------------ | :--------------------------------- | +| `--delete-grace-period` | `INFLUXDB3_DELETE_GRACE_PERIOD` | + +--- + +#### hard-delete-default-duration + +Sets the default duration for hard deletion of data. 
+ +**Default:** `90d` + +| influxdb3 serve option | Environment variable | +| :---------------------------------- | :-------------------------------------------- | +| `--hard-delete-default-duration` | `INFLUXDB3_HARD_DELETE_DEFAULT_DURATION` | + +--- + +### Telemetry + +- [telemetry-disable-upload](#telemetry-disable-upload) +- [telemetry-endpoint](#telemetry-endpoint) + +#### telemetry-disable-upload + +Disables the upload of telemetry data to InfluxData. + +**Default:** `false` + +| influxdb3 serve option | Environment variable | +| :---------------------------- | :-------------------------------------- | +| `--telemetry-disable-upload` | `INFLUXDB3_TELEMETRY_DISABLE_UPLOAD` | + +--- + +#### telemetry-endpoint + +Specifies the endpoint for telemetry data uploads. + +| influxdb3 serve option | Environment variable | +| :----------------------- | :--------------------------------- | +| `--telemetry-endpoint` | `INFLUXDB3_TELEMETRY_ENDPOINT` | + +--- + +### TCP Listeners + +- [tcp-listener-file-path](#tcp-listener-file-path) +- [admin-token-recovery-tcp-listener-file-path](#admin-token-recovery-tcp-listener-file-path) + +#### tcp-listener-file-path + +Specifies the file path for the TCP listener configuration. + +| influxdb3 serve option | Environment variable | +| :-------------------------- | :----------------------------------- | +| `--tcp-listener-file-path` | `INFLUXDB3_TCP_LISTINER_FILE_PATH` | + +--- + +#### admin-token-recovery-tcp-listener-file-path + +Specifies the TCP listener file path for admin token recovery operations. 
+ +| influxdb3 serve option | Environment variable | +| :---------------------------------------------- | :-------------------------------------------------------- | +| `--admin-token-recovery-tcp-listener-file-path` | `INFLUXDB3_ADMIN_TOKEN_RECOVERY_TCP_LISTENER_FILE_PATH` | + +{{% show-in "enterprise" %}} +--- + +### Experimental Features + +- [use-pacha-tree](#use-pacha-tree) + +#### use-pacha-tree + +Enables the experimental PachaTree storage engine for improved performance. + +> [!Warning] +> This is an experimental feature and should not be used in production environments. + +**Default:** `false` + +| influxdb3 serve option | Environment variable | +| :---------------------- | :------------------------------------- | +| `--use-pacha-tree` | `INFLUXDB3_ENTERPRISE_USE_PACHA_TREE` | + +{{% /show-in %}} \ No newline at end of file diff --git a/content/shared/influxdb3-cli/delete/_index.md b/content/shared/influxdb3-cli/delete/_index.md index 81a47ffc6..563618314 100644 --- a/content/shared/influxdb3-cli/delete/_index.md +++ b/content/shared/influxdb3-cli/delete/_index.md @@ -1,5 +1,5 @@ -The `influxdb3 delete` command deletes a resource such as a database or a table. +The `influxdb3 delete` command deletes a resource such as a cache, a database, or a table. 
## Usage @@ -19,6 +19,7 @@ influxdb3 delete | [last_cache](/influxdb3/version/reference/cli/influxdb3/delete/last_cache/) | Delete a last value cache | | [distinct_cache](/influxdb3/version/reference/cli/influxdb3/delete/distinct_cache/) | Delete a metadata cache | | [table](/influxdb3/version/reference/cli/influxdb3/delete/table/) | Delete a table from a database | +| [token](/influxdb3/version/reference/cli/influxdb3/delete/token/) | Delete an authorization token from the server | | [trigger](/influxdb3/version/reference/cli/influxdb3/delete/trigger/) | Delete a trigger for the processing engine | | help | Print command help or the help of a subcommand | {{% /show-in %}} @@ -30,6 +31,7 @@ influxdb3 delete | [last_cache](/influxdb3/version/reference/cli/influxdb3/delete/last_cache/) | Delete a last value cache | | [distinct_cache](/influxdb3/version/reference/cli/influxdb3/delete/distinct_cache/) | Delete a metadata cache | | [table](/influxdb3/version/reference/cli/influxdb3/delete/table/) | Delete a table from a database | +| [token](/influxdb3/version/reference/cli/influxdb3/delete/token/) | Delete an authorization token from the server | | [trigger](/influxdb3/version/reference/cli/influxdb3/delete/trigger/) | Delete a trigger for the processing engine | | help | Print command help or the help of a subcommand | {{% /show-in %}} diff --git a/content/shared/influxdb3-cli/delete/token.md b/content/shared/influxdb3-cli/delete/token.md new file mode 100644 index 000000000..73cfd688a --- /dev/null +++ b/content/shared/influxdb3-cli/delete/token.md @@ -0,0 +1,32 @@ + +The `influxdb3 delete token` command deletes an authorization token from the {{% product-name %}} server. 
+ +## Usage + +```bash +influxdb3 delete token [OPTIONS] +``` + +## Options + +| Option | Description | Default | Environment | +|----------------|-----------------------------------------------------------------------------------|---------|------------------------| +| `--token` | _({{< req >}})_ The token for authentication with the {{% product-name %}} server | | `INFLUXDB3_AUTH_TOKEN` | +| `--token-name` | _({{< req >}})_ The name of the token to be deleted | | | +| `--tls-ca` | An optional arg to use a custom ca for useful for testing with self signed certs | | `INFLUXDB3_TLS_CA` | +| `-h` | `--help` | Print help information | +| | `--help-all` | Print detailed help information | + +## Examples + +### Delete a token by name + +```bash +influxdb3 delete token --token-name TOKEN_TO_DELETE --token AUTH_TOKEN +``` + +### Show help for the command + +```bash +influxdb3 delete token --help +``` \ No newline at end of file diff --git a/content/shared/influxdb3-query-guides/query-timeout-best-practices.md b/content/shared/influxdb3-query-guides/query-timeout-best-practices.md new file mode 100644 index 000000000..c101123c7 --- /dev/null +++ b/content/shared/influxdb3-query-guides/query-timeout-best-practices.md @@ -0,0 +1,301 @@ +Learn how to set appropriate query timeouts for InfluxDB 3 to balance performance and resource protection. + +Query timeouts prevent resource monopolization while allowing legitimate queries to complete successfully. +The key is finding the "goldilocks zone"—timeouts that are not too short (causing legitimate queries to fail) and not too long (allowing runaway queries to monopolize resources). 
+ +- [Understanding query timeouts](#understanding-query-timeouts) +- [How query routing affects timeout strategy](#how-query-routing-affects-timeout-strategy) +- [Timeout configuration best practices](#timeout-configuration-best-practices) +- [InfluxDB 3 client library examples](#influxdb-3-client-library-examples) +- [Monitoring and troubleshooting](#monitoring-and-troubleshooting) + +## Understanding query timeouts + +Query timeouts define the maximum duration a query can run before being canceled. +In {{% product-name %}}, timeouts serve multiple purposes: + +- **Resource protection**: Prevent runaway queries from monopolizing system resources +- **Performance optimization**: Ensure responsive system behavior for time-sensitive operations +- **Cost control**: Limit compute resource consumption +- **User experience**: Provide predictable response times for applications and dashboards + +Query execution includes network latency, query planning, data retrieval, processing, and result serialization. + +### The "goldilocks zone" for query timeouts + +Optimal timeouts are: +- **Long enough**: To accommodate normal query execution under typical load +- **Short enough**: To prevent resource monopolization and provide reasonable feedback +- **Adaptive**: Adjusted based on query type, system load, and historical performance + +## How query routing affects timeout strategy + +InfluxDB 3 uses round-robin query routing to balance load across multiple queriers. +This creates a "checkout line" effect that influences timeout strategy. + +> [!Note] +> #### Concurrent query execution +> +> InfluxDB 3 supports concurrent query execution, which helps minimize the impact of intensive or inefficient queries. +> However, you should still use appropriate timeouts and optimize your queries for best performance. 
+ +### The checkout line analogy + +Consider a grocery store with multiple checkout lines: +- Customers (queries) are distributed across lines (queriers) +- A slow customer (long-running query) can block others in the same line +- More checkout lines (queriers) provide more alternatives when retrying + +If one querier is unhealthy or has been hijacked by a "noisy neighbor" query (excessively resource hungry), giving up sooner may save time--it's like jumping to a cashier with no customers in line. However, if all queriers are overloaded, then short retries may exacerbate the problem--you wouldn't jump to the end of another line if the cashier is already starting to scan your items. + +### Noisy neighbor effects + +In distributed systems: +- A single long-running query can impact other queries on the same querier +- Shorter timeouts with retries can help queries find less congested queriers +- The effectiveness depends on the number of available queriers + +### When shorter timeouts help + +- **Multiple queriers available**: Retries can find less congested queriers +- **Uneven load distribution**: Some queriers may be significantly less busy +- **Temporary congestion**: Brief spikes in query load or resource usage + +### When shorter timeouts hurt + +- **Few queriers**: Limited alternatives for retries +- **System-wide congestion**: All queriers are equally busy +- **Expensive query planning**: High overhead for query preparation + +## Timeout configuration best practices + +### Make timeouts adjustable + +Configure timeouts that can be modified without service restarts using environment variables, configuration files, runtime APIs, or per-query overrides. Design your client applications to easily adjust timeouts on the fly, allowing you to respond quickly to performance changes and test different timeout strategies without code changes. + +See the [InfluxDB 3 client library examples](#influxdb-3-client-library-examples) +for how to configure timeouts in Python. 
+ +### Use tiered timeout strategies + +Implement different timeout classes based on query characteristics. + +#### Starting point recommendations + +{{% hide-in "cloud-serverless" %}} +| Query Type | Recommended Timeout | Use Case | Rationale | +|------------|-------------------|-----------|-----------| +| UI and dashboard | 10 seconds | Interactive dashboards, real-time monitoring | Users expect immediate feedback | +| Generic default | 60 seconds | Application queries, APIs | Balances performance and reliability | +| Mixed workload | 2 minutes | Development, testing environments | Accommodates various query types | +| Analytical and background | 5 minutes | Reports, batch processing, ETL operations | Complex queries need more time | +{{% /hide-in %}} + +{{% show-in "cloud-serverless" %}} +| Query Type | Recommended Timeout | Use Case | Rationale | +|------------|-------------------|-----------|-----------| +| UI and dashboard | 10 seconds | Interactive dashboards, real-time monitoring | Users expect immediate feedback | +| Generic default | 30 seconds | Application queries, APIs | Serverless optimized for shorter queries | +| Mixed workload | 60 seconds | Development, testing environments | Limited by serverless execution model | +| Analytical and background | 2 minutes | Reports, batch processing | Complex queries within serverless limits | +{{% /show-in %}} + +{{% show-in "enterprise, core" %}} +> [!Tip] +> #### Use caching +> Where immediate feedback is crucial, consider using [Last Value Cache](/influxdb3/version/admin/manage-last-value-caches/) to speed up queries for recent values and [Distinct Value Cache](/influxdb3/version/admin/manage-distinct-value-caches/) to speed up queries for distinct values. +{{% /show-in %}} + +### Implement progressive timeout and retry logic + +Consider using more sophisticated retry strategies rather than simple fixed retries: + +1. **Exponential backoff**: Increase delay between retry attempts +2. 
**Jitter**: Add randomness to prevent thundering herd effects +3. **Circuit breakers**: Stop retries when system is overloaded +4. **Deadline propagation**: Respect overall operation deadlines + +### Warning signs + +Consider these indicators that timeouts may need adjustment: + +- **Timeouts > 10 minutes**: Usually indicates [query optimization](/influxdb3/version/query-data/troubleshoot-and-optimize/optimize-queries/) opportunities +- **High retry rates**: May indicate timeouts are too aggressive +- **Resource utilization spikes**: Long-running queries may need shorter timeouts +- **User complaints**: Balance between performance and user experience + +### Environment-specific considerations + +- **Development**: Use longer timeouts for debugging +- **Production**: Use shorter timeouts with monitoring +- **Cost-sensitive**: Use aggressive timeouts and [query optimization](/influxdb3/version/query-data/troubleshoot-and-optimize/optimize-queries/) + +### Experimental and ad-hoc queries + +When introducing a new query to your application or when issuing ad-hoc queries to a database with many users, your query might be the "noisy neighbor" (the shopping cart overloaded with groceries). By setting a tighter timeout on experimental queries you can reduce the impact on other users. 
+ + +## InfluxDB 3 client library examples + +### Python client with timeout configuration + +Configure timeouts in the InfluxDB 3 Python client: + +```python { placeholders="DATABASE_NAME|HOST_URL|AUTH_TOKEN" } +import influxdb_client_3 as InfluxDBClient3 + +# Configure different timeout classes (in seconds) +ui_timeout = 10 # For dashboard queries +api_timeout = 60 # For application queries +batch_timeout = 300 # For analytical queries + +# Create client with default timeout +client = InfluxDBClient3.InfluxDBClient3( + host="https://{{< influxdb/host >}}", + database="DATABASE_NAME", + token="AUTH_TOKEN", + timeout=api_timeout # Python client uses seconds +) + +# Quick query with short timeout +def query_latest_data(): + try: + result = client.query( + query="SELECT * FROM sensors WHERE time >= now() - INTERVAL '5 minutes' ORDER BY time DESC LIMIT 10", + timeout=ui_timeout + ) + return result.to_pandas() + except Exception as e: + print(f"Quick query failed: {e}") + return None + +# Analytical query with longer timeout +def query_daily_averages(): + query = """ + SELECT + DATE_TRUNC('day', time) as day, + room, + AVG(temperature) as avg_temp, + COUNT(*) as readings + FROM sensors + WHERE time >= now() - INTERVAL '30 days' + GROUP BY DATE_TRUNC('day', time), room + ORDER BY day DESC, room + """ + + try: + result = client.query( + query=query, + timeout=batch_timeout + ) + return result.to_pandas() + except Exception as e: + print(f"Analytical query failed: {e}") + return None +``` + +Replace the following: + +{{% hide-in "cloud-serverless" %}} +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query{{% /hide-in %}} +{{% show-in "cloud-serverless" %}} +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the bucket to query{{% /show-in %}} +{{% show-in "clustered,cloud-dedicated" %}} +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: a [database 
token](/influxdb3/clustered/admin/tokens/#database-tokens) with _read_ access to the specified database.{{% /show-in %}} +{{% show-in "cloud-serverless" %}} +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: an [API token](/influxdb3/cloud-serverless/admin/tokens/) with _read_ access to the specified bucket.{{% /show-in %}} +{{% show-in "enterprise,core" %}} +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}}with read permissions on the specified database{{% /show-in %}} + +### Basic retry logic implementation + +Implement simple retry strategies with progressive timeouts: + +```python +import time +import influxdb_client_3 as InfluxDBClient3 + +def query_with_retry(client, query: str, initial_timeout: int = 60, max_retries: int = 2): + """Execute query with basic retry and progressive timeout increase""" + + for attempt in range(max_retries + 1): + # Progressive timeout: increase timeout on each retry + timeout_seconds = initial_timeout + attempt * 30 + + try: + result = client.query( + query=query, + timeout=timeout_seconds + ) + return result + + except Exception as e: + if attempt == max_retries: + print(f"Query failed after {max_retries + 1} attempts: {e}") + raise + + # Simple backoff delay + delay = 2 * (attempt + 1) + print(f"Query attempt {attempt + 1} failed: {e}") + print(f"Retrying in {delay} seconds with timeout {timeout_seconds}s...") + time.sleep(delay) + + return None + +# Usage example +result = query_with_retry( + client=client, + query="SELECT * FROM large_table WHERE time >= now() - INTERVAL '1 day'", + initial_timeout=60, + max_retries=2 +) +``` + +## Monitoring and troubleshooting + +### Key metrics to monitor + +Track these essential timeout-related metrics: + +- **Query duration percentiles**: P50, P95, P99 execution times +- **Timeout rate**: Percentage of queries that time out +- **Error rates**: Timeout errors vs. 
other failure types +- **Resource utilization**: CPU and memory usage during query execution + +### Common timeout issues + +#### High timeout rates + +**Symptoms**: Many queries exceeding timeout limits + +**Common causes**: +- Timeouts set too aggressively for query complexity +- System resource constraints +- Inefficient query patterns + +**Solutions**: +1. Analyze query performance patterns +2. [Optimize slow queries](/influxdb3/version/query-data/troubleshoot-and-optimize/optimize-queries/) or increase timeouts appropriately +3. Scale system resources + +#### Inconsistent query performance + +**Symptoms**: Same queries sometimes fast, sometimes timeout + +**Common causes**: + +- Resource contention from concurrent queries +- Data compaction state (queries may be faster after compaction completes) + +**Solutions**: + +1. Analyze query patterns to identify and optimize slow queries +2. Implement retry logic with exponential backoff in your client applications +3. Adjust timeout values based on observed query performance patterns +{{% show-in "enterprise,core" %}} +4. Implement [Last Value Cache](/influxdb3/version/admin/manage-last-value-caches/) to speed up queries for recent values +5. Implement [Distinct Value Cache](/influxdb3/version/admin/manage-distinct-value-caches/) to speed up queries for distinct values +{{% /show-in %}} + +> [!Note] +> Regular analysis of timeout patterns helps identify optimization opportunities and system scaling needs. \ No newline at end of file diff --git a/content/shared/influxdb3-write-guides/troubleshoot-distributed.md b/content/shared/influxdb3-write-guides/troubleshoot-distributed.md new file mode 100644 index 000000000..802d518fd --- /dev/null +++ b/content/shared/influxdb3-write-guides/troubleshoot-distributed.md @@ -0,0 +1,348 @@ +Learn how to avoid unexpected results and recover from errors when writing to {{% product-name %}}. 
+ +- [Handle write responses](#handle-write-responses) + - [Review HTTP status codes](#review-http-status-codes) +- [Troubleshoot failures](#troubleshoot-failures) +- [Troubleshoot rejected points](#troubleshoot-rejected-points) +- [Report write issues](#report-write-issues) + +## Handle write responses + +{{% product-name %}} does the following when you send a write request: + +1. Validates the request. +2. If successful, attempts to [ingest data](/influxdb3/version/reference/internals/durability/#data-ingest) from the request body; otherwise, responds with an [error status](#review-http-status-codes). +3. Ingests or rejects data from the batch and returns one of the following HTTP status codes: + + - `204 No Content`: All of the data is ingested and queryable. + - `400 Bad Request`: Some {{% show-in "cloud-dedicated,clustered" %}}(_when **partial writes** are configured for the cluster_){{% /show-in %}} or all of the data has been rejected. Data that has not been rejected is ingested and queryable. + + The response body contains error details about [rejected points](#troubleshoot-rejected-points), up to 100 points. + +Writes are synchronous--the response status indicates the final status of the write and all ingested data is queryable. + +To ensure that InfluxDB handles writes in the order you request them, +wait for the response before you send the next request. + +### Review HTTP status codes + +InfluxDB uses conventional HTTP status codes to indicate the success or failure of a request. +The `message` property of the response body may contain additional details about the error. 
{{< product-name >}} returns one of the following HTTP status codes for a write request:
| +{{% /show-in %}} + +{{% show-in "cloud-serverless" %}} +| HTTP response code | Response body | Description | +| :-------------------------------| :--------------------------------------------------------------- | :------------- | +| `204 "No Content"` | Empty | InfluxDB ingested all of the data in the batch | +| `400 "Bad request"` | error details about rejected points, up to 100 points: `line` contains the first rejected line, `message` describes rejections | Some or all request data isn't allowed (for example, is malformed or falls outside of the bucket's retention period)--the response body indicates whether a partial write has occurred or if all data has been rejected | +| `401 "Unauthorized"` | Empty | The `Authorization` request header is missing or malformed or the [token](/influxdb3/version/admin/tokens/) doesn't have permission to write to the bucket | +| `404 "Not found"` | A requested **resource type** (for example, "organization" or "bucket"), and **resource name** | A requested resource wasn't found | +| `413 "Request too large"` | cannot read data: points in batch is too large | The request exceeds the maximum [global limit](/influxdb3/cloud-serverless/admin/billing/limits/) | +| `422 "Unprocessable Entity"` | `message` contains details about the error | The data isn't allowed (for example, falls outside of the database's retention period). | +| `429 "Too many requests"` | Empty | The number of requests exceeds the [adjustable service quota](/influxdb3/cloud-serverless/admin/billing/limits/#adjustable-service-quotas). The `Retry-After` header contains the number of seconds to wait before trying the write again. | +| `500 "Internal server error"` | Empty | Default status for an error | +| `503 "Service unavailable"` | Empty | The server is temporarily unavailable to accept writes. The `Retry-After` header contains the number of seconds to wait before trying the write again. 
| +{{% /show-in %}} + +The `message` property of the response body may contain additional details about the error. +If your data did not write to the {{% show-in "cloud-serverless" %}}bucket{{% /show-in %}}{{% show-in "cloud-dedicated,clustered" %}}database{{% /show-in %}}, see how to [troubleshoot rejected points](#troubleshoot-rejected-points). + +## Troubleshoot failures + +If you notice data is missing in your database, do the following: + +- Check the [HTTP status code](#review-http-status-codes) in the response. +- Check the `message` property in the response body for details about the error. +- If the `message` describes a field error, [troubleshoot rejected points](#troubleshoot-rejected-points). +- Verify all lines contain valid syntax ([line protocol](/influxdb3/version/reference/syntax/line-protocol/)). +- Verify the timestamps in your data match the [precision parameter](/influxdb3/version/reference/glossary/#precision) in your request. +- Minimize payload size and network errors by [optimizing writes](/influxdb3/version/write-data/best-practices/optimize-writes/). + +## Troubleshoot rejected points + +When writing points from a batch, InfluxDB rejects points that have syntax errors or schema conflicts. +If InfluxDB processes the data in your batch and then rejects points, the [HTTP response](#handle-write-responses) body contains the following properties that describe rejected points: + +- `code`: `"invalid"` +- `line`: the line number of the _first_ rejected point in the batch. +- `message`: a string that contains line-separated error messages, one message for each rejected point in the batch, up to 100 rejected points. Line numbers are 1-based. 
+ +InfluxDB rejects points for the following reasons: + +- a line protocol parsing error +- an invalid timestamp +- a schema conflict + +Schema conflicts occur when you try to write data that contains any of the following: + +- a wrong data type: the point falls within the same partition (default partitioning is measurement and day) as existing {{% show-in "cloud-serverless" %}}bucket{{% /show-in %}} {{% show-in "cloud-dedicated,clustered" %}}database{{% /show-in %}} data and contains a different data type for an existing field +- a tag and a field that use the same key + +### Example + +The following example shows a response body for a write request that contains two rejected points: + +```json +{ + "code": "invalid", + "line": 2, + "message": "failed to parse line protocol: + errors encountered on line(s): + error parsing line 2 (1-based): Invalid measurement was provided + error parsing line 4 (1-based): Unable to parse timestamp value '123461000000000000000000000000'" +} +``` + +Check for [field data type](/influxdb3/version/reference/syntax/line-protocol/#data-types-and-format) differences between the rejected data point and points within the same database and partition (default partitioning +is by measurement and day)--for example, did you attempt to write `string` data to an `int` field? + +## Report write issues + +If you experience persistent write issues that you can't resolve using the troubleshooting steps above, use these guidelines to gather the necessary information when reporting the issue to InfluxData support. + +> [!Note] +> #### Before reporting an issue +> +> Ensure you have followed all [troubleshooting steps](#troubleshoot-failures) and +> reviewed the [write optimization guidelines](/influxdb3/version/write-data/best-practices/optimize-writes/) +> to rule out common configuration and data formatting issues. 
+ +### Gather essential information + +When reporting write issues, provide the following information to help InfluxData engineers diagnose the problem: + +#### 1. Error details and logs + +**Capture the complete error response:** + +```bash { placeholders="AUTH_TOKEN|DATABASE_NAME" } +# Example: Capture both successful and failed write attempts +curl --silent --show-error --write-out "\nHTTP Status: %{http_code}\nResponse Time: %{time_total}s\n" \ + --request POST \ + "https://{{< influxdb/host >}}/write?db=DATABASE_NAME&precision=ns" \ + --header "Authorization: Bearer AUTH_TOKEN" \ + --header "Content-Type: text/plain; charset=utf-8" \ + --data-binary @problematic-data.lp \ + > write-error-response.txt 2>&1 +``` + +**Log client-side errors:** + +If using a client library, enable debug logging and capture the full exception details: + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[Python](#) +[Go](#) +[Java](#) +[JavaScript](#) +{{% /code-tabs %}} +{{% code-tab-content %}} +```python { placeholders="DATABASE_NAME|AUTH_TOKEN" } +import logging +from influxdb_client_3 import InfluxDBClient3 + +# Enable debug logging +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger("influxdb_client_3") + +try: + client = InfluxDBClient3(token="AUTH_TOKEN", host="{{< influxdb/host >}}", database="DATABASE_NAME") + client.write(data) +except Exception as e: + logger.error(f"Write failed: {str(e)}") + # Include full stack trace in your report + import traceback + traceback.print_exc() +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```go { placeholders="DATABASE_NAME|AUTH_TOKEN" } +package main + +import ( + "context" + "fmt" + "log" + "os" + + "github.com/InfluxCommunity/influxdb3-go" +) + +func main() { + // Enable debug logging + client, err := influxdb3.New(influxdb3.ClientConfig{ + Host: "https://{{< influxdb/host >}}", + Token: "AUTH_TOKEN", + Database: "DATABASE_NAME", + Debug: true, + }) + + if err != nil { + log.Fatal(err) + } + defer 
client.Close() + + err = client.Write(context.Background(), data) + if err != nil { + // Log the full error details + fmt.Fprintf(os.Stderr, "Write error: %+v\n", err) + } +} +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```java { placeholders="DATABASE_NAME|AUTH_TOKEN" } +import com.influxdb.v3.client.InfluxDBClient; +import java.util.logging.Logger; +import java.util.logging.Level; + +public class WriteErrorExample { + private static final Logger logger = Logger.getLogger(WriteErrorExample.class.getName()); + + public static void main(String[] args) { + try (InfluxDBClient client = InfluxDBClient.getInstance( + "https://{{< influxdb/host >}}", + "AUTH_TOKEN".toCharArray(), + "DATABASE_NAME")) { + + client.writeRecord(data); + } catch (Exception e) { + logger.log(Level.SEVERE, "Write failed", e); + // Include full stack trace in your report + e.printStackTrace(); + } + } +} +``` +{{% /code-tab-content %}} +{{% code-tab-content %}} +```javascript { placeholders="DATABASE_NAME|AUTH_TOKEN" } +import { InfluxDBClient } from '@influxdata/influxdb3-client' + +const client = new InfluxDBClient({ + host: 'https://{{< influxdb/host >}}', + token: 'AUTH_TOKEN', + database: 'DATABASE_NAME' +}) + +try { + await client.write(data) +} catch (error) { + console.error('Write failed:', error) + // Include the full error object in your report + console.error('Full error details:', JSON.stringify(error, null, 2)) +} +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +Replace the following in your code: + +{{% hide-in "cloud-serverless" %}} +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query{{% /hide-in %}} +{{% show-in "cloud-serverless" %}} +- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the bucket to query{{% /show-in %}} +{{% show-in "clustered,cloud-dedicated" %}} +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: a [database 
token](/influxdb3/clustered/admin/tokens/#database-tokens) with _write_ access to the specified database.{{% /show-in %}} +{{% show-in "cloud-serverless" %}} +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: an [API token](/influxdb3/cloud-serverless/admin/tokens/) with _write_ access to the specified bucket.{{% /show-in %}} +{{% show-in "enterprise,core" %}} +- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}} with write permissions on the specified database{{% /show-in %}} + +#### 2. Data samples and patterns + +**Provide representative data samples:** + +- Include 10-20 lines of the problematic line protocol data (sanitized if necessary) +- Show both successful and failing data formats +- Include timestamp ranges and precision used +- Specify if the issue occurs with specific measurements, tags, or field types + +**Example data documentation:** +``` +# Successful writes: +measurement1,tag1=value1,tag2=value2 field1=1.23,field2="text" 1640995200000000000 + +# Failing writes: +measurement1,tag1=value1,tag2=value2 field1="string",field2=456 1640995260000000000 +# Error: field data type conflict - field1 changed from float to string +``` + +#### 3. Write patterns and volume + +Document your write patterns: + +- **Frequency**: How often do you write data? (for example, every 10 seconds, once per minute) +- **Batch size**: How many points per write request? +- **Concurrency**: How many concurrent write operations? +- **Data retention**: How long is data retained? +- **Timing**: When did the issue first occur? Is it intermittent or consistent? + +#### 4. 
Environment details + +{{% show-in "clustered" %}} +**Cluster configuration:** +- InfluxDB Clustered version +- Kubernetes environment details +- Node specifications (CPU, memory, storage) +- Network configuration between client and cluster +{{% /show-in %}} + +**Client configuration:** +- Client library version and language +- Connection settings (timeouts, retry logic) +- Geographic location relative to cluster + +#### 5. Reproduction steps + +Provide step-by-step instructions to reproduce the issue: + +1. **Environment setup**: How to configure a similar environment +2. **Data preparation**: Sample data files or generation scripts +3. **Write commands**: Exact commands or code used +4. **Expected vs actual results**: What should happen vs what actually happens + +### Create a support package + +Organize all gathered information into a comprehensive package: + +**Files to include:** +- `write-error-response.txt` - HTTP response details +- `client-logs.txt` - Client library debug logs +- `sample-data.lp` - Representative line protocol data (sanitized) +- `reproduction-steps.md` - Detailed reproduction guide +- `environment-details.md` - {{% show-in "clustered" %}}Cluster and{{% /show-in %}} client configuration +- `write-patterns.md` - Usage patterns and volume information + +**Package format:** +```bash +# Create a timestamped support package +TIMESTAMP=$(date -Iseconds) +mkdir "write-issue-${TIMESTAMP}" +# Add all relevant files to the directory +tar -czf "write-issue-${TIMESTAMP}.tar.gz" "write-issue-${TIMESTAMP}/" +``` + +### Submit the issue + +Include the support package when contacting InfluxData support through your standard [support channels](#bug-reports-and-feedback), along with: + +- A clear description of the problem +- Impact assessment (how critical is this issue?) 
+- Any workarounds you've attempted +- Business context if the issue affects production systems + +This comprehensive information will help InfluxData engineers identify root causes and provide targeted solutions for your write issues. diff --git a/content/shared/influxql-v3-reference/feature-support.md b/content/shared/influxql-v3-reference/feature-support.md index feacf93fa..3fe18e3a1 100644 --- a/content/shared/influxql-v3-reference/feature-support.md +++ b/content/shared/influxql-v3-reference/feature-support.md @@ -65,11 +65,11 @@ The following table provides information about what metaqueries are available in ### Aggregate functions -| Function | Supported | -| :---------------------------------------------------------------------------------------- | :----------------------: | +| Function | Supported | +| :-------------------------------------------------------------------------------- | :----------------------: | | [COUNT()](/influxdb/version/reference/influxql/functions/aggregates/#count) | **{{< icon "check" >}}** | | [DISTINCT()](/influxdb/version/reference/influxql/functions/aggregates/#distinct) | **{{< icon "check" >}}** | -| INTEGRAL() | | +| [INTEGRAL()](/influxdb/version/reference/influxql/functions/aggregates/#integral) | **{{< icon "check" >}}** | | [MEAN()](/influxdb/version/reference/influxql/functions/aggregates/#mean) | **{{< icon "check" >}}** | | [MEDIAN()](/influxdb/version/reference/influxql/functions/aggregates/#median) | **{{< icon "check" >}}** | | [MODE()](/influxdb/version/reference/influxql/functions/aggregates/#mode) | **{{< icon "check" >}}** | @@ -77,29 +77,25 @@ The following table provides information about what metaqueries are available in | [STDDEV()](/influxdb/version/reference/influxql/functions/aggregates/#stddev) | **{{< icon "check" >}}** | | [SUM()](/influxdb/version/reference/influxql/functions/aggregates/#sum) | **{{< icon "check" >}}** | - - ### Selector functions -| Function | Supported | -| 
:------------------------------------------------------------------------------------------- | :----------------------: | +| Function | Supported | +| :----------------------------------------------------------------------------------- | :----------------------: | | [BOTTOM()](/influxdb/version/reference/influxql/functions/selectors/#bottom) | **{{< icon "check" >}}** | | [FIRST()](/influxdb/version/reference/influxql/functions/selectors/#first) | **{{< icon "check" >}}** | | [LAST()](/influxdb/version/reference/influxql/functions/selectors/#last) | **{{< icon "check" >}}** | | [MAX()](/influxdb/version/reference/influxql/functions/selectors/#max) | **{{< icon "check" >}}** | | [MIN()](/influxdb/version/reference/influxql/functions/selectors/#min) | **{{< icon "check" >}}** | | [PERCENTILE()](/influxdb/version/reference/influxql/functions/selectors/#percentile) | **{{< icon "check" >}}** | -| SAMPLE() | | +| SAMPLE() | | | [TOP()](/influxdb/version/reference/influxql/functions/selectors/#top) | **{{< icon "check" >}}** | ### Transformations -| Function | Supported | -| :--------------------------------------------------------------------------------------------------------------------------- | :----------------------: | +| Function | Supported | +| :------------------------------------------------------------------------------------------------------------------- | :----------------------: | | [ABS()](/influxdb/version/reference/influxql/functions/transformations/#abs) | **{{< icon "check" >}}** | | [ACOS()](/influxdb/version/reference/influxql/functions/transformations/#acos) | **{{< icon "check" >}}** | | [ASIN()](/influxdb/version/reference/influxql/functions/transformations/#asin) | **{{< icon "check" >}}** | diff --git a/content/shared/influxql-v3-reference/functions/aggregates.md b/content/shared/influxql-v3-reference/functions/aggregates.md index c4c2ecc4e..071c65d06 100644 --- a/content/shared/influxql-v3-reference/functions/aggregates.md +++ 
b/content/shared/influxql-v3-reference/functions/aggregates.md @@ -6,6 +6,7 @@ _Examples use the sample data set provided in the - [COUNT()](#count) - [DISTINCT()](#distinct) +- [INTEGRAL()](#integral) - [MEAN()](#mean) - [MEDIAN()](#median) - [MODE()](#mode) @@ -13,17 +14,6 @@ _Examples use the sample data set provided in the - [STDDEV()](#stddev) - [SUM()](#sum) - - - -> [!Important] -> #### Missing InfluxQL functions -> -> Some InfluxQL functions are in the process of being rearchitected to work with -> the InfluxDB 3 storage engine. If a function you need is not here, check the -> [InfluxQL feature support page](/influxdb/version/reference/influxql/feature-support/#function-support) -> for more information. - ## COUNT() Returns the number of non-null [field values](/influxdb/version/reference/glossary/#field-value). @@ -186,14 +176,14 @@ name: home {{% /expand %}} {{< /expand-wrapper >}} - +{{< /expand-wrapper >}} ## MEAN() diff --git a/content/shared/sql-reference/functions/time-and-date.md b/content/shared/sql-reference/functions/time-and-date.md index befe2331d..82738435a 100644 --- a/content/shared/sql-reference/functions/time-and-date.md +++ b/content/shared/sql-reference/functions/time-and-date.md @@ -885,9 +885,10 @@ LIMIT 1 ## from_unixtime -Converts an integer to RFC3339 timestamp format (`YYYY-MM-DDT00:00:00.000000000Z`). -Input is parsed as a [Unix nanosecond timestamp](/influxdb/version/reference/glossary/#unix-timestamp) -and returns the corresponding RFC3339 timestamp. +Converts an integer (Unix timestamp in seconds) to a timestamp value. +The underlying result is a timestamp (`Timestamp(TimeUnit::Second, None)`). +If you output query results as JSON (default for the API), CSV, or pretty (default for the CLI), the timestamp is formatted as an ISO 8601 string (`YYYY-MM-DDTHH:MM:SS`, without a timezone indicator). +When output to Parquet, the raw integer value (for example, `1641042000`) is preserved. 
```sql from_unixtime(expression) @@ -1454,7 +1455,7 @@ SELECT tz(time, 'Australia/Sydney') AS time_tz, time FROM home ORDER BY time LIM differ when the input timestamp **does not** have a timezone. - When using an input timestamp that does not have a timezone (the default behavior in InfluxDB) with the - `AT TIME ZONE` operator, the operator returns the the same timestamp, but with a timezone offset + `AT TIME ZONE` operator, the operator returns the same timestamp, but with a timezone offset (also known as the "wall clock" time)--for example: ```sql diff --git a/content/telegraf/v1/release-notes.md b/content/telegraf/v1/release-notes.md index 05d994186..b387c589c 100644 --- a/content/telegraf/v1/release-notes.md +++ b/content/telegraf/v1/release-notes.md @@ -11,6 +11,122 @@ menu: weight: 60 --- +## v1.35.3 {date="2025-07-28"} + +### Bug fixes + +- [#17373](https://github.com/influxdata/telegraf/pull/17373) `agent` Handle nil timer on telegraf reload when no debounce is specified +- [#17340](https://github.com/influxdata/telegraf/pull/17340) `agent` Make Windows service install more robust +- [#17310](https://github.com/influxdata/telegraf/pull/17310) `outputs.sql` Add timestamp to derived datatypes +- [#17349](https://github.com/influxdata/telegraf/pull/17349) `outputs` Retrigger batch-available-events only for non-failing writes +- [#17293](https://github.com/influxdata/telegraf/pull/17293) `parsers.json_v2` Respect string type for objects and arrays +- [#17367](https://github.com/influxdata/telegraf/pull/17367) `plugins.snmp` Update gosnmp to prevent panic in snmp agents +- [#17292](https://github.com/influxdata/telegraf/pull/17292) `processors.snmp_lookup` Avoid re-enqueing updates after plugin stopped +- [#17369](https://github.com/influxdata/telegraf/pull/17369) `processors.snmp_lookup` Prevent deadlock during plugin shutdown + +### Dependency updates + +- [#17320](https://github.com/influxdata/telegraf/pull/17320) `deps` Bump 
github.com/Azure/azure-sdk-for-go/sdk/azcore from 1.18.0 to 1.18.1 +- [#17328](https://github.com/influxdata/telegraf/pull/17328) `deps` Bump github.com/SAP/go-hdb from 1.13.11 to 1.13.12 +- [#17301](https://github.com/influxdata/telegraf/pull/17301) `deps` Bump github.com/SAP/go-hdb from 1.13.9 to 1.13.11 +- [#17326](https://github.com/influxdata/telegraf/pull/17326) `deps` Bump github.com/alitto/pond/v2 from 2.4.0 to 2.5.0 +- [#17295](https://github.com/influxdata/telegraf/pull/17295) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.227.0 to 1.230.0 +- [#17332](https://github.com/influxdata/telegraf/pull/17332) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.230.0 to 1.231.0 +- [#17300](https://github.com/influxdata/telegraf/pull/17300) `deps` Bump github.com/docker/docker from 28.3.0+incompatible to 28.3.1+incompatible +- [#17334](https://github.com/influxdata/telegraf/pull/17334) `deps` Bump github.com/docker/docker from 28.3.1+incompatible to 28.3.2+incompatible +- [#17327](https://github.com/influxdata/telegraf/pull/17327) `deps` Bump github.com/google/cel-go from 0.25.0 to 0.26.0 +- [#17331](https://github.com/influxdata/telegraf/pull/17331) `deps` Bump github.com/miekg/dns from 1.1.66 to 1.1.67 +- [#17297](https://github.com/influxdata/telegraf/pull/17297) `deps` Bump github.com/nats-io/nats-server/v2 from 2.11.5 to 2.11.6 +- [#17321](https://github.com/influxdata/telegraf/pull/17321) `deps` Bump github.com/openconfig/goyang from 1.6.2 to 1.6.3 +- [#17298](https://github.com/influxdata/telegraf/pull/17298) `deps` Bump github.com/prometheus/procfs from 0.16.1 to 0.17.0 +- [#17296](https://github.com/influxdata/telegraf/pull/17296) `deps` Bump github.com/shirou/gopsutil/v4 from 4.25.5 to 4.25.6 +- [#17299](https://github.com/influxdata/telegraf/pull/17299) `deps` Bump github.com/snowflakedb/gosnowflake from 1.14.1 to 1.15.0 +- [#17323](https://github.com/influxdata/telegraf/pull/17323) `deps` Bump go.opentelemetry.io/collector/pdata 
from 1.35.0 to 1.36.0 +- [#17091](https://github.com/influxdata/telegraf/pull/17091) `deps` Bump go.step.sm/crypto from 0.64.0 to 0.67.0 +- [#17330](https://github.com/influxdata/telegraf/pull/17330) `deps` Bump golang.org/x/crypto from 0.39.0 to 0.40.0 +- [#17322](https://github.com/influxdata/telegraf/pull/17322) `deps` Bump golang.org/x/mod from 0.25.0 to 0.26.0 +- [#17336](https://github.com/influxdata/telegraf/pull/17336) `deps` Bump golang.org/x/net from 0.41.0 to 0.42.0 +- [#17337](https://github.com/influxdata/telegraf/pull/17337) `deps` Bump golang.org/x/sys from 0.33.0 to 0.34.0 +- [#17335](https://github.com/influxdata/telegraf/pull/17335) `deps` Bump golang.org/x/term from 0.32.0 to 0.33.0 +- [#17294](https://github.com/influxdata/telegraf/pull/17294) `deps` Bump google.golang.org/api from 0.239.0 to 0.240.0 +- [#17325](https://github.com/influxdata/telegraf/pull/17325) `deps` Bump google.golang.org/api from 0.240.0 to 0.241.0 +- [#17138](https://github.com/influxdata/telegraf/pull/17138) `deps` Bump modernc.org/sqlite from 1.37.0 to 1.38.0 + +## v1.35.2 {date="2025-07-07"} + +### Bug fixes + +- [#17248](https://github.com/influxdata/telegraf/pull/17248) `agent` Add missing config flags for migrate command +- [#17240](https://github.com/influxdata/telegraf/pull/17240) `disk-buffer` Correctly reset the mask after adding to an empty buffer +- [#17284](https://github.com/influxdata/telegraf/pull/17284) `disk-buffer` Expire metric tracking information in the right place +- [#17257](https://github.com/influxdata/telegraf/pull/17257) `disk-buffer` Mask old tracking metrics on restart +- [#17247](https://github.com/influxdata/telegraf/pull/17247) `disk-buffer` Remove empty buffer on close +- [#17285](https://github.com/influxdata/telegraf/pull/17285) `inputs.gnmi` Avoid interpreting path elements with multiple colons as namespace +- [#17278](https://github.com/influxdata/telegraf/pull/17278) `inputs.gnmi` Handle base64 encoded IEEE-754 floats correctly +- 
[#17258](https://github.com/influxdata/telegraf/pull/17258) `inputs.kibana` Support Kibana 8.x status API format change +- [#17214](https://github.com/influxdata/telegraf/pull/17214) `inputs.ntpq` Fix ntpq field misalignment parsing errors +- [#17234](https://github.com/influxdata/telegraf/pull/17234) `outputs.microsoft_fabric` Correct app name +- [#17291](https://github.com/influxdata/telegraf/pull/17291) `outputs.nats` Avoid initializing Jetstream unconditionally +- [#17246](https://github.com/influxdata/telegraf/pull/17246) `outputs` Retrigger batch-available-events correctly + +### Dependency updates + +- [#17217](https://github.com/influxdata/telegraf/pull/17217) `deps` Bump github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs from 1.3.2 to 1.4.0 +- [#17226](https://github.com/influxdata/telegraf/pull/17226) `deps` Bump github.com/ClickHouse/clickhouse-go/v2 from 2.37.0 to 2.37.1 +- [#17265](https://github.com/influxdata/telegraf/pull/17265) `deps` Bump github.com/ClickHouse/clickhouse-go/v2 from 2.37.1 to 2.37.2 +- [#17268](https://github.com/influxdata/telegraf/pull/17268) `deps` Bump github.com/Masterminds/semver/v3 from 3.3.1 to 3.4.0 +- [#17271](https://github.com/influxdata/telegraf/pull/17271) `deps` Bump github.com/SAP/go-hdb from 1.13.7 to 1.13.9 +- [#17232](https://github.com/influxdata/telegraf/pull/17232) `deps` Bump github.com/alitto/pond/v2 from 2.3.4 to 2.4.0 +- [#17231](https://github.com/influxdata/telegraf/pull/17231) `deps` Bump github.com/apache/arrow-go/v18 from 18.3.0 to 18.3.1 +- [#17223](https://github.com/influxdata/telegraf/pull/17223) `deps` Bump github.com/aws/aws-sdk-go-v2/config from 1.29.15 to 1.29.17 +- [#17220](https://github.com/influxdata/telegraf/pull/17220) `deps` Bump github.com/aws/aws-sdk-go-v2/credentials from 1.17.69 to 1.17.70 +- [#17227](https://github.com/influxdata/telegraf/pull/17227) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs from 1.50.3 to 1.51.0 +- 
[#17262](https://github.com/influxdata/telegraf/pull/17262) `deps` Bump github.com/aws/aws-sdk-go-v2/service/dynamodb from 1.43.4 to 1.44.0 +- [#17224](https://github.com/influxdata/telegraf/pull/17224) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.225.1 to 1.225.2 +- [#17260](https://github.com/influxdata/telegraf/pull/17260) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.226.0 to 1.227.0 +- [#17264](https://github.com/influxdata/telegraf/pull/17264) `deps` Bump github.com/docker/docker from 28.2.2+incompatible to 28.3.0+incompatible +- [#17256](https://github.com/influxdata/telegraf/pull/17256) `deps` Bump github.com/lxc/incus/v6 from 6.13.0 to 6.14.0 +- [#17272](https://github.com/influxdata/telegraf/pull/17272) `deps` Bump github.com/microsoft/go-mssqldb from 1.8.2 to 1.9.2 +- [#17261](https://github.com/influxdata/telegraf/pull/17261) `deps` Bump github.com/nats-io/nats-server/v2 from 2.11.4 to 2.11.5 +- [#17266](https://github.com/influxdata/telegraf/pull/17266) `deps` Bump github.com/peterbourgon/unixtransport from 0.0.5 to 0.0.6 +- [#17229](https://github.com/influxdata/telegraf/pull/17229) `deps` Bump github.com/prometheus/common from 0.64.0 to 0.65.0 +- [#17267](https://github.com/influxdata/telegraf/pull/17267) `deps` Bump github.com/redis/go-redis/v9 from 9.10.0 to 9.11.0 +- [#17273](https://github.com/influxdata/telegraf/pull/17273) `deps` Bump go.opentelemetry.io/collector/pdata from 1.34.0 to 1.35.0 +- [#17219](https://github.com/influxdata/telegraf/pull/17219) `deps` Bump google.golang.org/api from 0.237.0 to 0.238.0 +- [#17263](https://github.com/influxdata/telegraf/pull/17263) `deps` Bump google.golang.org/api from 0.238.0 to 0.239.0 +- [#17218](https://github.com/influxdata/telegraf/pull/17218) `deps` Bump k8s.io/api from 0.33.1 to 0.33.2 +- [#17228](https://github.com/influxdata/telegraf/pull/17228) `deps` Bump k8s.io/client-go from 0.33.1 to 0.33.2 + +## v1.35.1 {date="2025-06-23"} + +### Bug fixes + +- 
[#17178](https://github.com/influxdata/telegraf/pull/17178) `inputs.procstat` Fix user filter conditional logic +- [#17210](https://github.com/influxdata/telegraf/pull/17210) `processors.strings` Add explicit TOML tags on struct fields + +### Dependency updates + +- [#17194](https://github.com/influxdata/telegraf/pull/17194) `deps` Bump github.com/Azure/azure-sdk-for-go/sdk/azidentity from 1.10.0 to 1.10.1 +- [#17189](https://github.com/influxdata/telegraf/pull/17189) `deps` Bump github.com/ClickHouse/clickhouse-go/v2 from 2.36.0 to 2.37.0 +- [#17186](https://github.com/influxdata/telegraf/pull/17186) `deps` Bump github.com/SAP/go-hdb from 1.13.6 to 1.13.7 +- [#17188](https://github.com/influxdata/telegraf/pull/17188) `deps` Bump github.com/alitto/pond/v2 from 2.3.2 to 2.3.4 +- [#17180](https://github.com/influxdata/telegraf/pull/17180) `deps` Bump github.com/aws/aws-sdk-go-v2/credentials from 1.17.68 to 1.17.69 +- [#17185](https://github.com/influxdata/telegraf/pull/17185) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.45.1 to 1.45.2 +- [#17187](https://github.com/influxdata/telegraf/pull/17187) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs from 1.50.1 to 1.50.2 +- [#17183](https://github.com/influxdata/telegraf/pull/17183) `deps` Bump github.com/aws/aws-sdk-go-v2/service/dynamodb from 1.43.2 to 1.43.3 +- [#17182](https://github.com/influxdata/telegraf/pull/17182) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.225.0 to 1.225.1 +- [#17190](https://github.com/influxdata/telegraf/pull/17190) `deps` Bump github.com/aws/aws-sdk-go-v2/service/kinesis from 1.35.1 to 1.35.2 +- [#17193](https://github.com/influxdata/telegraf/pull/17193) `deps` Bump github.com/aws/aws-sdk-go-v2/service/timestreamwrite from 1.31.0 to 1.31.1 +- [#17195](https://github.com/influxdata/telegraf/pull/17195) `deps` Bump github.com/aws/smithy-go from 1.22.3 to 1.22.4 +- [#17196](https://github.com/influxdata/telegraf/pull/17196) `deps` Bump 
github.com/cloudevents/sdk-go/v2 from 2.16.0 to 2.16.1 +- [#17212](https://github.com/influxdata/telegraf/pull/17212) `deps` Bump github.com/go-chi/chi/v5 from 5.2.1 to 5.2.2 +- [#17191](https://github.com/influxdata/telegraf/pull/17191) `deps` Bump github.com/go-sql-driver/mysql from 1.9.2 to 1.9.3 +- [#17192](https://github.com/influxdata/telegraf/pull/17192) `deps` Bump github.com/peterbourgon/unixtransport from 0.0.4 to 0.0.5 +- [#17181](https://github.com/influxdata/telegraf/pull/17181) `deps` Bump github.com/redis/go-redis/v9 from 9.9.0 to 9.10.0 +- [#17197](https://github.com/influxdata/telegraf/pull/17197) `deps` Bump github.com/urfave/cli/v2 from 2.27.6 to 2.27.7 +- [#17198](https://github.com/influxdata/telegraf/pull/17198) `deps` Bump go.opentelemetry.io/collector/pdata from 1.33.0 to 1.34.0 +- [#17184](https://github.com/influxdata/telegraf/pull/17184) `deps` Bump google.golang.org/api from 0.236.0 to 0.237.0 + ## v1.35.0 {date="2025-06-16"} ### Deprecation Removals @@ -129,14 +245,14 @@ The `telegraf config migrate` command might be able to help with the migration. 
- [#16030](https://github.com/influxdata/telegraf/pull/16030) `processors.enum` Allow mapping to be applied to multiple fields - [#16494](https://github.com/influxdata/telegraf/pull/16494) `serializer.prometheusremotewrite` Allow sending native histograms -### Bugfixes +### Bug fixes - [#17044](https://github.com/influxdata/telegraf/pull/17044) `inputs.opcua` Fix integration test - [#16986](https://github.com/influxdata/telegraf/pull/16986) `inputs.procstat` Resolve remote usernames on Posix systems - [#16699](https://github.com/influxdata/telegraf/pull/16699) `inputs.win_wmi` Free resources to avoid leaks - [#17118](https://github.com/influxdata/telegraf/pull/17118) `migrations` Update table content for general plugin migrations -### Dependency Updates +### Dependency updates - [#17089](https://github.com/influxdata/telegraf/pull/17089) `deps` Bump cloud.google.com/go/bigquery from 1.68.0 to 1.69.0 - [#17026](https://github.com/influxdata/telegraf/pull/17026) `deps` Bump cloud.google.com/go/storage from 1.53.0 to 1.54.0 @@ -201,7 +317,7 @@ The `telegraf config migrate` command might be able to help with the migration. ## v1.34.4 {date="2025-05-19"} -### Bugfixes +### Bug fixes - [#17009](https://github.com/influxdata/telegraf/pull/17009) `inputs.cloudwatch` Restore filtering to match all dimensions - [#16978](https://github.com/influxdata/telegraf/pull/16978) `inputs.nfsclient` Handle errors during mountpoint filtering @@ -211,7 +327,7 @@ The `telegraf config migrate` command might be able to help with the migration. 
- [#16815](https://github.com/influxdata/telegraf/pull/16815) `inputs.win_eventlog` Handle large events to avoid they get dropped silently - [#16878](https://github.com/influxdata/telegraf/pull/16878) `parsers.json_v2` Handle measurements with multiple objects correctly -### Dependency Updates +### Dependency updates - [#16991](https://github.com/influxdata/telegraf/pull/16991) `deps` Bump cloud.google.com/go/bigquery from 1.67.0 to 1.68.0 - [#16963](https://github.com/influxdata/telegraf/pull/16963) `deps` Bump cloud.google.com/go/storage from 1.52.0 to 1.53.0 @@ -243,7 +359,7 @@ The `telegraf config migrate` command might be able to help with the migration. ## v1.34.3 {date="2025-05-05"} -### Bugfixes +### Bug fixes - [#16697](https://github.com/influxdata/telegraf/pull/16697) `agent` Correctly truncate the disk buffer - [#16868](https://github.com/influxdata/telegraf/pull/16868) `common.ratelimiter` Only grow the buffer but never shrink @@ -254,7 +370,7 @@ The `telegraf config migrate` command might be able to help with the migration. - [#16781](https://github.com/influxdata/telegraf/pull/16781) `inputs.win_wmi` Restrict threading model to APARTMENTTHREADED - [#16857](https://github.com/influxdata/telegraf/pull/16857) `outputs.quix` Allow empty certificate for new cloud managed instances -### Dependency Updates +### Dependency updates - [#16804](https://github.com/influxdata/telegraf/pull/16804) `deps` Bump cloud.google.com/go/bigquery from 1.66.2 to 1.67.0 - [#16835](https://github.com/influxdata/telegraf/pull/16835) `deps` Bump cloud.google.com/go/monitoring from 1.24.0 to 1.24.2 @@ -326,11 +442,11 @@ The `telegraf config migrate` command might be able to help with the migration. 
## v1.34.2 {date="2025-04-14"} -### Bugfixes +### Bug fixes - [#16375](https://github.com/influxdata/telegraf/pull/16375) `aggregators` Handle time drift when calculating aggregation windows -### Dependency Updates +### Dependency updates - [#16689](https://github.com/influxdata/telegraf/pull/16689) `deps` Bump cloud.google.com/go/pubsub from 1.45.3 to 1.48.0 - [#16769](https://github.com/influxdata/telegraf/pull/16769) `deps` Bump cloud.google.com/go/storage from 1.50.0 to 1.51.0 @@ -376,7 +492,7 @@ The `telegraf config migrate` command might be able to help with the migration. ## v1.34.1 {date="2025-03-24"} -### Bugfixes +### Bug fixes - [#16638](https://github.com/influxdata/telegraf/pull/16638) `agent` Condense plugin source information table when multiple plugins in same file - [#16674](https://github.com/influxdata/telegraf/pull/16674) `inputs.tail` Do not seek on pipes @@ -385,7 +501,7 @@ The `telegraf config migrate` command might be able to help with the migration. - [#16625](https://github.com/influxdata/telegraf/pull/16625) `outputs.sql` Allow to disable timestamp column - [#16682](https://github.com/influxdata/telegraf/pull/16682) `secrets` Make 'insufficient lockable memory' warning work on BSDs -### Dependency Updates +### Dependency updates - [#16612](https://github.com/influxdata/telegraf/pull/16612) `deps` Bump github.com/PaesslerAG/gval from 1.2.2 to 1.2.4 - [#16650](https://github.com/influxdata/telegraf/pull/16650) `deps` Bump github.com/aws/smithy-go from 1.22.2 to 1.22.3 @@ -438,7 +554,7 @@ The `telegraf config migrate` command might be able to help with the migration. 
- [#16214](https://github.com/influxdata/telegraf/pull/16214) `processors.converter` Add support for base64 encoded IEEE floats - [#16497](https://github.com/influxdata/telegraf/pull/16497) `processors.template` Add sprig function for templates -### Bugfixes +### Bug fixes - [#16542](https://github.com/influxdata/telegraf/pull/16542) `inputs.gnmi` Handle path elements without name but with keys correctly - [#16606](https://github.com/influxdata/telegraf/pull/16606) `inputs.huebridge` Cleanup and fix linter issues @@ -446,7 +562,7 @@ The `telegraf config migrate` command might be able to help with the migration. - [#16555](https://github.com/influxdata/telegraf/pull/16555) `outputs.opensearch` Use correct pipeline name while creating bulk-indexers - [#16557](https://github.com/influxdata/telegraf/pull/16557) `serializers.prometheus` Use legacy validation for metric name -### Dependency Updates +### Dependency updates - [#16576](https://github.com/influxdata/telegraf/pull/16576) `deps` Bump github.com/Azure/azure-sdk-for-go/sdk/azidentity from 1.8.1 to 1.8.2 - [#16553](https://github.com/influxdata/telegraf/pull/16553) `deps` Bump github.com/Azure/go-autorest/autorest from 0.11.29 to 0.11.30 @@ -469,7 +585,7 @@ The `telegraf config migrate` command might be able to help with the migration. thus might break existing queries. Furthermore, the tag modification might increase cardinality in your database. -### Bugfixes +### Bug fixes - [#16546](https://github.com/influxdata/telegraf/pull/16546) `agent` Add authorization and user-agent when watching remote configs - [#16507](https://github.com/influxdata/telegraf/pull/16507) `inputs.gnmi` Allow to disable using first namespace as origin @@ -478,7 +594,7 @@ The `telegraf config migrate` command might be able to help with the migration. 
- [#16539](https://github.com/influxdata/telegraf/pull/16539) `logging` Handle closing correctly and fix tests - [#16535](https://github.com/influxdata/telegraf/pull/16535) `processors.execd` Detect line-protocol parser correctly -### Dependency Updates +### Dependency updates - [#16506](https://github.com/influxdata/telegraf/pull/16506) `deps` Bump github.com/ClickHouse/clickhouse-go/v2 from 2.30.1 to 2.30.3 - [#16502](https://github.com/influxdata/telegraf/pull/16502) `deps` Bump github.com/antchfx/xmlquery from 1.4.1 to 1.4.4 @@ -505,7 +621,7 @@ The `telegraf config migrate` command might be able to help with the migration. become an (unsigned) integer when parsing raw-packets' headers especially with SFlow v5 input. Please watch out for type-conflicts on the output side! -### Bugfixes +### Bug fixes - [#16477](https://github.com/influxdata/telegraf/pull/16477) `agent` Avoid panic by checking for skip_processors_after_aggregators - [#16489](https://github.com/influxdata/telegraf/pull/16489) `agent` Set `godebug x509negativeserial=1` as a workaround @@ -515,7 +631,7 @@ The `telegraf config migrate` command might be able to help with the migration. - [#16472](https://github.com/influxdata/telegraf/pull/16472) `outputs.sql` Fix insert into ClickHouse - [#16454](https://github.com/influxdata/telegraf/pull/16454) `service` Set address to prevent orphaned dbus-session processes -### Dependency Updates +### Dependency updates - [#16442](https://github.com/influxdata/telegraf/pull/16442) `deps` Bump cloud.google.com/go/storage from 1.47.0 to 1.50.0 - [#16414](https://github.com/influxdata/telegraf/pull/16414) `deps` Bump github.com/Azure/azure-sdk-for-go/sdk/azidentity from 1.7.0 to 1.8.1 @@ -550,7 +666,7 @@ The `telegraf config migrate` command might be able to help with the migration. `false`! To silence the warning and use the future default behavior, please explicitly set the option to `true`. 
-### Bugfixes +### Bug fixes - [#16290](https://github.com/influxdata/telegraf/pull/16290) `agent` Skip initialization of second processor state if requested - [#16377](https://github.com/influxdata/telegraf/pull/16377) `inputs.intel_powerstat` Fix option removal version @@ -559,7 +675,7 @@ The `telegraf config migrate` command might be able to help with the migration. - [#16388](https://github.com/influxdata/telegraf/pull/16388) `outputs.influxdb_v2` Fix panic and API error handling - [#16289](https://github.com/influxdata/telegraf/pull/16289) `outputs.remotefile` Handle tracking metrics correctly -### Dependency Updates +### Dependency updates - [#16344](https://github.com/influxdata/telegraf/pull/16344) `deps` Bump cloud.google.com/go/bigquery from 1.64.0 to 1.65.0 - [#16283](https://github.com/influxdata/telegraf/pull/16283) `deps` Bump cloud.google.com/go/monitoring from 1.21.1 to 1.22.0 @@ -614,7 +730,7 @@ The `telegraf config migrate` command might be able to help with the migration. - [#15883](https://github.com/influxdata/telegraf/pull/15883) `outputs` Only copy metric if its not filtered out - [#15893](https://github.com/influxdata/telegraf/pull/15893) `serializers.prometheusremotewrite` Log metric conversion errors -### Bugfixes +### Bug fixes - [#16248](https://github.com/influxdata/telegraf/pull/16248) `inputs.netflow` Decode flags in TCP and IP headers correctly - [#16257](https://github.com/influxdata/telegraf/pull/16257) `inputs.procstat` Handle running processes correctly across multiple filters @@ -622,7 +738,7 @@ The `telegraf config migrate` command might be able to help with the migration. 
- [#16255](https://github.com/influxdata/telegraf/pull/16255) `logging` Clean up extra empty spaces when redirectLogger is used - [#16274](https://github.com/influxdata/telegraf/pull/16274) `logging` Fix duplicated prefix and attrMsg in log message when redirectLogger is used -### Dependency Updates +### Dependency updates - [#16232](https://github.com/influxdata/telegraf/pull/16232) `deps` Bump cloud.google.com/go/bigquery from 1.63.1 to 1.64.0 - [#16235](https://github.com/influxdata/telegraf/pull/16235) `deps` Bump cloud.google.com/go/storage from 1.43.0 to 1.47.0 @@ -649,7 +765,7 @@ The `telegraf config migrate` command might be able to help with the migration. possible to avoid invalid values and parsing errors with the v3 XML statistics._ -### Bugfixes +### Bug fixes - [#16123](https://github.com/influxdata/telegraf/pull/16123) `agent` Restore setup order of stateful plugins to `Init()` then `SetState()` - [#16111](https://github.com/influxdata/telegraf/pull/16111) `common.socket` Make sure the scanner buffer matches the read-buffer size @@ -662,7 +778,7 @@ The `telegraf config migrate` command might be able to help with the migration. - [#16145](https://github.com/influxdata/telegraf/pull/16145) `inputs.snmp_trap` Remove timeout deprecation - [#16108](https://github.com/influxdata/telegraf/pull/16108) `logger` Avoid setting the log-format default too early -### Dependency Updates +### Dependency updates - [#16093](https://github.com/influxdata/telegraf/pull/16093) `deps` Bump cloud.google.com/go/pubsub from 1.42.0 to 1.45.1 - [#16175](https://github.com/influxdata/telegraf/pull/16175) `deps` Bump github.com/aws/aws-sdk-go-v2/credentials from 1.17.37 to 1.17.44 @@ -684,7 +800,7 @@ The `telegraf config migrate` command might be able to help with the migration. 
## v1.32.2 {date="2024-10-28"} -### Bugfixes +### Bug fixes - [#15966](https://github.com/influxdata/telegraf/pull/15966) `agent` Use a unique WAL file for plugin instances of the same type - [#16074](https://github.com/influxdata/telegraf/pull/16074) `inputs.kafka_consumer` Fix deadlock @@ -695,7 +811,7 @@ The `telegraf config migrate` command might be able to help with the migration. - [#15968](https://github.com/influxdata/telegraf/pull/15968) `outputs.remotefile` Create a new serializer instance per output file - [#16014](https://github.com/influxdata/telegraf/pull/16014) `outputs.syslog` Trim field-names belonging to explicit SDIDs correctly -### Dependency Updates +### Dependency updates - [#15992](https://github.com/influxdata/telegraf/pull/15992) `deps` Bump cloud.google.com/go/bigquery from 1.62.0 to 1.63.1 - [#16056](https://github.com/influxdata/telegraf/pull/16056) `deps` Bump github.com/Azure/azure-sdk-for-go/sdk/azcore from 1.14.0 to 1.16.0 @@ -727,7 +843,7 @@ The `telegraf config migrate` command might be able to help with the migration. users as it is an API change; all serializers in Telegraf are already ported to the new framework. If you experience any issues creating serializers, [contact us](/telegraf/v1/#bug-reports-and-feedback). -### Bugfixes +### Bug fixes - [#15969](https://github.com/influxdata/telegraf/pull/15969) `agent` Fix buffer not flushing if all metrics are written - [#15937](https://github.com/influxdata/telegraf/pull/15937) `config` Correctly print removal version info @@ -740,7 +856,7 @@ The `telegraf config migrate` command might be able to help with the migration. 
- [#15921](https://github.com/influxdata/telegraf/pull/15921) `parsers.avro` Add mutex to cache access - [#15965](https://github.com/influxdata/telegraf/pull/15965) `processors.aws_ec2` Remove leading slash and cancel worker only if it exists -### Dependency Updates +### Dependency updates - [#15932](https://github.com/influxdata/telegraf/pull/15932) `deps` Bump cloud.google.com/go/monitoring from 1.20.2 to 1.21.1 - [#15863](https://github.com/influxdata/telegraf/pull/15863) `deps` Bump github.com/Azure/azure-kusto-go from 0.15.3 to 0.16.1 @@ -834,7 +950,7 @@ The `telegraf config migrate` command might be able to help with the migration. - [#15697](https://github.com/influxdata/telegraf/pull/15697) `parsers.value` Add base64 datatype - [#15795](https://github.com/influxdata/telegraf/pull/15795) `processors.aws_ec2` Allow to use instance metadata -### Bugfixes +### Bug fixes - [#15661](https://github.com/influxdata/telegraf/pull/15661) `agent` Fix buffer directory config and document - [#15788](https://github.com/influxdata/telegraf/pull/15788) `inputs.kinesis_consumer` Honor the configured endpoint @@ -845,7 +961,7 @@ The `telegraf config migrate` command might be able to help with the migration. - [#15615](https://github.com/influxdata/telegraf/pull/15615) `outputs.remotefile` Resolve linter not checking error - [#15740](https://github.com/influxdata/telegraf/pull/15740) `serializers.template` Unwrap metrics if required -### Dependency Updates +### Dependency updates - [#15829](https://github.com/influxdata/telegraf/pull/15829) `deps` Bump github.com/BurntSushi/toml from 1.3.2 to 1.4.0 - [#15775](https://github.com/influxdata/telegraf/pull/15775) `deps` Bump github.com/aws/aws-sdk-go-v2/feature/ec2/imds from 1.16.11 to 1.16.12 @@ -874,7 +990,7 @@ The `telegraf config migrate` command might be able to help with the migration. 
## v1.31.3 {date="2024-08-12"} -### Bugfixes +### Bug fixes - [#15552](https://github.com/influxdata/telegraf/pull/15552) `inputs.chrony` Use DGRAM for the unix socket - [#15667](https://github.com/influxdata/telegraf/pull/15667) `inputs.diskio` Print warnings once, add details to messages @@ -883,7 +999,7 @@ The `telegraf config migrate` command might be able to help with the migration. - [#15724](https://github.com/influxdata/telegraf/pull/15724) `inputs.smartctl` Use --scan-open instead of --scan to provide correct device type info - [#15649](https://github.com/influxdata/telegraf/pull/15649) `inputs.tail` Prevent deadlock when closing and max undelivered lines hit -### Dependency Updates +### Dependency updates - [#15720](https://github.com/influxdata/telegraf/pull/15720) `deps` Bump Go from v1.22.5 to v1.22.6 - [#15683](https://github.com/influxdata/telegraf/pull/15683) `deps` Bump cloud.google.com/go/bigquery from 1.61.0 to 1.62.0 @@ -907,7 +1023,7 @@ The `telegraf config migrate` command might be able to help with the migration. ## v1.31.2 {date="2024-07-22"} -### Bugfixes +### Bug fixes - [#15589](https://github.com/influxdata/telegraf/pull/15589) `common.socket` Switch to context to simplify closing - [#15601](https://github.com/influxdata/telegraf/pull/15601) `inputs.ping` Check addr length to avoid crash @@ -915,7 +1031,7 @@ The `telegraf config migrate` command might be able to help with the migration. 
- [#15586](https://github.com/influxdata/telegraf/pull/15586) `parsers.xpath` Allow resolving extensions - [#15630](https://github.com/influxdata/telegraf/pull/15630) `tools.custom_builder` Handle multiple instances of the same plugin correctly -### Dependency Updates +### Dependency updates - [#15582](https://github.com/influxdata/telegraf/pull/15582) `deps` Bump cloud.google.com/go/storage from 1.41.0 to 1.42.0 - [#15623](https://github.com/influxdata/telegraf/pull/15623) `deps` Bump cloud.google.com/go/storage from 1.42.0 to 1.43.0 @@ -938,7 +1054,7 @@ For versions earlier than v1.13 and earlier see ## v1.31.1 {date="2024-07-01"} -### Bugfixes +### Bug fixes - [#15488](https://github.com/influxdata/telegraf/pull/15488) `agent` Ignore startup-errors in test mode - [#15568](https://github.com/influxdata/telegraf/pull/15568) `inputs.chrony` Handle ServerStats4 response @@ -949,7 +1065,7 @@ For versions earlier than v1.13 and earlier see - [#15514](https://github.com/influxdata/telegraf/pull/15514) `logging` Add back constants for backward compatibility - [#15531](https://github.com/influxdata/telegraf/pull/15531) `secretstores.oauth2` Ensure endpoint params is not nil -### Dependency Updates +### Dependency updates - [#15483](https://github.com/influxdata/telegraf/pull/15483) `deps` Bump cloud.google.com/go/monitoring from 1.18.1 to 1.19.0 - [#15559](https://github.com/influxdata/telegraf/pull/15559) `deps` Bump github.com/Azure/azure-kusto-go from 0.15.2 to 0.15.3 @@ -1643,7 +1759,7 @@ can help with migrating to newer plugins. - Avoid negative refcounts for tracking metrics - Maintain tracking information post-apply -### Dependency Updates +### Dependency updates - Update `cloud.google.com/go/bigquery` from 1.56.0 to 1.57.1 - Update `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs` from 1.26.0 to 1.27.2 @@ -1670,7 +1786,7 @@ can help with migrating to newer plugins. - JSON v2 (`parsers.json_v2`): Log inner errors. 
- s7comm (`inputs.s7comm`): Truncate strings to reported length. -### Dependency Updates +### Dependency updates - Update `github.com/gosnmp/gosnmp` from 1.35.1-0.20230602062452-f30602b8dad6 to 1.36.1. - Update `github.com/Masterminds/semver/v3` from 3.2.0 to 3.2.1. @@ -1701,7 +1817,7 @@ can help with migrating to newer plugins. - Parse metrics correctly on FreeBSD 14. - Support gathering metrics on zfs 2.2.0 and later. -### Dependency Updates +### Dependency updates - Update `cloud.google.com/go/storage` from 1.30.1 to 1.34.1. - Update `github.com/aws/aws-sdk-go-v2/config` from 1.18.42 to 1.19.1. @@ -1737,7 +1853,7 @@ can help with migrating to newer plugins. - s7comm (`inputs.s7comm`): Allow PDU-size to be set as config option. - Vault (`inputs.vault`): Use http client to handle redirects correctly. -### Dependency Updates +### Dependency updates - Update `github.com/apache/arrow/go/v13` from 13.0.0-git to 13.0.0. - Update `github.com/google/cel-go` from 0.14.1-git to 0.18.1. @@ -1779,7 +1895,7 @@ can help with migrating to newer plugins. - systemd Units `inputs.systemd_units`): Add missing upstream states. - Template (`processors.template`): Handle tracking metrics correctly. -### Dependency Updates +### Dependency updates - Update `github.com/aliyun/alibaba-cloud-sdk-go` from 1.62.470 to 1.62.563. - Update `github.com/aws/aws-sdk-go-v2/config` from 1.18.27 to 1.18.42. @@ -1795,7 +1911,7 @@ can help with migrating to newer plugins. ## v1.28.1 {date="2023-09-12"} -### Bugfixes +### Bug fixes - Packaging: Revert permission change on package configs - Redis (`inputs.redis`): Fix password typo @@ -1895,7 +2011,7 @@ can help with migrating to newer plugins. 
- Parser (`processors.parser`) Allow also non-string fields - Template (`processors.template`): Unify template metric -### Bugfixes +### Bug fixes - Packaging: Change the systemd KillMode from control-group to mixed - AMQP Consumer (`inputs.amqp_consumer`): Print error on connection failure @@ -1912,7 +2028,7 @@ can help with migrating to newer plugins. - Allow sqlite on Windows (amd64 and arm64) - Move conversion_style config option to the right place of sample config -### Dependency Updates +### Dependency updates - Update `github.com/aws/aws-sdk-go-v2/service/kinesis` from 1.18.2 to 1.18.5. - Update `github.com/hashicorp/consul/api` from 1.20.0 to 1.24.0. @@ -2068,7 +2184,7 @@ can help with migrating to newer plugins. [#9617](https://github.com/golang/go/issues/9617) or [#56528](https://github.com/golang/go/issues/56528)). If you worked around that issue, please remove the workaround before using v1.27+. In case you - experience issues with abbreviated timezones please file an issue! + experience issues with abbreviated timezones please file an issue. - **Internal Parser methods**: Removal of old-style parser creation. This should not directly affect users as it is an API change. All parsers in Telegraf are already ported to the new framework. If you experience any @@ -2147,7 +2263,7 @@ can help with migrating to newer plugins. - Prometheus Remote (`serializer.prometheusremote`): Improve performance - Test (`test`): Allow to capture all messages during test -### Bugfixes +### Bug fixes - Cloud PubSub (`inputs.cloud_pubsub`): Fix gzip decompression. - GNMI (`inputs.gnmi`): @@ -2168,7 +2284,7 @@ can help with migrating to newer plugins. - Lookup (`processors.lookup`): Do not strip tracking info. - Influx (`serializers.influx`): Restore disabled uint support by default. -### Dependency Updates +### Dependency updates - Update cloud.google.com/go/monitoring from 1.13.0 to 1.14.0. - Update github.com/aliyun/alibaba-cloud-sdk-go from 1.62.193 to 1.62.337. 
@@ -2390,7 +2506,7 @@ can help with migrating to newer plugins. - Add support for additional input plugins. - Convert many output plugins. -### Bugfixes +### Bug fixes - Allow graceful shutdown on interrupt (e.g. Ctrl-C). - Only rotate log on SIGHUP if needed. @@ -2400,7 +2516,7 @@ can help with migrating to newer plugins. - ethtool (`inputs.ethtool`): Close namespace file to prevent crash. - statsd (`inputs.statsd`): On close, verify listener is not nil. -### Dependency Updates +### Dependency updates - Update cloud.google.com/go/storage from 1.28.1 to 1.29.0. - Update github.com/Azure/go-autorest/autorest/adal from 0.9.21 to 0.9.22. @@ -2419,7 +2535,7 @@ can help with migrating to newer plugins. ## v1.25.3 {date="2023-02-27"} -### Bugfixes +### Bug fixes - Fix reload config on config update/SIGHUP. - Bond (`inputs.bond`): Reset slave stats for each interface. @@ -2428,7 +2544,7 @@ can help with migrating to newer plugins. - XPath (`parsers.xpath`): Fix panic for JSON name expansion. - JSON (`serializers.json`): Fix stateful transformations. -### Dependency Updates +### Dependency updates - Update cloud.google.com/go/pubsub from 1.27.1 to 1.28.0. - Update github.com/containerd/containerd from 1.6.8 to 1.6.18. @@ -2462,7 +2578,7 @@ can help with migrating to newer plugins. - Prometheus Client (`outputs.prometheus_client`): Expire with ticker, not add/collect. - Secret Stores: Check store id format and presence. -### Dependency Updates +### Dependency updates - Update cloud.google.com/go/bigquery from 1.44.0 to 1.45.0. - Update github.com/99designs/keyring from 1.2.1 to 1.2.2. - Update github.com/antchfx/xmlquery from 1.3.12 to 1.3.15. @@ -2512,7 +2628,7 @@ can help with migrating to newer plugins. - Fix handling of "id" and print failing secret-store. - Fix handling of TOML strings. -### Dependency Updates +### Dependency updates - Update cloud.google.com/go/storage from 1.23.0 to 1.28.1. - Update github.com/antchfx/jsonquery from 1.3.0 to 1.3.1. 
- Update github.com/aws/aws-sdk-go-v2 from 1.17.1 to 1.17.3. @@ -2632,7 +2748,7 @@ can help with migrating to newer plugins. - Azure Data Explorer (`outputs.azure_data_explorer`): Update test call to `NewSerializer`. - Parser processor (`processors.parser`): Handle empty metric names correctly. -### Dependency Updates +### Dependency updates - Update `github.com/aliyun/alibaba-cloud-sdk-go` from 1.61.1836 to 1.62.77 - Update `github.com/gosnmp/gosnmp` from 1.34.0 to 1.35.0 - Update `OpenTelemetry` from 0.2.30 to 0.2.33 @@ -2654,7 +2770,7 @@ can help with migrating to newer plugins. - Prometheus output (`outputs.prometheus`): Expire metrics correctly during adds. - Yandex Cloud Monitoring (`outputs.yandex_cloud_monitoring`): Catch int64 values. -### Dependency Updates +### Dependency updates - Update `github.com/aliyun/alibaba-cloud-sdk-go` from 1.61.1818 to 1.61.1836 - Update `github.com/prometheus/client_golang` from 1.13.0 to 1.13.1 - Update `github.com/aws/aws-sdk-go-v2/service/timestreamwrite` from 1.13.12 to 1.14.5 @@ -2695,7 +2811,7 @@ can help with migrating to newer plugins. ### Features - Support sections in markdown. -### Dependency Updates +### Dependency updates - Update github.com/snowflakedb/gosnowflake from 1.6.2 to 1.6.13 - Update github.com/sensu/sensu-go/api/core/v2 from 2.14.0 to 2.15.0 - Update github.com/gofrs/uuid from 4.2.0& to 4.3.0 @@ -2931,7 +3047,7 @@ Older versions can be manually reverted on a per-plugin basis using the `tls_min - Add coralogix dialect to opentelemetry -### Dependency Updates +### Dependency updates - Update `github.com/testcontainers/testcontainers-go` from 0.12.0 to 0.13.0. - Update `github.com/apache/thrift` from 0.15.0 to 0.16.0. @@ -2983,7 +3099,7 @@ Older versions can be manually reverted on a per-plugin basis using the `tls_min - Stackdriver (`stackdriver`) Handle when no buckets available. -### Dependency Updates +### Dependency updates - Bump github.com/testcontainers/testcontainers-go from 0.12.0 to 0.13.0. 
- Bump github.com/apache/thrift from 0.15.0 to 0.16.0. diff --git a/data/products.yml b/data/products.yml index a6d4060ba..44b530c43 100644 --- a/data/products.yml +++ b/data/products.yml @@ -64,7 +64,7 @@ influxdb3_cloud_dedicated: list_order: 3 latest: cloud-dedicated link: "https://www.influxdata.com/contact-sales-cloud-dedicated/" - latest_cli: 2.10.2 + latest_cli: 2.10.3 placeholder_host: cluster-id.a.influxdb.io ai_sample_questions: - How do I migrate from InfluxDB v1 to InfluxDB Cloud Dedicated? @@ -143,7 +143,7 @@ telegraf: versions: [v1] latest: v1.35 latest_patches: - v1: 1.35.0 + v1: 1.35.3 ai_sample_questions: - How do I install and configure Telegraf? - How do I write a custom Telegraf plugin? diff --git a/static/downloads/clustered-release-artifacts/20250721-1796368/app-instance-schema.json b/static/downloads/clustered-release-artifacts/20250721-1796368/app-instance-schema.json new file mode 100644 index 000000000..d953deceb --- /dev/null +++ b/static/downloads/clustered-release-artifacts/20250721-1796368/app-instance-schema.json @@ -0,0 +1,3255 @@ +{ + "additionalProperties": false, + "properties": { + "apiVersion": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "metadata": { + "type": "object" + }, + "spec": { + "additionalProperties": false, + "properties": { + "imagePullSecrets": { + "items": { + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "package": { + "properties": { + "apiVersion": { + "type": "string" + }, + "image": { + "type": "string" + }, + "spec": { + "additionalProperties": false, + "properties": { + "admin": { + "additionalProperties": false, + "description": "OAuth configuration for restricting access to Clustered", + "properties": { + "dsn": { + "additionalProperties": false, + "description": "The dsn for the postgres compatible database", + "examples": [ + "value: ...", + "valueFrom: ..." 
+ ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "identityProvider": { + "description": "The identity provider to be used e.g. \"keycloak\", \"auth0\", \"azure\"", + "type": "string" + }, + "internalSigningKey": { + "description": "Internal JWT secrets", + "properties": { + "id": { + "additionalProperties": false, + "description": "random ID that uniquely identifies this keypair. Generally a UUID.", + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "privateKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." 
+ ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "publicKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." 
+ ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "required": [ + "id", + "privateKey", + "publicKey" + ], + "type": "object" + }, + "jwksEndpoint": { + "description": "The JWKS endpoint given by your identity provider. This should look like \"https://{identityProviderDomain}/.well-known/jwks.json\"", + "type": "string" + }, + "users": { + "description": "The list of users to grant access to Clustered via influxctl", + "item": { + "properties": { + "email": { + "description": "The email of the user within your identity provider.", + "type": "string" + }, + "firstName": { + "description": "The first name of the user that will be used in Clustered.", + "type": "string" + }, + "id": { + "description": "The identifier of the user within your identity provider.", + "type": "string" + }, + "lastName": { + "description": "The last name of the user that will be used in Clustered.", + "type": "string" + }, + "userGroups": { + "description": "Optional list of user groups to assign to the user, rather than the default groups. The following groups are currently supported: Admin, Auditor, Member", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "firstName", + "lastName", + "email", + "id" + ], + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "catalog": { + "additionalProperties": false, + "description": "Configuration for the postgres-compatible database that is used as a catalog/metadata store", + "properties": { + "dsn": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." 
+ ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "type": "object" + }, + "components": { + "additionalProperties": false, + "properties": { + "catalog": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "common": { + "additionalProperties": false, + "description": "Common configuration to all components. They will be overridden by component-specific configuration.\nAny value defined in the component-specific settings will be merged with values defined in the common settings.\n", + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "compactor": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "garbage-collector": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "granite": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "ingester": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "querier": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "router": { + "additionalProperties": false, + "properties": { + "log": { + "additionalProperties": false, + "description": "Configuring logging parameters.\n", + "properties": { + "filters": { + "description": "InfluxDB 3.0 logging verbosity can be configured in fine grained way using a list\nof \"log filters\".\n\nEach log filter is an expression in the form of:\n\ntarget=level\n\nWhere \"target\" matches the \"target\" field in the logs, while level can be one of\nerror, warn, info, debug and trace.\n\nError and warn are less verbose while debug and trace are more verbose.\n\nYou can omit target and just specify a level. In that case the level \nwill set the maximum level for all events that are not enabled by other filters.\n\nIf a filter for a given target appears again in the filter list, it will override\na previous occurrence. 
This allows you to override the default filters.\n\nThe full documentation for the log filter syntax can be found at:\nhttps://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html\n", + "item": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { }, + "properties": { + "nodeAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object" + } + }, + "type": "object" + }, + "podAntiAffinity": { + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "items": { + "type": "object" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "containers": { + "default": { }, + "patternProperties": { + ".": { + "additionalProperties": false, + "properties": { + "env": { + "default": { }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "labels": { + "default": { }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "nodeSelector": { + "default": { }, + "patternProperties": { + ".": { + "type": "string" + } + }, + "type": "object" + }, + "tolerations": { + "default": [ ], + "description": "Pod tolerations to place onto this IOx component to affect scheduling decisions.\n\nFor further details, consult the Kubernetes documentation\nhttps://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration\n", + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "egress": { + "additionalProperties": false, + "description": "Configuration for how external resources are accessed from Clustered components", + "properties": { + "customCertificates": { + "additionalProperties": false, + "description": "Custom certificate or CA Bundle. Used to verify outbound connections performed by influxdb, such as OIDC servers,\npostgres databases, or object store API endpoints.\n\nEquivalent to the SSL_CERT_FILE environment variable used by OpenSSL.\n", + "examples": [ + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "type": "object" + }, + "featureFlags": { + "description": "An array of feature flag names. Feature flags (aka feature gates) control features that\nhave not yet been released. They can be experimental to varying degrees (alpha, beta, rc).\n", + "properties": { + "clusteredAuth": { + "description": "Use the authorization service optimized for Clustered deployments.\n\nThis authorization service communicates directly with the locally deployed\ngranite service, which allows it to become ready to validate access tokens\npromptly on pod start up. It also offers more control over the invalidation\nschedule for cached tokens, and may slightly reduce query latency.\n", + "type": "string" + }, + "enableDefaultResourceLimits": { + "description": "Enable Default Resource Limits for Containers\n\nWhen enabled, all containers will have `requests.cpu`, `requests.memory`,\n`limits.cpu`, and `limits.memory` defined. This is particularily useful\nfor namespaces that include a ResourceQuota. When enabling this feature\nflag, make sure to specify the resource limits and requests for the IOx\ncomponents as the defaults may not be properly sized for your cluster.\n", + "type": "string" + }, + "grafana": { + "description": "An experimental, minimal installation of a Grafana Deployment to use alongside Clustered.\n\nOnly this flag if you do not have your own metric visualisation setup and wish\nto experiment with Clustered. 
It is tested with Grafana v12.0.1-security-01.\n", + "type": "string" + }, + "localTracing": { + "description": "Experimental installation of Jaeger for tracing capabilities with InfluxDB 3.\n\nOnly enable this flag when instructed to do so by the support team.\n", + "type": "string" + }, + "noGrpcProbes": { + "description": "Remove gRPC liveness/readiness probes for debug service", + "type": "string" + }, + "noMinReadySeconds": { + "description": "Experimental flag for Kubernetes clusters that are lower than v1.25.\n\nNo longer uses minReadySeconds for workloads, this will cause downtime.\n", + "type": "string" + }, + "noPrometheus": { + "description": "Disable the install of the default bare-bones Prometheus StatefulSet installation alongside Clustered.\n\nThis feature flag is useful when you already have a monitoring setup and wish to utilise it.\n\nNOTE: In future releases, the `debug-service` will have a partial, minor, dependency on a Prometheus instance being available.\nIf you do not wish for this service to utilise your own installation of Prometheus, disabling it here may cause issues.\n", + "type": "string" + }, + "serviceMonitor": { + "description": "Deprecated. Use observability.serviceMonitor instead.\n\nCreate a ServiceMonitor resource for InfluxDB3.\n", + "type": "string" + }, + "useLicensedBinaries": { + "description": "This flag is deprecated and no longer has any effect. 
Licensed binaries are now always used.\n", + "type": "string" + } + }, + "type": "array" + }, + "hostingEnvironment": { + "additionalProperties": false, + "description": "Environment or cloud-specific configuration elements which are utilised by InfluxDB Clustered.", + "properties": { + "aws": { + "additionalProperties": false, + "description": "Configuration for hosting on AWS.", + "properties": { + "eksRoleArn": { + "default": "", + "description": "IAM role ARN to apply to the IOx ServiceAccount, used with EKS IRSA.", + "type": "string" + } + }, + "type": "object" + }, + "gke": { + "additionalProperties": false, + "description": "Configuration for hosting on Google Kubernetes Engine (GKE).", + "properties": { + "workloadIdentity": { + "additionalProperties": false, + "description": "Authentication via GKE workload identity. This will annotate the relevant Kubernetes ServiceAccount objects.\nSee https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity for further details.\n", + "properties": { + "serviceAccountEmail": { + "description": "Google IAM Service Account email, this should be in the format \"NAME@PROJECT_ID.iam.gserviceaccount.com\".", + "type": "string" + } + }, + "required": [ + "serviceAccountEmail" + ], + "type": "object" + } + }, + "type": "object" + }, + "openshift": { + "additionalProperties": false, + "description": "Configuration for hosting on Red Hat OpenShift.", + "properties": { }, + "type": "object" + } + }, + "type": "object" + }, + "images": { + "description": "Manipulate how images are retrieved for Clustered. 
This is typically useful for air-gapped environments when you need to use an internal registry.", + "properties": { + "overrides": { + "description": "Override specific images using the contained predicate fields.\n\nThis takes precedence over the registryOverride field.\n", + "item": { + "description": "Remaps an image matching naming predicates\n", + "properties": { + "name": { + "description": "Naming predicate: the part of the image name that comes after the registry name, e.g.\nIf the image name is \"oci.influxdata.com/foo/bar:1234\", the name field matches \"foo/bar\"\n", + "type": "string" + }, + "newFQIN": { + "description": "Rewrite expression: when a naming predicate matches this image, rewrite the image reference\nusing this Fully Qualified Image Name. i.e. this replaces the whole registry/imagename:tag@digest\nparts of the input image reference.\n", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "registryOverride": { + "default": "", + "description": "Place a new registry prefix infront of all Clustered component images.\n\nThis is used when you wish to maintain the original registry path for images and simply relocate them underneath\nyour own registry.\n\nExample:\nregistryOverride: 'newReg' means 'myregistry/test' becomes 'newReg/myregistry/test'\n", + "type": "string" + } + }, + "type": "object" + }, + "ingesterStorage": { + "additionalProperties": false, + "description": "Storage configuration for the Clustered ingesters.", + "properties": { + "storage": { + "description": "A higher value provides more disk space for the Write-Ahead Log (WAL) to each ingester, allowing for a greater set of leading edge data to be maintained in-memory.\nThis also reduces the frequency of WAL rotations, leading to better query performance and less burden on the compactor.\n\nNote that at 90% capacity, an ingester will stop accepting writes in order to persist its active WAL into the configured object store as parquet files.\n", + "type": 
"string" + }, + "storageClassName": { + "default": "", + "type": "string" + } + }, + "required": [ + "storage" + ], + "type": "object" + }, + "ingress": { + "additionalProperties": false, + "description": "Configuration for how Clustered components are accessed.", + "properties": { + "grpc": { + "additionalProperties": false, + "description": "Configuration for components which utilise gRPC", + "properties": { + "className": { + "default": "", + "type": "string" + } + }, + "type": "object" + }, + "hosts": { + "description": "A number of hosts/domains to use as entrypoints within the Ingress resources.", + "type": "array" + }, + "http": { + "additionalProperties": false, + "description": "Configuration for components which utilise HTTP", + "properties": { + "className": { + "default": "", + "type": "string" + } + }, + "type": "object" + }, + "template": { + "additionalProperties": false, + "description": "Template to apply across configured Ingress-type resources.\nThis allows you to specify a range of third party annotations onto the created Ingress objects and/or\nalter the kind of Ingress you would like to use, e.g. 'Route'.\n", + "oneOf": [ + { + "properties": { + "apiVersion": { + "const": "networking.istio.io/v1beta1" + }, + "kind": { + "const": "Gateway" + }, + "selector": { + "default": { }, + "description": "This selector determines which Istio ingress gateway pods will be chosen\nto handle traffic for the created Gateway resources. 
A blank selector means that all\ngateway pods in the cluster will handle traffic.\n\nFor more details, see https://istio.io/latest/docs/reference/config/networking/gateway/#Gateway\n", + "type": "object" + } + }, + "required": [ + "apiVersion", + "kind" + ] + }, + { + "properties": { + "apiVersion": { + "enum": [ + "networking.k8s.io/v1", + "route.openshift.io/v1" + ], + "type": "string" + }, + "kind": { + "enum": [ + "Ingress", + "Route" + ], + "type": "string" + } + } + } + ], + "properties": { + "apiVersion": { + "default": "networking.k8s.io/v1", + "enum": [ + "networking.k8s.io/v1", + "route.openshift.io/v1", + "networking.istio.io/v1beta1" + ], + "type": "string" + }, + "kind": { + "default": "Ingress", + "enum": [ + "Ingress", + "Route", + "Gateway" + ], + "type": "string" + }, + "metadata": { + "additionalProperties": false, + "properties": { + "annotations": { + "default": { }, + "description": "Annotations to place onto the objects which enable ingress.", + "type": "object" + } + }, + "type": "object" + }, + "selector": { + "description": "Selector to specify which gateway deployment utilises the configured ingress configuration.\n\nNote that this is only for Istio Gateway, see https://istio.io/latest/docs/reference/config/networking/gateway/#Gateway for further details\n", + "type": "object" + } + }, + "type": "object" + }, + "tlsSecretName": { + "default": "", + "description": "Kubernetes Secret name which contains TLS certificates.\n\nIf you are using cert-manager, this is the name of the Secret to create containing certificates.\nNote that cert-manager is externally managed and is not a part of a Clustered configuration.\n", + "type": "string" + } + }, + "type": "object" + }, + "monitoringStorage": { + "additionalProperties": false, + "description": "Storage configuration for the Prometheus instance shipped alongside Clustered for basic monitoring purposes.", + "properties": { + "storage": { + "description": "The amount of storage to provision for the 
attached volume, e.g. \"10Gi\".", + "type": "string" + }, + "storageClassName": { + "default": "", + "type": "string" + } + }, + "required": [ + "storage" + ], + "type": "object" + }, + "objectStore": { + "additionalProperties": false, + "description": "Configuration for the backing object store of IOx.", + "oneOf": [ + { + "required": [ + "bucket", + "region" + ] + }, + { + "required": [ + "s3", + "bucket" + ] + }, + { + "required": [ + "azure", + "bucket" + ] + }, + { + "required": [ + "google", + "bucket" + ] + } + ], + "properties": { + "accessKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "allowHttp": { + "default": "false", + "type": "string" + }, + "azure": { + "additionalProperties": false, + "description": "Configuration for Azure Blob Storage.", + "properties": { + "accessKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." 
+ ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "account": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." 
+ ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or its key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "required": [ + "accessKey", + "account" + ], + "type": "object" + }, + "bucket": { + "type": "string" + }, + "endpoint": { + "default": "", + "type": "string" + }, + "google": { + "additionalProperties": false, + "description": "Configuration for Google Cloud Storage.", + "properties": { + "serviceAccountSecret": { + "additionalProperties": false, + "description": "Authentication via Google IAM Service Account credentials file using a Kubernetes Secret name and key.\nSee https://cloud.google.com/kubernetes-engine/docs/tutorials/authenticating-to-cloud-platform for further details.\n\nIf you wish to use GKE IAM annotations, refer to the hostingEnvironment section of the schema.\n", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "region": { + "default": "", + "description": "The region in which the bucket resides. This may not be required dependent on your object store provider.", + "type": "string" + }, + "s3": { + "additionalProperties": false, + "description": "Configuration for AWS S3 (compatible) object stores.", + "properties": { + "accessKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." 
+ ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + }, + "allowHttp": { + "description": "Allow the S3 client to accept insecure HTTP, as well as HTTPS connections to object store.", + "type": "string" + }, + "endpoint": { + "default": "", + "description": "S3 bucket region, see https://docs.aws.amazon.com/general/latest/gr/s3.html#s3_region for further details.", + "type": "string" + }, + "region": { + "description": "AWS region for the bucket, such as us-east-1.", + "type": "string" + }, + "secretKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." + ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "required": [ + "region" + ], + "type": "object" + }, + "secretKey": { + "additionalProperties": false, + "examples": [ + "value: ...", + "valueFrom: ..." + ], + "oneOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "valueFrom" + ] + } + ], + "properties": { + "value": { + "default": "", + "description": "Value", + "type": [ + "string", + "null" + ] + }, + "valueFrom": { + "additionalProperties": false, + "description": "Allows to source the value from configMaps or secrets", + "examples": [ + "configMapKeyRef: ...", + "secretKeyRef: ..." 
+ ], + "oneOf": [ + { + "required": [ + "configMapKeyRef" + ] + }, + { + "required": [ + "secretKeyRef" + ] + } + ], + "properties": { + "configMapKeyRef": { + "additionalProperties": false, + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "default": false, + "description": "Specify whether the ConfigMap or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + }, + "secretKeyRef": { + "additionalProperties": false, + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": [ + "string", + "null" + ] + }, + "optional": { + "description": "Specify whether the Secret or it's key must be defined", + "type": [ + "boolean", + "null" + ] + } + }, + "required": [ + "key", + "name" + ], + "type": [ + "object", + "null" + ] + } + }, + "type": [ + "object", + "null" + ] + } + }, + "type": "object" + } + }, + "type": "object" + }, + "observability": { + "additionalProperties": false, + "default": { }, + "description": "Configuration for gaining operational insight into Clustered components", + "properties": { + "retention": { + "default": "12h", + "description": "The retention period for prometheus", + "type": "string" + }, + "serviceMonitor": { + "additionalProperties": false, + "description": "Configure a ServiceMonitor resource to easily expose InfluxDB metrics via the Prometheus Operator.\nSee the Prometheus Operator documentation for usage:\nhttps://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md\n", + "properties": { + "fallbackScrapeProtocol": { + "default": null, + "description": "Specifies which protocol to use when scraping endpoints that return a blank or invalid Content-Type header.\n\nRequired for Prometheus v3.0.0+ only, which enforces Content-Type validation (unlike v2).\n\nFor most standard Prometheus metrics endpoints, including InfluxDB, use \"PrometheusText0.0.4\".\n", + "type": "string" + }, + "interval": { + "default": "30s", + "description": "A duration string that controls the length of time between scrape attempts, ex: '15s', or '1m'", + "type": "string" + }, + "scrapeTimeout": { + "default": null, + "description": "A duration string that controls the scrape timeout duration, ex: '10s'", + "type": "string" + } + }, + "required": [ ], + "type": "object" + } + }, + "type": "object" + }, + "resources": { + "additionalProperties": false, + "properties": { + "catalog": 
{ + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "4", + "type": "string" + }, + "memory": { + "default": "16Gi", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "compactor": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "8", + "type": "string" + }, + "memory": { + "default": "32Gi", + "type": "string" + }, + "replicas": { + "default": 1, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "garbage-collector": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the 
maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + }, + "replicas": { + "const": 1, + "description": "Replica configuration for the Garbage Collector.\nNOTE: This component does not support horizontal scaling at this time.\nRefer to https://docs.influxdata.com/influxdb/clustered/reference/internals/storage-engine/#garbage-collector-scaling-strategies\nfor more details.\n", + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "granite": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": "500M", + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "0.5", + "type": "string" + }, + "memory": { + "default": "500M", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "ingester": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + 
"additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "6", + "type": "string" + }, + "memory": { + "default": "24Gi", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "prometheus": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "500m", + "type": "string" + }, + "memory": { + "default": "512Mi", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "querier": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + 
}, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "8", + "type": "string" + }, + "memory": { + "default": "32Gi", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "router": { + "additionalProperties": false, + "description": "See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits", + "properties": { + "limits": { + "additionalProperties": false, + "description": "Limits describes the maximum amount of compute resources allowed.", + "properties": { + "cpu": { + "default": null, + "type": "string" + }, + "memory": { + "default": null, + "type": "string" + } + }, + "type": "object" + }, + "requests": { + "additionalProperties": false, + "description": "Requests describes the minimum amount of compute resources required.", + "properties": { + "cpu": { + "default": "1", + "type": "string" + }, + "memory": { + "default": "2Gi", + "type": "string" + }, + "replicas": { + "default": 3, + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "required": [ + "catalog", + "objectStore", + "ingesterStorage", + "monitoringStorage" + ], + "type": "object" + } + }, + "required": [ + "image", + "apiVersion" + ], + "type": "object" + }, + "pause": { + "default": false, + "type": "boolean" + } + }, + "type": "object" + }, + "status": { + "additionalProperties": true, + "type": "object" + } + }, + "type": "object" +} + diff --git a/static/downloads/clustered-release-artifacts/20250721-1796368/example-customer.yml b/static/downloads/clustered-release-artifacts/20250721-1796368/example-customer.yml new file mode 100644 index 000000000..0484330ed --- /dev/null +++ b/static/downloads/clustered-release-artifacts/20250721-1796368/example-customer.yml @@ -0,0 +1,342 
@@ +# yaml-language-server: $schema=app-instance-schema.json +apiVersion: kubecfg.dev/v1alpha1 +kind: AppInstance +metadata: + name: influxdb + namespace: influxdb +spec: + # One or more secrets that are used to pull the images from an authenticated registry. + # This will either be the secret provided to you, if using our registry, or a secret for your own registry + # if self-hosting the images. + imagePullSecrets: + - name: + package: + # The version of the clustered package that will be used. + # This determines the version of all of the individual components. + # When a new version of the product is released, this version should be updated and any + # new config options should be updated below. + image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20250721-1796368 + apiVersion: influxdata.com/v1alpha1 + spec: + # # Provides a way to pass down hosting environment specific configuration, such as a role ARN when using EKS IRSA. + # # This section contains three mutually-exclusive "blocks". Uncomment the block named after the hosting environment + # # you run: "aws", "openshift" or "gke". + # hostingEnvironment: + # # # Uncomment this block if you're running in EKS. + # # aws: + # # eksRoleArn: 'arn:aws:iam::111111111111:role/your-influxdb-clustered-role' + # # + # # # Uncomment this block if you're running inside OpenShift. + # # # Note: there are currently no OpenShift-specific parameters. You have to pass an empty object + # # # as a marker that you're choosing OpenShift as hosting environment. + # # openshift: {} + # # + # # # Uncomment this block if you're running in GKE: + # # gke: + # # # Authenticate to Google Cloud services via workload identity, this + # # # annotates the 'iox' ServiceAccount with the role name you specify. + # # # NOTE: This setting just enables GKE specific authentication mechanism, + # # # You still need to enable `spec.objectStore.google` below if you want to use GCS. 
+ # # workloadIdentity: + # # # Google Service Account name to use for the workload identity. + # # serviceAccountEmail: @.iam.gserviceaccount.com + catalog: + # A postgresql style DSN that points at a postgresql compatible database. + # eg: postgres://[user[:password]@][netloc][:port][/dbname][?param1=value1&...] + dsn: + valueFrom: + secretKeyRef: + name: + key: + + # images: + # # This can be used to override a specific image name with its FQIN + # # (Fully Qualified Image Name) for testing. eg. + # overrides: + # - name: influxdb2-artifacts/iox/iox + # newFQIN: mycompany/test-iox-build:aninformativetag + # + # # Set this variable to the prefix of your internal registry. This will be prefixed to all expected images. + # # eg. us-docker.pkg.dev/iox:latest => registry.mycompany.io/us-docker.pkg.dev/iox:latest + # registryOverride: + + objectStore: + # Bucket that the parquet files will be stored in + bucket: + + # Uncomment one of the following (s3, azure) + # to enable the configuration of your object store + s3: + # URL for S3 Compatible object store + endpoint: + + # Set to true to allow communication over HTTP (instead of HTTPS) + allowHttp: "false" + + # S3 Access Key + # This can also be provided as a valueFrom: secretKeyRef: + accessKey: + value: + + # S3 Secret Key + # This can also be provided as a valueFrom: secretKeyRef: + secretKey: + value: + + # This value is required for AWS S3, it may or may not be required for other providers. + region: + + # azure: + # Azure Blob Storage Access Key + # This can also be provided as a valueFrom: secretKeyRef: + # accessKey: + # value: + + # Azure Blob Storage Account + # This can also be provided as a valueFrom: secretKeyRef: + # account: + # value: + + # There are two main ways you can access a Google: + # + # a) GKE Workload Identity: configure workload identity in the top level `hostingEnvironment.gke` section. 
+ # b) Explicit service account secret (JSON) file: use the `serviceAccountSecret` field here + # + # If you pick (a) you may not need to uncomment anything else in this section, + # but you still need to tell influxdb that you intend to use Google Cloud Storage. + # so you need to specify an empty object. Uncomment the following line: + # + # google: {} + # + # + # If you pick (b), uncomment the following block: + # + # google: + # # If you're authenticating to Google Cloud service using a Service Account credentials file, as opposed + # # to using workload identity (see above) you need to provide a reference to a k8s secret containing the credentials file. + # serviceAccountSecret: + # # Kubernetes Secret name containing the credentials for a Google IAM Service Account. + # name: + # # The key within the Secret containing the credentials. + # key: + + # Parameters to tune observability configuration, such as Prometheus ServiceMonitor's. + observability: {} + # retention: 12h + # serviceMonitor: + # interval: 10s + # scrapeTimeout: 30s + + # Ingester pods have a volume attached. + ingesterStorage: + # (Optional) Set the storage class. This will differ based on the K8s environment and desired storage characteristics. + # If not set, the default storage class will be used. + # storageClassName: + # Set the storage size (minimum 2Gi recommended) + storage: + + # Monitoring pods have a volume attached. + monitoringStorage: + # (Optional) Set the storage class. This will differ based on the K8s environment and desired storage characteristics. + # If not set, the default storage class will be used. + # storageClassName: + # Set the storage size (minimum 10Gi recommended) + storage: + + # Uncomment the following block if using our provided Ingress. 
+ # + # We currently only support the ingress NGINX ingress controller: https://github.com/kubernetes/ingress-nginx + # + # ingress: + # hosts: + # # This is the host on which you will access Influxdb 3.0, for both reads and writes + # - + + # (Optional) + # The name of the Kubernetes Secret containing a TLS certificate, this should exist in the same namespace as the Clustered installation. + # If you are using cert-manager, enter a name for the Secret it should create. + # tlsSecretName: + + # http: + # # Usually you have only one ingress controller installed in a given cluster. + # # In case you have more than one, you have to specify the "class name" of the ingress controller you want to use + # className: nginx + + # grpc: + # # Usually you have only one ingress controller installed in a given cluster. + # # In case you have more than one, you have to specify the "class name" of the ingress controller you want to use + # className: nginx + # + # Enables specifying which 'type' of Ingress to use, alongside whether to place additional annotations + # onto those objects, this is useful for third party software in your environment, such as cert-manager. + # template: + # apiVersion: 'route.openshift.io/v1' + # kind: 'Route' + # metadata: + # annotations: + # 'example-annotation': 'annotation-value' + + # Enables specifying customizations for the various components in InfluxDB 3.0. + # components: + # # router: + # # template: + # # containers: + # # iox: + # # env: + # # INFLUXDB_IOX_MAX_HTTP_REQUESTS: "5000" + # # nodeSelector: + # # disktype: ssd + # # tolerations: + # # - effect: NoSchedule + # # key: example + # # operator: Exists + # # Common customizations for all components go in a pseudo-component called "common" + # # common: + # # template: + # # # Metadata contains custom annotations (and labels) to be added to a component. 
E.g.: + # # metadata: + # # annotations: + # # telegraf.influxdata.com/class: "foo" + + # Example of setting nodeAffinity for the querier component to ensure it runs on nodes with specific labels + # components: + # # querier: + # # template: + # # affinity: + # # nodeAffinity: + # # requiredDuringSchedulingIgnoredDuringExecution: + # # Node must have these labels to be considered for scheduling + # # nodeSelectorTerms: + # # - matchExpressions: + # # - key: required + # # operator: In + # # values: + # # - ssd + # # preferredDuringSchedulingIgnoredDuringExecution: + # # Scheduler will prefer nodes with these labels but they're not required + # # - weight: 1 + # # preference: + # # matchExpressions: + # # - key: preferred + # # operator: In + # # values: + # # - postgres + + # Example of setting podAntiAffinity for the querier component to ensure it runs on nodes with specific labels + # components: + # # querier: + # # template: + # # affinity: + # # podAntiAffinity: + # # requiredDuringSchedulingIgnoredDuringExecution: + # # Ensures that the pod will not be scheduled on a node if another pod matching the labelSelector is already running there + # # - labelSelector: + # # matchExpressions: + # # - key: app + # # operator: In + # # values: + # # - querier + # # topologyKey: "kubernetes.io/hostname" + # # preferredDuringSchedulingIgnoredDuringExecution: + # # Scheduler will prefer not to schedule pods together but may do so if necessary + # # - weight: 1 + # # podAffinityTerm: + # # labelSelector: + # # matchExpressions: + # # - key: app + # # operator: In + # # values: + # # - querier + # # topologyKey: "kubernetes.io/hostname" + + # Uncomment the following block to tune the various pods for their cpu/memory/replicas based on workload needs. + # Only uncomment the specific resources you want to change, anything uncommented will use the package default. 
+ # (You can read more about k8s resources and limits in https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits) + # + # resources: + # # The ingester handles data being written + # ingester: + # requests: + # cpu: + # memory: + # replicas: # The default for ingesters is 3 to increase availability + # + # # optionally you can specify the resource limits which improves isolation. + # # (see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits) + # # limits: + # # cpu: + # # memory: + + # # The compactor reorganizes old data to improve query and storage efficiency. + # compactor: + # requests: + # cpu: + # memory: + # replicas: # the default is 1 + + # # The querier handles querying data. + # querier: + # requests: + # cpu: + # memory: + # replicas: # the default is 3 + + # # The router performs some api routing. + # router: + # requests: + # cpu: + # memory: + # replicas: # the default is 3 + + admin: + # The list of users to grant access to Clustered via influxctl + users: + # First name of user + - firstName: + # Last name of user + lastName: + # Email of user + email: + # The ID that the configured Identity Provider uses for the user in oauth flows + id: + # Optional list of user groups to assign to the user, rather than the default groups. The following groups are currently supported: Admin, Auditor, Member + userGroups: + - + + # The dsn for the postgres compatible database (note this is the same as defined above) + dsn: + valueFrom: + secretKeyRef: + name: + key: + # The identity provider to be used e.g. 
"keycloak", "auth0", "azure", etc + # Note for Azure Active Directory it must be exactly "azure" + identityProvider: + # The JWKS endpoint provided by the Identity Provider + jwksEndpoint: + + # # This (optional) section controls how InfluxDB issues outbound requests to other services + # egress: + # # If you're using a custom CA you will need to specify the full custom CA bundle here. + # # + # # NOTE: the custom CA is currently only honoured for outbound requests used to obtain + # # the JWT public keys from your identity provider (see `jwksEndpoint`). + # customCertificates: + # valueFrom: + # configMapKeyRef: + # key: ca.pem + # name: custom-ca + + # We also include the ability to enable some features that are not yet ready for general availability + # or for which we don't yet have a proper place to turn on an optional feature in the configuration file. + # To turn on these you should include the name of the feature flag in the `featureFlag` array. + # + # featureFlags: + # # Uncomment to install a Grafana deployment. + # # Depends on one of the prometheus features being deployed. + # # - grafana + + # # The following 2 flags should be uncommented for k8s API 1.21 support. + # # Note that this is an experimental configuration. + # # - noMinReadySeconds + # # - noGrpcProbes diff --git a/yarn.lock b/yarn.lock index 65e19bf64..00856c0cd 100644 --- a/yarn.lock +++ b/yarn.lock @@ -5046,9 +5046,9 @@ tldts@^6.1.32: tldts-core "^6.1.86" tmp@~0.2.3: - version "0.2.3" - resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.2.3.tgz#eb783cc22bc1e8bebd0671476d46ea4eb32a79ae" - integrity sha512-nZD7m9iCPC5g0pYmcaxogYKggSfLsdxl8of3Q/oIbqCqLLIO9IAF0GWjX1z9NZRHPiXv8Wex4yDCaZsgEw0Y8w== + version "0.2.4" + resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.2.4.tgz#c6db987a2ccc97f812f17137b36af2b6521b0d13" + integrity sha512-UdiSoX6ypifLmrfQ/XfiawN6hkjSBpCjhKxxZcWlUUmoXLaCKQU0bx4HF/tdDK2uzRuchf1txGvrWBzYREssoQ== to-buffer@^1.1.1: version "1.2.1"