Merge branch 'master' into docs/add-dynamic-date-filtering-examples
commit
e46349188a
|
|
@ -31,7 +31,7 @@ LogicalPlan
|
|||
[Mm]onitor
|
||||
MBs?
|
||||
PBs?
|
||||
Parquet
|
||||
Parquet|\b\w*-*parquet-\w*\b|\b--\w*parquet\w*\b|`[^`]*parquet[^`]*`
|
||||
Redoc
|
||||
SQLAlchemy
|
||||
SQLAlchemy
|
||||
|
|
|
|||
|
|
@ -33,9 +33,6 @@ call_lefthook()
|
|||
then
|
||||
"$dir/node_modules/lefthook/bin/index.js" "$@"
|
||||
|
||||
elif go tool lefthook -h >/dev/null 2>&1
|
||||
then
|
||||
go tool lefthook "$@"
|
||||
elif bundle exec lefthook -h >/dev/null 2>&1
|
||||
then
|
||||
bundle exec lefthook "$@"
|
||||
|
|
@ -45,21 +42,12 @@ call_lefthook()
|
|||
elif pnpm lefthook -h >/dev/null 2>&1
|
||||
then
|
||||
pnpm lefthook "$@"
|
||||
elif swift package lefthook >/dev/null 2>&1
|
||||
elif swift package plugin lefthook >/dev/null 2>&1
|
||||
then
|
||||
swift package --build-path .build/lefthook --disable-sandbox lefthook "$@"
|
||||
swift package --disable-sandbox plugin lefthook "$@"
|
||||
elif command -v mint >/dev/null 2>&1
|
||||
then
|
||||
mint run csjones/lefthook-plugin "$@"
|
||||
elif uv run lefthook -h >/dev/null 2>&1
|
||||
then
|
||||
uv run lefthook "$@"
|
||||
elif mise exec -- lefthook -h >/dev/null 2>&1
|
||||
then
|
||||
mise exec -- lefthook "$@"
|
||||
elif devbox run lefthook -h >/dev/null 2>&1
|
||||
then
|
||||
devbox run lefthook "$@"
|
||||
else
|
||||
echo "Can't find lefthook in PATH"
|
||||
fi
|
||||
|
|
|
|||
|
|
@ -33,9 +33,6 @@ call_lefthook()
|
|||
then
|
||||
"$dir/node_modules/lefthook/bin/index.js" "$@"
|
||||
|
||||
elif go tool lefthook -h >/dev/null 2>&1
|
||||
then
|
||||
go tool lefthook "$@"
|
||||
elif bundle exec lefthook -h >/dev/null 2>&1
|
||||
then
|
||||
bundle exec lefthook "$@"
|
||||
|
|
@ -45,21 +42,12 @@ call_lefthook()
|
|||
elif pnpm lefthook -h >/dev/null 2>&1
|
||||
then
|
||||
pnpm lefthook "$@"
|
||||
elif swift package lefthook >/dev/null 2>&1
|
||||
elif swift package plugin lefthook >/dev/null 2>&1
|
||||
then
|
||||
swift package --build-path .build/lefthook --disable-sandbox lefthook "$@"
|
||||
swift package --disable-sandbox plugin lefthook "$@"
|
||||
elif command -v mint >/dev/null 2>&1
|
||||
then
|
||||
mint run csjones/lefthook-plugin "$@"
|
||||
elif uv run lefthook -h >/dev/null 2>&1
|
||||
then
|
||||
uv run lefthook "$@"
|
||||
elif mise exec -- lefthook -h >/dev/null 2>&1
|
||||
then
|
||||
mise exec -- lefthook "$@"
|
||||
elif devbox run lefthook -h >/dev/null 2>&1
|
||||
then
|
||||
devbox run lefthook "$@"
|
||||
else
|
||||
echo "Can't find lefthook in PATH"
|
||||
fi
|
||||
|
|
|
|||
|
|
@ -33,9 +33,6 @@ call_lefthook()
|
|||
then
|
||||
"$dir/node_modules/lefthook/bin/index.js" "$@"
|
||||
|
||||
elif go tool lefthook -h >/dev/null 2>&1
|
||||
then
|
||||
go tool lefthook "$@"
|
||||
elif bundle exec lefthook -h >/dev/null 2>&1
|
||||
then
|
||||
bundle exec lefthook "$@"
|
||||
|
|
@ -45,21 +42,12 @@ call_lefthook()
|
|||
elif pnpm lefthook -h >/dev/null 2>&1
|
||||
then
|
||||
pnpm lefthook "$@"
|
||||
elif swift package lefthook >/dev/null 2>&1
|
||||
elif swift package plugin lefthook >/dev/null 2>&1
|
||||
then
|
||||
swift package --build-path .build/lefthook --disable-sandbox lefthook "$@"
|
||||
swift package --disable-sandbox plugin lefthook "$@"
|
||||
elif command -v mint >/dev/null 2>&1
|
||||
then
|
||||
mint run csjones/lefthook-plugin "$@"
|
||||
elif uv run lefthook -h >/dev/null 2>&1
|
||||
then
|
||||
uv run lefthook "$@"
|
||||
elif mise exec -- lefthook -h >/dev/null 2>&1
|
||||
then
|
||||
mise exec -- lefthook "$@"
|
||||
elif devbox run lefthook -h >/dev/null 2>&1
|
||||
then
|
||||
devbox run lefthook "$@"
|
||||
else
|
||||
echo "Can't find lefthook in PATH"
|
||||
fi
|
||||
|
|
|
|||
|
|
@ -0,0 +1,44 @@
|
|||
---
|
||||
title: Rename a table
|
||||
description: >
|
||||
Use the [`influxctl table rename` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/table/rename/)
|
||||
to rename a table in your {{< product-name omit=" Cluster" >}} cluster.
|
||||
menu:
|
||||
influxdb3_cloud_dedicated:
|
||||
parent: Manage tables
|
||||
weight: 202
|
||||
list_code_example: |
|
||||
##### CLI
|
||||
```sh
|
||||
influxctl table rename <DATABASE_NAME> <CURRENT_TABLE_NAME> <NEW_TABLE_NAME>
|
||||
```
|
||||
related:
|
||||
- /influxdb3/cloud-dedicated/reference/cli/influxctl/table/rename/
|
||||
---
|
||||
|
||||
Use the [`influxctl table rename` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/table/rename/)
|
||||
to rename a table in your {{< product-name omit=" Clustered" >}} cluster.
|
||||
|
||||
> [!Note]
|
||||
> After renaming a table, write and query requests using the old table name
|
||||
> are routed to the same table.
|
||||
|
||||
## Rename a database using the influxctl CLI
|
||||
|
||||
<!-- pytest.mark.skip -->
|
||||
|
||||
```bash { placeholders="DATABASE_NAME|CURRENT_TABLE_NAME|NEW_TABLE_NAME" }
|
||||
influxctl table rename DATABASE_NAME CURRENT_TABLE_NAME NEW_TABLE_NAME
|
||||
```
|
||||
|
||||
Replace the following:
|
||||
|
||||
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Name of the database the table is in
|
||||
- {{% code-placeholder-key %}}`CURRENT_TABLE_NAME`{{% /code-placeholder-key %}}: Name of the table to change
|
||||
- {{% code-placeholder-key %}}`NEW_TABLE_NAME`{{% /code-placeholder-key %}}: New name for the table
|
||||
|
||||
> [!Note]
|
||||
> #### Renamed table retains its ID
|
||||
>
|
||||
> The table ID remains the same after renaming. When you list tables,
|
||||
> you'll see the new name associated with the original table ID.
|
||||
|
|
@ -26,6 +26,7 @@ related:
|
|||
- /influxdb3/cloud-dedicated/reference/influxql/
|
||||
- /influxdb3/cloud-dedicated/reference/sql/
|
||||
- /influxdb3/cloud-dedicated/query-data/execute-queries/troubleshoot/
|
||||
- /influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices/
|
||||
|
||||
list_code_example: |
|
||||
```py
|
||||
|
|
@ -240,7 +241,8 @@ from influxdb_client_3 import InfluxDBClient3
|
|||
client = InfluxDBClient3(
|
||||
host='{{< influxdb/host >}}',
|
||||
token='DATABASE_TOKEN',
|
||||
database='DATABASE_NAME'
|
||||
database='DATABASE_NAME',
|
||||
timeout=60 # Set default timeout to 60 seconds
|
||||
)
|
||||
```
|
||||
{{% /code-placeholders %}}
|
||||
|
|
@ -275,6 +277,7 @@ client = InfluxDBClient3(
|
|||
host="{{< influxdb/host >}}",
|
||||
token='DATABASE_TOKEN',
|
||||
database='DATABASE_NAME',
|
||||
timeout=60, # Set default timeout to 60 seconds
|
||||
flight_client_options=flight_client_options(
|
||||
tls_root_certs=cert))
|
||||
...
|
||||
|
|
@ -332,7 +335,8 @@ client = InfluxDBClient3(
|
|||
# Execute the query and return an Arrow table
|
||||
table = client.query(
|
||||
query="SELECT * FROM home",
|
||||
language="sql"
|
||||
language="sql",
|
||||
timeout=30 # Override default timeout for simple queries (30 seconds)
|
||||
)
|
||||
|
||||
print("\n#### View Schema information\n")
|
||||
|
|
@ -377,7 +381,8 @@ client = InfluxDBClient3(
|
|||
# Execute the query and return an Arrow table
|
||||
table = client.query(
|
||||
query="SELECT * FROM home",
|
||||
language="influxql"
|
||||
language="influxql",
|
||||
timeout=30 # Override default timeout for simple queries (30 seconds)
|
||||
)
|
||||
|
||||
print("\n#### View Schema information\n")
|
||||
|
|
|
|||
|
|
@ -13,6 +13,7 @@ influxdb3/cloud-dedicated/tags: [query, sql, influxql, influxctl, CLI]
|
|||
related:
|
||||
- /influxdb3/cloud-dedicated/reference/cli/influxctl/query/
|
||||
- /influxdb3/cloud-dedicated/get-started/query/#execute-an-sql-query, Get started querying data
|
||||
- /influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices/, Query timeout best practices
|
||||
- /influxdb3/cloud-dedicated/reference/sql/
|
||||
- /influxdb3/cloud-dedicated/reference/influxql/
|
||||
list_code_example: |
|
||||
|
|
@ -142,6 +143,34 @@ Replace the following:
|
|||
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
|
||||
Name of the database to query
|
||||
|
||||
## Query timeouts
|
||||
|
||||
The [`influxctl --timeout` global flag](/influxdb3/cloud-dedicated/reference/cli/influxctl/) sets the maximum duration for API calls, including query requests.
|
||||
If a query takes longer than the specified timeout, the operation will be canceled.
|
||||
|
||||
### Timeout examples
|
||||
|
||||
Use different timeout values based on your query type:
|
||||
|
||||
{{% code-placeholders "DATABASE_(TOKEN|NAME)" %}}
|
||||
```sh
|
||||
# Shorter timeout for testing dashboard queries (10 seconds)
|
||||
influxctl query \
|
||||
--timeout 10s \
|
||||
--token DATABASE_TOKEN \
|
||||
--database DATABASE_NAME \
|
||||
"SELECT AVG(temperature) FROM sensors WHERE time >= now() - INTERVAL '1 day'"
|
||||
|
||||
# Longer timeout for analytical queries (5 minutes)
|
||||
influxctl query \
|
||||
--timeout 5m \
|
||||
--token DATABASE_TOKEN \
|
||||
--database DATABASE_NAME \
|
||||
"SELECT room, AVG(temperature) FROM sensors WHERE time >= now() - INTERVAL '30 days' GROUP BY room"
|
||||
```
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
For guidance on selecting appropriate timeout values, see [Query timeout best practices](/influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices/).
|
||||
|
||||
## Output format
|
||||
|
||||
|
|
@ -243,7 +272,7 @@ influxctl query \
|
|||
{{% /influxdb/custom-timestamps %}}
|
||||
|
||||
{{< expand-wrapper >}}
|
||||
{{% expand "View example results with unix nanosecond timestamps" %}}
|
||||
{{% expand "View example results with Unix nanosecond timestamps" %}}
|
||||
{{% influxdb/custom-timestamps %}}
|
||||
```
|
||||
+-------+--------+---------+------+---------------------+
|
||||
|
|
|
|||
|
|
@ -0,0 +1,17 @@
|
|||
---
|
||||
title: Query timeout best practices
|
||||
description: Learn how to set appropriate query timeouts to balance performance and resource protection.
|
||||
menu:
|
||||
influxdb3_cloud_dedicated:
|
||||
name: Query timeout best practices
|
||||
parent: Troubleshoot and optimize queries
|
||||
weight: 205
|
||||
related:
|
||||
- /influxdb3/cloud-dedicated/reference/client-libraries/v3/
|
||||
- /influxdb3/cloud-dedicated/query-data/execute-queries/influxctl-cli/
|
||||
source: shared/influxdb3-query-guides/query-timeout-best-practices.md
|
||||
---
|
||||
|
||||
<!--
|
||||
//SOURCE - content/shared/influxdb3-query-guides/query-timeout-best-practices.md
|
||||
>
|
||||
|
|
@ -12,6 +12,7 @@ related:
|
|||
- /influxdb3/cloud-dedicated/query-data/sql/
|
||||
- /influxdb3/cloud-dedicated/query-data/influxql/
|
||||
- /influxdb3/cloud-dedicated/reference/client-libraries/v3/
|
||||
- /influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices/
|
||||
aliases:
|
||||
- /influxdb3/cloud-dedicated/query-data/execute-queries/troubleshoot/
|
||||
- /influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/trace/
|
||||
|
|
@ -30,7 +31,9 @@ If a query doesn't return any data, it might be due to the following:
|
|||
|
||||
- Your data falls outside the time range (or other conditions) in the query--for example, the InfluxQL `SHOW TAG VALUES` command uses a default time range of 1 day.
|
||||
- The query (InfluxDB server) timed out.
|
||||
- The query client timed out.
|
||||
- The query client timed out.
|
||||
See [Query timeout best practices](/influxdb3/cloud-dedicated/query-data/troubleshoot-and-optimize/query-timeout-best-practices/)
|
||||
for guidance on setting appropriate timeouts.
|
||||
- The query return type is not supported by the client library.
|
||||
For example, array or list types may not be supported.
|
||||
In this case, use `array_to_string()` to convert the array value to a string--for example:
|
||||
|
|
|
|||
|
|
@ -0,0 +1,14 @@
|
|||
---
|
||||
title: influxctl table rename
|
||||
description: >
|
||||
The `influxctl table rename` command renames a table in an
|
||||
{{% product-name omit=" Clustered" %}} cluster.
|
||||
menu:
|
||||
influxdb3_cloud_dedicated:
|
||||
parent: influxctl table
|
||||
weight: 301
|
||||
metadata: [influxctl 2.10.3+]
|
||||
source: /shared/influxctl/table/rename.md
|
||||
---
|
||||
|
||||
<!-- //SOURCE content/shared/influxctl/table/rename.md -->
|
||||
|
|
@ -10,101 +10,15 @@ menu:
|
|||
influxdb3_cloud_dedicated:
|
||||
name: Troubleshoot issues
|
||||
parent: Write data
|
||||
influxdb3/cloud-dedicated/tags: [write, line protocol, errors]
|
||||
influxdb3/cloud-dedicated/tags: [write, line protocol, errors, partial writes]
|
||||
related:
|
||||
- /influxdb3/cloud-dedicated/get-started/write/
|
||||
- /influxdb3/cloud-dedicated/reference/syntax/line-protocol/
|
||||
- /influxdb3/cloud-dedicated/write-data/best-practices/
|
||||
- /influxdb3/cloud-dedicated/reference/internals/durability/
|
||||
source: /shared/influxdb3-write-guides/troubleshoot-distributed.md
|
||||
---
|
||||
|
||||
Learn how to avoid unexpected results and recover from errors when writing to {{% product-name %}}.
|
||||
|
||||
- [Handle write responses](#handle-write-responses)
|
||||
- [Review HTTP status codes](#review-http-status-codes)
|
||||
- [Troubleshoot failures](#troubleshoot-failures)
|
||||
- [Troubleshoot rejected points](#troubleshoot-rejected-points)
|
||||
|
||||
## Handle write responses
|
||||
|
||||
{{% product-name %}} does the following when you send a write request:
|
||||
|
||||
1. Validates the request.
|
||||
2. If successful, attempts to [ingest data](/influxdb3/cloud-dedicated/reference/internals/durability/#data-ingest) from the request body; otherwise, responds with an [error status](#review-http-status-codes).
|
||||
3. Ingests or rejects data in the batch and returns one of the following HTTP status codes:
|
||||
|
||||
- `204 No Content`: All data in the batch is ingested.
|
||||
- `400 Bad Request`: Some (_when **partial writes** are configured for the cluster_) or all of the data has been rejected. Data that has not been rejected is ingested and queryable.
|
||||
|
||||
The response body contains error details about [rejected points](#troubleshoot-rejected-points), up to 100 points.
|
||||
|
||||
Writes are synchronous--the response status indicates the final status of the write and all ingested data is queryable.
|
||||
|
||||
To ensure that InfluxDB handles writes in the order you request them,
|
||||
wait for the response before you send the next request.
|
||||
|
||||
### Review HTTP status codes
|
||||
|
||||
InfluxDB uses conventional HTTP status codes to indicate the success or failure of a request.
|
||||
The `message` property of the response body may contain additional details about the error.
|
||||
{{< product-name >}} returns one the following HTTP status codes for a write request:
|
||||
|
||||
| HTTP response code | Response body | Description |
|
||||
|:------------------------------|:------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `204 No Content"` | no response body | If InfluxDB ingested all of the data in the batch |
|
||||
| `400 "Bad request"` | error details about rejected points, up to 100 points: `line` contains the first rejected line, `message` describes rejections | If some (_when **partial writes** are configured for the cluster_) or all request data isn't allowed (for example, if it is malformed or falls outside of the bucket's retention period)--the response body indicates whether a partial write has occurred or if all data has been rejected |
|
||||
| `401 "Unauthorized"` | | If the `Authorization` header is missing or malformed or if the [token](/influxdb3/cloud-dedicated/admin/tokens/) doesn't have [permission](/influxdb3/cloud-dedicated/reference/cli/influxctl/token/create/#examples) to write to the database. See [examples using credentials](/influxdb3/cloud-dedicated/get-started/write/#write-line-protocol-to-influxdb) in write requests. |
|
||||
| `404 "Not found"` | requested **resource type** (for example, "organization" or "database"), and **resource name** | If a requested resource (for example, organization or database) wasn't found |
|
||||
| `422 "Unprocessable Entity"` | `message` contains details about the error | If the data isn't allowed (for example, falls outside of the database’s retention period).
|
||||
| `500 "Internal server error"` | | Default status for an error |
|
||||
| `503 "Service unavailable"` | | If the server is temporarily unavailable to accept writes. The `Retry-After` header contains the number of seconds to wait before trying the write again.
|
||||
|
||||
The `message` property of the response body may contain additional details about the error.
|
||||
If your data did not write to the database, see how to [troubleshoot rejected points](#troubleshoot-rejected-points).
|
||||
|
||||
## Troubleshoot failures
|
||||
|
||||
If you notice data is missing in your database, do the following:
|
||||
|
||||
- Check the [HTTP status code](#review-http-status-codes) in the response.
|
||||
- Check the `message` property in the response body for details about the error.
|
||||
- If the `message` describes a field error, [troubleshoot rejected points](#troubleshoot-rejected-points).
|
||||
- Verify all lines contain valid syntax ([line protocol](/influxdb3/cloud-dedicated/reference/syntax/line-protocol/)).
|
||||
- Verify the timestamps in your data match the [precision parameter](/influxdb3/cloud-dedicated/reference/glossary/#precision) in your request.
|
||||
- Minimize payload size and network errors by [optimizing writes](/influxdb3/cloud-dedicated/write-data/best-practices/optimize-writes/).
|
||||
|
||||
## Troubleshoot rejected points
|
||||
|
||||
When writing points from a batch, InfluxDB rejects points that have syntax errors or schema conflicts.
|
||||
If InfluxDB processes the data in your batch and then rejects points, the [HTTP response](#handle-write-responses) body contains the following properties that describe rejected points:
|
||||
|
||||
- `code`: `"invalid"`
|
||||
- `line`: the line number of the _first_ rejected point in the batch.
|
||||
- `message`: a string that contains line-separated error messages, one message for each rejected point in the batch, up to 100 rejected points.
|
||||
|
||||
InfluxDB rejects points for the following reasons:
|
||||
|
||||
- a line protocol parsing error
|
||||
- an invalid timestamp
|
||||
- a schema conflict
|
||||
|
||||
Schema conflicts occur when you try to write data that contains any of the following:
|
||||
|
||||
- a wrong data type: the point falls within the same partition (default partitioning is measurement and day) as existing bucket data and contains a different data type for an existing field
|
||||
- a tag and a field that use the same key
|
||||
|
||||
### Example
|
||||
|
||||
The following example shows a response body for a write request that contains two rejected points:
|
||||
|
||||
```json
|
||||
{
|
||||
"code": "invalid",
|
||||
"line": 2,
|
||||
"message": "failed to parse line protocol:
|
||||
errors encountered on line(s):
|
||||
error parsing line 2 (1-based): Invalid measurement was provided
|
||||
error parsing line 4 (1-based): Unable to parse timestamp value '123461000000000000000000000000'"
|
||||
}
|
||||
```
|
||||
|
||||
Check for [field data type](/influxdb3/cloud-dedicated/reference/syntax/line-protocol/#data-types-and-format) differences between the rejected data point and points within the same database and partition--for example, did you attempt to write `string` data to an `int` field?
|
||||
<!-- The content for this page is at
|
||||
//SOURCE - content/shared/influxdb3-write-guides/troubleshoot-distributed.md
|
||||
-->
|
||||
|
|
@ -27,6 +27,7 @@ related:
|
|||
- /influxdb3/cloud-serverless/reference/influxql/
|
||||
- /influxdb3/cloud-serverless/reference/sql/
|
||||
- /influxdb3/cloud-serverless/query-data/execute-queries/troubleshoot/
|
||||
- /influxdb3/cloud-serverless/query-data/troubleshoot-and-optimize/query-timeout-best-practices/
|
||||
|
||||
list_code_example: |
|
||||
```py
|
||||
|
|
@ -241,7 +242,8 @@ from influxdb_client_3 import InfluxDBClient3
|
|||
client = InfluxDBClient3(
|
||||
host='{{< influxdb/host >}}',
|
||||
token='API_TOKEN',
|
||||
database='BUCKET_NAME'
|
||||
database='BUCKET_NAME',
|
||||
timeout=30 # Set default timeout to 30 seconds for serverless
|
||||
)
|
||||
```
|
||||
{{% /code-placeholders %}}
|
||||
|
|
@ -332,7 +334,8 @@ client = InfluxDBClient3(
|
|||
# Execute the query and return an Arrow table
|
||||
table = client.query(
|
||||
query="SELECT * FROM home",
|
||||
language="sql"
|
||||
language="sql",
|
||||
timeout=10 # Override default timeout for simple queries (10 seconds)
|
||||
)
|
||||
|
||||
print("\n#### View Schema information\n")
|
||||
|
|
@ -377,7 +380,8 @@ client = InfluxDBClient3(
|
|||
# Execute the query and return an Arrow table
|
||||
table = client.query(
|
||||
query="SELECT * FROM home",
|
||||
language="influxql"
|
||||
language="influxql",
|
||||
timeout=10 # Override default timeout for simple queries (10 seconds)
|
||||
)
|
||||
|
||||
print("\n#### View Schema information\n")
|
||||
|
|
|
|||
|
|
@ -0,0 +1,17 @@
|
|||
---
|
||||
title: Query timeout best practices
|
||||
description: Learn how to set appropriate query timeouts to balance performance and resource protection.
|
||||
menu:
|
||||
influxdb3_cloud_serverless:
|
||||
name: Query timeout best practices
|
||||
parent: Troubleshoot and optimize queries
|
||||
identifier: query-timeout-best-practices
|
||||
weight: 201
|
||||
related:
|
||||
- /influxdb3/cloud-serverless/reference/client-libraries/v3/
|
||||
source: shared/influxdb3-query-guides/query-timeout-best-practices.md
|
||||
---
|
||||
|
||||
<!--
|
||||
//SOURCE - content/shared/influxdb3-query-guides/query-timeout-best-practices.md
|
||||
>
|
||||
|
|
@ -12,6 +12,7 @@ related:
|
|||
- /influxdb3/cloud-serverless/query-data/sql/
|
||||
- /influxdb3/cloud-serverless/query-data/influxql/
|
||||
- /influxdb3/cloud-serverless/reference/client-libraries/v3/
|
||||
- /influxdb3/cloud-serverless/query-data/troubleshoot-and-optimize/query-timeout-best-practices/
|
||||
aliases:
|
||||
- /influxdb3/cloud-serverless/query-data/execute-queries/troubleshoot/
|
||||
---
|
||||
|
|
@ -29,7 +30,9 @@ If a query doesn't return any data, it might be due to the following:
|
|||
|
||||
- Your data falls outside the time range (or other conditions) in the query--for example, the InfluxQL `SHOW TAG VALUES` command uses a default time range of 1 day.
|
||||
- The query (InfluxDB server) timed out.
|
||||
- The query client timed out.
|
||||
- The query client timed out.
|
||||
See [Query timeout best practices](/influxdb3/cloud-serverless/query-data/troubleshoot-and-optimize/query-timeout-best-practices/)
|
||||
for guidance on setting appropriate timeouts.
|
||||
- The query return type is not supported by the client library.
|
||||
For example, array or list types may not be supported.
|
||||
In this case, use `array_to_string()` to convert the array value to a string--for example:
|
||||
|
|
|
|||
|
|
@ -10,103 +10,15 @@ menu:
|
|||
influxdb3_cloud_serverless:
|
||||
name: Troubleshoot issues
|
||||
parent: Write data
|
||||
influxdb3/cloud-serverless/tags: [write, line protocol, errors]
|
||||
influxdb3/cloud-serverless/tags: [write, line protocol, errors, partial writes]
|
||||
related:
|
||||
- /influxdb3/cloud-serverless/get-started/write/
|
||||
- /influxdb3/cloud-serverless/reference/syntax/line-protocol/
|
||||
- /influxdb3/cloud-serverless/write-data/best-practices/
|
||||
- /influxdb3/cloud-serverless/reference/internals/durability/
|
||||
source: /shared/influxdb3-write-guides/troubleshoot-distributed.md
|
||||
---
|
||||
|
||||
Learn how to avoid unexpected results and recover from errors when writing to {{% product-name %}}.
|
||||
|
||||
|
||||
- [Handle write responses](#handle-write-responses)
|
||||
- [Review HTTP status codes](#review-http-status-codes)
|
||||
- [Troubleshoot failures](#troubleshoot-failures)
|
||||
- [Troubleshoot rejected points](#troubleshoot-rejected-points)
|
||||
|
||||
## Handle write responses
|
||||
|
||||
{{% product-name %}} does the following when you send a write request:
|
||||
|
||||
1. Validates the request.
|
||||
2. If successful, attempts to [ingest data](/influxdb3/cloud-serverless/reference/internals/durability/#data-ingest) from the request body; otherwise, responds with an [error status](#review-http-status-codes).
|
||||
3. Ingests or rejects data from the batch and returns one of the following HTTP status codes:
|
||||
|
||||
- `204 No Content`: All of the data is ingested and queryable.
|
||||
- `400 Bad Request`: Some or all of the data has been rejected. Data that has not been rejected is ingested and queryable.
|
||||
|
||||
The response body contains error details about [rejected points](#troubleshoot-rejected-points), up to 100 points.
|
||||
|
||||
Writes are synchronous--the response status indicates the final status of the write and all ingested data is queryable.
|
||||
|
||||
To ensure that InfluxDB handles writes in the order you request them,
|
||||
wait for the response before you send the next request.
|
||||
|
||||
### Review HTTP status codes
|
||||
|
||||
InfluxDB uses conventional HTTP status codes to indicate the success or failure of a request.
|
||||
The `message` property of the response body may contain additional details about the error.
|
||||
{{< product-name >}} returns one the following HTTP status codes for a write request:
|
||||
|
||||
| HTTP response code | Response body | Description |
|
||||
| :-------------------------------| :--------------------------------------------------------------- | :------------- |
|
||||
| `204 "No Content"` | no response body | If InfluxDB ingested all of the data in the batch |
|
||||
| `400 "Bad request"` | error details about rejected points, up to 100 points: `line` contains the first rejected line, `message` describes rejections | If some or all request data isn't allowed (for example, is malformed or falls outside of the bucket's retention period)--the response body indicates whether a partial write has occurred or if all data has been rejected |
|
||||
| `401 "Unauthorized"` | | If the `Authorization` header is missing or malformed or if the [token](/influxdb3/cloud-serverless/admin/tokens/) doesn't have [permission](/influxdb3/cloud-serverless/admin/tokens/create-token/) to write to the bucket. See [examples using credentials](/influxdb3/cloud-serverless/get-started/write/#write-line-protocol-to-influxdb) in write requests. |
|
||||
| `404 "Not found"` | requested **resource type** (for example, "organization" or "bucket"), and **resource name** | If a requested resource (for example, organization or bucket) wasn't found |
|
||||
| `413 “Request too large”` | cannot read data: points in batch is too large | If a request exceeds the maximum [global limit](/influxdb3/cloud-serverless/admin/billing/limits/) |
|
||||
| `429 “Too many requests”` | | If the number of requests exceeds the [adjustable service quota](/influxdb3/cloud-serverless/admin/billing/limits/#adjustable-service-quotas). The `Retry-After` header contains the number of seconds to wait before trying the write again. | If a request exceeds your plan's [adjustable service quotas](/influxdb3/cloud-serverless/admin/billing/limits/#adjustable-service-quotas)
|
||||
| `500 "Internal server error"` | | Default status for an error |
|
||||
| `503 "Service unavailable"` | | If the server is temporarily unavailable to accept writes. The `Retry-After` header contains the number of seconds to wait before trying the write again.
|
||||
|
||||
The `message` property of the response body may contain additional details about the error.
|
||||
If your data did not write to the bucket, see how to [troubleshoot rejected points](#troubleshoot-rejected-points).
|
||||
|
||||
## Troubleshoot failures
|
||||
|
||||
If you notice data is missing in your database, do the following:
|
||||
|
||||
- Check the [HTTP status code](#review-http-status-codes) in the response.
|
||||
- Check the `message` property in the response body for details about the error.
|
||||
- If the `message` describes a field error, [troubleshoot rejected points](#troubleshoot-rejected-points).
|
||||
- Verify all lines contain valid syntax ([line protocol](/influxdb3/cloud-serverless/reference/syntax/line-protocol/)).
|
||||
- Verify the timestamps in your data match the [precision parameter](/influxdb3/cloud-serverless/reference/glossary/#precision) in your request.
|
||||
- Minimize payload size and network errors by [optimizing writes](/influxdb3/cloud-serverless/write-data/best-practices/optimize-writes/).
|
||||
|
||||
## Troubleshoot rejected points
|
||||
|
||||
When writing points from a batch, InfluxDB rejects points that have syntax errors or schema conflicts.
|
||||
If InfluxDB processes the data in your batch and then rejects points, the [HTTP response](#handle-write-responses) body contains the following properties that describe rejected points:
|
||||
|
||||
- `code`: `"invalid"`
|
||||
- `line`: the line number of the _first_ rejected point in the batch.
|
||||
- `message`: a string that contains line-separated error messages, one message for each rejected point in the batch, up to 100 rejected points.
|
||||
|
||||
InfluxDB rejects points for the following reasons:
|
||||
|
||||
- a line protocol parsing error
|
||||
- an invalid timestamp
|
||||
- a schema conflict
|
||||
|
||||
Schema conflicts occur when you try to write data that contains any of the following:
|
||||
|
||||
- a wrong data type: the point falls within the same partition (default partitioning is measurement and day) as existing bucket data and contains a different data type for an existing field
|
||||
- a tag and a field that use the same key
|
||||
|
||||
### Example
|
||||
|
||||
The following example shows a response body for a write request that contains two rejected points:
|
||||
|
||||
```json
|
||||
{
|
||||
"code": "invalid",
|
||||
"line": 2,
|
||||
"message": "failed to parse line protocol:
|
||||
errors encountered on line(s):
|
||||
error parsing line 2 (1-based): Invalid measurement was provided
|
||||
error parsing line 4 (1-based): Unable to parse timestamp value '123461000000000000000000000000'"
|
||||
}
|
||||
```
|
||||
|
||||
Check for [field data type](/influxdb3/cloud-serverless/reference/syntax/line-protocol/#data-types-and-format) differences between the rejected data point and points within the same database and partition--for example, did you attempt to write `string` data to an `int` field?
|
||||
<!-- The content for this page is at
|
||||
//SOURCE - content/shared/influxdb3-write-guides/troubleshoot-distributed.md
|
||||
-->
|
||||
|
|
@ -0,0 +1,44 @@
|
|||
---
|
||||
title: Rename a table
|
||||
description: >
|
||||
Use the [`influxctl table rename` command](/influxdb3/clustered/reference/cli/influxctl/table/rename/)
|
||||
to rename a table in your {{< product-name omit=" Clustered" >}} cluster.
|
||||
menu:
|
||||
influxdb3_clustered:
|
||||
parent: Manage tables
|
||||
weight: 202
|
||||
list_code_example: |
|
||||
##### CLI
|
||||
```sh
|
||||
influxctl table rename <DATABASE_NAME> <CURRENT_TABLE_NAME> <NEW_TABLE_NAME>
|
||||
```
|
||||
related:
|
||||
- /influxdb3/clustered/reference/cli/influxctl/table/rename/
|
||||
---
|
||||
|
||||
Use the [`influxctl table rename` command](/influxdb3/clustered/reference/cli/influxctl/table/rename/)
|
||||
to rename a table in your {{< product-name omit=" Clustered" >}} cluster.
|
||||
|
||||
> [!Note]
|
||||
> After renaming a table, write and query requests using the old table name
|
||||
> are routed to the same table.
|
||||
|
||||
## Rename a table using the influxctl CLI
|
||||
|
||||
<!-- pytest.mark.skip -->
|
||||
|
||||
```bash { placeholders="DATABASE_NAME|CURRENT_TABLE_NAME|NEW_TABLE_NAME" }
|
||||
influxctl table rename DATABASE_NAME CURRENT_TABLE_NAME NEW_TABLE_NAME
|
||||
```
|
||||
|
||||
Replace the following:
|
||||
|
||||
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: Name of the database the table is in
|
||||
- {{% code-placeholder-key %}}`CURRENT_TABLE_NAME`{{% /code-placeholder-key %}}: Name of the table to change
|
||||
- {{% code-placeholder-key %}}`NEW_TABLE_NAME`{{% /code-placeholder-key %}}: New name for the table
|
||||
|
||||
> [!Note]
|
||||
> #### Renamed table retains its ID
|
||||
>
|
||||
> The table ID remains the same after renaming. When you list tables,
|
||||
> you'll see the new name associated with the original table ID.
|
||||
|
|
@ -20,6 +20,7 @@ related:
|
|||
- /influxdb3/clustered/query-data/sql/
|
||||
- /influxdb3/clustered/reference/influxql/
|
||||
- /influxdb3/clustered/reference/sql/
|
||||
- /influxdb3/clustered/query-data/troubleshoot-and-optimize/query-timeout-best-practices/
|
||||
|
||||
list_code_example: |
|
||||
```py
|
||||
|
|
@ -234,7 +235,8 @@ from influxdb_client_3 import InfluxDBClient3
|
|||
client = InfluxDBClient3(
|
||||
host='{{< influxdb/host >}}',
|
||||
token='DATABASE_TOKEN',
|
||||
database='DATABASE_NAME'
|
||||
database='DATABASE_NAME',
|
||||
timeout=60 # Set default timeout to 60 seconds
|
||||
)
|
||||
```
|
||||
{{% /code-placeholders %}}
|
||||
|
|
@ -325,7 +327,8 @@ client = InfluxDBClient3(
|
|||
# Execute the query and return an Arrow table
|
||||
table = client.query(
|
||||
query="SELECT * FROM home",
|
||||
language="sql"
|
||||
language="sql",
|
||||
timeout=30 # Override default timeout for simple queries (30 seconds)
|
||||
)
|
||||
|
||||
print("\n#### View Schema information\n")
|
||||
|
|
@ -370,7 +373,8 @@ client = InfluxDBClient3(
|
|||
# Execute the query and return an Arrow table
|
||||
table = client.query(
|
||||
query="SELECT * FROM home",
|
||||
language="influxql"
|
||||
language="influxql",
|
||||
timeout=30 # Override default timeout for simple queries (30 seconds)
|
||||
)
|
||||
|
||||
print("\n#### View Schema information\n")
|
||||
|
|
|
|||
|
|
@ -12,6 +12,7 @@ influxdb3/clustered/tags: [query, sql, influxql, influxctl, CLI]
|
|||
related:
|
||||
- /influxdb3/clustered/reference/cli/influxctl/query/
|
||||
- /influxdb3/clustered/get-started/query/#execute-an-sql-query, Get started querying data
|
||||
- /influxdb3/clustered/query-data/troubleshoot-and-optimize/query-timeout-best-practices/, Query timeout best practices
|
||||
- /influxdb3/clustered/reference/sql/
|
||||
- /influxdb3/clustered/reference/influxql/
|
||||
list_code_example: |
|
||||
|
|
@ -141,6 +142,35 @@ Replace the following:
|
|||
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
|
||||
Name of the database to query
|
||||
|
||||
## Query timeouts
|
||||
|
||||
The [`influxctl --timeout` global flag](/influxdb3/clustered/reference/cli/influxctl/) sets the maximum duration for API calls, including query requests.
|
||||
If a query takes longer than the specified timeout, the operation will be canceled.
|
||||
|
||||
### Timeout examples
|
||||
|
||||
Use different timeout values based on your query type:
|
||||
|
||||
{{% code-placeholders "DATABASE_(TOKEN|NAME)" %}}
|
||||
```sh
|
||||
# Shorter timeout for testing dashboard queries (10 seconds)
|
||||
influxctl query \
|
||||
--timeout 10s \
|
||||
--token DATABASE_TOKEN \
|
||||
--database DATABASE_NAME \
|
||||
"SELECT * FROM sensors WHERE time >= now() - INTERVAL '1 hour' LIMIT 100"
|
||||
|
||||
# Longer timeout for analytical queries (5 minutes)
|
||||
influxctl query \
|
||||
--timeout 300s \
|
||||
--token DATABASE_TOKEN \
|
||||
--database DATABASE_NAME \
|
||||
"SELECT room, AVG(temperature) FROM sensors WHERE time >= now() - INTERVAL '30 days' GROUP BY room"
|
||||
```
|
||||
{{% /code-placeholders %}}
|
||||
|
||||
For guidance on selecting appropriate timeout values, see [Query timeout best practices](/influxdb3/clustered/query-data/troubleshoot-and-optimize/query-timeout-best-practices/).
|
||||
|
||||
## Output format
|
||||
|
||||
The `influxctl query` command supports the following output formats:
|
||||
|
|
@ -241,7 +271,7 @@ influxctl query \
|
|||
{{% /influxdb/custom-timestamps %}}
|
||||
|
||||
{{< expand-wrapper >}}
|
||||
{{% expand "View example results with unix nanosecond timestamps" %}}
|
||||
{{% expand "View example results with Unix nanosecond timestamps" %}}
|
||||
{{% influxdb/custom-timestamps %}}
|
||||
```
|
||||
+-------+--------+---------+------+---------------------+
|
||||
|
|
|
|||
|
|
@ -0,0 +1,18 @@
|
|||
---
|
||||
title: Query timeout best practices
|
||||
description: Learn how to set appropriate query timeouts to balance performance and resource protection.
|
||||
menu:
|
||||
influxdb3_clustered:
|
||||
name: Query timeout best practices
|
||||
parent: Troubleshoot and optimize queries
|
||||
identifier: query-timeout-best-practices
|
||||
weight: 201
|
||||
related:
|
||||
- /influxdb3/clustered/reference/client-libraries/v3/
|
||||
- /influxdb3/clustered/query-data/execute-queries/influxctl-cli/
|
||||
source: shared/influxdb3-query-guides/query-timeout-best-practices.md
|
||||
---
|
||||
|
||||
<!--
|
||||
//SOURCE - content/shared/influxdb3-query-guides/query-timeout-best-practices.md
|
||||
-->
|
||||
|
|
@ -12,6 +12,7 @@ related:
|
|||
- /influxdb3/clustered/query-data/sql/
|
||||
- /influxdb3/clustered/query-data/influxql/
|
||||
- /influxdb3/clustered/reference/client-libraries/v3/
|
||||
- /influxdb3/clustered/query-data/troubleshoot-and-optimize/query-timeout-best-practices/
|
||||
aliases:
|
||||
- /influxdb3/clustered/query-data/execute-queries/troubleshoot/
|
||||
---
|
||||
|
|
@ -29,7 +30,9 @@ If a query doesn't return any data, it might be due to the following:
|
|||
|
||||
- Your data falls outside the time range (or other conditions) in the query--for example, the InfluxQL `SHOW TAG VALUES` command uses a default time range of 1 day.
|
||||
- The query (InfluxDB server) timed out.
|
||||
- The query client timed out.
|
||||
- The query client timed out.
|
||||
See [Query timeout best practices](/influxdb3/clustered/query-data/troubleshoot-and-optimize/query-timeout-best-practices/)
|
||||
for guidance on setting appropriate timeouts.
|
||||
- The query return type is not supported by the client library.
|
||||
For example, array or list types may not be supported.
|
||||
In this case, use `array_to_string()` to convert the array value to a string--for example:
|
||||
|
|
|
|||
|
|
@ -0,0 +1,14 @@
|
|||
---
|
||||
title: influxctl table rename
|
||||
description: >
|
||||
The `influxctl table rename` command renames a table in an
|
||||
{{% product-name omit=" Clustered" %}} cluster.
|
||||
menu:
|
||||
influxdb3_clustered:
|
||||
parent: influxctl table
|
||||
weight: 301
|
||||
metadata: [influxctl 2.10.3+]
|
||||
source: /shared/influxctl/table/rename.md
|
||||
---
|
||||
|
||||
<!-- //SOURCE content/shared/influxctl/table/rename.md -->
|
||||
|
|
@ -61,6 +61,34 @@ directory. This new directory contains artifacts associated with the specified r
|
|||
|
||||
---
|
||||
|
||||
## 20250721-1796368 {date="2025-07-21"}
|
||||
|
||||
### Quickstart
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
package:
|
||||
image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20250721-1796368
|
||||
```
|
||||
|
||||
#### Release artifacts
|
||||
- [app-instance-schema.json](/downloads/clustered-release-artifacts/20250721-1796368/app-instance-schema.json)
|
||||
- [example-customer.yml](/downloads/clustered-release-artifacts/20250721-1796368/example-customer.yml)
|
||||
- [InfluxDB Clustered README EULA July 2024.txt](/downloads/clustered-release-artifacts/InfluxDB%20Clustered%20README%20EULA%20July%202024.txt)
|
||||
|
||||
|
||||
### Highlights
|
||||
|
||||
#### Support for InfluxQL INTEGRAL()
|
||||
|
||||
InfluxQL `INTEGRAL()` function is now supported in the InfluxDB 3.0 database engine.
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- Fix `SHOW TABLES` timeout when a database has a large number of tables.
|
||||
|
||||
---
|
||||
|
||||
## 20250707-1777929 {date="2025-07-07"}
|
||||
|
||||
### Quickstart
|
||||
|
|
|
|||
|
|
@ -11,77 +11,15 @@ menu:
|
|||
influxdb3_clustered:
|
||||
name: Troubleshoot issues
|
||||
parent: Write data
|
||||
influxdb3/clustered/tags: [write, line protocol, errors]
|
||||
influxdb3/clustered/tags: [write, line protocol, errors, partial writes]
|
||||
related:
|
||||
- /influxdb3/clustered/get-started/write/
|
||||
- /influxdb3/clustered/reference/syntax/line-protocol/
|
||||
- /influxdb3/clustered/write-data/best-practices/
|
||||
- /influxdb3/clustered/reference/internals/durability/
|
||||
source: /shared/influxdb3-write-guides/troubleshoot-distributed.md
|
||||
---
|
||||
|
||||
Learn how to avoid unexpected results and recover from errors when writing to
|
||||
{{% product-name %}}.
|
||||
|
||||
- [Handle write responses](#handle-write-responses)
|
||||
- [Review HTTP status codes](#review-http-status-codes)
|
||||
- [Troubleshoot failures](#troubleshoot-failures)
|
||||
- [Troubleshoot rejected points](#troubleshoot-rejected-points)
|
||||
|
||||
## Handle write responses
|
||||
|
||||
{{% product-name %}} does the following when you send a write request:
|
||||
|
||||
1. Validates the request.
|
||||
2. If successful, attempts to ingest data from the request body; otherwise,
|
||||
responds with an [error status](#review-http-status-codes).
|
||||
3. Ingests or rejects data in the batch and returns one of the following HTTP
|
||||
status codes:
|
||||
|
||||
- `204 No Content`: All data in the batch is ingested.
|
||||
- `400 Bad Request`: Some or all of the data has been rejected.
|
||||
Data that has not been rejected is ingested and queryable.
|
||||
|
||||
The response body contains error details about
|
||||
[rejected points](#troubleshoot-rejected-points), up to 100 points.
|
||||
|
||||
Writes are synchronous--the response status indicates the final status of the
|
||||
write and all ingested data is queryable.
|
||||
|
||||
To ensure that InfluxDB handles writes in the order you request them,
|
||||
wait for the response before you send the next request.
|
||||
|
||||
### Review HTTP status codes
|
||||
|
||||
InfluxDB uses conventional HTTP status codes to indicate the success or failure of a request.
|
||||
The `message` property of the response body may contain additional details about the error.
|
||||
Write requests return the following status codes:
|
||||
|
||||
| HTTP response code | Message | Description |
|
||||
| :-------------------------------| :--------------------------------------------------------------- | :------------- |
|
||||
| `204 "Success"` | | If InfluxDB ingested the data |
|
||||
| `400 "Bad request"` | error details about rejected points, up to 100 points: `line` contains the first rejected line, `message` describes rejections | If some or all request data isn't allowed (for example, if it is malformed or falls outside of the bucket's retention period)--the response body indicates whether a partial write has occurred or if all data has been rejected |
|
||||
| `401 "Unauthorized"` | | If the `Authorization` header is missing or malformed or if the [token](/influxdb3/clustered/admin/tokens/) doesn't have [permission](/influxdb3/clustered/reference/cli/influxctl/token/create/#examples) to write to the database. See [examples using credentials](/influxdb3/clustered/get-started/write/#write-line-protocol-to-influxdb) in write requests. |
|
||||
| `404 "Not found"` | requested **resource type** (for example, "organization" or "database"), and **resource name** | If a requested resource (for example, organization or database) wasn't found |
|
||||
| `500 "Internal server error"` | | Default status for an error |
|
||||
| `503 "Service unavailable"` | | If the server is temporarily unavailable to accept writes. The `Retry-After` header describes when to try the write again. |
|
||||
|
||||
If your data did not write to the database, see how to [troubleshoot rejected points](#troubleshoot-rejected-points).
|
||||
|
||||
## Troubleshoot failures
|
||||
|
||||
If you notice data is missing in your database, do the following:
|
||||
|
||||
- Check the `message` property in the response body for details about the error.
|
||||
- If the `message` describes a field error, [troubleshoot rejected points](#troubleshoot-rejected-points).
|
||||
- Verify all lines contain valid syntax ([line protocol](/influxdb3/clustered/reference/syntax/line-protocol/)).
|
||||
- Verify the timestamps in your data match the [precision parameter](/influxdb3/clustered/reference/glossary/#precision) in your request.
|
||||
- Minimize payload size and network errors by [optimizing writes](/influxdb3/clustered/write-data/best-practices/optimize-writes/).
|
||||
|
||||
## Troubleshoot rejected points
|
||||
|
||||
InfluxDB rejects points that fall within the same partition (default partitioning
|
||||
is by measurement and day) as existing bucket data and have a different data type
|
||||
for an existing field.
|
||||
|
||||
Check for [field data type](/influxdb3/clustered/reference/syntax/line-protocol/#data-types-and-format)
|
||||
differences between the rejected data point and points within the same database
|
||||
and partition--for example, did you attempt to write `string` data to an `int` field?
|
||||
<!-- The content for this page is at
|
||||
//SOURCE - content/shared/influxdb3-write-guides/troubleshoot-distributed.md
|
||||
-->
|
||||
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
title: influxdb3 delete
|
||||
description: >
|
||||
The `influxdb3 delete` command deletes a resource such as a database or a table.
|
||||
The `influxdb3 delete` command deletes a resource such as a cache, database, or table.
|
||||
menu:
|
||||
influxdb3_core:
|
||||
parent: influxdb3
|
||||
|
|
@ -10,6 +10,6 @@ weight: 300
|
|||
source: /shared/influxdb3-cli/delete/_index.md
|
||||
---
|
||||
|
||||
<!--
|
||||
The content of this file is at content/shared/influxdb3-cli/delete/_index.md
|
||||
<!-- The content of this file is at
|
||||
//SOURCE - content/shared/influxdb3-cli/delete/_index.md
|
||||
-->
|
||||
|
|
|
|||
|
|
@ -0,0 +1,18 @@
|
|||
---
|
||||
title: influxdb3 delete token
|
||||
description: >
|
||||
The `influxdb3 delete token` command deletes an authorization token from the {{% product-name %}} server.
|
||||
influxdb3/core/tags: [cli]
|
||||
menu:
|
||||
influxdb3_core:
|
||||
parent: influxdb3 delete
|
||||
weight: 201
|
||||
related:
|
||||
- /influxdb3/core/admin/tokens/
|
||||
- /influxdb3/core/api/v3/#tag/Token, InfluxDB /api/v3 Token API reference
|
||||
source: /shared/influxdb3-cli/delete/token.md
|
||||
---
|
||||
|
||||
<!-- The content of this file is at
|
||||
//SOURCE - content/shared/influxdb3-cli/delete/token.md
|
||||
-->
|
||||
|
|
@ -36,41 +36,23 @@ influxdb3 serve [OPTIONS] --node-id <HOST_IDENTIFIER_PREFIX>
|
|||
| :--------------- | :--------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------ |
|
||||
| {{< req "\*" >}} | `--node-id` | _See [configuration options](/influxdb3/core/reference/config-options/#node-id)_ |
|
||||
| | `--object-store` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store)_ |
|
||||
| | `--bucket` | _See [configuration options](/influxdb3/core/reference/config-options/#bucket)_ |
|
||||
| | `--data-dir` | _See [configuration options](/influxdb3/core/reference/config-options/#data-dir)_ |
|
||||
| | `--admin-token-recovery-http-bind` | _See [configuration options](/influxdb3/core/reference/config-options/#admin-token-recovery-http-bind)_ |
|
||||
| | `--admin-token-recovery-tcp-listener-file-path` | _See [configuration options](/influxdb3/core/reference/config-options/#admin-token-recovery-tcp-listener-file-path)_ |
|
||||
| | `--aws-access-key-id` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-access-key-id)_ |
|
||||
| | `--aws-secret-access-key` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-secret-access-key)_ |
|
||||
| | `--aws-allow-http` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-allow-http)_ |
|
||||
| | `--aws-default-region` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-default-region)_ |
|
||||
| | `--aws-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-endpoint)_ |
|
||||
| | `--aws-secret-access-key` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-secret-access-key)_ |
|
||||
| | `--aws-session-token` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-session-token)_ |
|
||||
| | `--aws-allow-http` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-allow-http)_ |
|
||||
| | `--aws-skip-signature` | _See [configuration options](/influxdb3/core/reference/config-options/#aws-skip-signature)_ |
|
||||
| | `--google-service-account` | _See [configuration options](/influxdb3/core/reference/config-options/#google-service-account)_ |
|
||||
| | `--azure-storage-account` | _See [configuration options](/influxdb3/core/reference/config-options/#azure-storage-account)_ |
|
||||
| | `--azure-storage-access-key` | _See [configuration options](/influxdb3/core/reference/config-options/#azure-storage-access-key)_ |
|
||||
| | `--object-store-connection-limit` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-connection-limit)_ |
|
||||
| | `--object-store-http2-only` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-http2-only)_ |
|
||||
| | `--object-store-http2-max-frame-size` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-http2-max-frame-size)_ |
|
||||
| | `--object-store-max-retries` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-max-retries)_ |
|
||||
| | `--object-store-retry-timeout` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-retry-timeout)_ |
|
||||
| | `--object-store-cache-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-cache-endpoint)_ |
|
||||
| `-h` | `--help` | Print help information |
|
||||
| | `--help-all` | Print detailed help information |
|
||||
| | `--log-filter` | _See [configuration options](/influxdb3/core/reference/config-options/#log-filter)_ |
|
||||
| `-v` | `--verbose` | Enable verbose output |
|
||||
| | `--log-destination` | _See [configuration options](/influxdb3/core/reference/config-options/#log-destination)_ |
|
||||
| | `--log-format` | _See [configuration options](/influxdb3/core/reference/config-options/#log-format)_ |
|
||||
| | `--traces-exporter` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter)_ |
|
||||
| | `--traces-exporter-jaeger-agent-host` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-agent-host)_ |
|
||||
| | `--traces-exporter-jaeger-agent-port` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-agent-port)_ |
|
||||
| | `--traces-exporter-jaeger-service-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-service-name)_ |
|
||||
| | `--traces-exporter-jaeger-trace-context-header-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-trace-context-header-name)_ |
|
||||
| | `--traces-jaeger-debug-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-debug-name)_ |
|
||||
| | `--traces-jaeger-tags` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-tags)_ |
|
||||
| | `--traces-jaeger-max-msgs-per-second` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-max-msgs-per-second)_ |
|
||||
| | `--azure-storage-account` | _See [configuration options](/influxdb3/core/reference/config-options/#azure-storage-account)_ |
|
||||
| | `--bucket` | _See [configuration options](/influxdb3/core/reference/config-options/#bucket)_ |
|
||||
| | `--buffer-mem-limit-mb` | _See [configuration options](/influxdb3/core/reference/config-options/#buffer-mem-limit-mb)_ |
|
||||
| | `--data-dir` | _See [configuration options](/influxdb3/core/reference/config-options/#data-dir)_ |
|
||||
| | `--datafusion-config` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-config)_ |
|
||||
| | `--datafusion-max-parquet-fanout` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-max-parquet-fanout)_ |
|
||||
| | `--datafusion-num-threads` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-num-threads)_ |
|
||||
| | `--datafusion-runtime-type` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-type)_ |
|
||||
| | `--datafusion-runtime-disable-lifo-slot` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-disable-lifo-slot)_ |
|
||||
| | `--datafusion-runtime-event-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-event-interval)_ |
|
||||
| | `--datafusion-runtime-global-queue-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-global-queue-interval)_ |
|
||||
|
|
@ -78,29 +60,67 @@ influxdb3 serve [OPTIONS] --node-id <HOST_IDENTIFIER_PREFIX>
|
|||
| | `--datafusion-runtime-max-io-events-per-tick` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-max-io-events-per-tick)_ |
|
||||
| | `--datafusion-runtime-thread-keep-alive` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-thread-keep-alive)_ |
|
||||
| | `--datafusion-runtime-thread-priority` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-thread-priority)_ |
|
||||
| | `--datafusion-max-parquet-fanout` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-max-parquet-fanout)_ |
|
||||
| | `--datafusion-runtime-type` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-runtime-type)_ |
|
||||
| | `--datafusion-use-cached-parquet-loader` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-use-cached-parquet-loader)_ |
|
||||
| | `--datafusion-config` | _See [configuration options](/influxdb3/core/reference/config-options/#datafusion-config)_ |
|
||||
| | `--max-http-request-size` | _See [configuration options](/influxdb3/core/reference/config-options/#max-http-request-size)_ |
|
||||
| | `--http-bind` | _See [configuration options](/influxdb3/core/reference/config-options/#http-bind)_ |
|
||||
| | `--exec-mem-pool-bytes` | _See [configuration options](/influxdb3/core/reference/config-options/#exec-mem-pool-bytes)_ |
|
||||
| | `--gen1-duration` | _See [configuration options](/influxdb3/core/reference/config-options/#gen1-duration)_ |
|
||||
| | `--wal-flush-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-flush-interval)_ |
|
||||
| | `--wal-snapshot-size` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-snapshot-size)_ |
|
||||
| | `--wal-max-write-buffer-size` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-max-write-buffer-size)_ |
|
||||
| | `--snapshotted-wal-files-to-keep` | _See [configuration options](/influxdb3/core/reference/config-options/#snapshotted-wal-files-to-keep)_ |
|
||||
| | `--query-log-size` | _See [configuration options](/influxdb3/core/reference/config-options/#query-log-size)_ |
|
||||
| | `--parquet-mem-cache-size` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-size)_ |
|
||||
| | `--parquet-mem-cache-prune-percentage` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-prune-percentage)_ |
|
||||
| | `--parquet-mem-cache-prune-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-prune-interval)_ |
|
||||
| | `--delete-grace-period` | _See [configuration options](/influxdb3/core/reference/config-options/#delete-grace-period)_ |
|
||||
| | `--disable-authz` | _See [configuration options](/influxdb3/core/reference/config-options/#disable-authz)_ |
|
||||
| | `--disable-parquet-mem-cache` | _See [configuration options](/influxdb3/core/reference/config-options/#disable-parquet-mem-cache)_ |
|
||||
| | `--last-cache-eviction-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#last-cache-eviction-interval)_ |
|
||||
| | `--distinct-cache-eviction-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#distinct-cache-eviction-interval)_ |
|
||||
| | `--plugin-dir` | _See [configuration options](/influxdb3/core/reference/config-options/#plugin-dir)_ |
|
||||
| | `--exec-mem-pool-bytes` | _See [configuration options](/influxdb3/core/reference/config-options/#exec-mem-pool-bytes)_ |
|
||||
| | `--force-snapshot-mem-threshold` | _See [configuration options](/influxdb3/core/reference/config-options/#force-snapshot-mem-threshold)_ |
|
||||
| | `--virtual-env-location` | _See [configuration options](/influxdb3/core/reference/config-options/#virtual-env-location)_ |
|
||||
| | `--gen1-duration` | _See [configuration options](/influxdb3/core/reference/config-options/#gen1-duration)_ |
|
||||
| | `--gen1-lookback-duration` | _See [configuration options](/influxdb3/core/reference/config-options/#gen1-lookback-duration)_ |
|
||||
| | `--google-service-account` | _See [configuration options](/influxdb3/core/reference/config-options/#google-service-account)_ |
|
||||
| | `--hard-delete-default-duration` | _See [configuration options](/influxdb3/core/reference/config-options/#hard-delete-default-duration)_ |
|
||||
| `-h` | `--help` | Print help information |
|
||||
| | `--help-all` | Print detailed help information |
|
||||
| | `--http-bind` | _See [configuration options](/influxdb3/core/reference/config-options/#http-bind)_ |
|
||||
| | `--last-cache-eviction-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#last-cache-eviction-interval)_ |
|
||||
| | `--log-destination` | _See [configuration options](/influxdb3/core/reference/config-options/#log-destination)_ |
|
||||
| | `--log-filter` | _See [configuration options](/influxdb3/core/reference/config-options/#log-filter)_ |
|
||||
| | `--log-format` | _See [configuration options](/influxdb3/core/reference/config-options/#log-format)_ |
|
||||
| | `--max-http-request-size` | _See [configuration options](/influxdb3/core/reference/config-options/#max-http-request-size)_ |
|
||||
| | `--object-store-cache-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-cache-endpoint)_ |
|
||||
| | `--object-store-connection-limit` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-connection-limit)_ |
|
||||
| | `--object-store-http2-max-frame-size` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-http2-max-frame-size)_ |
|
||||
| | `--object-store-http2-only` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-http2-only)_ |
|
||||
| | `--object-store-max-retries` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-max-retries)_ |
|
||||
| | `--object-store-retry-timeout` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-retry-timeout)_ |
|
||||
| | `--package-manager` | _See [configuration options](/influxdb3/core/reference/config-options/#package-manager)_ |
|
||||
| | `--parquet-mem-cache-prune-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-prune-interval)_ |
|
||||
| | `--parquet-mem-cache-prune-percentage` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-prune-percentage)_ |
|
||||
| | `--parquet-mem-cache-query-path-duration` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-query-path-duration)_ |
|
||||
| | `--parquet-mem-cache-size` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-size)_ |
|
||||
| | `--plugin-dir` | _See [configuration options](/influxdb3/core/reference/config-options/#plugin-dir)_ |
|
||||
| | `--preemptive-cache-age` | _See [configuration options](/influxdb3/core/reference/config-options/#preemptive-cache-age)_ |
|
||||
| | `--query-file-limit` | _See [configuration options](/influxdb3/core/reference/config-options/#query-file-limit)_ |
|
||||
| | `--query-log-size` | _See [configuration options](/influxdb3/core/reference/config-options/#query-log-size)_ |
|
||||
| | `--retention-check-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#retention-check-interval)_ |
|
||||
| | `--snapshotted-wal-files-to-keep` | _See [configuration options](/influxdb3/core/reference/config-options/#snapshotted-wal-files-to-keep)_ |
|
||||
| | `--table-index-cache-concurrency-limit` | _See [configuration options](/influxdb3/core/reference/config-options/#table-index-cache-concurrency-limit)_ |
|
||||
| | `--table-index-cache-max-entries` | _See [configuration options](/influxdb3/core/reference/config-options/#table-index-cache-max-entries)_ |
|
||||
| | `--tcp-listener-file-path` | _See [configuration options](/influxdb3/core/reference/config-options/#tcp-listener-file-path)_ |
|
||||
| | `--telemetry-disable-upload` | _See [configuration options](/influxdb3/core/reference/config-options/#telemetry-disable-upload)_ |
|
||||
| | `--telemetry-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#telemetry-endpoint)_ |
|
||||
| | `--tls-cert` | _See [configuration options](/influxdb3/core/reference/config-options/#tls-cert)_ |
|
||||
| | `--tls-key` | _See [configuration options](/influxdb3/core/reference/config-options/#tls-key)_ |
|
||||
| | `--tls-minimum-version` | _See [configuration options](/influxdb3/core/reference/config-options/#tls-minimum-version)_ |
|
||||
| | `--traces-exporter` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter)_ |
|
||||
| | `--traces-exporter-jaeger-agent-host` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-agent-host)_ |
|
||||
| | `--traces-exporter-jaeger-agent-port` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-agent-port)_ |
|
||||
| | `--traces-exporter-jaeger-service-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-service-name)_ |
|
||||
| | `--traces-exporter-jaeger-trace-context-header-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-exporter-jaeger-trace-context-header-name)_ |
|
||||
| | `--traces-jaeger-debug-name` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-debug-name)_ |
|
||||
| | `--traces-jaeger-max-msgs-per-second` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-max-msgs-per-second)_ |
|
||||
| | `--traces-jaeger-tags` | _See [configuration options](/influxdb3/core/reference/config-options/#traces-jaeger-tags)_ |
|
||||
| `-v` | `--verbose` | Enable verbose output |
|
||||
| | `--virtual-env-location` | _See [configuration options](/influxdb3/core/reference/config-options/#virtual-env-location)_ |
|
||||
| | `--wal-flush-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-flush-interval)_ |
|
||||
| | `--wal-max-write-buffer-size` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-max-write-buffer-size)_ |
|
||||
| | `--wal-replay-concurrency-limit` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-replay-concurrency-limit)_ |
|
||||
| | `--wal-replay-fail-on-error` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-replay-fail-on-error)_ |
|
||||
| | `--wal-snapshot-size` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-snapshot-size)_ |
|
||||
| | `--without-auth` | _See [configuration options](/influxdb3/core/reference/config-options/#without-auth)_ |
|
||||
|
||||
{{< caption >}}
|
||||
{{< req text="\* Required options" >}}
|
||||
|
|
@ -110,7 +130,7 @@ influxdb3 serve [OPTIONS] --node-id <HOST_IDENTIFIER_PREFIX>
|
|||
|
||||
You can use environment variables to define most `influxdb3 serve` options.
|
||||
For more information, see
|
||||
[Configuration options](/influxdb3/enterprise/reference/config-options/).
|
||||
[Configuration options](/influxdb3/core/reference/config-options/).
|
||||
|
||||
## Examples
|
||||
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
title: influxdb3 delete
|
||||
description: >
|
||||
The `influxdb3 delete` command deletes a resource such as a database or a table.
|
||||
The `influxdb3 delete` command deletes a resource such as a cache, database, or table.
|
||||
menu:
|
||||
influxdb3_enterprise:
|
||||
parent: influxdb3
|
||||
|
|
@ -10,6 +10,6 @@ weight: 300
|
|||
source: /shared/influxdb3-cli/delete/_index.md
|
||||
---
|
||||
|
||||
<!--
|
||||
The content of this file is at content/shared/influxdb3-cli/delete/_index.md
|
||||
<!-- The content of this file is at
|
||||
//SOURCE - content/shared/influxdb3-cli/delete/_index.md
|
||||
-->
|
||||
|
|
|
|||
|
|
@ -0,0 +1,18 @@
|
|||
---
|
||||
title: influxdb3 delete token
|
||||
description: >
|
||||
The `influxdb3 delete token` command deletes an authorization token from the {{% product-name %}} server.
|
||||
influxdb3/enterprise/tags: [cli]
|
||||
menu:
|
||||
influxdb3_enterprise:
|
||||
parent: influxdb3 delete
|
||||
weight: 201
|
||||
related:
|
||||
- /influxdb3/enterprise/admin/tokens/
|
||||
- /influxdb3/enterprise/api/v3/#tag/Token, InfluxDB /api/v3 Token API reference
|
||||
source: /shared/influxdb3-cli/delete/token.md
|
||||
---
|
||||
|
||||
<!-- The content of this file is at
|
||||
//SOURCE - content/shared/influxdb3-cli/delete/token.md
|
||||
-->
|
||||
|
|
@ -38,6 +38,7 @@ influxdb3 serve [OPTIONS] \
|
|||
| Option | | Description |
|
||||
| :--------------- | :--------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| | `--admin-token-recovery-http-bind` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#admin-token-recovery-http-bind)_ |
|
||||
| | `--admin-token-recovery-tcp-listener-file-path` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#admin-token-recovery-tcp-listener-file-path)_ |
|
||||
| | `--aws-access-key-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#aws-access-key-id)_ |
|
||||
| | `--aws-allow-http` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#aws-allow-http)_ |
|
||||
| | `--aws-default-region` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#aws-default-region)_ |
|
||||
|
|
@ -48,7 +49,11 @@ influxdb3 serve [OPTIONS] \
|
|||
| | `--azure-storage-access-key` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-storage-access-key)_ |
|
||||
| | `--azure-storage-account` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#azure-storage-account)_ |
|
||||
| | `--bucket` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#bucket)_ |
|
||||
| | `--buffer-mem-limit-mb` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#buffer-mem-limit-mb)_ |
|
||||
| | `--catalog-sync-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#catalog-sync-interval)_ |
|
||||
| {{< req "\*" >}} | `--cluster-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#cluster-id)_ |
|
||||
| | `--compaction-check-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-check-interval)_ |
|
||||
| | `--compaction-cleanup-wait` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-cleanup-wait)_ |
|
||||
| | `--compaction-gen2-duration` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-gen2-duration)_ |
|
||||
| | `--compaction-max-num-files-per-plan` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-max-num-files-per-plan)_ |
|
||||
| | `--compaction-multipliers` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#compaction-multipliers)_ |
|
||||
|
|
@ -66,16 +71,22 @@ influxdb3 serve [OPTIONS] \
|
|||
| | `--datafusion-runtime-thread-priority` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-thread-priority)_ |
|
||||
| | `--datafusion-runtime-type` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-runtime-type)_ |
|
||||
| | `--datafusion-use-cached-parquet-loader` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#datafusion-use-cached-parquet-loader)_ |
|
||||
| | `--delete-grace-period` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#delete-grace-period)_ |
|
||||
| | `--disable-authz` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#disable-authz)_ |
|
||||
| | `--disable-parquet-mem-cache` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#disable-parquet-mem-cache)_ |
|
||||
| | `--distinct-cache-eviction-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#distinct-cache-eviction-interval)_ |
|
||||
| | `--distinct-value-cache-disable-from-history` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#distinct-value-cache-disable-from-history)_ |
|
||||
| | `--exec-mem-pool-bytes` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#exec-mem-pool-bytes)_ |
|
||||
| | `--force-snapshot-mem-threshold` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#force-snapshot-mem-threshold)_ |
|
||||
| | `--gen1-duration` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#gen1-duration)_ |
|
||||
| | `--gen1-lookback-duration` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#gen1-lookback-duration)_ |
|
||||
| | `--google-service-account` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#google-service-account)_ |
|
||||
| | `--hard-delete-default-duration` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#hard-delete-default-duration)_ |
|
||||
| `-h` | `--help` | Print help information |
|
||||
| | `--help-all` | Print detailed help information |
|
||||
| | `--http-bind` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#http-bind)_ |
|
||||
| | `--last-cache-eviction-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#last-cache-eviction-interval)_ |
|
||||
| | `--last-value-cache-disable-from-history` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#last-value-cache-disable-from-history)_ |
|
||||
| | `--license-email` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#license-email)_ |
|
||||
| | `--license-file` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#license-file)_ |
|
||||
| | `--log-destination` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#log-destination)_ |
|
||||
|
|
@ -84,6 +95,11 @@ influxdb3 serve [OPTIONS] \
|
|||
| | `--max-http-request-size` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#max-http-request-size)_ |
|
||||
| | `--mode` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#mode)_ |
|
||||
| {{< req "\*" >}} | `--node-id` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#node-id)_ |
|
||||
| | `--node-id-from-env` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#node-id-from-env)_ |
|
||||
| | `--num-cores` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-cores)_ |
|
||||
| | `--num-database-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-database-limit)_ |
|
||||
| | `--num-table-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-table-limit)_ |
|
||||
| | `--num-total-columns-per-table-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#num-total-columns-per-table-limit)_ |
|
||||
| | `--object-store` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#object-store)_ |
|
||||
| | `--object-store-cache-endpoint` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#object-store-cache-endpoint)_ |
|
||||
| | `--object-store-connection-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#object-store-connection-limit)_ |
|
||||
|
|
@ -101,7 +117,16 @@ influxdb3 serve [OPTIONS] \
|
|||
| | `--query-file-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#query-file-limit)_ |
|
||||
| | `--query-log-size` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#query-log-size)_ |
|
||||
| | `--replication-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#replication-interval)_ |
|
||||
| | `--retention-check-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#retention-check-interval)_ |
|
||||
| | `--snapshotted-wal-files-to-keep` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#snapshotted-wal-files-to-keep)_ |
|
||||
| | `--table-index-cache-concurrency-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#table-index-cache-concurrency-limit)_ |
|
||||
| | `--table-index-cache-max-entries` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#table-index-cache-max-entries)_ |
|
||||
| | `--tcp-listener-file-path` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#tcp-listener-file-path)_ |
|
||||
| | `--telemetry-disable-upload` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#telemetry-disable-upload)_ |
|
||||
| | `--telemetry-endpoint` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#telemetry-endpoint)_ |
|
||||
| | `--tls-cert` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#tls-cert)_ |
|
||||
| | `--tls-key` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#tls-key)_ |
|
||||
| | `--tls-minimum-version` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#tls-minimum-version)_ |
|
||||
| | `--traces-exporter` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-exporter)_ |
|
||||
| | `--traces-exporter-jaeger-agent-host` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-exporter-jaeger-agent-host)_ |
|
||||
| | `--traces-exporter-jaeger-agent-port` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-exporter-jaeger-agent-port)_ |
|
||||
|
|
@ -110,11 +135,16 @@ influxdb3 serve [OPTIONS] \
|
|||
| | `--traces-jaeger-debug-name` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-jaeger-debug-name)_ |
|
||||
| | `--traces-jaeger-max-msgs-per-second` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-jaeger-max-msgs-per-second)_ |
|
||||
| | `--traces-jaeger-tags` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#traces-jaeger-tags)_ |
|
||||
| | `--use-pacha-tree` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#use-pacha-tree)_ |
|
||||
| `-v` | `--verbose` | Enable verbose output |
|
||||
| | `--virtual-env-location` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#virtual-env-location)_ |
|
||||
| | `--wait-for-running-ingestor` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wait-for-running-ingestor)_ |
|
||||
| | `--wal-flush-interval` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wal-flush-interval)_ |
|
||||
| | `--wal-max-write-buffer-size` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wal-max-write-buffer-size)_ |
|
||||
| | `--wal-replay-concurrency-limit` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wal-replay-concurrency-limit)_ |
|
||||
| | `--wal-replay-fail-on-error` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wal-replay-fail-on-error)_ |
|
||||
| | `--wal-snapshot-size` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#wal-snapshot-size)_ |
|
||||
| | `--without-auth` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#without-auth)_ |
|
||||
|
||||
{{< caption >}}
|
||||
{{< req text="\* Required options" >}}
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -86,7 +86,7 @@ To use {{% product-name %}} to query data from InfluxDB 3, navigate to
|
|||
The _Data Explorer_ lets you explore the
|
||||
schema of your database and automatically builds SQL queries by either
|
||||
selecting columns in the _Schema Browser_ or by using _Natural Language_ with
|
||||
the {{% product-name %}} OpenAI integration.
|
||||
the {{% product-name %}} AI integration.
|
||||
|
||||
For this getting started guide, use the Schema Browser to build a SQL query
|
||||
that returns data from the newly written sample data set.
|
||||
|
|
|
|||
|
|
@ -1,3 +1,31 @@
|
|||
## 2.10.3 {date="2025-07-30"}
|
||||
|
||||
### Features
|
||||
|
||||
- Add `id` column to the output of the
|
||||
[`influxctl database list` command](/influxdb3/version/reference/cli/influxctl/database/list/).
|
||||
- Add [`influxctl table rename` command](/influxdb3/version/reference/cli/influxctl/table/rename/).
|
||||
- Add user-agent to Granite gRPC requests.
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- Require the `--template-timeformat` option when the `--template-tags` option
|
||||
is included when creating a database or table with custom partitions.
|
||||
- Fix table iceberg enable/disable description.
|
||||
|
||||
### Dependency updates
|
||||
|
||||
- Update `github.com/apache/arrow-go/v18` from 18.3.1 to 18.4.0.
|
||||
- Update `github.com/docker/docker` from 28.2.2+incompatible to 28.3.3+incompatible.
|
||||
- Update `github.com/golang-jwt/jwt/v5` from 5.2.2 to 5.2.3.
|
||||
- Update `github.com/jedib0t/go-pretty/v6` from 6.6.7 to 6.6.8.
|
||||
- Update `golang.org/x/mod` from 0.25.0 to 0.26.0.
|
||||
- Update `google.golang.org/grpc` from 1.73.0 to 1.74.2.
|
||||
- Update `helm.sh/helm/v3` from 3.17.3 to 3.18.4.
|
||||
- Update Go 1.24.5.
|
||||
|
||||
---
|
||||
|
||||
## v2.10.2 {date="2025-06-30"}
|
||||
|
||||
### Features
|
||||
|
|
|
|||
|
|
@ -0,0 +1,39 @@
|
|||
|
||||
The `influxctl table rename` command renames a table in the specified database in
|
||||
an {{< product-name omit=" Clustered" >}} cluster.
|
||||
|
||||
## Usage
|
||||
|
||||
<!-- pytest.mark.skip -->
|
||||
|
||||
```bash
|
||||
influxctl table rename [flags] <DATABASE_NAME> <CURRENT_TABLE_NAME> <NEW_TABLE_NAME>
|
||||
```
|
||||
|
||||
## Arguments
|
||||
|
||||
| Argument | Description |
|
||||
| :--------------------- | :----------------------------------- |
|
||||
| **DATABASE_NAME** | Name of the database the table is in |
|
||||
| **CURRENT_TABLE_NAME** | Current name of the table |
|
||||
| **NEW_TABLE_NAME** | New name for the table |
|
||||
|
||||
## Flags
|
||||
|
||||
| Flag | | Description |
|
||||
| :--- | :--------- | :-------------------------------------------- |
|
||||
| | `--format` | Output format (`table` _(default)_ or `json`) |
|
||||
| `-h` | `--help` | Output command help |
|
||||
|
||||
{{% caption %}}
|
||||
_Also see [`influxctl` global flags](/influxdb3/version/reference/cli/influxctl/#global-flags)._
|
||||
{{% /caption %}}
|
||||
|
||||
## Examples
|
||||
|
||||
<!-- pytest.mark.skip -->
|
||||
|
||||
```bash
|
||||
# Rename the "example-tb" table to "example_tb"
|
||||
influxctl table rename mydb example-tb example_tb
|
||||
```
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,5 +1,5 @@
|
|||
|
||||
The `influxdb3 delete` command deletes a resource such as a database or a table.
|
||||
The `influxdb3 delete` command deletes a resource such as a cache, a database, or a table.
|
||||
|
||||
## Usage
|
||||
|
||||
|
|
@ -19,6 +19,7 @@ influxdb3 delete <SUBCOMMAND>
|
|||
| [last_cache](/influxdb3/version/reference/cli/influxdb3/delete/last_cache/) | Delete a last value cache |
|
||||
| [distinct_cache](/influxdb3/version/reference/cli/influxdb3/delete/distinct_cache/) | Delete a metadata cache |
|
||||
| [table](/influxdb3/version/reference/cli/influxdb3/delete/table/) | Delete a table from a database |
|
||||
| [token](/influxdb3/version/reference/cli/influxdb3/delete/token/) | Delete an authorization token from the server |
|
||||
| [trigger](/influxdb3/version/reference/cli/influxdb3/delete/trigger/) | Delete a trigger for the processing engine |
|
||||
| help | Print command help or the help of a subcommand |
|
||||
{{% /show-in %}}
|
||||
|
|
@ -30,6 +31,7 @@ influxdb3 delete <SUBCOMMAND>
|
|||
| [last_cache](/influxdb3/version/reference/cli/influxdb3/delete/last_cache/) | Delete a last value cache |
|
||||
| [distinct_cache](/influxdb3/version/reference/cli/influxdb3/delete/distinct_cache/) | Delete a metadata cache |
|
||||
| [table](/influxdb3/version/reference/cli/influxdb3/delete/table/) | Delete a table from a database |
|
||||
| [token](/influxdb3/version/reference/cli/influxdb3/delete/token/) | Delete an authorization token from the server |
|
||||
| [trigger](/influxdb3/version/reference/cli/influxdb3/delete/trigger/) | Delete a trigger for the processing engine |
|
||||
| help | Print command help or the help of a subcommand |
|
||||
{{% /show-in %}}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,32 @@
|
|||
|
||||
The `influxdb3 delete token` command deletes an authorization token from the {{% product-name %}} server.
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
influxdb3 delete token [OPTIONS]
|
||||
```
|
||||
|
||||
## Options
|
||||
|
||||
| Option | Description | Default | Environment |
|
||||
|----------------|-----------------------------------------------------------------------------------|---------|------------------------|
|
||||
| `--token` | _({{< req >}})_ The token for authentication with the {{% product-name %}} server | | `INFLUXDB3_AUTH_TOKEN` |
|
||||
| `--token-name` | _({{< req >}})_ The name of the token to be deleted | | |
|
||||
| | `--tls-ca` | An optional argument to use a custom CA, useful for testing with self-signed certificates | | `INFLUXDB3_TLS_CA` |
|
||||
| `-h`, `--help` | Print help information | | |
|
||||
| `--help-all` | Print detailed help information | | |
|
||||
|
||||
## Examples
|
||||
|
||||
### Delete a token by name
|
||||
|
||||
```bash
|
||||
influxdb3 delete token --token-name TOKEN_TO_DELETE --token AUTH_TOKEN
|
||||
```
|
||||
|
||||
### Show help for the command
|
||||
|
||||
```bash
|
||||
influxdb3 delete token --help
|
||||
```
|
||||
|
|
@ -0,0 +1,301 @@
|
|||
Learn how to set appropriate query timeouts for InfluxDB 3 to balance performance and resource protection.
|
||||
|
||||
Query timeouts prevent resource monopolization while allowing legitimate queries to complete successfully.
|
||||
The key is finding the "goldilocks zone"—timeouts that are not too short (causing legitimate queries to fail) and not too long (allowing runaway queries to monopolize resources).
|
||||
|
||||
- [Understanding query timeouts](#understanding-query-timeouts)
|
||||
- [How query routing affects timeout strategy](#how-query-routing-affects-timeout-strategy)
|
||||
- [Timeout configuration best practices](#timeout-configuration-best-practices)
|
||||
- [InfluxDB 3 client library examples](#influxdb-3-client-library-examples)
|
||||
- [Monitoring and troubleshooting](#monitoring-and-troubleshooting)
|
||||
|
||||
## Understanding query timeouts
|
||||
|
||||
Query timeouts define the maximum duration a query can run before being canceled.
|
||||
In {{% product-name %}}, timeouts serve multiple purposes:
|
||||
|
||||
- **Resource protection**: Prevent runaway queries from monopolizing system resources
|
||||
- **Performance optimization**: Ensure responsive system behavior for time-sensitive operations
|
||||
- **Cost control**: Limit compute resource consumption
|
||||
- **User experience**: Provide predictable response times for applications and dashboards
|
||||
|
||||
Query execution includes network latency, query planning, data retrieval, processing, and result serialization.
|
||||
|
||||
### The "goldilocks zone" for query timeouts
|
||||
|
||||
Optimal timeouts are:
|
||||
- **Long enough**: To accommodate normal query execution under typical load
|
||||
- **Short enough**: To prevent resource monopolization and provide reasonable feedback
|
||||
- **Adaptive**: Adjusted based on query type, system load, and historical performance
|
||||
|
||||
## How query routing affects timeout strategy
|
||||
|
||||
InfluxDB 3 uses round-robin query routing to balance load across multiple queriers.
|
||||
This creates a "checkout line" effect that influences timeout strategy.
|
||||
|
||||
> [!Note]
|
||||
> #### Concurrent query execution
|
||||
>
|
||||
> InfluxDB 3 supports concurrent query execution, which helps minimize the impact of intensive or inefficient queries.
|
||||
> However, you should still use appropriate timeouts and optimize your queries for best performance.
|
||||
|
||||
### The checkout line analogy
|
||||
|
||||
Consider a grocery store with multiple checkout lines:
|
||||
- Customers (queries) are distributed across lines (queriers)
|
||||
- A slow customer (long-running query) can block others in the same line
|
||||
- More checkout lines (queriers) provide more alternatives when retrying
|
||||
|
||||
If one querier is unhealthy or has been hijacked by a "noisy neighbor" query (excessively resource hungry), giving up sooner may save time--it's like jumping to a cashier with no customers in line. However, if all queriers are overloaded, then short retries may exacerbate the problem--you wouldn't jump to the end of another line if the cashier is already starting to scan your items.
|
||||
|
||||
### Noisy neighbor effects
|
||||
|
||||
In distributed systems:
|
||||
- A single long-running query can impact other queries on the same querier
|
||||
- Shorter timeouts with retries can help queries find less congested queriers
|
||||
- The effectiveness depends on the number of available queriers
|
||||
|
||||
### When shorter timeouts help
|
||||
|
||||
- **Multiple queriers available**: Retries can find less congested queriers
|
||||
- **Uneven load distribution**: Some queriers may be significantly less busy
|
||||
- **Temporary congestion**: Brief spikes in query load or resource usage
|
||||
|
||||
### When shorter timeouts hurt
|
||||
|
||||
- **Few queriers**: Limited alternatives for retries
|
||||
- **System-wide congestion**: All queriers are equally busy
|
||||
- **Expensive query planning**: High overhead for query preparation
|
||||
|
||||
## Timeout configuration best practices
|
||||
|
||||
### Make timeouts adjustable
|
||||
|
||||
Configure timeouts that can be modified without service restarts using environment variables, configuration files, runtime APIs, or per-query overrides. Design your client applications to easily adjust timeouts on the fly, allowing you to respond quickly to performance changes and test different timeout strategies without code changes.
|
||||
|
||||
See the [InfluxDB 3 client library examples](#influxdb-3-client-library-examples)
|
||||
for how to configure timeouts in Python.
|
||||
|
||||
### Use tiered timeout strategies
|
||||
|
||||
Implement different timeout classes based on query characteristics.
|
||||
|
||||
#### Starting point recommendations
|
||||
|
||||
{{% hide-in "cloud-serverless" %}}
|
||||
| Query Type | Recommended Timeout | Use Case | Rationale |
|
||||
|------------|-------------------|-----------|-----------|
|
||||
| UI and dashboard | 10 seconds | Interactive dashboards, real-time monitoring | Users expect immediate feedback |
|
||||
| Generic default | 60 seconds | Application queries, APIs | Balances performance and reliability |
|
||||
| Mixed workload | 2 minutes | Development, testing environments | Accommodates various query types |
|
||||
| Analytical and background | 5 minutes | Reports, batch processing, ETL operations | Complex queries need more time |
|
||||
{{% /hide-in %}}
|
||||
|
||||
{{% show-in "cloud-serverless" %}}
|
||||
| Query Type | Recommended Timeout | Use Case | Rationale |
|
||||
|------------|-------------------|-----------|-----------|
|
||||
| UI and dashboard | 10 seconds | Interactive dashboards, real-time monitoring | Users expect immediate feedback |
|
||||
| Generic default | 30 seconds | Application queries, APIs | Serverless optimized for shorter queries |
|
||||
| Mixed workload | 60 seconds | Development, testing environments | Limited by serverless execution model |
|
||||
| Analytical and background | 2 minutes | Reports, batch processing | Complex queries within serverless limits |
|
||||
{{% /show-in %}}
|
||||
|
||||
{{% show-in "enterprise, core" %}}
|
||||
> [!Tip]
|
||||
> #### Use caching
|
||||
> Where immediate feedback is crucial, consider using [Last Value Cache](/influxdb3/version/admin/manage-last-value-caches/) to speed up queries for recent values and [Distinct Value Cache](/influxdb3/version/admin/manage-distinct-value-caches/) to speed up queries for distinct values.
|
||||
{{% /show-in %}}
|
||||
|
||||
### Implement progressive timeout and retry logic
|
||||
|
||||
Consider using more sophisticated retry strategies rather than simple fixed retries:
|
||||
|
||||
1. **Exponential backoff**: Increase delay between retry attempts
|
||||
2. **Jitter**: Add randomness to prevent thundering herd effects
|
||||
3. **Circuit breakers**: Stop retries when system is overloaded
|
||||
4. **Deadline propagation**: Respect overall operation deadlines
|
||||
|
||||
### Warning signs
|
||||
|
||||
Consider these indicators that timeouts may need adjustment:
|
||||
|
||||
- **Timeouts > 10 minutes**: Usually indicates [query optimization](/influxdb3/version/query-data/troubleshoot-and-optimize/optimize-queries/) opportunities
|
||||
- **High retry rates**: May indicate timeouts are too aggressive
|
||||
- **Resource utilization spikes**: Long-running queries may need shorter timeouts
|
||||
- **User complaints**: Balance between performance and user experience
|
||||
|
||||
### Environment-specific considerations
|
||||
|
||||
- **Development**: Use longer timeouts for debugging
|
||||
- **Production**: Use shorter timeouts with monitoring
|
||||
- **Cost-sensitive**: Use aggressive timeouts and [query optimization](/influxdb3/version/query-data/troubleshoot-and-optimize/optimize-queries/)
|
||||
|
||||
### Experimental and ad-hoc queries
|
||||
|
||||
When introducing a new query to your application or when issuing ad-hoc queries to a database with many users, your query might be the "noisy neighbor" (the shopping cart overloaded with groceries). By setting a tighter timeout on experimental queries you can reduce the impact on other users.
|
||||
|
||||
|
||||
## InfluxDB 3 client library examples
|
||||
|
||||
### Python client with timeout configuration
|
||||
|
||||
Configure timeouts in the InfluxDB 3 Python client:
|
||||
|
||||
```python { placeholders="DATABASE_NAME|HOST_URL|AUTH_TOKEN" }
|
||||
import influxdb_client_3 as InfluxDBClient3
|
||||
|
||||
# Configure different timeout classes (in seconds)
|
||||
ui_timeout = 10 # For dashboard queries
|
||||
api_timeout = 60 # For application queries
|
||||
batch_timeout = 300 # For analytical queries
|
||||
|
||||
# Create client with default timeout
|
||||
client = InfluxDBClient3.InfluxDBClient3(
|
||||
host="https://{{< influxdb/host >}}",
|
||||
database="DATABASE_NAME",
|
||||
token="AUTH_TOKEN",
|
||||
timeout=api_timeout # Python client uses seconds
|
||||
)
|
||||
|
||||
# Quick query with short timeout
|
||||
def query_latest_data():
|
||||
try:
|
||||
result = client.query(
|
||||
query="SELECT * FROM sensors WHERE time >= now() - INTERVAL '5 minutes' ORDER BY time DESC LIMIT 10",
|
||||
timeout=ui_timeout
|
||||
)
|
||||
return result.to_pandas()
|
||||
except Exception as e:
|
||||
print(f"Quick query failed: {e}")
|
||||
return None
|
||||
|
||||
# Analytical query with longer timeout
|
||||
def query_daily_averages():
|
||||
query = """
|
||||
SELECT
|
||||
DATE_TRUNC('day', time) as day,
|
||||
room,
|
||||
AVG(temperature) as avg_temp,
|
||||
COUNT(*) as readings
|
||||
FROM sensors
|
||||
WHERE time >= now() - INTERVAL '30 days'
|
||||
GROUP BY DATE_TRUNC('day', time), room
|
||||
ORDER BY day DESC, room
|
||||
"""
|
||||
|
||||
try:
|
||||
result = client.query(
|
||||
query=query,
|
||||
timeout=batch_timeout
|
||||
)
|
||||
return result.to_pandas()
|
||||
except Exception as e:
|
||||
print(f"Analytical query failed: {e}")
|
||||
return None
|
||||
```
|
||||
|
||||
Replace the following:
|
||||
|
||||
{{% hide-in "cloud-serverless" %}}
|
||||
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query{{% /hide-in %}}
|
||||
{{% show-in "cloud-serverless" %}}
|
||||
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the bucket to query{{% /show-in %}}
|
||||
{{% show-in "clustered,cloud-dedicated" %}}
|
||||
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: a [database token](/influxdb3/clustered/admin/tokens/#database-tokens) with _read_ access to the specified database.{{% /show-in %}}
|
||||
{{% show-in "cloud-serverless" %}}
|
||||
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: an [API token](/influxdb3/cloud-serverless/admin/tokens/) with _read_ access to the specified bucket.{{% /show-in %}}
|
||||
{{% show-in "enterprise,core" %}}
|
||||
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}} with read permissions on the specified database{{% /show-in %}}
|
||||
|
||||
### Basic retry logic implementation
|
||||
|
||||
Implement simple retry strategies with progressive timeouts:
|
||||
|
||||
```python
|
||||
import time
|
||||
import influxdb_client_3 as InfluxDBClient3
|
||||
|
||||
def query_with_retry(client, query: str, initial_timeout: int = 60, max_retries: int = 2):
|
||||
"""Execute query with basic retry and progressive timeout increase"""
|
||||
|
||||
for attempt in range(max_retries + 1):
|
||||
# Progressive timeout: increase timeout on each retry
|
||||
timeout_seconds = initial_timeout + attempt * 30
|
||||
|
||||
try:
|
||||
result = client.query(
|
||||
query=query,
|
||||
timeout=timeout_seconds
|
||||
)
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
if attempt == max_retries:
|
||||
print(f"Query failed after {max_retries + 1} attempts: {e}")
|
||||
raise
|
||||
|
||||
# Simple backoff delay
|
||||
delay = 2 * (attempt + 1)
|
||||
print(f"Query attempt {attempt + 1} failed: {e}")
|
||||
print(f"Retrying in {delay} seconds with timeout {timeout_seconds}s...")
|
||||
time.sleep(delay)
|
||||
|
||||
return None
|
||||
|
||||
# Usage example
|
||||
result = query_with_retry(
|
||||
client=client,
|
||||
query="SELECT * FROM large_table WHERE time >= now() - INTERVAL '1 day'",
|
||||
initial_timeout=60,
|
||||
max_retries=2
|
||||
)
|
||||
```
|
||||
|
||||
## Monitoring and troubleshooting
|
||||
|
||||
### Key metrics to monitor
|
||||
|
||||
Track these essential timeout-related metrics:
|
||||
|
||||
- **Query duration percentiles**: P50, P95, P99 execution times
|
||||
- **Timeout rate**: Percentage of queries that time out
|
||||
- **Error rates**: Timeout errors vs. other failure types
|
||||
- **Resource utilization**: CPU and memory usage during query execution
|
||||
|
||||
### Common timeout issues
|
||||
|
||||
#### High timeout rates
|
||||
|
||||
**Symptoms**: Many queries exceeding timeout limits
|
||||
|
||||
**Common causes**:
|
||||
- Timeouts set too aggressively for query complexity
|
||||
- System resource constraints
|
||||
- Inefficient query patterns
|
||||
|
||||
**Solutions**:
|
||||
1. Analyze query performance patterns
|
||||
2. [Optimize slow queries](/influxdb3/version/query-data/troubleshoot-and-optimize/optimize-queries/) or increase timeouts appropriately
|
||||
3. Scale system resources
|
||||
|
||||
#### Inconsistent query performance
|
||||
|
||||
**Symptoms**: Same queries sometimes fast, sometimes timeout
|
||||
|
||||
**Common causes**:
|
||||
|
||||
- Resource contention from concurrent queries
|
||||
- Data compaction state (queries may be faster after compaction completes)
|
||||
|
||||
**Solutions**:
|
||||
|
||||
1. Analyze query patterns to identify and optimize slow queries
|
||||
2. Implement retry logic with exponential backoff in your client applications
|
||||
3. Adjust timeout values based on observed query performance patterns
|
||||
{{% show-in "enterprise,core" %}}
|
||||
4. Implement [Last Value Cache](/influxdb3/version/admin/manage-last-value-caches/) to speed up queries for recent values
|
||||
5. Implement [Distinct Value Cache](/influxdb3/version/admin/manage-distinct-value-caches/) to speed up queries for distinct values
|
||||
{{% /show-in %}}
|
||||
|
||||
> [!Note]
|
||||
> Regular analysis of timeout patterns helps identify optimization opportunities and system scaling needs.
|
||||
|
|
@ -0,0 +1,348 @@
|
|||
Learn how to avoid unexpected results and recover from errors when writing to {{% product-name %}}.
|
||||
|
||||
- [Handle write responses](#handle-write-responses)
|
||||
- [Review HTTP status codes](#review-http-status-codes)
|
||||
- [Troubleshoot failures](#troubleshoot-failures)
|
||||
- [Troubleshoot rejected points](#troubleshoot-rejected-points)
|
||||
- [Report write issues](#report-write-issues)
|
||||
|
||||
## Handle write responses
|
||||
|
||||
{{% product-name %}} does the following when you send a write request:
|
||||
|
||||
1. Validates the request.
|
||||
2. If successful, attempts to [ingest data](/influxdb3/version/reference/internals/durability/#data-ingest) from the request body; otherwise, responds with an [error status](#review-http-status-codes).
|
||||
3. Ingests or rejects data from the batch and returns one of the following HTTP status codes:
|
||||
|
||||
- `204 No Content`: All of the data is ingested and queryable.
|
||||
- `400 Bad Request`: Some {{% show-in "cloud-dedicated,clustered" %}}(_when **partial writes** are configured for the cluster_){{% /show-in %}} or all of the data has been rejected. Data that has not been rejected is ingested and queryable.
|
||||
|
||||
The response body contains error details about [rejected points](#troubleshoot-rejected-points), up to 100 points.
|
||||
|
||||
Writes are synchronous--the response status indicates the final status of the write and all ingested data is queryable.
|
||||
|
||||
To ensure that InfluxDB handles writes in the order you request them,
|
||||
wait for the response before you send the next request.
|
||||
|
||||
### Review HTTP status codes
|
||||
|
||||
InfluxDB uses conventional HTTP status codes to indicate the success or failure of a request.
|
||||
The `message` property of the response body may contain additional details about the error.
|
||||
{{< product-name >}} returns one of the following HTTP status codes for a write request:
|
||||
|
||||
{{% show-in "clustered,cloud-dedicated" %}}
|
||||
| HTTP response code | Response body | Description |
|
||||
| :-------------------------------| :--------------------------------------------------------------- | :------------- |
|
||||
| `204 "No Content"` | Empty | InfluxDB ingested all of the data in the batch |
|
||||
| `400 "Bad request"` | error details about rejected points, up to 100 points: `line` contains the first rejected line, `message` describes rejections | Some or all request data isn't allowed (for example, is malformed or falls outside of the database's retention period)--the response body indicates whether a partial write has occurred or if all data has been rejected |
|
||||
| `401 "Unauthorized"` | Empty | The `Authorization` request header is missing or malformed or the [token](/influxdb3/version/admin/tokens/) doesn't have permission to write to the database |
|
||||
| `404 "Not found"` | A requested **resource type** (for example, "database"), and **resource name** | A requested resource wasn't found |
|
||||
| `422 "Unprocessable Entity"` | `message` contains details about the error | The data isn't allowed (for example, falls outside of the database's retention period). |
|
||||
| `500 "Internal server error"` | Empty | Default status for an error |
|
||||
| `503 "Service unavailable"` | Empty | The server is temporarily unavailable to accept writes. The `Retry-After` header contains the number of seconds to wait before trying the write again. |
|
||||
{{% /show-in %}}
|
||||
|
||||
{{% show-in "cloud-serverless" %}}
|
||||
| HTTP response code | Response body | Description |
|
||||
| :-------------------------------| :--------------------------------------------------------------- | :------------- |
|
||||
| `204 "No Content"` | Empty | InfluxDB ingested all of the data in the batch |
|
||||
| `400 "Bad request"` | error details about rejected points, up to 100 points: `line` contains the first rejected line, `message` describes rejections | Some or all request data isn't allowed (for example, is malformed or falls outside of the bucket's retention period)--the response body indicates whether a partial write has occurred or if all data has been rejected |
|
||||
| `401 "Unauthorized"` | Empty | The `Authorization` request header is missing or malformed or the [token](/influxdb3/version/admin/tokens/) doesn't have permission to write to the bucket |
|
||||
| `404 "Not found"` | A requested **resource type** (for example, "organization" or "bucket"), and **resource name** | A requested resource wasn't found |
|
||||
| `413 "Request too large"` | cannot read data: points in batch is too large | The request exceeds the maximum [global limit](/influxdb3/cloud-serverless/admin/billing/limits/) |
|
||||
| `422 "Unprocessable Entity"` | `message` contains details about the error | The data isn't allowed (for example, falls outside of the database's retention period). |
|
||||
| `429 "Too many requests"` | Empty | The number of requests exceeds the [adjustable service quota](/influxdb3/cloud-serverless/admin/billing/limits/#adjustable-service-quotas). The `Retry-After` header contains the number of seconds to wait before trying the write again. |
|
||||
| `500 "Internal server error"` | Empty | Default status for an error |
|
||||
| `503 "Service unavailable"` | Empty | The server is temporarily unavailable to accept writes. The `Retry-After` header contains the number of seconds to wait before trying the write again. |
|
||||
{{% /show-in %}}
|
||||
|
||||
The `message` property of the response body may contain additional details about the error.
|
||||
If your data was not written to the {{% show-in "cloud-serverless" %}}bucket{{% /show-in %}}{{% show-in "cloud-dedicated,clustered" %}}database{{% /show-in %}}, see how to [troubleshoot rejected points](#troubleshoot-rejected-points).
|
||||
|
||||
## Troubleshoot failures
|
||||
|
||||
If you notice data is missing in your database, do the following:
|
||||
|
||||
- Check the [HTTP status code](#review-http-status-codes) in the response.
|
||||
- Check the `message` property in the response body for details about the error.
|
||||
- If the `message` describes a field error, [troubleshoot rejected points](#troubleshoot-rejected-points).
|
||||
- Verify all lines contain valid syntax ([line protocol](/influxdb3/version/reference/syntax/line-protocol/)).
|
||||
- Verify the timestamps in your data match the [precision parameter](/influxdb3/version/reference/glossary/#precision) in your request.
|
||||
- Minimize payload size and network errors by [optimizing writes](/influxdb3/version/write-data/best-practices/optimize-writes/).
|
||||
|
||||
## Troubleshoot rejected points
|
||||
|
||||
When writing points from a batch, InfluxDB rejects points that have syntax errors or schema conflicts.
|
||||
If InfluxDB processes the data in your batch and then rejects points, the [HTTP response](#handle-write-responses) body contains the following properties that describe rejected points:
|
||||
|
||||
- `code`: `"invalid"`
|
||||
- `line`: the line number of the _first_ rejected point in the batch.
|
||||
- `message`: a string that contains line-separated error messages, one message for each rejected point in the batch, up to 100 rejected points. Line numbers are 1-based.
|
||||
|
||||
InfluxDB rejects points for the following reasons:
|
||||
|
||||
- a line protocol parsing error
|
||||
- an invalid timestamp
|
||||
- a schema conflict
|
||||
|
||||
Schema conflicts occur when you try to write data that contains any of the following:
|
||||
|
||||
- a wrong data type: the point falls within the same partition (default partitioning is measurement and day) as existing {{% show-in "cloud-serverless" %}}bucket{{% /show-in %}} {{% show-in "cloud-dedicated,clustered" %}}database{{% /show-in %}} data and contains a different data type for an existing field
|
||||
- a tag and a field that use the same key
|
||||
|
||||
### Example
|
||||
|
||||
The following example shows a response body for a write request that contains two rejected points:
|
||||
|
||||
```json
|
||||
{
|
||||
"code": "invalid",
|
||||
"line": 2,
|
||||
"message": "failed to parse line protocol:
|
||||
errors encountered on line(s):
|
||||
error parsing line 2 (1-based): Invalid measurement was provided
|
||||
error parsing line 4 (1-based): Unable to parse timestamp value '123461000000000000000000000000'"
|
||||
}
|
||||
```
|
||||
|
||||
Check for [field data type](/influxdb3/version/reference/syntax/line-protocol/#data-types-and-format) differences between the rejected data point and points within the same database and partition (default partitioning
|
||||
is by measurement and day)--for example, did you attempt to write `string` data to an `int` field?
|
||||
|
||||
## Report write issues
|
||||
|
||||
If you experience persistent write issues that you can't resolve using the troubleshooting steps above, use these guidelines to gather the necessary information when reporting the issue to InfluxData support.
|
||||
|
||||
> [!Note]
|
||||
> #### Before reporting an issue
|
||||
>
|
||||
> Ensure you have followed all [troubleshooting steps](#troubleshoot-failures) and
|
||||
> reviewed the [write optimization guidelines](/influxdb3/version/write-data/best-practices/optimize-writes/)
|
||||
> to rule out common configuration and data formatting issues.
|
||||
|
||||
### Gather essential information
|
||||
|
||||
When reporting write issues, provide the following information to help InfluxData engineers diagnose the problem:
|
||||
|
||||
#### 1. Error details and logs
|
||||
|
||||
**Capture the complete error response:**
|
||||
|
||||
```bash { placeholders="AUTH_TOKEN|DATABASE_NAME" }
|
||||
# Example: Capture both successful and failed write attempts
|
||||
curl --silent --show-error --write-out "\nHTTP Status: %{http_code}\nResponse Time: %{time_total}s\n" \
|
||||
--request POST \
|
||||
"https://{{< influxdb/host >}}/write?db=DATABASE_NAME&precision=ns" \
|
||||
--header "Authorization: Bearer AUTH_TOKEN" \
|
||||
--header "Content-Type: text/plain; charset=utf-8" \
|
||||
--data-binary @problematic-data.lp \
|
||||
> write-error-response.txt 2>&1
|
||||
```
|
||||
|
||||
**Log client-side errors:**
|
||||
|
||||
If using a client library, enable debug logging and capture the full exception details:
|
||||
|
||||
{{< code-tabs-wrapper >}}
|
||||
{{% code-tabs %}}
|
||||
[Python](#)
|
||||
[Go](#)
|
||||
[Java](#)
|
||||
[JavaScript](#)
|
||||
{{% /code-tabs %}}
|
||||
{{% code-tab-content %}}
|
||||
```python { placeholders="DATABASE_NAME|AUTH_TOKEN" }
|
||||
import logging
|
||||
from influxdb_client_3 import InfluxDBClient3
|
||||
|
||||
# Enable debug logging
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
logger = logging.getLogger("influxdb_client_3")
|
||||
|
||||
try:
|
||||
client = InfluxDBClient3(token="AUTH_TOKEN", host="{{< influxdb/host >}}", database="DATABASE_NAME")
|
||||
client.write(data)
|
||||
except Exception as e:
|
||||
logger.error(f"Write failed: {str(e)}")
|
||||
# Include full stack trace in your report
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```go { placeholders="DATABASE_NAME|AUTH_TOKEN" }
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/InfluxCommunity/influxdb3-go"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Enable debug logging
|
||||
client, err := influxdb3.New(influxdb3.ClientConfig{
|
||||
Host: "https://{{< influxdb/host >}}",
|
||||
Token: "AUTH_TOKEN",
|
||||
Database: "DATABASE_NAME",
|
||||
Debug: true,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
err = client.Write(context.Background(), data)
|
||||
if err != nil {
|
||||
// Log the full error details
|
||||
fmt.Fprintf(os.Stderr, "Write error: %+v\n", err)
|
||||
}
|
||||
}
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```java { placeholders="DATABASE_NAME|AUTH_TOKEN" }
|
||||
import com.influxdb.v3.client.InfluxDBClient;
|
||||
import java.util.logging.Logger;
|
||||
import java.util.logging.Level;
|
||||
|
||||
public class WriteErrorExample {
|
||||
private static final Logger logger = Logger.getLogger(WriteErrorExample.class.getName());
|
||||
|
||||
public static void main(String[] args) {
|
||||
try (InfluxDBClient client = InfluxDBClient.getInstance(
|
||||
"https://{{< influxdb/host >}}",
|
||||
"AUTH_TOKEN".toCharArray(),
|
||||
"DATABASE_NAME")) {
|
||||
|
||||
client.writeRecord(data);
|
||||
} catch (Exception e) {
|
||||
logger.log(Level.SEVERE, "Write failed", e);
|
||||
// Include full stack trace in your report
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{% code-tab-content %}}
|
||||
```javascript { placeholders="DATABASE_NAME|AUTH_TOKEN" }
|
||||
import { InfluxDBClient } from '@influxdata/influxdb3-client'
|
||||
|
||||
const client = new InfluxDBClient({
|
||||
host: 'https://{{< influxdb/host >}}',
|
||||
token: 'AUTH_TOKEN',
|
||||
database: 'DATABASE_NAME'
|
||||
})
|
||||
|
||||
try {
|
||||
await client.write(data)
|
||||
} catch (error) {
|
||||
console.error('Write failed:', error)
|
||||
// Include the full error object in your report
|
||||
console.error('Full error details:', JSON.stringify(error, null, 2))
|
||||
}
|
||||
```
|
||||
{{% /code-tab-content %}}
|
||||
{{< /code-tabs-wrapper >}}
|
||||
|
||||
Replace the following in your code:
|
||||
|
||||
{{% hide-in "cloud-serverless" %}}
|
||||
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the database to query{{% /hide-in %}}
|
||||
{{% show-in "cloud-serverless" %}}
|
||||
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: the name of the bucket to query{{% /show-in %}}
|
||||
{{% show-in "clustered,cloud-dedicated" %}}
|
||||
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: a [database token](/influxdb3/clustered/admin/tokens/#database-tokens) with _write_ access to the specified database.{{% /show-in %}}
|
||||
{{% show-in "cloud-serverless" %}}
|
||||
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: an [API token](/influxdb3/cloud-serverless/admin/tokens/) with _write_ access to the specified bucket.{{% /show-in %}}
|
||||
{{% show-in "enterprise,core" %}}
|
||||
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}: your {{% token-link "database" %}} with write permissions on the specified database{{% /show-in %}}
|
||||
|
||||
#### 2. Data samples and patterns
|
||||
|
||||
**Provide representative data samples:**
|
||||
|
||||
- Include 10-20 lines of the problematic line protocol data (sanitized if necessary)
|
||||
- Show both successful and failing data formats
|
||||
- Include timestamp ranges and precision used
|
||||
- Specify if the issue occurs with specific measurements, tags, or field types
|
||||
|
||||
**Example data documentation:**
|
||||
```
|
||||
# Successful writes:
|
||||
measurement1,tag1=value1,tag2=value2 field1=1.23,field2="text" 1640995200000000000
|
||||
|
||||
# Failing writes:
|
||||
measurement1,tag1=value1,tag2=value2 field1="string",field2=456 1640995260000000000
|
||||
# Error: field data type conflict - field1 changed from float to string
|
||||
```
|
||||
|
||||
#### 3. Write patterns and volume
|
||||
|
||||
Document your write patterns:
|
||||
|
||||
- **Frequency**: How often do you write data? (for example, every 10 seconds, once per minute)
|
||||
- **Batch size**: How many points per write request?
|
||||
- **Concurrency**: How many concurrent write operations?
|
||||
- **Data retention**: How long is data retained?
|
||||
- **Timing**: When did the issue first occur? Is it intermittent or consistent?
|
||||
|
||||
#### 4. Environment details
|
||||
|
||||
{{% show-in "clustered" %}}
|
||||
**Cluster configuration:**
|
||||
- InfluxDB Clustered version
|
||||
- Kubernetes environment details
|
||||
- Node specifications (CPU, memory, storage)
|
||||
- Network configuration between client and cluster
|
||||
{{% /show-in %}}
|
||||
|
||||
**Client configuration:**
|
||||
- Client library version and language
|
||||
- Connection settings (timeouts, retry logic)
|
||||
- Geographic location relative to cluster
|
||||
|
||||
#### 5. Reproduction steps
|
||||
|
||||
Provide step-by-step instructions to reproduce the issue:
|
||||
|
||||
1. **Environment setup**: How to configure a similar environment
|
||||
2. **Data preparation**: Sample data files or generation scripts
|
||||
3. **Write commands**: Exact commands or code used
|
||||
4. **Expected vs actual results**: What should happen vs what actually happens
|
||||
|
||||
### Create a support package
|
||||
|
||||
Organize all gathered information into a comprehensive package:
|
||||
|
||||
**Files to include:**
|
||||
- `write-error-response.txt` - HTTP response details
|
||||
- `client-logs.txt` - Client library debug logs
|
||||
- `sample-data.lp` - Representative line protocol data (sanitized)
|
||||
- `reproduction-steps.md` - Detailed reproduction guide
|
||||
- `environment-details.md` - {{% show-in "clustered" %}}Cluster and{{% /show-in %}} client configuration
|
||||
- `write-patterns.md` - Usage patterns and volume information
|
||||
|
||||
**Package format:**
|
||||
```bash
|
||||
# Create a timestamped support package
|
||||
TIMESTAMP=$(date -Iseconds)
|
||||
mkdir "write-issue-${TIMESTAMP}"
|
||||
# Add all relevant files to the directory
|
||||
tar -czf "write-issue-${TIMESTAMP}.tar.gz" "write-issue-${TIMESTAMP}/"
|
||||
```
|
||||
|
||||
### Submit the issue
|
||||
|
||||
Include the support package when contacting InfluxData support through your standard [support channels](#bug-reports-and-feedback), along with:
|
||||
|
||||
- A clear description of the problem
|
||||
- Impact assessment (how critical is this issue?)
|
||||
- Any workarounds you've attempted
|
||||
- Business context if the issue affects production systems
|
||||
|
||||
This comprehensive information will help InfluxData engineers identify root causes and provide targeted solutions for your write issues.
|
||||
|
|
@ -65,11 +65,11 @@ The following table provides information about what metaqueries are available in
|
|||
|
||||
### Aggregate functions
|
||||
|
||||
| Function | Supported |
|
||||
| :---------------------------------------------------------------------------------------- | :----------------------: |
|
||||
| Function | Supported |
|
||||
| :-------------------------------------------------------------------------------- | :----------------------: |
|
||||
| [COUNT()](/influxdb/version/reference/influxql/functions/aggregates/#count) | **{{< icon "check" >}}** |
|
||||
| [DISTINCT()](/influxdb/version/reference/influxql/functions/aggregates/#distinct) | **{{< icon "check" >}}** |
|
||||
| <span style="opacity: .5;">INTEGRAL()</span> | |
|
||||
| [INTEGRAL()](/influxdb/version/reference/influxql/functions/aggregates/#integral) | **{{< icon "check" >}}** |
|
||||
| [MEAN()](/influxdb/version/reference/influxql/functions/aggregates/#mean) | **{{< icon "check" >}}** |
|
||||
| [MEDIAN()](/influxdb/version/reference/influxql/functions/aggregates/#median) | **{{< icon "check" >}}** |
|
||||
| [MODE()](/influxdb/version/reference/influxql/functions/aggregates/#mode) | **{{< icon "check" >}}** |
|
||||
|
|
@ -77,29 +77,25 @@ The following table provides information about what metaqueries are available in
|
|||
| [STDDEV()](/influxdb/version/reference/influxql/functions/aggregates/#stddev) | **{{< icon "check" >}}** |
|
||||
| [SUM()](/influxdb/version/reference/influxql/functions/aggregates/#sum) | **{{< icon "check" >}}** |
|
||||
|
||||
<!--
|
||||
INTEGRAL [influxdb_iox#6937](https://github.com/influxdata/influxdb_iox/issues/6937)
|
||||
-->
|
||||
|
||||
### Selector functions
|
||||
|
||||
| Function | Supported |
|
||||
| :------------------------------------------------------------------------------------------- | :----------------------: |
|
||||
| Function | Supported |
|
||||
| :----------------------------------------------------------------------------------- | :----------------------: |
|
||||
| [BOTTOM()](/influxdb/version/reference/influxql/functions/selectors/#bottom) | **{{< icon "check" >}}** |
|
||||
| [FIRST()](/influxdb/version/reference/influxql/functions/selectors/#first) | **{{< icon "check" >}}** |
|
||||
| [LAST()](/influxdb/version/reference/influxql/functions/selectors/#last) | **{{< icon "check" >}}** |
|
||||
| [MAX()](/influxdb/version/reference/influxql/functions/selectors/#max) | **{{< icon "check" >}}** |
|
||||
| [MIN()](/influxdb/version/reference/influxql/functions/selectors/#min) | **{{< icon "check" >}}** |
|
||||
| [PERCENTILE()](/influxdb/version/reference/influxql/functions/selectors/#percentile) | **{{< icon "check" >}}** |
|
||||
| <span style="opacity: .5;">SAMPLE()</span> | |
|
||||
| <span style="opacity: .5;">SAMPLE()</span> | |
|
||||
| [TOP()](/influxdb/version/reference/influxql/functions/selectors/#top) | **{{< icon "check" >}}** |
|
||||
|
||||
<!-- SAMPLE() [influxdb_iox#6935](https://github.com/influxdata/influxdb_iox/issues/6935) -->
|
||||
|
||||
### Transformations
|
||||
|
||||
| Function | Supported |
|
||||
| :--------------------------------------------------------------------------------------------------------------------------- | :----------------------: |
|
||||
| Function | Supported |
|
||||
| :------------------------------------------------------------------------------------------------------------------- | :----------------------: |
|
||||
| [ABS()](/influxdb/version/reference/influxql/functions/transformations/#abs) | **{{< icon "check" >}}** |
|
||||
| [ACOS()](/influxdb/version/reference/influxql/functions/transformations/#acos) | **{{< icon "check" >}}** |
|
||||
| [ASIN()](/influxdb/version/reference/influxql/functions/transformations/#asin) | **{{< icon "check" >}}** |
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ _Examples use the sample data set provided in the
|
|||
|
||||
- [COUNT()](#count)
|
||||
- [DISTINCT()](#distinct)
|
||||
- [INTEGRAL()](#integral)
|
||||
- [MEAN()](#mean)
|
||||
- [MEDIAN()](#median)
|
||||
- [MODE()](#mode)
|
||||
|
|
@ -13,17 +14,6 @@ _Examples use the sample data set provided in the
|
|||
- [STDDEV()](#stddev)
|
||||
- [SUM()](#sum)
|
||||
|
||||
<!-- When implemented, place back in alphabetical order -->
|
||||
<!-- - [INTEGRAL()](#integral) -->
|
||||
|
||||
> [!Important]
|
||||
> #### Missing InfluxQL functions
|
||||
>
|
||||
> Some InfluxQL functions are in the process of being rearchitected to work with
|
||||
> the InfluxDB 3 storage engine. If a function you need is not here, check the
|
||||
> [InfluxQL feature support page](/influxdb/version/reference/influxql/feature-support/#function-support)
|
||||
> for more information.
|
||||
|
||||
## COUNT()
|
||||
|
||||
Returns the number of non-null [field values](/influxdb/version/reference/glossary/#field-value).
|
||||
|
|
@ -186,14 +176,14 @@ name: home
|
|||
{{% /expand %}}
|
||||
{{< /expand-wrapper >}}
|
||||
|
||||
<!-- ## INTEGRAL()
|
||||
## INTEGRAL()
|
||||
|
||||
Returns the area under the curve for queried [field values](/influxdb/version/reference/glossary/#field-value)
|
||||
and converts those results into the summed area per **unit** of time.
|
||||
|
||||
> [!Note]
|
||||
> `INTEGRAL()` does not support [`fill()`](/influxdb/version/query-data/influxql/explore-data/group-by/> #group-by-time-intervals-and-fill).
|
||||
> `INTEGRAL()` supports int64 and float64 field value [data types](/influxdb/version/reference/glossary/#data-type).
|
||||
> [!Important]
|
||||
> - `INTEGRAL()` does not support [`fill()`](/influxdb/version/reference/influxql/group-by/#group-by-time-and-fill-gaps).
|
||||
> - `INTEGRAL()` supports int64 and float64 field value [data types](/influxdb/version/reference/glossary/#data-type).
|
||||
|
||||
```sql
|
||||
INTEGRAL(field_expression[, unit])
|
||||
|
|
@ -318,7 +308,7 @@ name: home
|
|||
{{% /influxdb/custom-timestamps %}}
|
||||
|
||||
{{% /expand %}}
|
||||
{{< /expand-wrapper >}} -->
|
||||
{{< /expand-wrapper >}}
|
||||
|
||||
## MEAN()
|
||||
|
||||
|
|
|
|||
|
|
@ -885,9 +885,10 @@ LIMIT 1
|
|||
|
||||
## from_unixtime
|
||||
|
||||
Converts an integer to RFC3339 timestamp format (`YYYY-MM-DDT00:00:00.000000000Z`).
|
||||
Input is parsed as a [Unix nanosecond timestamp](/influxdb/version/reference/glossary/#unix-timestamp)
|
||||
and returns the corresponding RFC3339 timestamp.
|
||||
Converts an integer (Unix timestamp in seconds) to a timestamp value.
|
||||
The underlying result is a timestamp (`Timestamp(TimeUnit::Second, None)`).
|
||||
If you output query results as JSON (default for the API), CSV, or pretty (default for the CLI), the timestamp is formatted as an ISO 8601 string (`YYYY-MM-DDTHH:MM:SS`, without a timezone indicator).
|
||||
When output to Parquet, the raw integer value (for example, `1641042000`) is preserved.
|
||||
|
||||
```sql
|
||||
from_unixtime(expression)
|
||||
|
|
@ -1454,7 +1455,7 @@ SELECT tz(time, 'Australia/Sydney') AS time_tz, time FROM home ORDER BY time LIM
|
|||
differ when the input timestamp **does not** have a timezone.
|
||||
|
||||
- When using an input timestamp that does not have a timezone (the default behavior in InfluxDB) with the
|
||||
`AT TIME ZONE` operator, the operator returns the the same timestamp, but with a timezone offset
|
||||
`AT TIME ZONE` operator, the operator returns the same timestamp, but with a timezone offset
|
||||
(also known as the "wall clock" time)--for example:
|
||||
|
||||
```sql
|
||||
|
|
|
|||
|
|
@ -11,6 +11,122 @@ menu:
|
|||
weight: 60
|
||||
---
|
||||
|
||||
## v1.35.3 {date="2025-07-28"}
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [#17373](https://github.com/influxdata/telegraf/pull/17373) `agent` Handle nil timer on telegraf reload when no debounce is specified
|
||||
- [#17340](https://github.com/influxdata/telegraf/pull/17340) `agent` Make Windows service install more robust
|
||||
- [#17310](https://github.com/influxdata/telegraf/pull/17310) `outputs.sql` Add timestamp to derived datatypes
|
||||
- [#17349](https://github.com/influxdata/telegraf/pull/17349) `outputs` Retrigger batch-available-events only for non-failing writes
|
||||
- [#17293](https://github.com/influxdata/telegraf/pull/17293) `parsers.json_v2` Respect string type for objects and arrays
|
||||
- [#17367](https://github.com/influxdata/telegraf/pull/17367) `plugins.snmp` Update gosnmp to prevent panic in snmp agents
|
||||
- [#17292](https://github.com/influxdata/telegraf/pull/17292) `processors.snmp_lookup` Avoid re-enqueing updates after plugin stopped
|
||||
- [#17369](https://github.com/influxdata/telegraf/pull/17369) `processors.snmp_lookup` Prevent deadlock during plugin shutdown
|
||||
|
||||
### Dependency updates
|
||||
|
||||
- [#17320](https://github.com/influxdata/telegraf/pull/17320) `deps` Bump github.com/Azure/azure-sdk-for-go/sdk/azcore from 1.18.0 to 1.18.1
|
||||
- [#17328](https://github.com/influxdata/telegraf/pull/17328) `deps` Bump github.com/SAP/go-hdb from 1.13.11 to 1.13.12
|
||||
- [#17301](https://github.com/influxdata/telegraf/pull/17301) `deps` Bump github.com/SAP/go-hdb from 1.13.9 to 1.13.11
|
||||
- [#17326](https://github.com/influxdata/telegraf/pull/17326) `deps` Bump github.com/alitto/pond/v2 from 2.4.0 to 2.5.0
|
||||
- [#17295](https://github.com/influxdata/telegraf/pull/17295) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.227.0 to 1.230.0
|
||||
- [#17332](https://github.com/influxdata/telegraf/pull/17332) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.230.0 to 1.231.0
|
||||
- [#17300](https://github.com/influxdata/telegraf/pull/17300) `deps` Bump github.com/docker/docker from 28.3.0+incompatible to 28.3.1+incompatible
|
||||
- [#17334](https://github.com/influxdata/telegraf/pull/17334) `deps` Bump github.com/docker/docker from 28.3.1+incompatible to 28.3.2+incompatible
|
||||
- [#17327](https://github.com/influxdata/telegraf/pull/17327) `deps` Bump github.com/google/cel-go from 0.25.0 to 0.26.0
|
||||
- [#17331](https://github.com/influxdata/telegraf/pull/17331) `deps` Bump github.com/miekg/dns from 1.1.66 to 1.1.67
|
||||
- [#17297](https://github.com/influxdata/telegraf/pull/17297) `deps` Bump github.com/nats-io/nats-server/v2 from 2.11.5 to 2.11.6
|
||||
- [#17321](https://github.com/influxdata/telegraf/pull/17321) `deps` Bump github.com/openconfig/goyang from 1.6.2 to 1.6.3
|
||||
- [#17298](https://github.com/influxdata/telegraf/pull/17298) `deps` Bump github.com/prometheus/procfs from 0.16.1 to 0.17.0
|
||||
- [#17296](https://github.com/influxdata/telegraf/pull/17296) `deps` Bump github.com/shirou/gopsutil/v4 from 4.25.5 to 4.25.6
|
||||
- [#17299](https://github.com/influxdata/telegraf/pull/17299) `deps` Bump github.com/snowflakedb/gosnowflake from 1.14.1 to 1.15.0
|
||||
- [#17323](https://github.com/influxdata/telegraf/pull/17323) `deps` Bump go.opentelemetry.io/collector/pdata from 1.35.0 to 1.36.0
|
||||
- [#17091](https://github.com/influxdata/telegraf/pull/17091) `deps` Bump go.step.sm/crypto from 0.64.0 to 0.67.0
|
||||
- [#17330](https://github.com/influxdata/telegraf/pull/17330) `deps` Bump golang.org/x/crypto from 0.39.0 to 0.40.0
|
||||
- [#17322](https://github.com/influxdata/telegraf/pull/17322) `deps` Bump golang.org/x/mod from 0.25.0 to 0.26.0
|
||||
- [#17336](https://github.com/influxdata/telegraf/pull/17336) `deps` Bump golang.org/x/net from 0.41.0 to 0.42.0
|
||||
- [#17337](https://github.com/influxdata/telegraf/pull/17337) `deps` Bump golang.org/x/sys from 0.33.0 to 0.34.0
|
||||
- [#17335](https://github.com/influxdata/telegraf/pull/17335) `deps` Bump golang.org/x/term from 0.32.0 to 0.33.0
|
||||
- [#17294](https://github.com/influxdata/telegraf/pull/17294) `deps` Bump google.golang.org/api from 0.239.0 to 0.240.0
|
||||
- [#17325](https://github.com/influxdata/telegraf/pull/17325) `deps` Bump google.golang.org/api from 0.240.0 to 0.241.0
|
||||
- [#17138](https://github.com/influxdata/telegraf/pull/17138) `deps` Bump modernc.org/sqlite from 1.37.0 to 1.38.0
|
||||
|
||||
## v1.35.2 {date="2025-07-07"}
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [#17248](https://github.com/influxdata/telegraf/pull/17248) `agent` Add missing config flags for migrate command
|
||||
- [#17240](https://github.com/influxdata/telegraf/pull/17240) `disk-buffer` Correctly reset the mask after adding to an empty buffer
|
||||
- [#17284](https://github.com/influxdata/telegraf/pull/17284) `disk-buffer` Expire metric tracking information in the right place
|
||||
- [#17257](https://github.com/influxdata/telegraf/pull/17257) `disk-buffer` Mask old tracking metrics on restart
|
||||
- [#17247](https://github.com/influxdata/telegraf/pull/17247) `disk-buffer` Remove empty buffer on close
|
||||
- [#17285](https://github.com/influxdata/telegraf/pull/17285) `inputs.gnmi` Avoid interpreting path elements with multiple colons as namespace
|
||||
- [#17278](https://github.com/influxdata/telegraf/pull/17278) `inputs.gnmi` Handle base64 encoded IEEE-754 floats correctly
|
||||
- [#17258](https://github.com/influxdata/telegraf/pull/17258) `inputs.kibana` Support Kibana 8.x status API format change
|
||||
- [#17214](https://github.com/influxdata/telegraf/pull/17214) `inputs.ntpq` Fix ntpq field misalignment parsing errors
|
||||
- [#17234](https://github.com/influxdata/telegraf/pull/17234) `outputs.microsoft_fabric` Correct app name
|
||||
- [#17291](https://github.com/influxdata/telegraf/pull/17291) `outputs.nats` Avoid initializing Jetstream unconditionally
|
||||
- [#17246](https://github.com/influxdata/telegraf/pull/17246) `outputs` Retrigger batch-available-events correctly
|
||||
|
||||
### Dependency updates
|
||||
|
||||
- [#17217](https://github.com/influxdata/telegraf/pull/17217) `deps` Bump github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs from 1.3.2 to 1.4.0
|
||||
- [#17226](https://github.com/influxdata/telegraf/pull/17226) `deps` Bump github.com/ClickHouse/clickhouse-go/v2 from 2.37.0 to 2.37.1
|
||||
- [#17265](https://github.com/influxdata/telegraf/pull/17265) `deps` Bump github.com/ClickHouse/clickhouse-go/v2 from 2.37.1 to 2.37.2
|
||||
- [#17268](https://github.com/influxdata/telegraf/pull/17268) `deps` Bump github.com/Masterminds/semver/v3 from 3.3.1 to 3.4.0
|
||||
- [#17271](https://github.com/influxdata/telegraf/pull/17271) `deps` Bump github.com/SAP/go-hdb from 1.13.7 to 1.13.9
|
||||
- [#17232](https://github.com/influxdata/telegraf/pull/17232) `deps` Bump github.com/alitto/pond/v2 from 2.3.4 to 2.4.0
|
||||
- [#17231](https://github.com/influxdata/telegraf/pull/17231) `deps` Bump github.com/apache/arrow-go/v18 from 18.3.0 to 18.3.1
|
||||
- [#17223](https://github.com/influxdata/telegraf/pull/17223) `deps` Bump github.com/aws/aws-sdk-go-v2/config from 1.29.15 to 1.29.17
|
||||
- [#17220](https://github.com/influxdata/telegraf/pull/17220) `deps` Bump github.com/aws/aws-sdk-go-v2/credentials from 1.17.69 to 1.17.70
|
||||
- [#17227](https://github.com/influxdata/telegraf/pull/17227) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs from 1.50.3 to 1.51.0
|
||||
- [#17262](https://github.com/influxdata/telegraf/pull/17262) `deps` Bump github.com/aws/aws-sdk-go-v2/service/dynamodb from 1.43.4 to 1.44.0
|
||||
- [#17224](https://github.com/influxdata/telegraf/pull/17224) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.225.1 to 1.225.2
|
||||
- [#17260](https://github.com/influxdata/telegraf/pull/17260) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.226.0 to 1.227.0
|
||||
- [#17264](https://github.com/influxdata/telegraf/pull/17264) `deps` Bump github.com/docker/docker from 28.2.2+incompatible to 28.3.0+incompatible
|
||||
- [#17256](https://github.com/influxdata/telegraf/pull/17256) `deps` Bump github.com/lxc/incus/v6 from 6.13.0 to 6.14.0
|
||||
- [#17272](https://github.com/influxdata/telegraf/pull/17272) `deps` Bump github.com/microsoft/go-mssqldb from 1.8.2 to 1.9.2
|
||||
- [#17261](https://github.com/influxdata/telegraf/pull/17261) `deps` Bump github.com/nats-io/nats-server/v2 from 2.11.4 to 2.11.5
|
||||
- [#17266](https://github.com/influxdata/telegraf/pull/17266) `deps` Bump github.com/peterbourgon/unixtransport from 0.0.5 to 0.0.6
|
||||
- [#17229](https://github.com/influxdata/telegraf/pull/17229) `deps` Bump github.com/prometheus/common from 0.64.0 to 0.65.0
|
||||
- [#17267](https://github.com/influxdata/telegraf/pull/17267) `deps` Bump github.com/redis/go-redis/v9 from 9.10.0 to 9.11.0
|
||||
- [#17273](https://github.com/influxdata/telegraf/pull/17273) `deps` Bump go.opentelemetry.io/collector/pdata from 1.34.0 to 1.35.0
|
||||
- [#17219](https://github.com/influxdata/telegraf/pull/17219) `deps` Bump google.golang.org/api from 0.237.0 to 0.238.0
|
||||
- [#17263](https://github.com/influxdata/telegraf/pull/17263) `deps` Bump google.golang.org/api from 0.238.0 to 0.239.0
|
||||
- [#17218](https://github.com/influxdata/telegraf/pull/17218) `deps` Bump k8s.io/api from 0.33.1 to 0.33.2
|
||||
- [#17228](https://github.com/influxdata/telegraf/pull/17228) `deps` Bump k8s.io/client-go from 0.33.1 to 0.33.2
|
||||
|
||||
## v1.35.1 {date="2025-06-23"}
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [#17178](https://github.com/influxdata/telegraf/pull/17178) `inputs.procstat` Fix user filter conditional logic
|
||||
- [#17210](https://github.com/influxdata/telegraf/pull/17210) `processors.strings` Add explicit TOML tags on struct fields
|
||||
|
||||
### Dependency updates
|
||||
|
||||
- [#17194](https://github.com/influxdata/telegraf/pull/17194) `deps` Bump github.com/Azure/azure-sdk-for-go/sdk/azidentity from 1.10.0 to 1.10.1
|
||||
- [#17189](https://github.com/influxdata/telegraf/pull/17189) `deps` Bump github.com/ClickHouse/clickhouse-go/v2 from 2.36.0 to 2.37.0
|
||||
- [#17186](https://github.com/influxdata/telegraf/pull/17186) `deps` Bump github.com/SAP/go-hdb from 1.13.6 to 1.13.7
|
||||
- [#17188](https://github.com/influxdata/telegraf/pull/17188) `deps` Bump github.com/alitto/pond/v2 from 2.3.2 to 2.3.4
|
||||
- [#17180](https://github.com/influxdata/telegraf/pull/17180) `deps` Bump github.com/aws/aws-sdk-go-v2/credentials from 1.17.68 to 1.17.69
|
||||
- [#17185](https://github.com/influxdata/telegraf/pull/17185) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatch from 1.45.1 to 1.45.2
|
||||
- [#17187](https://github.com/influxdata/telegraf/pull/17187) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs from 1.50.1 to 1.50.2
|
||||
- [#17183](https://github.com/influxdata/telegraf/pull/17183) `deps` Bump github.com/aws/aws-sdk-go-v2/service/dynamodb from 1.43.2 to 1.43.3
|
||||
- [#17182](https://github.com/influxdata/telegraf/pull/17182) `deps` Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.225.0 to 1.225.1
|
||||
- [#17190](https://github.com/influxdata/telegraf/pull/17190) `deps` Bump github.com/aws/aws-sdk-go-v2/service/kinesis from 1.35.1 to 1.35.2
|
||||
- [#17193](https://github.com/influxdata/telegraf/pull/17193) `deps` Bump github.com/aws/aws-sdk-go-v2/service/timestreamwrite from 1.31.0 to 1.31.1
|
||||
- [#17195](https://github.com/influxdata/telegraf/pull/17195) `deps` Bump github.com/aws/smithy-go from 1.22.3 to 1.22.4
|
||||
- [#17196](https://github.com/influxdata/telegraf/pull/17196) `deps` Bump github.com/cloudevents/sdk-go/v2 from 2.16.0 to 2.16.1
|
||||
- [#17212](https://github.com/influxdata/telegraf/pull/17212) `deps` Bump github.com/go-chi/chi/v5 from 5.2.1 to 5.2.2
|
||||
- [#17191](https://github.com/influxdata/telegraf/pull/17191) `deps` Bump github.com/go-sql-driver/mysql from 1.9.2 to 1.9.3
|
||||
- [#17192](https://github.com/influxdata/telegraf/pull/17192) `deps` Bump github.com/peterbourgon/unixtransport from 0.0.4 to 0.0.5
|
||||
- [#17181](https://github.com/influxdata/telegraf/pull/17181) `deps` Bump github.com/redis/go-redis/v9 from 9.9.0 to 9.10.0
|
||||
- [#17197](https://github.com/influxdata/telegraf/pull/17197) `deps` Bump github.com/urfave/cli/v2 from 2.27.6 to 2.27.7
|
||||
- [#17198](https://github.com/influxdata/telegraf/pull/17198) `deps` Bump go.opentelemetry.io/collector/pdata from 1.33.0 to 1.34.0
|
||||
- [#17184](https://github.com/influxdata/telegraf/pull/17184) `deps` Bump google.golang.org/api from 0.236.0 to 0.237.0
|
||||
|
||||
## v1.35.0 {date="2025-06-16"}
|
||||
|
||||
### Deprecation removals
|
||||
|
|
@ -129,14 +245,14 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
- [#16030](https://github.com/influxdata/telegraf/pull/16030) `processors.enum` Allow mapping to be applied to multiple fields
|
||||
- [#16494](https://github.com/influxdata/telegraf/pull/16494) `serializer.prometheusremotewrite` Allow sending native histograms
|
||||
|
||||
### Bugfixes
|
||||
### Bug fixes
|
||||
|
||||
- [#17044](https://github.com/influxdata/telegraf/pull/17044) `inputs.opcua` Fix integration test
|
||||
- [#16986](https://github.com/influxdata/telegraf/pull/16986) `inputs.procstat` Resolve remote usernames on Posix systems
|
||||
- [#16699](https://github.com/influxdata/telegraf/pull/16699) `inputs.win_wmi` Free resources to avoid leaks
|
||||
- [#17118](https://github.com/influxdata/telegraf/pull/17118) `migrations` Update table content for general plugin migrations
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- [#17089](https://github.com/influxdata/telegraf/pull/17089) `deps` Bump cloud.google.com/go/bigquery from 1.68.0 to 1.69.0
|
||||
- [#17026](https://github.com/influxdata/telegraf/pull/17026) `deps` Bump cloud.google.com/go/storage from 1.53.0 to 1.54.0
|
||||
|
|
@ -201,7 +317,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
|
||||
## v1.34.4 {date="2025-05-19"}
|
||||
|
||||
### Bugfixes
|
||||
### Bug fixes
|
||||
|
||||
- [#17009](https://github.com/influxdata/telegraf/pull/17009) `inputs.cloudwatch` Restore filtering to match all dimensions
|
||||
- [#16978](https://github.com/influxdata/telegraf/pull/16978) `inputs.nfsclient` Handle errors during mountpoint filtering
|
||||
|
|
@ -211,7 +327,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
- [#16815](https://github.com/influxdata/telegraf/pull/16815) `inputs.win_eventlog` Handle large events to avoid them being dropped silently
|
||||
- [#16878](https://github.com/influxdata/telegraf/pull/16878) `parsers.json_v2` Handle measurements with multiple objects correctly
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- [#16991](https://github.com/influxdata/telegraf/pull/16991) `deps` Bump cloud.google.com/go/bigquery from 1.67.0 to 1.68.0
|
||||
- [#16963](https://github.com/influxdata/telegraf/pull/16963) `deps` Bump cloud.google.com/go/storage from 1.52.0 to 1.53.0
|
||||
|
|
@ -243,7 +359,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
|
||||
## v1.34.3 {date="2025-05-05"}
|
||||
|
||||
### Bugfixes
|
||||
### Bug fixes
|
||||
|
||||
- [#16697](https://github.com/influxdata/telegraf/pull/16697) `agent` Correctly truncate the disk buffer
|
||||
- [#16868](https://github.com/influxdata/telegraf/pull/16868) `common.ratelimiter` Only grow the buffer but never shrink
|
||||
|
|
@ -254,7 +370,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
- [#16781](https://github.com/influxdata/telegraf/pull/16781) `inputs.win_wmi` Restrict threading model to APARTMENTTHREADED
|
||||
- [#16857](https://github.com/influxdata/telegraf/pull/16857) `outputs.quix` Allow empty certificate for new cloud managed instances
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- [#16804](https://github.com/influxdata/telegraf/pull/16804) `deps` Bump cloud.google.com/go/bigquery from 1.66.2 to 1.67.0
|
||||
- [#16835](https://github.com/influxdata/telegraf/pull/16835) `deps` Bump cloud.google.com/go/monitoring from 1.24.0 to 1.24.2
|
||||
|
|
@ -326,11 +442,11 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
|
||||
## v1.34.2 {date="2025-04-14"}
|
||||
|
||||
### Bugfixes
|
||||
### Bug fixes
|
||||
|
||||
- [#16375](https://github.com/influxdata/telegraf/pull/16375) `aggregators` Handle time drift when calculating aggregation windows
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- [#16689](https://github.com/influxdata/telegraf/pull/16689) `deps` Bump cloud.google.com/go/pubsub from 1.45.3 to 1.48.0
|
||||
- [#16769](https://github.com/influxdata/telegraf/pull/16769) `deps` Bump cloud.google.com/go/storage from 1.50.0 to 1.51.0
|
||||
|
|
@ -376,7 +492,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
|
||||
## v1.34.1 {date="2025-03-24"}
|
||||
|
||||
### Bugfixes
|
||||
### Bug fixes
|
||||
|
||||
- [#16638](https://github.com/influxdata/telegraf/pull/16638) `agent` Condense plugin source information table when multiple plugins in same file
|
||||
- [#16674](https://github.com/influxdata/telegraf/pull/16674) `inputs.tail` Do not seek on pipes
|
||||
|
|
@ -385,7 +501,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
- [#16625](https://github.com/influxdata/telegraf/pull/16625) `outputs.sql` Allow to disable timestamp column
|
||||
- [#16682](https://github.com/influxdata/telegraf/pull/16682) `secrets` Make 'insufficient lockable memory' warning work on BSDs
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- [#16612](https://github.com/influxdata/telegraf/pull/16612) `deps` Bump github.com/PaesslerAG/gval from 1.2.2 to 1.2.4
|
||||
- [#16650](https://github.com/influxdata/telegraf/pull/16650) `deps` Bump github.com/aws/smithy-go from 1.22.2 to 1.22.3
|
||||
|
|
@ -438,7 +554,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
- [#16214](https://github.com/influxdata/telegraf/pull/16214) `processors.converter` Add support for base64 encoded IEEE floats
|
||||
- [#16497](https://github.com/influxdata/telegraf/pull/16497) `processors.template` Add sprig function for templates
|
||||
|
||||
### Bugfixes
|
||||
### Bug fixes
|
||||
|
||||
- [#16542](https://github.com/influxdata/telegraf/pull/16542) `inputs.gnmi` Handle path elements without name but with keys correctly
|
||||
- [#16606](https://github.com/influxdata/telegraf/pull/16606) `inputs.huebridge` Cleanup and fix linter issues
|
||||
|
|
@ -446,7 +562,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
- [#16555](https://github.com/influxdata/telegraf/pull/16555) `outputs.opensearch` Use correct pipeline name while creating bulk-indexers
|
||||
- [#16557](https://github.com/influxdata/telegraf/pull/16557) `serializers.prometheus` Use legacy validation for metric name
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- [#16576](https://github.com/influxdata/telegraf/pull/16576) `deps` Bump github.com/Azure/azure-sdk-for-go/sdk/azidentity from 1.8.1 to 1.8.2
|
||||
- [#16553](https://github.com/influxdata/telegraf/pull/16553) `deps` Bump github.com/Azure/go-autorest/autorest from 0.11.29 to 0.11.30
|
||||
|
|
@ -469,7 +585,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
thus might break existing queries. Furthermore, the tag modification might
|
||||
increase cardinality in your database.
|
||||
|
||||
### Bugfixes
|
||||
### Bug fixes
|
||||
|
||||
- [#16546](https://github.com/influxdata/telegraf/pull/16546) `agent` Add authorization and user-agent when watching remote configs
|
||||
- [#16507](https://github.com/influxdata/telegraf/pull/16507) `inputs.gnmi` Allow to disable using first namespace as origin
|
||||
|
|
@ -478,7 +594,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
- [#16539](https://github.com/influxdata/telegraf/pull/16539) `logging` Handle closing correctly and fix tests
|
||||
- [#16535](https://github.com/influxdata/telegraf/pull/16535) `processors.execd` Detect line-protocol parser correctly
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- [#16506](https://github.com/influxdata/telegraf/pull/16506) `deps` Bump github.com/ClickHouse/clickhouse-go/v2 from 2.30.1 to 2.30.3
|
||||
- [#16502](https://github.com/influxdata/telegraf/pull/16502) `deps` Bump github.com/antchfx/xmlquery from 1.4.1 to 1.4.4
|
||||
|
|
@ -505,7 +621,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
become an (unsigned) integer when parsing raw-packets' headers especially with SFlow v5 input. Please watch
|
||||
out for type-conflicts on the output side!
|
||||
|
||||
### Bugfixes
|
||||
### Bug fixes
|
||||
|
||||
- [#16477](https://github.com/influxdata/telegraf/pull/16477) `agent` Avoid panic by checking for skip_processors_after_aggregators
|
||||
- [#16489](https://github.com/influxdata/telegraf/pull/16489) `agent` Set `godebug x509negativeserial=1` as a workaround
|
||||
|
|
@ -515,7 +631,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
- [#16472](https://github.com/influxdata/telegraf/pull/16472) `outputs.sql` Fix insert into ClickHouse
|
||||
- [#16454](https://github.com/influxdata/telegraf/pull/16454) `service` Set address to prevent orphaned dbus-session processes
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- [#16442](https://github.com/influxdata/telegraf/pull/16442) `deps` Bump cloud.google.com/go/storage from 1.47.0 to 1.50.0
|
||||
- [#16414](https://github.com/influxdata/telegraf/pull/16414) `deps` Bump github.com/Azure/azure-sdk-for-go/sdk/azidentity from 1.7.0 to 1.8.1
|
||||
|
|
@ -550,7 +666,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
`false`! To silence the warning and use the future default behavior, please
|
||||
explicitly set the option to `true`.
|
||||
|
||||
### Bugfixes
|
||||
### Bug fixes
|
||||
|
||||
- [#16290](https://github.com/influxdata/telegraf/pull/16290) `agent` Skip initialization of second processor state if requested
|
||||
- [#16377](https://github.com/influxdata/telegraf/pull/16377) `inputs.intel_powerstat` Fix option removal version
|
||||
|
|
@ -559,7 +675,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
- [#16388](https://github.com/influxdata/telegraf/pull/16388) `outputs.influxdb_v2` Fix panic and API error handling
|
||||
- [#16289](https://github.com/influxdata/telegraf/pull/16289) `outputs.remotefile` Handle tracking metrics correctly
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- [#16344](https://github.com/influxdata/telegraf/pull/16344) `deps` Bump cloud.google.com/go/bigquery from 1.64.0 to 1.65.0
|
||||
- [#16283](https://github.com/influxdata/telegraf/pull/16283) `deps` Bump cloud.google.com/go/monitoring from 1.21.1 to 1.22.0
|
||||
|
|
@ -614,7 +730,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
- [#15883](https://github.com/influxdata/telegraf/pull/15883) `outputs` Only copy the metric if it's not filtered out
|
||||
- [#15893](https://github.com/influxdata/telegraf/pull/15893) `serializers.prometheusremotewrite` Log metric conversion errors
|
||||
|
||||
### Bugfixes
|
||||
### Bug fixes
|
||||
|
||||
- [#16248](https://github.com/influxdata/telegraf/pull/16248) `inputs.netflow` Decode flags in TCP and IP headers correctly
|
||||
- [#16257](https://github.com/influxdata/telegraf/pull/16257) `inputs.procstat` Handle running processes correctly across multiple filters
|
||||
|
|
@ -622,7 +738,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
- [#16255](https://github.com/influxdata/telegraf/pull/16255) `logging` Clean up extra empty spaces when redirectLogger is used
|
||||
- [#16274](https://github.com/influxdata/telegraf/pull/16274) `logging` Fix duplicated prefix and attrMsg in log message when redirectLogger is used
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- [#16232](https://github.com/influxdata/telegraf/pull/16232) `deps` Bump cloud.google.com/go/bigquery from 1.63.1 to 1.64.0
|
||||
- [#16235](https://github.com/influxdata/telegraf/pull/16235) `deps` Bump cloud.google.com/go/storage from 1.43.0 to 1.47.0
|
||||
|
|
@ -649,7 +765,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
possible to avoid invalid values and parsing errors with the v3 XML
|
||||
statistics._
|
||||
|
||||
### Bugfixes
|
||||
### Bug fixes
|
||||
|
||||
- [#16123](https://github.com/influxdata/telegraf/pull/16123) `agent` Restore setup order of stateful plugins to `Init()` then `SetState()`
|
||||
- [#16111](https://github.com/influxdata/telegraf/pull/16111) `common.socket` Make sure the scanner buffer matches the read-buffer size
|
||||
|
|
@ -662,7 +778,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
- [#16145](https://github.com/influxdata/telegraf/pull/16145) `inputs.snmp_trap` Remove timeout deprecation
|
||||
- [#16108](https://github.com/influxdata/telegraf/pull/16108) `logger` Avoid setting the log-format default too early
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- [#16093](https://github.com/influxdata/telegraf/pull/16093) `deps` Bump cloud.google.com/go/pubsub from 1.42.0 to 1.45.1
|
||||
- [#16175](https://github.com/influxdata/telegraf/pull/16175) `deps` Bump github.com/aws/aws-sdk-go-v2/credentials from 1.17.37 to 1.17.44
|
||||
|
|
@ -684,7 +800,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
|
||||
## v1.32.2 {date="2024-10-28"}
|
||||
|
||||
### Bugfixes
|
||||
### Bug fixes
|
||||
|
||||
- [#15966](https://github.com/influxdata/telegraf/pull/15966) `agent` Use a unique WAL file for plugin instances of the same type
|
||||
- [#16074](https://github.com/influxdata/telegraf/pull/16074) `inputs.kafka_consumer` Fix deadlock
|
||||
|
|
@ -695,7 +811,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
- [#15968](https://github.com/influxdata/telegraf/pull/15968) `outputs.remotefile` Create a new serializer instance per output file
|
||||
- [#16014](https://github.com/influxdata/telegraf/pull/16014) `outputs.syslog` Trim field-names belonging to explicit SDIDs correctly
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- [#15992](https://github.com/influxdata/telegraf/pull/15992) `deps` Bump cloud.google.com/go/bigquery from 1.62.0 to 1.63.1
|
||||
- [#16056](https://github.com/influxdata/telegraf/pull/16056) `deps` Bump github.com/Azure/azure-sdk-for-go/sdk/azcore from 1.14.0 to 1.16.0
|
||||
|
|
@ -727,7 +843,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
users as it is an API change; all serializers in Telegraf are already ported
|
||||
to the new framework. If you experience any issues creating serializers, [contact us](/telegraf/v1/#bug-reports-and-feedback).
|
||||
|
||||
### Bugfixes
|
||||
### Bug fixes
|
||||
|
||||
- [#15969](https://github.com/influxdata/telegraf/pull/15969) `agent` Fix buffer not flushing if all metrics are written
|
||||
- [#15937](https://github.com/influxdata/telegraf/pull/15937) `config` Correctly print removal version info
|
||||
|
|
@ -740,7 +856,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
- [#15921](https://github.com/influxdata/telegraf/pull/15921) `parsers.avro` Add mutex to cache access
|
||||
- [#15965](https://github.com/influxdata/telegraf/pull/15965) `processors.aws_ec2` Remove leading slash and cancel worker only if it exists
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- [#15932](https://github.com/influxdata/telegraf/pull/15932) `deps` Bump cloud.google.com/go/monitoring from 1.20.2 to 1.21.1
|
||||
- [#15863](https://github.com/influxdata/telegraf/pull/15863) `deps` Bump github.com/Azure/azure-kusto-go from 0.15.3 to 0.16.1
|
||||
|
|
@ -834,7 +950,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
- [#15697](https://github.com/influxdata/telegraf/pull/15697) `parsers.value` Add base64 datatype
|
||||
- [#15795](https://github.com/influxdata/telegraf/pull/15795) `processors.aws_ec2` Allow to use instance metadata
|
||||
|
||||
### Bugfixes
|
||||
### Bug fixes
|
||||
|
||||
- [#15661](https://github.com/influxdata/telegraf/pull/15661) `agent` Fix buffer directory config and document
|
||||
- [#15788](https://github.com/influxdata/telegraf/pull/15788) `inputs.kinesis_consumer` Honor the configured endpoint
|
||||
|
|
@ -845,7 +961,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
- [#15615](https://github.com/influxdata/telegraf/pull/15615) `outputs.remotefile` Resolve linter not checking error
|
||||
- [#15740](https://github.com/influxdata/telegraf/pull/15740) `serializers.template` Unwrap metrics if required
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- [#15829](https://github.com/influxdata/telegraf/pull/15829) `deps` Bump github.com/BurntSushi/toml from 1.3.2 to 1.4.0
|
||||
- [#15775](https://github.com/influxdata/telegraf/pull/15775) `deps` Bump github.com/aws/aws-sdk-go-v2/feature/ec2/imds from 1.16.11 to 1.16.12
|
||||
|
|
@ -874,7 +990,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
|
||||
## v1.31.3 {date="2024-08-12"}
|
||||
|
||||
### Bugfixes
|
||||
### Bug fixes
|
||||
|
||||
- [#15552](https://github.com/influxdata/telegraf/pull/15552) `inputs.chrony` Use DGRAM for the unix socket
|
||||
- [#15667](https://github.com/influxdata/telegraf/pull/15667) `inputs.diskio` Print warnings once, add details to messages
|
||||
|
|
@ -883,7 +999,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
- [#15724](https://github.com/influxdata/telegraf/pull/15724) `inputs.smartctl` Use --scan-open instead of --scan to provide correct device type info
|
||||
- [#15649](https://github.com/influxdata/telegraf/pull/15649) `inputs.tail` Prevent deadlock when closing and max undelivered lines hit
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- [#15720](https://github.com/influxdata/telegraf/pull/15720) `deps` Bump Go from v1.22.5 to v1.22.6
|
||||
- [#15683](https://github.com/influxdata/telegraf/pull/15683) `deps` Bump cloud.google.com/go/bigquery from 1.61.0 to 1.62.0
|
||||
|
|
@ -907,7 +1023,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
|
||||
## v1.31.2 {date="2024-07-22"}
|
||||
|
||||
### Bugfixes
|
||||
### Bug fixes
|
||||
|
||||
- [#15589](https://github.com/influxdata/telegraf/pull/15589) `common.socket` Switch to context to simplify closing
|
||||
- [#15601](https://github.com/influxdata/telegraf/pull/15601) `inputs.ping` Check addr length to avoid crash
|
||||
|
|
@ -915,7 +1031,7 @@ The `telegraf config migrate` command might be able to help with the migration.
|
|||
- [#15586](https://github.com/influxdata/telegraf/pull/15586) `parsers.xpath` Allow resolving extensions
|
||||
- [#15630](https://github.com/influxdata/telegraf/pull/15630) `tools.custom_builder` Handle multiple instances of the same plugin correctly
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- [#15582](https://github.com/influxdata/telegraf/pull/15582) `deps` Bump cloud.google.com/go/storage from 1.41.0 to 1.42.0
|
||||
- [#15623](https://github.com/influxdata/telegraf/pull/15623) `deps` Bump cloud.google.com/go/storage from 1.42.0 to 1.43.0
|
||||
|
|
@ -938,7 +1054,7 @@ For versions earlier than v1.13 and earlier see
|
|||
|
||||
## v1.31.1 {date="2024-07-01"}
|
||||
|
||||
### Bugfixes
|
||||
### Bug fixes
|
||||
|
||||
- [#15488](https://github.com/influxdata/telegraf/pull/15488) `agent` Ignore startup-errors in test mode
|
||||
- [#15568](https://github.com/influxdata/telegraf/pull/15568) `inputs.chrony` Handle ServerStats4 response
|
||||
|
|
@ -949,7 +1065,7 @@ For versions earlier than v1.13 and earlier see
|
|||
- [#15514](https://github.com/influxdata/telegraf/pull/15514) `logging` Add back constants for backward compatibility
|
||||
- [#15531](https://github.com/influxdata/telegraf/pull/15531) `secretstores.oauth2` Ensure endpoint params is not nil
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- [#15483](https://github.com/influxdata/telegraf/pull/15483) `deps` Bump cloud.google.com/go/monitoring from 1.18.1 to 1.19.0
|
||||
- [#15559](https://github.com/influxdata/telegraf/pull/15559) `deps` Bump github.com/Azure/azure-kusto-go from 0.15.2 to 0.15.3
|
||||
|
|
@ -1643,7 +1759,7 @@ can help with migrating to newer plugins.
|
|||
- Avoid negative refcounts for tracking metrics
|
||||
- Maintain tracking information post-apply
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- Update `cloud.google.com/go/bigquery` from 1.56.0 to 1.57.1
|
||||
- Update `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs` from 1.26.0 to 1.27.2
|
||||
|
|
@ -1670,7 +1786,7 @@ can help with migrating to newer plugins.
|
|||
- JSON v2 (`parsers.json_v2`): Log inner errors.
|
||||
- s7comm (`inputs.s7comm`): Truncate strings to reported length.
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- Update `github.com/gosnmp/gosnmp` from 1.35.1-0.20230602062452-f30602b8dad6 to 1.36.1.
|
||||
- Update `github.com/Masterminds/semver/v3` from 3.2.0 to 3.2.1.
|
||||
|
|
@ -1701,7 +1817,7 @@ can help with migrating to newer plugins.
|
|||
- Parse metrics correctly on FreeBSD 14.
|
||||
- Support gathering metrics on zfs 2.2.0 and later.
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- Update `cloud.google.com/go/storage` from 1.30.1 to 1.34.1.
|
||||
- Update `github.com/aws/aws-sdk-go-v2/config` from 1.18.42 to 1.19.1.
|
||||
|
|
@ -1737,7 +1853,7 @@ can help with migrating to newer plugins.
|
|||
- s7comm (`inputs.s7comm`): Allow PDU-size to be set as config option.
|
||||
- Vault (`inputs.vault`): Use http client to handle redirects correctly.
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- Update `github.com/apache/arrow/go/v13` from 13.0.0-git to 13.0.0.
|
||||
- Update `github.com/google/cel-go` from 0.14.1-git to 0.18.1.
|
||||
|
|
@ -1779,7 +1895,7 @@ can help with migrating to newer plugins.
|
|||
- systemd Units `inputs.systemd_units`): Add missing upstream states.
|
||||
- Template (`processors.template`): Handle tracking metrics correctly.
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- Update `github.com/aliyun/alibaba-cloud-sdk-go` from 1.62.470 to 1.62.563.
|
||||
- Update `github.com/aws/aws-sdk-go-v2/config` from 1.18.27 to 1.18.42.
|
||||
|
|
@ -1795,7 +1911,7 @@ can help with migrating to newer plugins.
|
|||
|
||||
## v1.28.1 {date="2023-09-12"}
|
||||
|
||||
### Bugfixes
|
||||
### Bug fixes
|
||||
|
||||
- Packaging: Revert permission change on package configs
|
||||
- Redis (`inputs.redis`): Fix password typo
|
||||
|
|
@ -1895,7 +2011,7 @@ can help with migrating to newer plugins.
|
|||
- Parser (`processors.parser`) Allow also non-string fields
|
||||
- Template (`processors.template`): Unify template metric
|
||||
|
||||
### Bugfixes
|
||||
### Bug fixes
|
||||
|
||||
- Packaging: Change the systemd KillMode from control-group to mixed
|
||||
- AMQP Consumer (`inputs.amqp_consumer`): Print error on connection failure
|
||||
|
|
@ -1912,7 +2028,7 @@ can help with migrating to newer plugins.
|
|||
- Allow sqlite on Windows (amd64 and arm64)
|
||||
- Move conversion_style config option to the right place of sample config
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- Update `github.com/aws/aws-sdk-go-v2/service/kinesis` from 1.18.2 to 1.18.5.
|
||||
- Update `github.com/hashicorp/consul/api` from 1.20.0 to 1.24.0.
|
||||
|
|
@ -2068,7 +2184,7 @@ can help with migrating to newer plugins.
|
|||
[#9617](https://github.com/golang/go/issues/9617) or
|
||||
[#56528](https://github.com/golang/go/issues/56528)). If you worked around
|
||||
that issue, please remove the workaround before using v1.27+. In case you
|
||||
experience issues with abbreviated timezones please file an issue!
|
||||
experience issues with abbreviated timezones please file an issue.
|
||||
- **Internal Parser methods**: Removal of old-style parser creation. This
|
||||
should not directly affect users as it is an API change. All parsers in
|
||||
Telegraf are already ported to the new framework. If you experience any
|
||||
|
|
@ -2147,7 +2263,7 @@ can help with migrating to newer plugins.
|
|||
- Prometheus Remote (`serializer.prometheusremote`): Improve performance
|
||||
- Test (`test`): Allow to capture all messages during test
|
||||
|
||||
### Bugfixes
|
||||
### Bug fixes
|
||||
|
||||
- Cloud PubSub (`inputs.cloud_pubsub`): Fix gzip decompression.
|
||||
- GNMI (`inputs.gnmi`):
|
||||
|
|
@ -2168,7 +2284,7 @@ can help with migrating to newer plugins.
|
|||
- Lookup (`processors.lookup`): Do not strip tracking info.
|
||||
- Influx (`serializers.influx`): Restore disabled uint support by default.
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- Update cloud.google.com/go/monitoring from 1.13.0 to 1.14.0.
|
||||
- Update github.com/aliyun/alibaba-cloud-sdk-go from 1.62.193 to 1.62.337.
|
||||
|
|
@ -2390,7 +2506,7 @@ can help with migrating to newer plugins.
|
|||
- Add support for additional input plugins.
|
||||
- Convert many output plugins.
|
||||
|
||||
### Bugfixes
|
||||
### Bug fixes
|
||||
|
||||
- Allow graceful shutdown on interrupt (e.g. Ctrl-C).
|
||||
- Only rotate log on SIGHUP if needed.
|
||||
|
|
@ -2400,7 +2516,7 @@ can help with migrating to newer plugins.
|
|||
- ethtool (`inputs.ethtool`): Close namespace file to prevent crash.
|
||||
- statsd (`inputs.statsd`): On close, verify listener is not nil.
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- Update cloud.google.com/go/storage from 1.28.1 to 1.29.0.
|
||||
- Update github.com/Azure/go-autorest/autorest/adal from 0.9.21 to 0.9.22.
|
||||
|
|
@ -2419,7 +2535,7 @@ can help with migrating to newer plugins.
|
|||
|
||||
## v1.25.3 {date="2023-02-27"}
|
||||
|
||||
### Bugfixes
|
||||
### Bug fixes
|
||||
|
||||
- Fix reload config on config update/SIGHUP.
|
||||
- Bond (`inputs.bond`): Reset slave stats for each interface.
|
||||
|
|
@ -2428,7 +2544,7 @@ can help with migrating to newer plugins.
|
|||
- XPath (`parsers.xpath`): Fix panic for JSON name expansion.
|
||||
- JSON (`serializers.json`): Fix stateful transformations.
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- Update cloud.google.com/go/pubsub from 1.27.1 to 1.28.0.
|
||||
- Update github.com/containerd/containerd from 1.6.8 to 1.6.18.
|
||||
|
|
@ -2462,7 +2578,7 @@ can help with migrating to newer plugins.
|
|||
- Prometheus Client (`outputs.prometheus_client`): Expire with ticker, not add/collect.
|
||||
- Secret Stores: Check store id format and presence.
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
- Update cloud.google.com/go/bigquery from 1.44.0 to 1.45.0.
|
||||
- Update github.com/99designs/keyring from 1.2.1 to 1.2.2.
|
||||
- Update github.com/antchfx/xmlquery from 1.3.12 to 1.3.15.
|
||||
|
|
@ -2512,7 +2628,7 @@ can help with migrating to newer plugins.
|
|||
- Fix handling of "id" and print failing secret-store.
|
||||
- Fix handling of TOML strings.
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
- Update cloud.google.com/go/storage from 1.23.0 to 1.28.1.
|
||||
- Update github.com/antchfx/jsonquery from 1.3.0 to 1.3.1.
|
||||
- Update github.com/aws/aws-sdk-go-v2 from 1.17.1 to 1.17.3.
|
||||
|
|
@ -2632,7 +2748,7 @@ can help with migrating to newer plugins.
|
|||
- Azure Data Explorer (`outputs.azure_data_explorer`): Update test call to `NewSerializer`.
|
||||
- Parser processor (`processors.parser`): Handle empty metric names correctly.
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
- Update `github.com/aliyun/alibaba-cloud-sdk-go` from 1.61.1836 to 1.62.77
|
||||
- Update `github.com/gosnmp/gosnmp` from 1.34.0 to 1.35.0
|
||||
- Update `OpenTelemetry` from 0.2.30 to 0.2.33
|
||||
|
|
@ -2654,7 +2770,7 @@ can help with migrating to newer plugins.
|
|||
- Prometheus output (`outputs.prometheus`): Expire metrics correctly during adds.
|
||||
- Yandex Cloud Monitoring (`outputs.yandex_cloud_monitoring`): Catch int64 values.
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
- Update `github.com/aliyun/alibaba-cloud-sdk-go` from 1.61.1818 to 1.61.1836
|
||||
- Update `github.com/prometheus/client_golang` from 1.13.0 to 1.13.1
|
||||
- Update `github.com/aws/aws-sdk-go-v2/service/timestreamwrite` from 1.13.12 to 1.14.5
|
||||
|
|
@ -2695,7 +2811,7 @@ can help with migrating to newer plugins.
|
|||
### Features
|
||||
- Support sections in markdown.
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
- Update github.com/snowflakedb/gosnowflake from 1.6.2 to 1.6.13
|
||||
- Update github.com/sensu/sensu-go/api/core/v2 from 2.14.0 to 2.15.0
|
||||
- Update github.com/gofrs/uuid from 4.2.0& to 4.3.0
|
||||
|
|
@ -2931,7 +3047,7 @@ Older versions can be manually reverted on a per-plugin basis using the `tls_min
|
|||
|
||||
- Add coralogix dialect to opentelemetry
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- Update `github.com/testcontainers/testcontainers-go` from 0.12.0 to 0.13.0.
|
||||
- Update `github.com/apache/thrift` from 0.15.0 to 0.16.0.
|
||||
|
|
@ -2983,7 +3099,7 @@ Older versions can be manually reverted on a per-plugin basis using the `tls_min
|
|||
- Stackdriver (`stackdriver`) Handle when no buckets available.
|
||||
|
||||
|
||||
### Dependency Updates
|
||||
### Dependency updates
|
||||
|
||||
- Bump github.com/testcontainers/testcontainers-go from 0.12.0 to 0.13.0.
|
||||
- Bump github.com/apache/thrift from 0.15.0 to 0.16.0.
|
||||
|
|
|
|||
|
|
@ -64,7 +64,7 @@ influxdb3_cloud_dedicated:
|
|||
list_order: 3
|
||||
latest: cloud-dedicated
|
||||
link: "https://www.influxdata.com/contact-sales-cloud-dedicated/"
|
||||
latest_cli: 2.10.2
|
||||
latest_cli: 2.10.3
|
||||
placeholder_host: cluster-id.a.influxdb.io
|
||||
ai_sample_questions:
|
||||
- How do I migrate from InfluxDB v1 to InfluxDB Cloud Dedicated?
|
||||
|
|
@ -143,7 +143,7 @@ telegraf:
|
|||
versions: [v1]
|
||||
latest: v1.35
|
||||
latest_patches:
|
||||
v1: 1.35.0
|
||||
v1: 1.35.3
|
||||
ai_sample_questions:
|
||||
- How do I install and configure Telegraf?
|
||||
- How do I write a custom Telegraf plugin?
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -0,0 +1,342 @@
|
|||
# yaml-language-server: $schema=app-instance-schema.json
|
||||
apiVersion: kubecfg.dev/v1alpha1
|
||||
kind: AppInstance
|
||||
metadata:
|
||||
name: influxdb
|
||||
namespace: influxdb
|
||||
spec:
|
||||
# One or more secrets that are used to pull the images from an authenticated registry.
|
||||
# This will either be the secret provided to you, if using our registry, or a secret for your own registry
|
||||
# if self-hosting the images.
|
||||
imagePullSecrets:
|
||||
- name: <name of the secret>
|
||||
package:
|
||||
# The version of the clustered package that will be used.
|
||||
# This determines the version of all of the individual components.
|
||||
# When a new version of the product is released, this version should be updated and any
|
||||
# new config options should be updated below.
|
||||
image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:20250721-1796368
|
||||
apiVersion: influxdata.com/v1alpha1
|
||||
spec:
|
||||
# # Provides a way to pass down hosting environment specific configuration, such as an role ARN when using EKS IRSA.
|
||||
# # This section contains three multually-exclusive "blocks". Uncomment the block named after the hosting environment
|
||||
# # you run: "aws", "openshift" or "gke".
|
||||
# hostingEnvironment:
|
||||
# # # Uncomment this block if you're running in EKS.
|
||||
# # aws:
|
||||
# # eksRoleArn: 'arn:aws:iam::111111111111:role/your-influxdb-clustered-role'
|
||||
# #
|
||||
# # # Uncomment this block if you're running inside OpenShift.
|
||||
# # # Note: there are currently no OpenShift-specific parameters. You have to pass an empty object
|
||||
# # # as a marker that you're choosing OpenShift as hosting environment.
|
||||
# # openshift: {}
|
||||
# #
|
||||
# # # Uncomment this block if you're running in GKE:
|
||||
# # gke:
|
||||
# # # Authenticate to Google Cloud services via workload identity, this
|
||||
# # # annotates the 'iox' ServiceAccount with the role name you specify.
|
||||
# # # NOTE: This setting just enables GKE specific authentication mechanism,
|
||||
# # # You still need to enable `spec.objectStore.google` below if you want to use GCS.
|
||||
# # workloadIdentity:
|
||||
# # # Google Service Account name to use for the workload identity.
|
||||
# # serviceAccountEmail: <service-account>@<project-name>.iam.gserviceaccount.com
|
||||
catalog:
|
||||
# A postgresql style DSN that points at a postgresql compatible database.
|
||||
# eg: postgres://[user[:password]@][netloc][:port][/dbname][?param1=value1&...]
|
||||
dsn:
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: <your secret name here>
|
||||
key: <the key in the secret that contains the dsn>
|
||||
|
||||
# images:
|
||||
# # This can be used to override a specific image name with its FQIN
|
||||
# # (Fully Qualified Image Name) for testing. eg.
|
||||
# overrides:
|
||||
# - name: influxdb2-artifacts/iox/iox
|
||||
# newFQIN: mycompany/test-iox-build:aninformativetag
|
||||
#
|
||||
# # Set this variable to the prefix of your internal registry. This will be prefixed to all expected images.
|
||||
# # eg. us-docker.pkg.dev/iox:latest => registry.mycompany.io/us-docker.pkg.dev/iox:latest
|
||||
# registryOverride: <the domain name portion of your registry (registry.mycompany.io in the example above)>
|
||||
|
||||
objectStore:
|
||||
# Bucket that the parquet files will be stored in
|
||||
bucket: <bucket name>
|
||||
|
||||
# Uncomment one of the following (s3, azure)
|
||||
# to enable the configuration of your object store
|
||||
s3:
|
||||
# URL for S3 Compatible object store
|
||||
endpoint: <S3 url>
|
||||
|
||||
# Set to true to allow communication over HTTP (instead of HTTPS)
|
||||
allowHttp: "false"
|
||||
|
||||
# S3 Access Key
|
||||
# This can also be provided as a valueFrom: secretKeyRef:
|
||||
accessKey:
|
||||
value: <your access key>
|
||||
|
||||
# S3 Secret Key
|
||||
# This can also be provided as a valueFrom: secretKeyRef:
|
||||
secretKey:
|
||||
value: <your secret>
|
||||
|
||||
# This value is required for AWS S3, it may or may not be required for other providers.
|
||||
region: <region>
|
||||
|
||||
# azure:
|
||||
# Azure Blob Storage Access Key
|
||||
# This can also be provided as a valueFrom: secretKeyRef:
|
||||
# accessKey:
|
||||
# value: <your access key>
|
||||
|
||||
# Azure Blob Storage Account
|
||||
# This can also be provided as a valueFrom: secretKeyRef:
|
||||
# account:
|
||||
# value: <your access key>
|
||||
|
||||
# There are two main ways you can access a Google:
|
||||
#
|
||||
# a) GKE Workload Identity: configure workload identity in the top level `hostingEnvironment.gke` section.
|
||||
# b) Explicit service account secret (JSON) file: use the `serviceAccountSecret` field here
|
||||
#
|
||||
# If you pick (a) you may not need to uncomment anything else in this section,
|
||||
# but you still need to tell influxdb that you intend to use Google Cloud Storage.
|
||||
# so you need to specify an empty object. Uncomment the following line:
|
||||
#
|
||||
# google: {}
|
||||
#
|
||||
#
|
||||
# If you pick (b), uncomment the following block:
|
||||
#
|
||||
# google:
|
||||
# # If you're authenticating to Google Cloud service using a Service Account credentials file, as opposed
|
||||
# # as to use workload identity (see above) you need to provide a reference to a k8s secret containing the credentials file.
|
||||
# serviceAccountSecret:
|
||||
# # Kubernetes Secret name containing the credentials for a Google IAM Service Account.
|
||||
# name: <secret name>
|
||||
# # The key within the Secret containing the credentials.
|
||||
# key: <key name>
|
||||
|
||||
# Parameters to tune observability configuration, such as Prometheus ServiceMonitor's.
|
||||
observability: {}
|
||||
# retention: 12h
|
||||
# serviceMonitor:
|
||||
# interval: 10s
|
||||
# scrapeTimeout: 30s
|
||||
|
||||
# Ingester pods have a volume attached.
|
||||
ingesterStorage:
|
||||
# (Optional) Set the storage class. This will differ based on the K8s environment and desired storage characteristics.
|
||||
# If not set, the default storage class will be used.
|
||||
# storageClassName: <storage-class>
|
||||
# Set the storage size (minimum 2Gi recommended)
|
||||
storage: <storage-size>
|
||||
|
||||
# Monitoring pods have a volume attached.
|
||||
monitoringStorage:
|
||||
# (Optional) Set the storage class. This will differ based on the K8s environment and desired storage characteristics.
|
||||
# If not set, the default storage class will be used.
|
||||
# storageClassName: <storage-class>
|
||||
# Set the storage size (minimum 10Gi recommended)
|
||||
storage: <storage-size>
|
||||
|
||||
# Uncomment the follow block if using our provided Ingress.
|
||||
#
|
||||
# We currently only support the ingress NGINX ingress controller: https://github.com/kubernetes/ingress-nginx
|
||||
#
|
||||
# ingress:
|
||||
# hosts:
|
||||
# # This is the host on which you will access Influxdb 3.0, for both reads and writes
|
||||
# - <influxdb-host>
|
||||
|
||||
# (Optional)
|
||||
# The name of the Kubernetes Secret containing a TLS certificate, this should exist in the same namespace as the Clustered installation.
|
||||
# If you are using cert-manager, enter a name for the Secret it should create.
|
||||
# tlsSecretName: <secret-name>
|
||||
|
||||
# http:
|
||||
# # Usually you have only one ingress controller installed in a given cluster.
|
||||
# # In case you have more than one, you have to specify the "class name" of the ingress controller you want to use
|
||||
# className: nginx
|
||||
|
||||
# grpc:
|
||||
# # Usually you have only one ingress controller installed in a given cluster.
|
||||
# # In case you have more than one, you have to specify the "class name" of the ingress controller you want to use
|
||||
# className: nginx
|
||||
#
|
||||
# Enables specifying which 'type' of Ingress to use, alongside whether to place additional annotations
|
||||
# onto those objects, this is useful for third party software in your environment, such as cert-manager.
|
||||
# template:
|
||||
# apiVersion: 'route.openshift.io/v1'
|
||||
# kind: 'Route'
|
||||
# metadata:
|
||||
# annotations:
|
||||
# 'example-annotation': 'annotation-value'
|
||||
|
||||
# Enables specifying customizations for the various components in InfluxDB 3.0.
|
||||
# components:
|
||||
# # router:
|
||||
# # template:
|
||||
# # containers:
|
||||
# # iox:
|
||||
# # env:
|
||||
# # INFLUXDB_IOX_MAX_HTTP_REQUESTS: "5000"
|
||||
# # nodeSelector:
|
||||
# # disktype: ssd
|
||||
# # tolerations:
|
||||
# # - effect: NoSchedule
|
||||
# # key: example
|
||||
# # operator: Exists
|
||||
# # Common customizations for all components go in a pseudo-component called "common"
|
||||
# # common:
|
||||
# # template:
|
||||
# # # Metadata contains custom annotations (and labels) to be added to a component. E.g.:
|
||||
# # metadata:
|
||||
# # annotations:
|
||||
# # telegraf.influxdata.com/class: "foo"
|
||||
|
||||
# Example of setting nodeAffinity for the querier component to ensure it runs on nodes with specific labels
|
||||
# components:
|
||||
# # querier:
|
||||
# # template:
|
||||
# # affinity:
|
||||
# # nodeAffinity:
|
||||
# # requiredDuringSchedulingIgnoredDuringExecution:
|
||||
# # Node must have these labels to be considered for scheduling
|
||||
# # nodeSelectorTerms:
|
||||
# # - matchExpressions:
|
||||
# # - key: required
|
||||
# # operator: In
|
||||
# # values:
|
||||
# # - ssd
|
||||
# # preferredDuringSchedulingIgnoredDuringExecution:
|
||||
# # Scheduler will prefer nodes with these labels but they're not required
|
||||
# # - weight: 1
|
||||
# # preference:
|
||||
# # matchExpressions:
|
||||
# # - key: preferred
|
||||
# # operator: In
|
||||
# # values:
|
||||
# # - postgres
|
||||
|
||||
# Example of setting podAntiAffinity for the querier component to ensure it runs on nodes with specific labels
|
||||
# components:
|
||||
# # querier:
|
||||
# # template:
|
||||
# # affinity:
|
||||
# # podAntiAffinity:
|
||||
# # requiredDuringSchedulingIgnoredDuringExecution:
|
||||
# # Ensures that the pod will not be scheduled on a node if another pod matching the labelSelector is already running there
|
||||
# # - labelSelector:
|
||||
# # matchExpressions:
|
||||
# # - key: app
|
||||
# # operator: In
|
||||
# # values:
|
||||
# # - querier
|
||||
# # topologyKey: "kubernetes.io/hostname"
|
||||
# # preferredDuringSchedulingIgnoredDuringExecution:
|
||||
# # Scheduler will prefer not to schedule pods together but may do so if necessary
|
||||
# # - weight: 1
|
||||
# # podAffinityTerm:
|
||||
# # labelSelector:
|
||||
# # matchExpressions:
|
||||
# # - key: app
|
||||
# # operator: In
|
||||
# # values:
|
||||
# # - querier
|
||||
# # topologyKey: "kubernetes.io/hostname"
|
||||
|
||||
# Uncomment the following block to tune the various pods for their cpu/memory/replicas based on workload needs.
|
||||
# Only uncomment the specific resources you want to change, anything uncommented will use the package default.
|
||||
# (You can read more about k8s resources and limits in https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits)
|
||||
#
|
||||
# resources:
|
||||
# # The ingester handles data being written
|
||||
# ingester:
|
||||
# requests:
|
||||
# cpu: <cpu amount>
|
||||
# memory: <ram amount>
|
||||
# replicas: <num replicas> # The default for ingesters is 3 to increase availability
|
||||
#
|
||||
# # optionally you can specify the resource limits which improves isolation.
|
||||
# # (see https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits)
|
||||
# # limits:
|
||||
# # cpu: <cpu amount>
|
||||
# # memory: <ram amount>
|
||||
|
||||
# # The compactor reorganizes old data to improve query and storage efficiency.
|
||||
# compactor:
|
||||
# requests:
|
||||
# cpu: <cpu amount>
|
||||
# memory: <ram amount>
|
||||
# replicas: <num replicas> # the default is 1
|
||||
|
||||
# # The querier handles querying data.
|
||||
# querier:
|
||||
# requests:
|
||||
# cpu: <cpu amount>
|
||||
# memory: <ram amount>
|
||||
# replicas: <num replicas> # the default is 3
|
||||
|
||||
# # The router performs some api routing.
|
||||
# router:
|
||||
# requests:
|
||||
# cpu: <cpu amount>
|
||||
# memory: <ram amount>
|
||||
# replicas: <num replicas> # the default is 3
|
||||
|
||||
admin:
|
||||
# The list of users to grant access to Clustered via influxctl
|
||||
users:
|
||||
# First name of user
|
||||
- firstName: <first-name>
|
||||
# Last name of user
|
||||
lastName: <last-name>
|
||||
# Email of user
|
||||
email: <email>
|
||||
# The ID that the configured Identity Provider uses for the user in oauth flows
|
||||
id: <id>
|
||||
# Optional list of user groups to assign to the user, rather than the default groups. The following groups are currently supported: Admin, Auditor, Member
|
||||
userGroups:
|
||||
- <group-name>
|
||||
|
||||
# The dsn for the postgres compatible database (note this is the same as defined above)
|
||||
dsn:
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: <secret name>
|
||||
key: <dsn key>
|
||||
# The identity provider to be used e.g. "keycloak", "auth0", "azure", etc
|
||||
# Note for Azure Active Directory it must be exactly "azure"
|
||||
identityProvider: <identity-provider>
|
||||
# The JWKS endpoint provided by the Identity Provider
|
||||
jwksEndpoint: <endpoint>
|
||||
|
||||
# # This (optional) section controls how InfluxDB issues outbound requests to other services
|
||||
# egress:
|
||||
# # If you're using a custom CA you will need to specify the full custom CA bundle here.
|
||||
# #
|
||||
# # NOTE: the custom CA is currently only honoured for outbound requests used to obtain
|
||||
# # the JWT public keys from your identiy provider (see `jwksEndpoint`).
|
||||
# customCertificates:
|
||||
# valueFrom:
|
||||
# configMapKeyRef:
|
||||
# key: ca.pem
|
||||
# name: custom-ca
|
||||
|
||||
# We also include the ability to enable some features that are not yet ready for general availability
|
||||
# or for which we don't yet have a proper place to turn on an optional feature in the configuration file.
|
||||
# To turn on these you should include the name of the feature flag in the `featureFlag` array.
|
||||
#
|
||||
# featureFlags:
|
||||
# # Uncomment to install a Grafana deployment.
|
||||
# # Depends on one of the prometheus features being deployed.
|
||||
# # - grafana
|
||||
|
||||
# # The following 2 flags should be uncommented for k8s API 1.21 support.
|
||||
# # Note that this is an experimental configuration.
|
||||
# # - noMinReadySeconds
|
||||
# # - noGrpcProbes
|
||||
|
|
@ -5046,9 +5046,9 @@ tldts@^6.1.32:
|
|||
tldts-core "^6.1.86"
|
||||
|
||||
tmp@~0.2.3:
|
||||
version "0.2.3"
|
||||
resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.2.3.tgz#eb783cc22bc1e8bebd0671476d46ea4eb32a79ae"
|
||||
integrity sha512-nZD7m9iCPC5g0pYmcaxogYKggSfLsdxl8of3Q/oIbqCqLLIO9IAF0GWjX1z9NZRHPiXv8Wex4yDCaZsgEw0Y8w==
|
||||
version "0.2.4"
|
||||
resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.2.4.tgz#c6db987a2ccc97f812f17137b36af2b6521b0d13"
|
||||
integrity sha512-UdiSoX6ypifLmrfQ/XfiawN6hkjSBpCjhKxxZcWlUUmoXLaCKQU0bx4HF/tdDK2uzRuchf1txGvrWBzYREssoQ==
|
||||
|
||||
to-buffer@^1.1.1:
|
||||
version "1.2.1"
|
||||
|
|
|
|||
Loading…
Reference in New Issue