resolved merge conflicts

staging/commandbar-clustered-install
Scott Anderson 2024-09-27 09:34:00 -06:00
commit 6ec7504da8
35 changed files with 1839 additions and 462 deletions

View File

@ -68,7 +68,7 @@ influx3
influxctl
influxd
influxdata.com
iox
(iox|IOx)
keep-url
lat
locf

View File

@ -240,6 +240,83 @@ paths:
application/json:
schema:
$ref: '#/components/schemas/Error'
/ping:
get:
description: |
Reports the InfluxQL bridge querier health and the InfluxDB version of the instance.
The response is an HTTP `204` status code to inform you the querier is available.
For InfluxDB Cloud Dedicated, this endpoint only checks the status of queriers; it doesn't check the status of ingesters.
To check the health of ingesters before writing data, send a request to one of the [write endpoints](/influxdb/cloud-dedicated/api/v2/#tag/Write).
This endpoint doesn't require authentication.
operationId: GetPing
responses:
'204':
description: |
Success--the querier is available.
Headers contain InfluxDB version information.
headers:
X-Influxdb-Build:
description: |
The type of InfluxDB build.
schema:
type: string
X-Influxdb-Version:
description: |
The version of InfluxDB.
schema:
type: integer
4xx:
description: |
#### InfluxDB Cloud
- Doesn't return this error.
security:
- {}
servers: []
summary: Get the status of the instance
tags:
- Ping
head:
description: |
Reports the InfluxQL bridge querier health and the InfluxDB version of the instance.
The response is an HTTP `204` status code to inform you the querier is available.
For InfluxDB Cloud Dedicated, this endpoint only checks the status of queriers; it doesn't check the status of ingesters.
To check the health of ingesters before writing data, send a request to one of the [write endpoints](/influxdb/cloud-dedicated/api/v2/#tag/Write).
This endpoint doesn't require authentication.
operationId: HeadPing
responses:
'204':
description: |
Success--the querier is available.
Headers contain InfluxDB version information.
headers:
X-Influxdb-Build:
description: The type of InfluxDB build.
schema:
type: string
X-Influxdb-Version:
description: |
The version of InfluxDB.
schema:
type: integer
4xx:
description: |
#### InfluxDB Cloud
- Doesn't return this error.
security:
- {}
servers: []
summary: Get the status of the instance
tags:
- Ping
components:
parameters:
TraceSpan:
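
For a quick manual check of the `/ping` endpoint described above, a `curl` request similar to the following sketch works; the host is a placeholder for your cluster URL, and no authentication header is needed:

```sh
# Request the querier health check and include response headers in the output.
# Replace cluster-id.a.influxdb.io with your cluster host (placeholder).
curl --include "https://cluster-id.a.influxdb.io/ping"

# A healthy querier responds with HTTP 204 (no body) and version headers,
# for example X-Influxdb-Build and X-Influxdb-Version.
```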

View File

@ -149,17 +149,20 @@ paths:
/ping:
get:
description: |
Retrieves the status and InfluxDB version of the instance.
Reports the InfluxQL bridge querier health and the InfluxDB version of the instance.
The response is an HTTP `204` status code to inform you the querier is available.
Use this endpoint to monitor uptime for the InfluxDB instance. The response
returns a HTTP `204` status code to inform you the instance is available.
For InfluxDB Cloud Dedicated, this endpoint only checks the status of queriers; it doesn't check the status of ingesters.
To check the health of ingesters before writing data, send a request to one of the [write endpoints](/influxdb/cloud-dedicated/api/v2/#tag/Write).
This endpoint doesn't require authentication.
operationId: GetPing
responses:
'204':
description: |
Success.
Success--the querier is available.
Headers contain InfluxDB version information.
headers:
X-Influxdb-Build:
@ -184,17 +187,20 @@ paths:
- Ping
head:
description: |
Returns the status and InfluxDB version of the instance.
Reports the InfluxQL bridge querier health and the InfluxDB version of the instance.
The response is an HTTP `204` status code to inform you the querier is available.
Use this endpoint to monitor uptime for the InfluxDB instance. The response
returns a HTTP `204` status code to inform you the instance is available.
For InfluxDB Cloud Dedicated, this endpoint only checks the status of queriers; it doesn't check the status of ingesters.
To check the health of ingesters before writing data, send a request to one of the [write endpoints](/influxdb/cloud-dedicated/api/v2/#tag/Write).
This endpoint doesn't require authentication.
operationId: HeadPing
responses:
'204':
description: |
Success.
Success--the querier is available.
Headers contain InfluxDB version information.
headers:
X-Influxdb-Build:

View File

@ -646,17 +646,33 @@ paths:
'204':
description: Write data is correctly formatted and accepted for writing to the database.
'400':
description: |
Some or all data from the batch was rejected and not written. The response body indicates whether a partial write occurred or all data was rejected.
If a partial write occurred, then some points from the batch are written and queryable.
The response body contains details about the [rejected points](/influxdb/clustered/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points.
content:
application/json:
examples:
rejectedAllPoints:
summary: Rejected all points
value:
code: invalid
line: 2
message: 'no data written, errors encountered on line(s): error message for first rejected point\n error message for second rejected point\n error message for Nth rejected point (up to 100 rejected points)'
partialWriteErrorWithRejectedPoints:
summary: Partial write rejects some points
value:
code: invalid
line: 2
message: 'partial write has occurred, errors encountered on line(s): error message for first rejected point\n error message for second rejected point\n error message for Nth rejected point (up to 100 rejected points)'
schema:
$ref: '#/components/schemas/LineProtocolError'
description: Line protocol poorly formed and no points were written. Response can be used to determine the first malformed line in the body line-protocol. All data in body was rejected and not written.
'401':
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
description: Token doesn't have sufficient permissions to write to this database or the database doesn't exist.
description: Token doesn't have sufficient permissions to write to this database or the database doesn't exist.
'403':
content:
application/json:
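
As an illustrative sketch of the rejected-point behavior described in the `400` response above, the following `curl` request writes a two-line batch in which the second line is malformed; the host, database name, and token are placeholders, and the exact error message text may differ:

```sh
# Write a two-line line protocol batch to the v2 write endpoint (placeholders: host, database, token).
# The second line omits a field value, so the response is HTTP 400 with a JSON body that
# describes the rejected point; the well-formed first line may still be written (partial write).
curl --include "https://cluster-host.com/api/v2/write?bucket=DATABASE_NAME&precision=ns" \
  --header "Authorization: Bearer DATABASE_TOKEN" \
  --data-binary 'home,room=Kitchen temp=22.1
home,room=Kitchen temp='
```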

View File

@ -74,6 +74,7 @@ Custom partitioning has the following limitations:
- Database and table partitions can only be defined on create.
You cannot update the partition strategy of a database or table after it has
been created.
- A partition template must include a time part.
- You can partition by up to eight dimensions (seven tags and a time interval).
## How partitioning works
@ -89,11 +90,14 @@ _For more detailed information, see [Partition templates](/influxdb/cloud-dedica
### Partition keys
A partition key uniquely identifies a partition. The structure of partition keys
is defined by a _[partition template](#partition-templates)_. Partition keys are
composed of up to eight parts or dimensions (tags, tag buckets, and time).
A partition key uniquely identifies a partition.
A _[partition template](#partition-templates)_ defines the partition key format.
Partition keys are
composed of up to 8 dimensions (1 time part and up to 7 tag or tag bucket parts).
Each part is delimited by the partition key separator (`|`).
The default format for partition keys is `%Y-%m-%d` (for example, `2024-01-01`).
{{< expand-wrapper >}}
{{% expand "View example partition templates and keys" %}}

View File

@ -15,20 +15,20 @@ A partition key uniquely identifies a partition and is used to name the partitio
Parquet file in the [Object store](/influxdb/cloud-dedicated/reference/internals/storage-engine/#object-store).
A partition template consists of 1-8 _template parts_---dimensions to partition data by.
There are three types of template parts:
Three types of template parts exist:
- **tag**: An [InfluxDB tag](/influxdb/cloud-dedicated/reference/glossary/#tag)
to partition by.
- **tag bucket**: An [InfluxDB tag](/influxdb/cloud-dedicated/reference/glossary/#tag)
and number of "buckets" to group tag values into. Data is partitioned by the
tag bucket rather than each distinct tag value.
- **time**: A Rust strftime date and time string that specifies the time interval
- {{< req type="key" >}} **time**: A Rust strftime date and time string that specifies the time interval
to partition data by. The smallest unit of time included in the time part
template is the interval used to partition data.
{{% note %}}
A partition template can include up to 7 total tag and tag bucket parts
and only 1 time part.
A partition template must include 1 [time part](#time-part-templates)
and can include up to 7 total [tag](#tag-part-templates) and [tag bucket](#tag-bucket-part-templates) parts.
{{% /note %}}
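
As a sketch of how these parts combine when creating a database, the `influxctl database create` command takes one flag per template part; the database name, tag keys, and bucket count below are illustrative:

```sh
# Create a database partitioned by day, by the region tag, and by 250 buckets of sensor_id values.
# One time part (--template-timeformat) is required; tag and tag bucket parts are optional.
influxctl database create \
  --template-timeformat '%Y-%m-%d' \
  --template-tag region \
  --template-tag-bucket sensor_id,250 \
  DATABASE_NAME
```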
<!-- TOC -->
@ -75,6 +75,12 @@ characters must be [percent encoded](https://developer.mozilla.org/en-US/docs/Gl
Tag part templates consist of a _tag key_ to partition by.
Generated partition keys include the unique _tag value_ specific to each partition.
A partition template may include a given tag key only once in template parts
that operate on tags (tag value and tag bucket)--for example:
If a template partitions on unique values of `tag_A`, then
you can't use `tag_A` as a tag bucket part.
## Tag bucket part templates
Tag bucket part templates consist of a _tag key_ to partition by and the
@ -102,6 +108,12 @@ Tag buckets should be used to partition by high cardinality tags or tags with an
unknown number of distinct values.
{{% /note %}}
A partition template may include a given tag key only once in template parts
that operate on tags (tag value and tag bucket)--for example:
If a template partitions on unique values of `tag_A`, then
you can't use `tag_A` as a tag bucket part.
## Time part templates
Time part templates use a limited subset of the

View File

@ -35,7 +35,7 @@ your {{< product-name omit=" Clustered" >}} cluster.
#### System tables are subject to change
System tables are not part of InfluxDB's stable API and may change with new releases.
The provided schema information and query examples are valid as of **August 22, 2024**.
The provided schema information and query examples are valid as of **September 24, 2024**.
If you detect a schema change or a non-functioning query example, please
[submit an issue](https://github.com/influxdata/docs-v2/issues/new/choose).
@ -108,27 +108,7 @@ with the name of the table you want to query information about.
---
{{% code-placeholders "TABLE_NAME" %}}
### View partition templates of all tables
```sql
SELECT * FROM system.tables
```
#### Example results
| table_name | partition_template |
| :--------- | :----------------------------------------------------------------------------------------- |
| weather | `{"parts":[{"timeFormat":"%Y-%m-%d"},{"bucket":{"tagName":"location","numBuckets":250}}]}` |
| home | `{"parts":[{"timeFormat":"%Y-%m-%d"},{"tagValue":"room"},{"tagValue":"sensor_id"}]}` |
| numbers | `{"parts":[{"timeFormat":"%Y"}]}` |
{{% note %}}
If a table doesn't include a partition template in the output of this command,
the table uses the default (1 day) partition strategy and doesn't partition
by tags or tag buckets.
{{% /note %}}
{{% code-placeholders "TABLE_NAME_(1|2|3)|TABLE_NAME" %}}
### View the partition template of a specific table
@ -142,6 +122,12 @@ SELECT * FROM system.tables WHERE table_name = 'TABLE_NAME'
| :--------- | :----------------------------------------------------------------------------------------- |
| weather | `{"parts":[{"timeFormat":"%Y-%m-%d"},{"bucket":{"tagName":"location","numBuckets":250}}]}` |
{{% note %}}
If a table doesn't include a partition template in the output of this command,
the table uses the default (1 day) partition strategy and doesn't partition
by tags or tag buckets.
{{% /note %}}
### View all partitions for a table
```sql
@ -166,6 +152,8 @@ SELECT
COUNT(*) AS partition_count
FROM
system.partitions
WHERE
table_name IN ('TABLE_NAME_1', 'TABLE_NAME_2', 'TABLE_NAME_3')
GROUP BY
table_name
```

View File

@ -1,4 +1,4 @@
---
---
title: Query system data
description: >
Query system tables in your InfluxDB Cloud Dedicated cluster to see data related
@ -10,28 +10,42 @@ menu:
weight: 105
related:
- /influxdb/cloud-dedicated/reference/cli/influxctl/query/
---
- /influxdb/cloud-dedicated/reference/internals/system-tables/
---
{{< product-name >}} stores data related to queries, tables, partitions, and
compaction in system tables in your cluster.
Query data in your cluster's system tables for information about your cluster.
compaction in _system tables_ within your cluster.
System tables contain time series data used by and generated from the
{{< product-name >}} internal monitoring system.
You can query the cluster system tables for information about your cluster.
- [Query system tables](#query-system-tables)
- [Optimize queries to reduce impact to your cluster](#optimize-queries-to-reduce-impact-to-your-cluster)
- [System tables](#system-tables)
- [Understanding system table data distribution](#understanding-system-table-data-distribution)
- [system.queries](#systemqueries)
- [system.tables](#systemtables)
- [system.partitions](#systempartitions)
- [system.compactor](#systemcompactor)
- [System query examples](#system-query-examples)
- [Query logs](#query-logs)
- [Partitions](#partitions)
- [Storage usage](#storage-usage)
- [Compaction](#compaction)
{{% warn %}}
#### May impact overall cluster performance
#### May impact cluster performance
Querying InfluxDB v3 system tables may impact the overall write and query
Querying InfluxDB v3 system tables may impact write and query
performance of your {{< product-name omit=" Clustered" >}} cluster.
Use filters to [optimize queries to reduce impact to your cluster](#optimize-queries-to-reduce-impact-to-your-cluster).
<!--------------- UPDATE THE DATE BELOW AS EXAMPLES ARE UPDATED --------------->
#### System tables are subject to change
System tables are not part of InfluxDB's stable API and may change with new releases.
The provided schema information and query examples are valid as of **August 22, 2024**.
The provided schema information and query examples are valid as of **September 18, 2024**.
If you detect a schema change or a non-functioning query example, please
[submit an issue](https://github.com/influxdata/docs-v2/issues/new/choose).
@ -40,16 +54,13 @@ If you detect a schema change or a non-functioning query example, please
## Query system tables
{{% warn %}}
_Querying system tables [may impact overall cluster performance](#may-impact-overall-cluster-performance)._
{{% /warn %}}
{{% note %}}
Querying system tables with `influxctl` requires **`influxctl` v2.8.0 or newer**.
{{% /note %}}
Use the [`influxctl query` command](/influxdb/cloud-dedicated/reference/cli/influxctl/query/)
and SQL to query system tables. Provide the following:
and SQL to query system tables.
Provide the following:
- **Enable system tables** with the `--enable-system-tables` command flag.
- **Database token**: A [database token](/influxdb/cloud-dedicated/admin/tokens/#database-tokens)
@ -61,6 +72,7 @@ and SQL to query system tables. Provide the following:
[`influxctl` connection profile](/influxdb/cloud-dedicated/reference/cli/influxctl/#configure-connection-profiles)
or the `--database` command flag.
- **SQL query**: The SQL query to execute.
Pass the query in one of the following ways:
- a string on the command line
@ -119,12 +131,152 @@ Replace the following:
When prompted, enter `y` to acknowledge the potential impact querying system
tables may have on your cluster.
### Optimize queries to reduce impact to your cluster
Querying InfluxDB v3 system tables may impact the performance of your
{{< product-name omit=" Clustered" >}} cluster.
As you write data to a cluster, the number of partitions and Parquet files
can increase to a point that impacts system table performance.
Queries that took milliseconds with fewer files and partitions might take 10
seconds or longer as files and partitions increase.
Use the following filters to optimize your system table queries and reduce the impact on your
cluster's performance.
In your queries, replace the following:
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the table to retrieve partitions for
- {{% code-placeholder-key %}}`PARTITION_ID`{{% /code-placeholder-key %}}: a [partition ID](#retrieve-a-partition-id) (int64)
- {{% code-placeholder-key %}}`PARTITION_KEY`{{% /code-placeholder-key %}}: a [partition key](/influxdb/cloud-dedicated/admin/custom-partitions/#partition-keys)
derived from the table's partition template.
The default format is `%Y-%m-%d` (for example, `2024-01-01`).
##### Filter by table name
When querying the `system.tables`, `system.partitions`, or `system.compactor` tables, use the
`WHERE` clause to filter by `table_name`.
{{% code-placeholders "TABLE_NAME" %}}
```sql
SELECT * FROM system.partitions WHERE table_name = 'TABLE_NAME'
```
{{% /code-placeholders %}}
##### Filter by partition key
When querying the `system.partitions` or `system.compactor` tables, use the `WHERE` clause to
filter by `partition_key`.
{{% code-placeholders "PARTITION_KEY" %}}
```sql
SELECT * FROM system.partitions WHERE partition_key = 'PARTITION_KEY'
```
{{% /code-placeholders %}}
To further improve performance, use `AND` to pair `partition_key` with `table_name`--for example:
{{% code-placeholders "TABLE_NAME|PARTITION_KEY" %}}
```sql
SELECT *
FROM system.partitions
WHERE
table_name = 'TABLE_NAME'
AND partition_key = 'PARTITION_KEY';
```
{{% /code-placeholders %}}
##### Filter by partition ID
When querying the `system.partitions` or `system.compactor` tables, use the `WHERE` clause to
filter by `partition_id`.
{{% code-placeholders "PARTITION_ID" %}}
```sql
SELECT * FROM system.partitions WHERE partition_id = PARTITION_ID
```
{{% /code-placeholders %}}
For the most optimized approach, use `AND` to pair `partition_id` with `table_name`--for example:
{{% code-placeholders "TABLE_NAME|PARTITION_ID" %}}
```sql
SELECT *
FROM system.partitions
WHERE
table_name = 'TABLE_NAME'
AND partition_id = PARTITION_ID;
```
{{% /code-placeholders %}}
Although you don't need to pair `partition_id` with `table_name` (because a partition ID is unique within a cluster),
it's the most optimized approach, _especially when you have many tables in a database_.
###### Retrieve a partition ID
To retrieve a partition ID, query `system.partitions` for a `table_name` and `partition_key` pair--for example:
{{% code-placeholders "TABLE_NAME|PARTITION_KEY" %}}
```sql
SELECT
table_name,
partition_key,
partition_id
FROM system.partitions
WHERE
table_name = 'TABLE_NAME'
AND partition_key = 'PARTITION_KEY';
```
{{% /code-placeholders %}}
The result contains the `partition_id`:
| table_name | partition_key | partition_id |
| :--------- | :---------------- | -----------: |
| weather | 43 \| 2020-05-27 | 1362 |
##### Combine filters for performance improvement
Use the `AND`, `OR`, or `IN` keywords to combine filters in your query.
- **Use `OR` or `IN` conditions when filtering for different values in the same column**--for example:
```sql
WHERE partition_id = 1 OR partition_id = 2
```
Use `IN` to make multiple `OR` conditions more readable--for example:
```sql
WHERE table_name IN ('foo', 'bar', 'baz')
```
- **Avoid mixing different columns in `OR` conditions**, as this won't improve performance--for example:
```sql
WHERE table_name = 'foo' OR partition_id = 2 -- This will not improve performance
```
## System tables
{{% warn %}}
_System tables are [subject to change](#system-tables-are-subject-to-change)._
{{% /warn %}}
### Understanding system table data distribution
Data in `system.tables`, `system.partitions`, and `system.compactor` includes
data for all [InfluxDB Queriers](/influxdb/cloud-dedicated/reference/internals/storage-engine/#querier) in your cluster.
The data comes from the catalog, and because all the queriers share one catalog,
the results from these three tables derive from the same source data,
regardless of which querier you connect to.
However, the `system.queries` table is different--data is local to each Querier.
`system.queries` contains a non-persisted log of queries run against the current
querier to which your query is routed.
The query log is specific to the current Querier and isn't shared across
queriers in your cluster.
Logs are scoped to the specified database.
- [system.queries](#systemqueries)
- [system.tables](#systemtables)
- [system.partitions](#systempartitions)
@ -132,12 +284,18 @@ _System tables are [subject to change](#system-tables-are-subject-to-change)._
### system.queries
The `system.queries` table contains an unpersisted log of queries run against
the current [InfluxDB Querier](/influxdb/cloud-dedicated/reference/internals/storage-engine/#querier)
to which your query is routed.
The query log is specific to the current Querier and is not shared across Queriers
in your cluster.
Logs are scoped to the specified database.
The `system.queries` table stores log entries for queries executed for the provided namespace (database) on the node that is _currently handling queries_.
`system.queries` reflects a process-local, in-memory, namespace-scoped query log.
While this table may be useful for debugging and monitoring queries, keep the following in mind:
- Records stored in `system.queries` are transient and volatile
- InfluxDB deletes `system.queries` records during pod restarts.
- Queries for one namespace can evict records from another namespace.
- Data reflects the state of a specific pod answering queries for the namespace.
- Data isn't shared across queriers in your cluster.
- A query for records in `system.queries` can return different results
depending on the pod the request was routed to.
{{< expand-wrapper >}}
{{% expand "View `system.queries` schema" %}}
@ -146,9 +304,9 @@ The `system.queries` table contains the following columns:
- id
- phase
- issue_time
- query_type
- query_text
- **issue_time**: timestamp when the query was issued
- **query_type**: type (syntax: `sql`, `flightsql`, or `influxql`) of the query
- **query_text**: query statement text
- partitions
- parquet_files
- plan_duration
@ -157,14 +315,20 @@ The `system.queries` table contains the following columns:
- end2end_duration
- compute_duration
- max_memory
- success
- **success**: execution status (boolean) of the query
- running
- cancelled
- trace_id
- **trace_id**: trace ID for debugging and monitoring events
{{% /expand %}}
{{< /expand-wrapper >}}
{{% note %}}
_When listing measurements (tables) available within a namespace,
some clients and query tools may include the `queries` table in the list of
namespace tables._
{{% /note %}}
### system.tables
The `system.tables` table contains information about tables in the specified database.
@ -202,7 +366,7 @@ The `system.partitions` table contains the following columns:
### system.compactor
The `system.compaction` table contains information about compacted partition Parquet
The `system.compactor` table contains information about compacted partition Parquet
files associated with the specified database.
{{< expand-wrapper >}}
@ -222,27 +386,36 @@ The `system.compactor` table contains the following columns:
- skipped_reason
{{% /expand %}}
{{< /expand-wrapper >}}
{{< /expand-wrapper >}}
## System query examples
{{% warn %}}
#### May impact cluster performance
Querying InfluxDB v3 system tables may impact write and query
performance of your {{< product-name omit=" Clustered" >}} cluster.
The examples in this section include `WHERE` filters to [optimize queries and reduce impact to your cluster](#optimize-queries-to-reduce-impact-to-your-cluster).
{{% /warn %}}
- [Query logs](#query-logs)
- [View all stored query logs](#view-all-stored-query-logs)
- [View query logs for queries with end-to-end durations above a threshold](#view-query-logs-for-queries-with-end-to-end-durations-above-a-threshold)
- [View query logs for a specific query within a time interval](#view-query-logs-for-a-specific-query-within-a-time-interval)
- [Partitions](#partitions)
- [View partition templates of all tables](#view-partition-templates-of-all-tables)
- [View the partition template of a specific table](#view-the-partition-template-of-a-specific-table)
- [View all partitions for a table](#view-all-partitions-for-a-table)
- [View the number of partitions per table](#view-the-number-of-partitions-per-table)
- [View the number of partitions for a specific table](#view-the-number-of-partitions-for-a-specific-table)
- [Storage usage](#storage-usage)
- [View the size of tables in megabytes](#view-the-size-of-tables-in-megabytes)
- [View the size of a specific table in megabytes](#view-the-size-of-a-specific-table-in-megabytes)
- [View the total size of all compacted partitions per table in bytes](#view-the-total-size-of-all-compacted-partitions-per-table-in-bytes)
- [View the total size of all compacted partitions in bytes](#view-the-total-size-of-all-compacted-partitions-in-bytes)
- [View the size in megabytes of a specific table](#view-the-size-in-megabytes-of-a-specific-table)
- [View the size in megabytes per table](#view-the-size-in-megabytes-per-table)
- [View the total size in bytes of compacted partitions per table](#view-the-total-size-in-bytes-of-compacted-partitions-per-table)
- [View the total size in bytes of compacted partitions for a specific table](#view-the-total-size-in-bytes-of-compacted-partitions-for-a-specific-table)
- [Compaction](#compaction)
- [View overall compaction totals for each table](#view-overall-compaction-totals-for-each-table)
- [View overall compaction totals for a specific table](#view-overall-compaction-totals-for-a-specific-table)
- [View compaction totals for each table](#view-compaction-totals-for-each-table)
- [View compaction totals for a specific table](#view-compaction-totals-for-a-specific-table)
In the examples below, replace {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}
with the name of the table you want to query information about.
@ -265,29 +438,75 @@ The following returns query logs for queries with an end-to-end duration greater
than 50 milliseconds.
```sql
SELECT * FROM system.queries WHERE end2end_duration::BIGINT > (50 * 1000000)
SELECT *
FROM
system.queries
WHERE
end2end_duration::BIGINT > (50 * 1000000)
```
---
### View query logs for a specific query within a time interval
{{< code-tabs >}}
{{% tabs %}}
[SQL](#)
[Python](#)
{{% /tabs %}}
{{% code-tab-content %}}
<!-----------------------------------BEGIN SQL------------------------------>
```sql
SELECT *
FROM system.queries
WHERE issue_time >= now() - INTERVAL '1 day'
AND query_text LIKE '%select * from home%'
```
<!-----------------------------------END SQL------------------------------>
{{% /code-tab-content %}}
{{% code-tab-content %}}
<!-----------------------------------BEGIN PYTHON------------------------------>
```python
from influxdb_client_3 import InfluxDBClient3

client = InfluxDBClient3(token=DATABASE_TOKEN,
                         host=HOSTNAME,
                         org='',
                         database=DATABASE_NAME)

# Run a query so the system.queries log contains a matching entry.
client.query('select * from home')

# Query the log for that statement issued within the last day.
reader = client.query('''
SELECT *
FROM system.queries
WHERE issue_time >= now() - INTERVAL '1 day'
  AND query_text LIKE '%select * from home%'
''',
                      language='sql',
                      headers=[(b"iox-debug", b"true")],
                      mode="reader")
```
<!-----------------------------------END PYTHON------------------------------>
{{% /code-tab-content %}}
{{< /code-tabs >}}
---
### Partitions
#### View partition templates of all tables
```sql
SELECT * FROM system.tables
```
#### View the partition template of a specific table
```sql
SELECT * FROM system.tables WHERE table_name = 'TABLE_NAME'
SELECT *
FROM
system.tables
WHERE
table_name = 'TABLE_NAME'
```
#### View all partitions for a table
```sql
SELECT * FROM system.partitions WHERE table_name = 'TABLE_NAME'
SELECT *
FROM
system.partitions
WHERE
table_name = 'TABLE_NAME'
```
#### View the number of partitions per table
@ -298,6 +517,8 @@ SELECT
COUNT(*) AS partition_count
FROM
system.partitions
WHERE
table_name IN ('foo', 'bar', 'baz')
GROUP BY
table_name
```
@ -313,23 +534,11 @@ WHERE
table_name = 'TABLE_NAME'
```
---
---
### Storage usage
#### View the size of tables in megabytes
```sql
SELECT
table_name,
SUM(total_size_mb) AS total_size_mb
FROM
system.partitions
GROUP BY
table_name
```
#### View the size of a specific table in megabytes
#### View the size in megabytes of a specific table
```sql
SELECT
@ -340,7 +549,21 @@ WHERE
table_name = 'TABLE_NAME'
```
#### View the total size of all compacted partitions per table in bytes
#### View the size in megabytes per table
```sql
SELECT
table_name,
SUM(total_size_mb) AS total_size_mb
FROM
system.partitions
WHERE
table_name IN ('foo', 'bar', 'baz')
GROUP BY
table_name
```
#### View the total size in bytes of compacted partitions per table
```sql
SELECT
@ -348,24 +571,28 @@ SELECT
SUM(total_l0_bytes) + SUM(total_l1_bytes) + SUM(total_l2_bytes) AS total_bytes
FROM
system.compactor
WHERE
table_name IN ('foo', 'bar', 'baz')
GROUP BY
table_name
```
#### View the total size of all compacted partitions in bytes
#### View the total size in bytes of compacted partitions for a specific table
```sql
SELECT
SUM(total_l0_bytes) + SUM(total_l1_bytes) + SUM(total_l2_bytes) AS total_bytes
FROM
system.compactor
WHERE
table_name = 'TABLE_NAME'
```
---
---
### Compaction
#### View overall compaction totals for each table
#### View compaction totals for each table
```sql
SELECT
@ -378,11 +605,13 @@ SELECT
SUM(total_l2_bytes) AS total_l2_bytes
FROM
system.compactor
WHERE
table_name IN ('foo', 'bar', 'baz')
GROUP BY
table_name
```
#### View overall compaction totals for a specific table
#### View compaction totals for a specific table
```sql
SELECT

View File

@ -420,7 +420,7 @@ Querying system tables can impact the overall performance of your
InfluxDB's stable API and are subject to change.
{{% /warn %}}
{{% code-placeholders "DATABASE_(TOKEN|NAME)" %}}
{{% code-placeholders "DATABASE_(TOKEN|NAME)|TABLE_NAME" %}}
{{< code-tabs-wrapper >}}
{{% code-tabs %}}
@ -435,7 +435,7 @@ influxctl query \
--enable-system-tables \
--token DATABASE_TOKEN \
--database DATABASE_NAME \
"SELECT * FROM system.tables"
"SELECT * FROM system.tables WHERE table_name = 'TABLE_NAME'"
```
{{% /influxdb/custom-timestamps %}}
{{% /code-tab-content %}}

View File

@ -1,7 +1,7 @@
---
title: InfluxDB v2 API client libraries
description: >
InfluxDB v2 client libraries use InfluxDB `/api/v2` endpoints and work with [InfluxDB 2.0 API compatibility endpoints](/influxdb/v1/tools/api/#influxdb-20-api-compatibility-endpoints).
InfluxDB v2 client libraries use InfluxDB `/api/v2` endpoints and work with [InfluxDB 2.x API compatibility endpoints](/influxdb/v1/tools/api/#influxdb-2x-api-compatibility-endpoints).
View the list of available client libraries.
weight: 101
menu:
@ -25,7 +25,7 @@ prepend:
## Client libraries for InfluxDB 2.x and 1.8+
InfluxDB client libraries are language-specific tools that integrate with InfluxDB APIs.
InfluxDB v2 client libraries use InfluxDB `/api/v2` endpoints and work with [InfluxDB 2.0 API compatibility endpoints](/influxdb/v1/tools/api/#influxdb-20-api-compatibility-endpoints).
InfluxDB v2 client libraries use InfluxDB `/api/v2` endpoints and work with [InfluxDB 2.x API compatibility endpoints](/influxdb/v1/tools/api/#influxdb-2x-api-compatibility-endpoints).
Functionality varies among client libraries.
InfluxDB client libraries are maintained by the InfluxDB community.

View File

@ -1,89 +0,0 @@
---
title: InfluxDB system tables
description: >
InfluxDB system measurements contain time series data used by and generated from the
InfluxDB internal monitoring system.
menu:
influxdb_cloud_dedicated:
name: System tables
parent: InfluxDB internals
weight: 103
influxdb/cloud-dedicated/tags: [tables, information schema]
related:
- /influxdb/cloud-dedicated/reference/sql/information-schema/
---
InfluxDB system measurements contain time series data used by and generated from the
InfluxDB internal monitoring system.
Each {{% product-name %}} namespace includes the following system measurements:
<!-- TOC -->
- [system.queries measurement](#systemqueries-measurement)
- [system.queries schema](#systemqueries-schema)
## system.queries measurement
The `system.queries` measurement stores log entries for queries executed for the provided namespace (database) on the node that is currently handling queries.
```python
from influxdb_client_3 import InfluxDBClient3
client = InfluxDBClient3(token = DATABASE_TOKEN,
host = HOSTNAME,
org = '',
database=DATABASE_NAME)
client.query('select * from home')
reader = client.query('''
SELECT *
FROM system.queries
WHERE issue_time >= now() - INTERVAL '1 day'
AND query_text LIKE '%select * from home%'
''',
language='sql',
headers=[(b"iox-debug", b"true")],
mode="reader")
print("# system.queries schema\n")
print(reader.schema)
```
<!--pytest-codeblocks:expected-output-->
`system.queries` has the following schema:
```python
# system.queries schema
issue_time: timestamp[ns] not null
query_type: string not null
query_text: string not null
completed_duration: duration[ns]
success: bool not null
trace_id: string
```
_When listing measurements (tables) available within a namespace, some clients and query tools may include the `queries` table in the list of namespace tables._
`system.queries` reflects a process-local, in-memory, namespace-scoped query log.
The query log isn't shared across instances within the same deployment.
While this table may be useful for debugging and monitoring queries, keep the following in mind:
- Records stored in `system.queries` are volatile.
- Records are lost on pod restarts.
- Queries for one namespace can evict records from another namespace.
- Data reflects the state of a specific pod answering queries for the namespace----the log view is scoped to the requesting namespace and queries aren't leaked across namespaces.
- A query for records in `system.queries` can return different results depending on the pod the request was routed to.
**Data retention:** System data can be transient and is deleted on pod restarts.
The log size per instance is limited and the log view is scoped to the requesting namespace.
### system.queries schema
- **system.queries** _(measurement)_
- **fields**:
- **issue_time**: timestamp when the query was issued
- **query_type**: type (syntax: `sql`, `flightsql`, or `influxql`) of the query
- **query_text**: query statement text
- **success**: execution status (boolean) of the query
- **completed_duration**: time (duration) that the query took to complete
- **trace_id**: trace ID for debugging and monitoring events

View File

@ -1,7 +1,7 @@
---
title: InfluxDB v2 API client libraries
description: >
InfluxDB v2 client libraries use InfluxDB `/api/v2` endpoints and work with [InfluxDB 2.0 API compatibility endpoints](/influxdb/v1/tools/api/#influxdb-20-api-compatibility-endpoints).
InfluxDB v2 client libraries use InfluxDB `/api/v2` endpoints and work with [InfluxDB 2.x API compatibility endpoints](/influxdb/v1/tools/api/#influxdb-2x-api-compatibility-endpoints).
View the list of available client libraries.
weight: 101
menu:
@ -25,11 +25,10 @@ prepend:
## Client libraries for InfluxDB 2.x and 1.8+
InfluxDB client libraries are language-specific tools that integrate with InfluxDB APIs.
InfluxDB v2 client libraries use InfluxDB `/api/v2` endpoints and work with [InfluxDB 2.0 API compatibility endpoints](/influxdb/v1/tools/api/#influxdb-20-api-compatibility-endpoints).
InfluxDB v2 client libraries use InfluxDB `/api/v2` endpoints and work with [InfluxDB 2.x API compatibility endpoints](/influxdb/v1/tools/api/#influxdb-2x-api-compatibility-endpoints).
Functionality varies among client libraries.
These client libraries are in active development and may not be feature-complete.
InfluxDB client libraries are maintained by the InfluxDB community.
For specifics about a client library, see the library's GitHub repository.
{{< children depth="999" type="list" >}}

View File

@ -7,7 +7,7 @@ description: >
menu:
influxdb_clustered:
parent: Administer InfluxDB Clustered
weight: 208
weight: 209
---
{{< product-name >}} generates a valid access token (known as the _admin token_)
@ -63,13 +63,13 @@ The only way to revoke the token is to do the following:
{{% code-placeholders "INFLUXDB_NAMESPACE|KEY_GEN_JOB|001" %}}
1. Delete the `rsa-keys` secret from your InfluxDB cluster's context and namespace:
1. Delete the `rsa-keys` and `admin-token` secrets from your InfluxDB cluster's context and namespace:
```sh
kubectl delete secrets/rsa-keys --namespace INFLUXDB_NAMESPACE
kubectl delete secret rsa-keys admin-token --namespace INFLUXDB_NAMESPACE
```
2. Rerun the `key-gen` job:
2. Rerun the `key-gen` and `create-admin-token` jobs:
1. List the jobs in your InfluxDB namespace to find the key-gen job pod:
@ -78,12 +78,11 @@ The only way to revoke the token is to do the following:
kubectl get jobs --namespace INFLUXDB_NAMESPACE
```
2. Run the key-gen job and increment the job number as needed:
2. Delete the `key-gen` and `create-admin-token` jobs so they will be re-created by kubit:
```sh
kubectl create job \
--from=job/KEY_GEN_JOB key-gen-001 \
--namespace INFLUXDB_NAMESPACE
kubectl delete job/KEY_GEN_JOB job/CREATE_ADMIN_TOKEN_JOB \
--namespace INFLUXDB_NAMESPACE
```
3. Restart the `token-management` service:

View File

@ -74,6 +74,7 @@ Custom partitioning has the following limitations:
- Database and table partitions can only be defined on create.
You cannot update the partition strategy of a database or table after it has
been created.
- A partition template must include a time part.
- You can partition by up to eight dimensions (seven tags and a time interval).
## How partitioning works
@ -89,11 +90,14 @@ _For more detailed information, see [Partition templates](/influxdb/clustered/ad
### Partition keys
A partition key uniquely identifies a partition. The structure of partition keys
is defined by a _[partition template](#partition-templates)_. Partition keys are
composed of up to eight parts or dimensions (tags, tag buckets, and time).
A partition key uniquely identifies a partition.
A _[partition template](#partition-templates)_ defines the partition key format.
Partition keys are
composed of up to 8 dimensions (1 time part and up to 7 tag or tag bucket parts).
Each part is delimited by the partition key separator (`|`).
The default format for partition keys is `%Y-%m-%d` (for example, `2024-01-01`).
{{< expand-wrapper >}}
{{% expand "View example partition templates and keys" %}}
@ -335,7 +339,7 @@ _For more information about the query lifecycle, see
##### Query example
Consider the following query that selects everything in the `production` table
where the `line` tag is `A` and the `station` tag is `1`:
where the `line` tag is `A` and the `station` tag is `cnc`:
```sql
SELECT *

View File

@ -15,20 +15,20 @@ A partition key uniquely identifies a partition and is used to name the partitio
Parquet file in the [Object store](/influxdb/clustered/reference/internals/storage-engine/#object-store).
A partition template consists of 1-8 _template parts_---dimensions to partition data by.
There are three types of template parts:
Three types of template parts exist:
- **tag**: An [InfluxDB tag](/influxdb/clustered/reference/glossary/#tag)
to partition by.
- **tag bucket**: An [InfluxDB tag](/influxdb/clustered/reference/glossary/#tag)
and number of "buckets" to group tag values into. Data is partitioned by the
tag bucket rather than each distinct tag value.
- **time**: A Rust strftime date and time string that specifies the time interval
- {{< req type="key" >}} **time**: A Rust strftime date and time string that specifies the time interval
to partition data by. The smallest unit of time included in the time part
template is the interval used to partition data.
{{% note %}}
A partition template can include up to 7 total tag and tag bucket parts
and only 1 time part.
A partition template must include 1 [time part](#time-part-templates)
and can include up to 7 total [tag](#tag-part-templates) and [tag bucket](#tag-bucket-part-templates) parts.
{{% /note %}}
<!-- TOC -->
@ -75,6 +75,12 @@ characters must be [percent encoded](https://developer.mozilla.org/en-US/docs/Gl
Tag part templates consist of a _tag key_ to partition by.
Generated partition keys include the unique _tag value_ specific to each partition.
A partition template may include a given tag key only once in template parts
that operate on tags (tag value and tag bucket)--for example:
If a template partitions on unique values of `tag_A`, then
you can't use `tag_A` as a tag bucket part.
## Tag bucket part templates
Tag bucket part templates consist of a _tag key_ to partition by and the
@ -102,6 +108,12 @@ Tag buckets should be used to partition by high cardinality tags or tags with an
unknown number of distinct values.
{{% /note %}}
A partition template may include a given tag key only once in template parts
that operate on tags (tag value and tag bucket)--for example:
If a template partitions on unique values of `tag_A`, then
you can't use `tag_A` as a tag bucket part.
## Time part templates
Time part templates use a limited subset of the

View File

@ -35,7 +35,7 @@ your {{< product-name omit=" Clustered" >}} cluster.
#### System tables are subject to change
System tables are not part of InfluxDB's stable API and may change with new releases.
The provided schema information and query examples are valid as of **August 22, 2024**.
The provided schema information and query examples are valid as of **September 24, 2024**.
If you detect a schema change or a non-functioning query example, please
[submit an issue](https://github.com/influxdata/docs-v2/issues/new/choose).
@ -108,27 +108,7 @@ with the name of the table you want to query information about.
---
{{% code-placeholders "TABLE_NAME" %}}
### View partition templates of all tables
```sql
SELECT * FROM system.tables
```
#### Example results
| table_name | partition_template |
| :--------- | :----------------------------------------------------------------------------------------- |
| weather | `{"parts":[{"timeFormat":"%Y-%m-%d"},{"bucket":{"tagName":"location","numBuckets":250}}]}` |
| home | `{"parts":[{"timeFormat":"%Y-%m-%d"},{"tagValue":"room"},{"tagValue":"sensor_id"}]}` |
| numbers | `{"parts":[{"timeFormat":"%Y"}]}` |
{{% note %}}
If a table doesn't include a partition template in the output of this command,
the table uses the default (1 day) partition strategy and doesn't partition
by tags or tag buckets.
{{% /note %}}
{{% code-placeholders "TABLE_NAME_(1|2|3)|TABLE_NAME" %}}
### View the partition template of a specific table
@ -142,6 +122,12 @@ SELECT * FROM system.tables WHERE table_name = 'TABLE_NAME'
| :--------- | :----------------------------------------------------------------------------------------- |
| weather | `{"parts":[{"timeFormat":"%Y-%m-%d"},{"bucket":{"tagName":"location","numBuckets":250}}]}` |
{{% note %}}
If a table doesn't include a partition template in the output of this command,
the table uses the default (1 day) partition strategy and doesn't partition
by tags or tag buckets.
{{% /note %}}
### View all partitions for a table
```sql
@ -166,6 +152,8 @@ SELECT
COUNT(*) AS partition_count
FROM
system.partitions
WHERE
table_name IN ('TABLE_NAME_1', 'TABLE_NAME_2', 'TABLE_NAME_3')
GROUP BY
table_name
```

View File

@ -0,0 +1,171 @@
---
title: Manage environment variables in your InfluxDB Cluster
description: >
Use environment variables to define settings for individual components in your
InfluxDB cluster.
menu:
influxdb_clustered:
parent: Administer InfluxDB Clustered
name: Manage environment variables
weight: 208
---
Use environment variables to define settings for individual components in your
InfluxDB cluster and adjust your cluster's running configuration.
Define environment variables for each component in your `AppInstance` resource.
InfluxDB Clustered components support various environment variables.
While many of these variables have default settings, you can customize them by
setting your own values.
{{% warn %}}
#### Overriding default settings may affect overall cluster performance
{{% product-name %}} components have complex interactions that can be affected
when overriding default configuration settings.
Changing these settings may impact overall cluster performance.
Before making configuration changes using environment variables, consider
consulting [InfluxData Support](https://support.influxdata.com/) to identify any
potential unintended consequences.
{{% /warn %}}
## AppInstance component schema
In your `AppInstance` resource, configure individual component settings in the
`spec.package.spec.components` property. This property supports the following
InfluxDB Clustered component keys:
- `ingester`
- `querier`
- `router`
- `compactor`
- `garbage-collector`
```yaml
apiVersion: kubecfg.dev/v1alpha1
kind: AppInstance
metadata:
name: influxdb
namespace: influxdb
spec:
package:
# ...
spec:
components:
ingester:
# Ingester settings ...
querier:
# Querier settings ...
router:
# Router settings ...
compactor:
# Compactor settings ...
garbage-collector:
# Garbage collector settings ...
```
_For more information about components in the InfluxDB v3 storage engine, see
the [InfluxDB v3 storage engine architecture](/influxdb/clustered/reference/internals/storage-engine/)._
## Set environment variables for a component
1. Under the specific component property, use the
`<component>.template.containers.iox.env` property to define environment
variables.
2. In the `env` property, structure each environment variable as a key-value pair.
For example, to configure environment variables for the Garbage collector:
```yaml
apiVersion: kubecfg.dev/v1alpha1
kind: AppInstance
metadata:
name: influxdb
namespace: influxdb
spec:
package:
# ...
spec:
components:
garbage-collector:
template:
containers:
iox:
env:
INFLUXDB_IOX_GC_OBJECTSTORE_CUTOFF: '6h'
INFLUXDB_IOX_GC_PARQUETFILE_CUTOFF: '6h'
```
3. Use `kubectl apply` to apply the configuration changes to your cluster and
add or update environment variables in each component.
<!-- pytest.mark.skip -->
```bash
kubectl apply \
--filename myinfluxdb.yml \
--namespace influxdb
```
{{% note %}}
#### Update environment variables instead of removing them
Most configuration settings that can be overridden by environment variables have
default values that are used if the environment variable is unset. Removing
environment variables from your `AppInstance` resource configuration will not
remove those environment variables entirely; instead, they will revert to their
default settings. To revert to the default settings, simply unset the
environment variable or update the value in your `AppInstance` resource to the
default value.
In the preceding example, the `INFLUXDB_IOX_GC_OBJECTSTORE_CUTOFF` environment
variable is set to `6h`. If you remove `INFLUXDB_IOX_GC_OBJECTSTORE_CUTOFF` from
the `env` property, the cutoff reverts to its default setting of `30d`.
{{% /note %}}
{{< expand-wrapper >}}
{{% expand "View example of environment variables in all components" %}}
```yaml
apiVersion: kubecfg.dev/v1alpha1
kind: AppInstance
metadata:
name: influxdb
namespace: influxdb
spec:
package:
# ...
spec:
components:
ingester:
template:
containers:
iox:
env:
INFLUXDB_IOX_WAL_ROTATION_PERIOD_SECONDS: '360'
querier:
template:
containers:
iox:
env:
INFLUXDB_IOX_EXEC_MEM_POOL_BYTES: '10737418240' # 10GiB
router:
template:
containers:
iox:
env:
INFLUXDB_IOX_MAX_HTTP_REQUESTS: '5000'
compactor:
template:
containers:
iox:
env:
INFLUXDB_IOX_EXEC_MEM_POOL_PERCENT: '80'
garbage-collector:
template:
containers:
iox:
env:
INFLUXDB_IOX_GC_OBJECTSTORE_CUTOFF: '6h'
INFLUXDB_IOX_GC_PARQUETFILE_CUTOFF: '6h'
```
{{% /expand %}}
{{< /expand-wrapper >}}

View File

@ -27,20 +27,6 @@ the InfluxDB Clustered software.
- [License expiry logs](#license-expiry-logs)
- [Query brownout](#query-brownout)
{{% note %}}
#### License enforcement is currently an opt-in feature
In currently available versions of InfluxDB Clustered, license enforcement is an
opt-in feature that allows InfluxData to introduce license enforcement to
customers, and allows customers to deactivate the feature if issues arise.
In the future, all releases of InfluxDB Clustered will require customers to
configure an active license before they can use the product.
To opt into license enforcement, include the `useLicensedBinaries` feature flag
in your `AppInstance` resource _([See the example below](#enable-feature-flag))_.
To deactivate license enforcement, remove the `useLicensedBinaries` feature flag.
{{% /note %}}
## Install your InfluxDB license
{{% note %}}
@ -64,22 +50,6 @@ install your license.
kubectl apply --filename license.yml --namespace influxdb
```
4. <span id="enable-feature-flag"></span>
Update your `AppInstance` resource to include the `useLicensedBinaries` feature flag.
Add the `useLicensedBinaries` entry to the `.spec.package.spec.featureFlags`
property--for example:
```yml
apiVersion: kubecfg.dev/v1alpha1
kind: AppInstance
# ...
spec:
package:
spec:
featureFlags:
- useLicensedBinaries
```
InfluxDB Clustered detects the `License` resource and extracts the credentials
into a secret required by InfluxDB Clustered Kubernetes pods.
Pods validate the license secret both at startup and periodically (roughly once
@ -115,7 +85,6 @@ license enforcement.
### A valid license is required
_When you include the `useLicensedBinaries` feature flag_,
Kubernetes pods running in your InfluxDB cluster must have a valid `License`
resource to run. Licenses are issued by InfluxData. If there is no `License`
resource installed in your cluster, one of two things may happen:

View File

@ -0,0 +1,640 @@
---
title: Query system data
description: >
Query system tables in your InfluxDB cluster to see data related
to queries, tables, partitions, and compaction in your cluster.
menu:
influxdb_clustered:
parent: Administer InfluxDB Clustered
name: Query system data
weight: 105
related:
- /influxdb/clustered/reference/cli/influxctl/query/
- /influxdb/clustered/reference/internals/system-tables/
---
{{< product-name >}} stores data related to queries, tables, partitions, and
compaction in _system tables_ within your cluster.
System tables contain time series data used by and generated from the
{{< product-name >}} internal monitoring system.
You can query the cluster system tables for information about your cluster.
- [Query system tables](#query-system-tables)
- [Optimize queries to reduce impact to your cluster](#optimize-queries-to-reduce-impact-to-your-cluster)
- [System tables](#system-tables)
- [Understanding system table data distribution](#understanding-system-table-data-distribution)
- [system.queries](#systemqueries)
- [system.tables](#systemtables)
- [system.partitions](#systempartitions)
- [system.compactor](#systemcompactor)
- [System query examples](#system-query-examples)
- [Query logs](#query-logs)
- [Partitions](#partitions)
- [Storage usage](#storage-usage)
- [Compaction](#compaction)
{{% warn %}}
#### May impact cluster performance
Querying InfluxDB v3 system tables may impact write and query
performance of your {{< product-name omit=" Clustered" >}} cluster.
Use filters to [optimize queries to reduce impact to your cluster](#optimize-queries-to-reduce-impact-to-your-cluster).
<!--------------- UPDATE THE DATE BELOW AS EXAMPLES ARE UPDATED --------------->
#### System tables are subject to change
System tables are not part of InfluxDB's stable API and may change with new releases.
The provided schema information and query examples are valid as of **September 18, 2024**.
If you detect a schema change or a non-functioning query example, please
[submit an issue](https://github.com/influxdata/docs-v2/issues/new/choose).
<!--------------- UPDATE THE DATE ABOVE AS EXAMPLES ARE UPDATED --------------->
{{% /warn %}}
## Query system tables
{{% note %}}
Querying system tables with `influxctl` requires **`influxctl` v2.8.0 or newer**.
{{% /note %}}
Use the [`influxctl query` command](/influxdb/clustered/reference/cli/influxctl/query/)
and SQL to query system tables.
Provide the following:
- **Enable system tables** with the `--enable-system-tables` command flag.
- **Database token**: A [database token](/influxdb/clustered/admin/tokens/#database-tokens)
with read permissions on the specified database. Uses the `token` setting from
the [`influxctl` connection profile](/influxdb/clustered/reference/cli/influxctl/#configure-connection-profiles)
or the `--token` command flag.
- **Database name**: The name of the database to query information about.
Uses the `database` setting from the
[`influxctl` connection profile](/influxdb/clustered/reference/cli/influxctl/#configure-connection-profiles)
or the `--database` command flag.
- **SQL query**: The SQL query to execute.
Pass the query in one of the following ways:
- a string on the command line
- a path to a file that contains the query
- a single dash (`-`) to read the query from stdin
{{% code-placeholders "DATABASE_(TOKEN|NAME)|SQL_QUERY" %}}
{{< code-tabs-wrapper >}}
{{% code-tabs %}}
[string](#)
[file](#)
[stdin](#)
{{% /code-tabs %}}
{{% code-tab-content %}}
```sh
influxctl query \
--enable-system-tables \
--database DATABASE_NAME \
--token DATABASE_TOKEN \
"SQL_QUERY"
```
{{% /code-tab-content %}}
{{% code-tab-content %}}
```sh
influxctl query \
--enable-system-tables \
--database DATABASE_NAME \
--token DATABASE_TOKEN \
/path/to/query.sql
```
{{% /code-tab-content %}}
{{% code-tab-content %}}
```sh
cat ./query.sql | influxctl query \
--enable-system-tables \
--database DATABASE_NAME \
--token DATABASE_TOKEN \
-
```
{{% /code-tab-content %}}
{{< /code-tabs-wrapper >}}
{{% /code-placeholders %}}
Replace the following:
- {{% code-placeholder-key %}}`DATABASE_TOKEN`{{% /code-placeholder-key %}}:
A database token with read access to the specified database
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
The name of the database to query information about.
- {{% code-placeholder-key %}}`SQL_QUERY`{{% /code-placeholder-key %}}:
The SQL query to execute. For examples, see
[System query examples](#system-query-examples).
When prompted, enter `y` to acknowledge the potential impact querying system
tables may have on your cluster.
### Optimize queries to reduce impact to your cluster
Querying InfluxDB v3 system tables may impact the performance of your
{{< product-name omit=" Clustered" >}} cluster.
As you write data to a cluster, the number of partitions and Parquet files
can increase to a point that impacts system table performance.
Queries that took milliseconds with fewer files and partitions might take 10
seconds or longer as files and partitions increase.
Use the following filters to optimize your system table queries and reduce the impact on your
cluster's performance.
In your queries, replace the following:
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the table to retrieve partitions for
- {{% code-placeholder-key %}}`PARTITION_ID`{{% /code-placeholder-key %}}: a [partition ID](#retrieve-a-partition-id) (int64)
- {{% code-placeholder-key %}}`PARTITION_KEY`{{% /code-placeholder-key %}}: a [partition key](/influxdb/clustered/admin/custom-partitions/#partition-keys)
derived from the table's partition template.
The default format is `%Y-%m-%d` (for example, `2024-01-01`).
##### Filter by table name
When querying the `system.tables`, `system.partitions`, or `system.compactor` tables, use the
`WHERE` clause to filter by `table_name`.
{{% code-placeholders "TABLE_NAME" %}}
```sql
SELECT * FROM system.partitions WHERE table_name = 'TABLE_NAME'
```
{{% /code-placeholders %}}
##### Filter by partition key
When querying the `system.partitions` or `system.compactor` tables, use the `WHERE` clause to
filter by `partition_key`.
{{% code-placeholders "PARTITION_KEY" %}}
```sql
SELECT * FROM system.partitions WHERE partition_key = 'PARTITION_KEY'
```
{{% /code-placeholders %}}
To further improve performance, use `AND` to pair `partition_key` with `table_name`--for example:
{{% code-placeholders "TABLE_NAME|PARTITION_KEY" %}}
```sql
SELECT *
FROM system.partitions
WHERE
table_name = 'TABLE_NAME'
AND partition_key = 'PARTITION_KEY';
```
{{% /code-placeholders %}}
{{% code-placeholders "TABLE_NAME|PARTITION_KEY" %}}
```sql
SELECT *
FROM system.compactor
WHERE
table_name = 'TABLE_NAME'
AND partition_key = 'PARTITION_KEY';
```
{{% /code-placeholders %}}
##### Filter by partition ID
When querying the `system.partitions` or `system.compactor` tables, use the `WHERE` clause to
filter by `partition_id`.
{{% code-placeholders "PARTITION_ID" %}}
```sql
SELECT * FROM system.partitions WHERE partition_id = PARTITION_ID
```
{{% /code-placeholders %}}
For the most optimized approach, use `AND` to pair `partition_id` with `table_name`--for example:
{{% code-placeholders "TABLE_NAME|PARTITION_ID" %}}
```sql
SELECT *
FROM system.partitions
WHERE
table_name = 'TABLE_NAME'
AND partition_id = PARTITION_ID;
```
{{% /code-placeholders %}}
Although you don't need to pair `partition_id` with `table_name` (a partition ID is unique within a cluster),
pairing them is the most optimized approach, _especially when you have many tables in a database_.
###### Retrieve a partition ID
To retrieve a partition ID, query `system.partitions` for a `table_name` and `partition_key` pair--for example:
{{% code-placeholders "TABLE_NAME|PARTITION_KEY" %}}
```sql
SELECT
table_name,
partition_key,
partition_id
FROM system.partitions
WHERE
table_name = 'TABLE_NAME'
AND partition_key = 'PARTITION_KEY';
```
{{% /code-placeholders %}}
The result contains the `partition_id`:
| table_name | partition_key | partition_id |
| :--------- | :---------------- | -----------: |
| weather | 43 \| 2020-05-27 | 1362 |
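You can then use the returned ID in a `partition_id` filter--for example, a minimal sketch that reuses the ID from the result above:
```sql
SELECT * FROM system.partitions WHERE partition_id = 1362
```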
##### Combine filters for performance improvement
Use the `AND`, `OR`, or `IN` keywords to combine filters in your query.
- **Use `OR` or `IN` conditions when filtering for different values in the same column**--for example:
```sql
WHERE partition_id = 1 OR partition_id = 2
```
Use `IN` to make multiple `OR` conditions more readable--for example:
```sql
WHERE table_name IN ('foo', 'bar', 'baz')
```
- **Avoid mixing different columns in `OR` conditions**, as this won't improve performance--for example:
```sql
WHERE table_name = 'foo' OR partition_id = 2 -- This will not improve performance
```
## System tables
{{% warn %}}
_System tables are [subject to change](#system-tables-are-subject-to-change)._
{{% /warn %}}
### Understanding system table data distribution
Data in `system.tables`, `system.partitions`, and `system.compactor` includes
data for all [InfluxDB Queriers](/influxdb/clustered/reference/internals/storage-engine/#querier) in your cluster.
The data comes from the catalog, and because all the queriers share one catalog,
the results from these three tables derive from the same source data,
regardless of which querier you connect to.
However, the `system.queries` table is different--its data is local to each querier.
`system.queries` contains a non-persisted log of queries run against the querier
that your query is routed to.
The query log isn't shared across queriers in your cluster, and log entries are
scoped to the specified database.
- [system.queries](#systemqueries)
- [system.tables](#systemtables)
- [system.partitions](#systempartitions)
- [system.compactor](#systemcompactor)
### system.queries
The `system.queries` table stores log entries for queries executed for the provided namespace (database) on the node that is _currently handling queries_.
`system.queries` reflects a process-local, in-memory, namespace-scoped query log.
While this table may be useful for debugging and monitoring queries, keep the following in mind:
- Records stored in `system.queries` are transient and volatile
- InfluxDB deletes `system.queries` records during pod restarts.
- Queries for one namespace can evict records from another namespace.
- Data reflects the state of a specific pod answering queries for the namespace.
- Data isn't shared across queriers in your cluster.
- A query for records in `system.queries` can return different results
depending on the pod the request was routed to.
{{< expand-wrapper >}}
{{% expand "View `system.queries` schema" %}}
The `system.queries` table contains the following columns:
- id
- phase
- **issue_time**: timestamp when the query was issued
- **query_type**: the type of query (`sql`, `flightsql`, or `influxql`)
- **query_text**: query statement text
- partitions
- parquet_files
- plan_duration
- permit_duration
- execute_duration
- end2end_duration
- compute_duration
- max_memory
- **success**: execution status (boolean) of the query
- running
- cancelled
- **trace_id**: trace ID for debugging and monitoring events
{{% /expand %}}
{{< /expand-wrapper >}}
{{% note %}}
_When listing measurements (tables) available within a namespace,
some clients and query tools may include the `queries` table in the list of
namespace tables._
{{% /note %}}
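For example, to list recent queries logged on the querier that handled your request, you can select a few of the columns above--a minimal sketch:
```sql
SELECT issue_time, query_type, query_text, success
FROM system.queries
```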
### system.tables
The `system.tables` table contains information about tables in the specified database.
{{< expand-wrapper >}}
{{% expand "View `system.tables` schema" %}}
The `system.tables` table contains the following columns:
- table_name
- partition_template
{{% /expand %}}
{{< /expand-wrapper >}}
### system.partitions
The `system.partitions` table contains information about partitions associated
with the specified database.
{{< expand-wrapper >}}
{{% expand "View `system.partitions` schema" %}}
The `system.partitions` table contains the following columns:
- partition_id
- table_name
- partition_key
- last_new_file_created_at
- num_files
- total_size_mb
{{% /expand %}}
{{< /expand-wrapper >}}
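For example, to see which partitions of a table hold the most data, you might query the size and file-count columns above--a sketch that assumes you replace `TABLE_NAME` with your table name:
{{% code-placeholders "TABLE_NAME" %}}
```sql
SELECT partition_key, num_files, total_size_mb
FROM system.partitions
WHERE table_name = 'TABLE_NAME'
ORDER BY total_size_mb DESC
```
{{% /code-placeholders %}}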
### system.compactor
The `system.compactor` table contains information about compacted partition Parquet
files associated with the specified database.
{{< expand-wrapper >}}
{{% expand "View `system.compactor` schema" %}}
The `system.compactor` table contains the following columns:
- partition_id
- table_name
- partition_key
- total_l0_files
- total_l1_files
- total_l2_files
- total_l0_bytes
- total_l1_bytes
- total_l2_bytes
- skipped_reason
{{% /expand %}}
{{< /expand-wrapper >}}
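For example, to view per-partition compaction file counts (and any skip reason) for a single table, you might select the columns above--a sketch that assumes you replace `TABLE_NAME` with your table name:
{{% code-placeholders "TABLE_NAME" %}}
```sql
SELECT partition_key, total_l0_files, total_l1_files, total_l2_files, skipped_reason
FROM system.compactor
WHERE table_name = 'TABLE_NAME'
```
{{% /code-placeholders %}}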
## System query examples
{{% warn %}}
#### May impact cluster performance
Querying InfluxDB v3 system tables may impact write and query
performance of your {{< product-name omit=" Clustered" >}} cluster.
The examples in this section include `WHERE` filters to [optimize queries and reduce impact to your cluster](#optimize-queries-to-reduce-impact-to-your-cluster).
{{% /warn %}}
- [Query logs](#query-logs)
- [View all stored query logs](#view-all-stored-query-logs)
- [View query logs for queries with end-to-end durations above a threshold](#view-query-logs-for-queries-with-end-to-end-durations-above-a-threshold)
- [View query logs for a specific query within a time interval](#view-query-logs-for-a-specific-query-within-a-time-interval)
- [Partitions](#partitions)
- [View the partition template of a specific table](#view-the-partition-template-of-a-specific-table)
- [View all partitions for a table](#view-all-partitions-for-a-table)
- [View the number of partitions per table](#view-the-number-of-partitions-per-table)
- [View the number of partitions for a specific table](#view-the-number-of-partitions-for-a-specific-table)
- [Storage usage](#storage-usage)
- [View the size in megabytes of a specific table](#view-the-size-in-megabytes-of-a-specific-table)
- [View the size in megabytes per table](#view-the-size-in-megabytes-per-table)
- [View the total size in bytes of compacted partitions per table](#view-the-total-size-in-bytes-of-compacted-partitions-per-table)
- [View the total size in bytes of compacted partitions for a specific table](#view-the-total-size-in-bytes-of-compacted-partitions-for-a-specific-table)
- [Compaction](#compaction)
- [View compaction totals for each table](#view-compaction-totals-for-each-table)
- [View compaction totals for a specific table](#view-compaction-totals-for-a-specific-table)
In the examples below, replace {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}
with the name of the table you want to query information about.
---
{{% code-placeholders "TABLE_NAME" %}}
### Query logs
#### View all stored query logs
```sql
SELECT * FROM system.queries
```
#### View query logs for queries with end-to-end durations above a threshold
The following returns query logs for queries with an end-to-end duration greater
than 50 milliseconds.
```sql
SELECT *
FROM
system.queries
WHERE
end2end_duration::BIGINT > (50 * 1000000)
```
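To surface the slowest recent queries instead of filtering on a fixed threshold, you can sort by the same duration column--a sketch using the cast shown above:
```sql
SELECT issue_time, query_text, end2end_duration
FROM system.queries
ORDER BY end2end_duration::BIGINT DESC
LIMIT 10
```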
#### View query logs for a specific query within a time interval
{{< code-tabs >}}
{{% tabs %}}
[SQL](#)
[Python](#)
{{% /tabs %}}
{{% code-tab-content %}}
<!-----------------------------------BEGIN SQL------------------------------>
```sql
SELECT *
FROM system.queries
WHERE issue_time >= now() - INTERVAL '1 day'
AND query_text LIKE '%select * from home%'
```
<!-----------------------------------END SQL------------------------------>
{{% /code-tab-content %}}
{{% code-tab-content %}}
<!-----------------------------------BEGIN PYTHON------------------------------>
```python
from influxdb_client_3 import InfluxDBClient3

# Connect to your database.
client = InfluxDBClient3(token=DATABASE_TOKEN,
                         host=HOSTNAME,
                         org='',
                         database=DATABASE_NAME)

# Run a query so that it appears in the system.queries log.
client.query('select * from home')

# Query system.queries for the logged query.
# Pass the iox-debug header to enable querying system tables.
reader = client.query('''
SELECT *
FROM system.queries
WHERE issue_time >= now() - INTERVAL '1 day'
  AND query_text LIKE '%select * from home%'
''',
                      language='sql',
                      headers=[(b"iox-debug", b"true")],
                      mode="reader")
```
<!-----------------------------------END PYTHON------------------------------>
{{% /code-tab-content %}}
{{< /code-tabs >}}
---
### Partitions
#### View the partition template of a specific table
```sql
SELECT *
FROM
system.tables
WHERE
table_name = 'TABLE_NAME'
```
#### View all partitions for a table
```sql
SELECT *
FROM
system.partitions
WHERE
table_name = 'TABLE_NAME'
```
#### View the number of partitions per table
```sql
SELECT
table_name,
COUNT(*) AS partition_count
FROM
system.partitions
WHERE
table_name IN ('foo', 'bar', 'baz')
GROUP BY
table_name
```
#### View the number of partitions for a specific table
```sql
SELECT
COUNT(*) AS partition_count
FROM
system.partitions
WHERE
table_name = 'TABLE_NAME'
```
---
### Storage usage
#### View the size in megabytes of a specific table
```sql
SELECT
SUM(total_size_mb) AS total_size_mb
FROM
system.partitions
WHERE
table_name = 'TABLE_NAME'
```
#### View the size in megabytes per table
```sql
SELECT
table_name,
SUM(total_size_mb) AS total_size_mb
FROM
system.partitions
WHERE
table_name IN ('foo', 'bar', 'baz')
GROUP BY
table_name
```
#### View the total size in bytes of compacted partitions per table
```sql
SELECT
table_name,
SUM(total_l0_bytes) + SUM(total_l1_bytes) + SUM(total_l2_bytes) AS total_bytes
FROM
system.compactor
WHERE
table_name IN ('foo', 'bar', 'baz')
GROUP BY
table_name
```
#### View the total size in bytes of compacted partitions for a specific table
```sql
SELECT
SUM(total_l0_bytes) + SUM(total_l1_bytes) + SUM(total_l2_bytes) AS total_bytes
FROM
system.compactor
WHERE
table_name = 'TABLE_NAME'
```
---
### Compaction
#### View compaction totals for each table
```sql
SELECT
table_name,
SUM(total_l0_files) AS total_l0_files,
SUM(total_l1_files) AS total_l1_files,
SUM(total_l2_files) AS total_l2_files,
SUM(total_l0_bytes) AS total_l0_bytes,
SUM(total_l1_bytes) AS total_l1_bytes,
SUM(total_l2_bytes) AS total_l2_bytes
FROM
system.compactor
WHERE
table_name IN ('foo', 'bar', 'baz')
GROUP BY
table_name
```
#### View compaction totals for a specific table
```sql
SELECT
SUM(total_l0_files) AS total_l0_files,
SUM(total_l1_files) AS total_l1_files,
SUM(total_l2_files) AS total_l2_files,
SUM(total_l0_bytes) AS total_l0_bytes,
SUM(total_l1_bytes) AS total_l1_bytes,
SUM(total_l2_bytes) AS total_l2_bytes
FROM
system.compactor
WHERE
table_name = 'TABLE_NAME'
```
{{% /code-placeholders %}}

View File

@ -91,3 +91,6 @@ less efficient.
Learn how to [analyze a query plan](/influxdb/clustered/query-data/troubleshoot-and-optimize/analyze-query-plan/)
to troubleshoot queries and find performance bottlenecks.
If you need help troubleshooting, follow the guidelines to
[report query performance issues](/influxdb/clustered/query-data/troubleshoot-and-optimize/report-query-performance-issues/).

View File

@ -7,6 +7,8 @@ menu:
name: Report query performance issues
parent: Troubleshoot and optimize queries
weight: 402
related:
- /influxdb/clustered/admin/query-system-data/
---
These guidelines are intended to facilitate collaboration between InfluxData
@ -23,13 +25,17 @@ queries](/influxdb/clustered/query-data//troubleshoot-and-optimize).
6. [Reduce query noise](#reduce-query-noise)
7. [Establish baseline single-query performance](#establish-baseline-single-query-performance)
8. [Run queries at multiple load scales](#run-queries-at-multiple-load-scales)
9. [Gather debug info](#gather-debug-info)
9. [Gather debug information](#gather-debug-information)
1. [Kubernetes-specific information](#kubernetes-specific-information)
2. [Clustered-specific information](#clustered-specific-information)
3. [Query analysis](#query-analysis)
1. [EXPLAIN](#explain)
2. [EXPLAIN VERBOSE](#explain-verbose)
3. [EXPLAIN ANALYZE](#explain-analyze)
10. [Gather system information](#gather-system-information)
- [Collect table information](#collect-table-information)
- [Collect compaction information for the table](#collect-compaction-information-for-the-table)
- [Collect partition information for multiple tables](#collect-partition-information-for-multiple-tables)
{{% note %}}
Please note that this document may change from one support engagement to the
@ -141,14 +147,23 @@ As an example, consider the following test plan outline:
4. Run 10 concurrent instances of Query A and allow the cluster to recover for 1 minute.
5. Run 20 concurrent instances of Query A and allow the cluster to recover for 1 minute.
6. Run 40 concurrent instances of Query A and allow the cluster to recover for 1 minute.
7. Provide InfluxData the debug information [described below](#gather-debug-info).
7. Provide InfluxData the debug information [described below](#gather-debug-information).
{{% note %}}
This is just an example. You don't have to go beyond the scale where queries get slower
but you may also need to go further than what's outlined here.
{{% /note %}}
### Gather debug info
<!-- Don't mention dashboards until they're working in a future Clustered release --
### Capture dashboard screens
If you have set up alerts and dashboards for monitoring your cluster, capture
screenshots of dashboard events for Queriers, Compactors, and Ingesters.
-->
### Gather debug information
The following debug information should be collected shortly _after_ a
problematic query has been tried against your InfluxDB cluster.
@ -165,7 +180,7 @@ kubectl cluster-info dump --namespace influxdb --output-directory "${DATETIME}-c
tar -czf "${DATETIME}-cluster-info.tar.gz" "${DATETIME}-cluster-info/"
```
#### Clustered-Specific Info
#### Clustered-specific information
**Outputs:**
@ -310,3 +325,96 @@ curl --get "https://{{< influxdb/host >}}/query" \
{{< /code-tabs-wrapper >}}
{{% /code-placeholders %}}
### Gather system information
{{% warn %}}
#### May impact cluster performance
Querying InfluxDB v3 system tables may impact write and query
performance of your {{< product-name omit=" Clustered" >}} cluster.
Use filters to [optimize queries to reduce impact to your cluster](/influxdb/clustered/admin/query-system-data/#optimize-queries-to-reduce-impact-to-your-cluster).
<!--------------- UPDATE THE DATE BELOW AS EXAMPLES ARE UPDATED --------------->
#### System tables are subject to change
System tables are not part of InfluxDB's stable API and may change with new releases.
The provided schema information and query examples are valid as of **September 20, 2024**.
If you detect a schema change or a non-functioning query example, please
[submit an issue](https://github.com/influxdata/docs-v2/issues/new/choose).
<!--------------- UPDATE THE DATE ABOVE AS EXAMPLES ARE UPDATED --------------->
{{% /warn %}}
If queries are slow for a specific table, run the following system queries to collect information for troubleshooting.
- [Collect table information](#collect-table-information)
- [Collect compaction information for the table](#collect-compaction-information-for-the-table)
- [Collect partition information for multiple tables](#collect-partition-information-for-multiple-tables)
To [optimize system queries](/influxdb/clustered/admin/query-system-data/#optimize-queries-to-reduce-impact-to-your-cluster), use `table_name`, `partition_key`, and
`partition_id` filters.
In your queries, replace the following:
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}: the table to retrieve partitions for
- {{% code-placeholder-key %}}`PARTITION_ID`{{% /code-placeholder-key %}}: a [partition ID](/influxdb/clustered/admin/query-system-data/#retrieve-a-partition-id) (int64)
- {{% code-placeholder-key %}}`PARTITION_KEY`{{% /code-placeholder-key %}}: a [partition key](/influxdb/clustered/admin/custom-partitions/#partition-keys)
derived from the table's partition template.
The default format is `%Y-%m-%d` (for example, `2024-01-01`).
#### Collect table information
{{% code-placeholders "TABLE_NAME" %}}
```sql
SELECT *
FROM system.tables
WHERE table_name = 'TABLE_NAME';
```
{{% /code-placeholders %}}
#### Collect compaction information for the table
Query the `system.compactor` table to collect compaction information--for example, run one of the following
queries:
{{% code-placeholders "TABLE_NAME|PARTITION_KEY" %}}
```sql
SELECT *
FROM system.compactor
WHERE
table_name = 'TABLE_NAME'
AND partition_key = 'PARTITION_KEY';
```
{{% /code-placeholders %}}
{{% code-placeholders "TABLE_NAME|PARTITION_ID" %}}
```sql
SELECT *
FROM system.compactor
WHERE
table_name = 'TABLE_NAME'
  AND partition_id = PARTITION_ID;
```
{{% /code-placeholders %}}
#### Collect partition information for multiple tables
If the same queries are slow on more than one table, also run the following query to collect the size and
number of partitions for all tables:
{{% code-placeholders "TABLE_NAME" %}}
```sql
SELECT table_name,
COUNT(*) as partition_count,
MAX(last_new_file_created_at) as last_new_file_created_at,
SUM(total_size_mb) as total_size_mb
FROM system.partitions
WHERE table_name IN ('foo', 'bar', 'baz')
GROUP BY table_name;
```
{{% /code-placeholders %}}

View File

@ -418,7 +418,7 @@ Querying system tables can impact the overall performance of your
InfluxDB's stable API and are subject to change.
{{% /warn %}}
{{% code-placeholders "DATABASE_(TOKEN|NAME)" %}}
{{% code-placeholders "DATABASE_(TOKEN|NAME)|TABLE_NAME" %}}
{{< code-tabs-wrapper >}}
{{% code-tabs %}}
@ -433,7 +433,7 @@ influxctl query \
--enable-system-tables \
--token DATABASE_TOKEN \
--database DATABASE_NAME \
"SELECT * FROM system.tables"
"SELECT * FROM system.tables WHERE table_name = 'TABLE_NAME'"
```
{{% /influxdb/custom-timestamps %}}
{{% /code-tab-content %}}

View File

@ -1,7 +1,7 @@
---
title: InfluxDB v2 API client libraries
description: >
InfluxDB v2 client libraries use InfluxDB `/api/v2` endpoints and work with [InfluxDB 2.0 API compatibility endpoints](/influxdb/v1/tools/api/#influxdb-20-api-compatibility-endpoints).
InfluxDB v2 client libraries use InfluxDB `/api/v2` endpoints and work with [InfluxDB 2.x API compatibility endpoints](/influxdb/v1/tools/api/#influxdb-2x-api-compatibility-endpoints).
View the list of available client libraries.
weight: 101
menu:
@ -25,7 +25,7 @@ prepend:
## Client libraries for InfluxDB 2.x and 1.8+
InfluxDB client libraries are language-specific tools that integrate with InfluxDB APIs.
InfluxDB v2 client libraries use InfluxDB `/api/v2` endpoints and work with [InfluxDB 2.0 API compatibility endpoints](/influxdb/v1/tools/api/#influxdb-20-api-compatibility-endpoints).
InfluxDB v2 client libraries use InfluxDB `/api/v2` endpoints and work with [InfluxDB 2.x API compatibility endpoints](/influxdb/v1/tools/api/#influxdb-2x-api-compatibility-endpoints).
Functionality varies among client libraries.
InfluxDB client libraries are maintained by the InfluxDB community.

View File

@ -1,63 +0,0 @@
---
title: InfluxDB system tables
description: >
InfluxDB system measurements contain time series data used by and generated from the
InfluxDB internal monitoring system.
menu:
influxdb_clustered:
name: System tables
parent: InfluxDB internals
weight: 103
influxdb/clustered/tags: [tables, information schema]
related:
- /influxdb/clustered/reference/sql/information-schema/
---
{{% warn %}}
Queries of InfluxDB system tables may affect production performance while
system tables are accessed.
System tables are not currently part of the stable API and the schema may change
in subsequent releases.
{{% /warn %}}
InfluxDB system measurements contain time series data used by and generated from the
InfluxDB internal monitoring system.
Each InfluxDB Clustered namespace includes the following system measurements:
- [queries](#_queries-system-measurement)
## queries system measurement
The `system.queries` measurement stores log entries for queries executed for the provided namespace (database) on the node that is currently handling queries.
The following example shows how to list queries recorded in the `system.queries` measurement:
```sql
SELECT issue_time, query_type, query_text, success FROM system.queries;
```
_When listing measurements (tables) available within a namespace, some clients and query tools may include the `queries` table in the list of namespace tables._
`system.queries` reflects a process-local, in-memory, namespace-scoped query log.
While this table may be useful for debugging and monitoring queries, keep the following in mind:
- Records stored in `system.queries` are volatile.
- Records are lost on pod restarts.
- Queries for one namespace can evict records from another namespace.
- Data reflects the state of a specific pod answering queries for the namespace.
- A query for records in `system.queries` can return different results depending on the pod the request was routed to.
**Data retention:** System data can be transient and is deleted on pod restarts.
### queries measurement schema
- **system.queries** _(measurement)_
- **fields**:
- **issue_time**: timestamp when the query was issued
- **query_type**: type (syntax: `sql`, `flightsql`, or `influxql`) of the query
- **query_text**: query statement text
- **success**: execution status (boolean) of the query
- **completed_duration**: time (duration) that the query took to complete
- **trace_id**: trace ID for debugging and monitoring events

View File

@ -26,6 +26,129 @@ identified below with the <span class="cf-icon Shield pink"></span> icon.
---
## 20240925-1257864 {date="2024-09-25" .checkpoint}
### Quickstart
```yaml
spec:
package:
image: us-docker.pkg.dev/influxdb2-artifacts/clustered/influxdb:202409XX-XXXXXXX
```
### Highlights
#### Default to partial write semantics
In InfluxDB Clustered 20240925-1257864+, "partial writes" are enabled by default.
With partial writes enabled, InfluxDB accepts write requests that contain invalid or
malformed lines of line protocol, writes the valid lines, and rejects the
invalid lines. Previously, if any line protocol in a batch was invalid, the
entire batch was rejected and no data was written.
To disable partial writes and revert back to the previous behavior, set the
`INFLUXDB_IOX_PARTIAL_WRITES_ENABLED` environment variable on your cluster's
Ingester to `false`. Define this environment variable in the
`spec.package.spec.components.ingester.template.containers.iox.env` property in
your `AppInstance` resource.
{{< expand-wrapper >}}
{{% expand "View example of disabling partial writes in your `AppInstance` resource" %}}
```yaml
apiVersion: kubecfg.dev/v1alpha1
kind: AppInstance
metadata:
name: influxdb
namespace: influxdb
spec:
package:
spec:
components:
ingester:
template:
containers:
iox:
env:
INFLUXDB_IOX_PARTIAL_WRITES_ENABLED: false
```
{{% /expand %}}
{{< /expand-wrapper >}}
For more information about defining variables in your InfluxDB cluster, see
[Manage environment variables in your InfluxDB Cluster](/influxdb/clustered/admin/env-vars/).
##### Write API behaviors
When you submit a write request that includes invalid or malformed line protocol,
the InfluxDB write API returns a `400` response code and does the following:
- With partial writes _enabled_:
- Writes all valid points and rejects all invalid points.
- Includes details about the [rejected points](/influxdb/clustered/write-data/troubleshoot/#troubleshoot-rejected-points)
(up to 100 points) in the response body.
- With partial writes _disabled_:
- Rejects all points in the batch.
- Includes an error message and the first malformed line of line protocol in
the response body.
#### Deploy and use the Catalog service by default
The Catalog service is a new IOx component that centralizes access to the
InfluxDB Catalog among Ingesters, Queriers, Compactors, and Garbage Collectors.
This is expected to improve overall Catalog query performance, with a corresponding
drop in ninety-ninth percentile (p99) latencies.
### Upgrade notes
#### License now required
A valid license token is now required to start up your InfluxDB Cluster.
To avoid possible complications, ensure you have a valid license token. If you
do not, contact your InfluxData sales representative to get a license token
**before upgrading to this release**.
#### Removed prometheusOperator feature flag
The `prometheusOperator` feature flag has been removed.
**If you currently have this feature flag enabled in your `AppInstance` resource,
remove it before upgrading to this release.**
This flag was deprecated in a previous release, but from this release forward,
enabling this feature flag may cause errors.
The installation of the Prometheus operator should be handled externally.
### Changes
#### Deployment
- Introduces the `nodeAffinity` and CPU/Memory requests settings for "granite"
  components. Previously, these settings were only available for core IOx
  components.
- Prior to this release, many of the IOx dashboards deployed with the `grafana`
feature flag were showing "no data." This has been fixed and now all
dashboards should display actual data.
#### Database Engine
- Adjusted compactor concurrency scaling heuristic to improve performance as
memory and CPU scale.
- Adjusted default `INFLUXDB_IOX_COMPACTION_PARTITION_MINUTE_THRESHOLD` from
  `20m` to `100m` to help the compactor more quickly rediscover cool partitions.
#### Configuration
- Introduces the `podAntiAffinity` setting for InfluxDB Clustered components.
Previously, the scheduling of pods was influenced by the Kubernetes
scheduler's default behavior. For further details, see the
[Kubernetes pod affinity documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#types-of-inter-pod-affinity-and-anti-affinity).
---
## 20240819-1176644 {date="2024-08-19" .checkpoint}
### Quickstart
@ -463,7 +586,7 @@ mounted into your existing Grafana instance.
An authentication component, previously known as `authz`, has been consolidated
into the `token-management` service.
There is now a temporary `Job` in place, `delete-authz-schema`, that
Now there is a temporary `Job` in place, `delete-authz-schema`, that
automatically removes the `authz` schema from the configured PostgreSQL database.
### Changes
@ -704,7 +827,7 @@ the `create-admin-token` job.
#### Deployment
- Increase HTTP write request limit from 10MB to 50MB.
- Increase HTTP write request limit from 10 MB to 50 MB.
- Added support for [Telegraf Operator](https://github.com/influxdata/telegraf-operator).
We have added the `telegraf.influxdata.com/port` annotation to all the pods.
No configuration is required. We don't yet provide a way to specify the

View File

@ -5,7 +5,8 @@ weight: 106
description: >
Troubleshoot issues writing data.
Find response codes for failed writes.
Discover how writes fail, from exceeding rate or payload limits, to syntax errors and schema conflicts.
Discover how writes fail, from exceeding rate or payload limits, to syntax
errors and schema conflicts.
menu:
influxdb_clustered:
name: Troubleshoot issues
@ -17,7 +18,8 @@ related:
- /influxdb/clustered/reference/internals/durability/
---
Learn how to avoid unexpected results and recover from errors when writing to {{% product-name %}}.
Learn how to avoid unexpected results and recover from errors when writing to
{{% product-name %}}.
- [Handle write responses](#handle-write-responses)
- [Review HTTP status codes](#review-http-status-codes)
@ -26,12 +28,26 @@ Learn how to avoid unexpected results and recover from errors when writing to {{
## Handle write responses
In {{% product-name %}}, writes are synchronous.
After InfluxDB validates the request and ingests the data, it sends a _success_ response (HTTP `204` status code) as an acknowledgement that the data is written and queryable.
To ensure that InfluxDB handles writes in the order you request them, wait for the acknowledgement before you send the next request.
{{% product-name %}} does the following when you send a write request:
If InfluxDB successfully writes all the request data to the database, it returns _success_ (HTTP `204` status code).
The first rejected point in a batch causes InfluxDB to reject the entire batch and respond with an [HTTP error status](#review-http-status-codes).
1. Validates the request.
2. If successful, attempts to ingest data from the request body; otherwise,
responds with an [error status](#review-http-status-codes).
3. Ingests or rejects data in the batch and returns one of the following HTTP
status codes:
- `204 No Content`: All data in the batch is ingested.
- `400 Bad Request`: Some or all of the data has been rejected.
Data that has not been rejected is ingested and queryable.
The response body contains error details about
[rejected points](#troubleshoot-rejected-points), up to 100 points.
Writes are synchronous--the response status indicates the final status of the
write and all ingested data is queryable.
To ensure that InfluxDB handles writes in the order you request them,
wait for the response before you send the next request.
### Review HTTP status codes
@ -42,7 +58,7 @@ Write requests return the following status codes:
| HTTP response code | Message | Description |
| :-------------------------------| :--------------------------------------------------------------- | :------------- |
| `204 "Success"` | | If InfluxDB ingested the data |
| `400 "Bad request"` | `message` contains the first malformed line | If data is malformed |
| `400 "Bad request"` | error details about rejected points, up to 100 points: `line` contains the first rejected line, `message` describes rejections | If some or all request data isn't allowed (for example, if it is malformed or falls outside of the bucket's retention period)--the response body indicates whether a partial write has occurred or if all data has been rejected |
| `401 "Unauthorized"` | | If the `Authorization` header is missing or malformed or if the [token](/influxdb/clustered/admin/tokens/) doesn't have [permission](/influxdb/clustered/reference/cli/influxctl/token/create/#examples) to write to the database. See [examples using credentials](/influxdb/clustered/get-started/write/#write-line-protocol-to-influxdb) in write requests. |
| `404 "Not found"` | requested **resource type** (for example, "organization" or "database"), and **resource name** | If a requested resource (for example, organization or database) wasn't found |
| `500 "Internal server error"` | | Default status for an error |
@ -62,6 +78,10 @@ If you notice data is missing in your database, do the following:
## Troubleshoot rejected points
InfluxDB rejects points that fall within the same partition (default partitioning is measurement and day) as existing bucket data and have a different data type for an existing field.
InfluxDB rejects points that fall within the same partition (default partitioning
is by measurement and day) as existing bucket data and have a different data type
for an existing field.
Check for [field data type](/influxdb/clustered/reference/syntax/line-protocol/#data-types-and-format) differences between the rejected data point and points within the same database and partition--for example, did you attempt to write `string` data to an `int` field?
Check for [field data type](/influxdb/clustered/reference/syntax/line-protocol/#data-types-and-format)
differences between the rejected data point and points within the same database
and partition--for example, did you attempt to write `string` data to an `int` field?

View File

@ -163,7 +163,7 @@ This release updates support for the Flux language and queries. To learn about F
#### Forward compatibility
- [InfluxDB 2.0 API compatibility endpoints](/influxdb/v1/tools/api/#influxdb-20-api-compatibility-endpoints) are now part of the InfluxDB 1.x line.
- [InfluxDB 2.x API compatibility endpoints](/influxdb/v1/tools/api/#influxdb-2x-api-compatibility-endpoints) are now part of the InfluxDB 1.x line.
This allows you to leverage the new InfluxDB 2.0 [client libraries](/influxdb/v1/tools/api_client_libraries/)
for both writing and querying data with Flux. Take advantage of the latest client libraries
while readying your implementation for a move to InfluxDB 2.0 Cloud when you're ready to scale.

View File

@ -27,7 +27,7 @@ Related entries: [InfluxDB line protocol](/influxdb/v1/concepts/glossary/#influx
## bucket
A bucket is a named location where time series data is stored in **InfluxDB 2.0**. In InfluxDB 1.8+, each combination of a database and a retention policy (database/retention-policy) represents a bucket. Use the [InfluxDB 2.0 API compatibility endpoints](/influxdb/v1/tools/api#influxdb-2-0-api-compatibility-endpoints) included with InfluxDB 1.8+ to interact with buckets.
A bucket is a named location where time series data is stored in **InfluxDB 2.0**. In InfluxDB 1.8+, each combination of a database and a retention policy (database/retention-policy) represents a bucket. Use the [InfluxDB 2.x API compatibility endpoints](/influxdb/v1/tools/api#influxdb-2x-api-compatibility-endpoints) included with InfluxDB 1.8+ to interact with buckets.
## continuous query (CQ)

View File

@ -54,7 +54,7 @@ The header row defines column labels for the table. The `cpu` [measurement](/inf
### Flux
Check out the [Get started with Flux](/influxdb/v2/query-data/get-started/) to learn more about building queries with Flux.
For more information about querying data with the InfluxDB API using Flux, see the [API reference documentation](/influxdb/v1/tools/api/#influxdb-2-0-api-compatibility-endpoints).
For more information about querying data with the InfluxDB API using Flux, see the [API reference documentation](/influxdb/v1/tools/api/#influxdb-2x-api-compatibility-endpoints).
## Query data with InfluxQL

View File

@ -13,43 +13,65 @@ v2: /influxdb/v2/reference/api/
---
The InfluxDB API provides a simple way to interact with the database.
It uses HTTP response codes, HTTP authentication, JWT Tokens, and basic authentication, and responses are returned in JSON.
It uses HTTP authentication and JWT Tokens.
Responses use standard HTTP response codes and JSON format.
To send API requests, you can use
the [InfluxDB v1 client libraries](/influxdb/v1/tools/api_client_libraries/),
the [InfluxDB v2 client libraries](/influxdb/v1/tools/api_client_libraries/),
[Telegraf](https://docs.influxdata.com/telegraf/v1/),
or the client of your choice.
{{% note %}}
If you're getting started with InfluxDB v1, we recommend using the
InfluxDB v1 client libraries and InfluxQL for
[InfluxDB v3 compatibility](/influxdb/v1/tools/api/#influxdb-v3-compatibility).
{{% /note %}}
The following sections assume your InfluxDB instance is running on `localhost`
port `8086` and HTTPS is not enabled.
Those settings [are configurable](/influxdb/v1/administration/config/#http-endpoints-settings).
- [InfluxDB 2.0 API compatibility endpoints](#influxdb-2-0-api-compatibility-endpoints)
- [InfluxDB 1.x HTTP endpoints](#influxdb-1-x-http-endpoints)
- [InfluxDB v3 compatibility](#influxdb-v3-compatibility)
- [InfluxDB 2.x API compatibility endpoints](#influxdb-2x-api-compatibility-endpoints)
- [InfluxDB 1.x HTTP endpoints](#influxdb-1x-http-endpoints)
## InfluxDB 2.0 API compatibility endpoints
## InfluxDB v3 compatibility
InfluxDB 1.8.0 introduced forward compatibility APIs for InfluxDB 2.0.
There are multiple reasons for introducing these:
InfluxDB v3 is InfluxDB's next generation, which allows
infinite series cardinality without impacting overall database performance and
brings native SQL support and improved InfluxQL performance.
- The latest [InfluxDB client libraries](/influxdb/v1/tools/api_client_libraries/)
are built for the InfluxDB 2.0 API, but now also work with **InfluxDB 1.8.0+**.
- InfluxDB Cloud is a generally available service across multiple cloud service providers and regions
that is fully compatible with the **latest** client libraries.
InfluxDB v3 supports the v1 `/write` and `/query` HTTP API endpoints.
InfluxDB v3 isn't optimized for Flux.
If you are just getting started with InfluxDB 1.x today, we recommend adopting
the [latest client libraries](/influxdb/v1/tools/api_client_libraries/).
They allow you to easily move from InfluxDB 1.x to InfluxDB 2.0 Cloud or open source,
(when you are ready).
If you're getting started with InfluxDB v1, we recommend using the
InfluxDB v1 client libraries and InfluxQL.
When you're ready, you can migrate to InfluxDB v3 and continue using the v1 client
libraries as you gradually move your query workloads to use the v3 client libraries
(and the v3 Flight API).
The following forward compatible APIs are available:
For more information, see [API compatibility and migration guides for InfluxDB v3](/influxdb/cloud-dedicated/guides).
## InfluxDB 2.x API compatibility endpoints
InfluxDB 1.8.0 introduced forward compatibility APIs for InfluxDB v2.
[InfluxDB v2 client libraries](/influxdb/v1/tools/api_client_libraries/)
are built for the InfluxDB v2 API, but also work with **InfluxDB 1.8+**.
The following v2 compatible APIs are available:
| Endpoint | Description |
|:---------- |:---------- |
| [/api/v2/query](#api-v2-query-http-endpoint) | Query data in InfluxDB 1.8.0+ using the InfluxDB 2.0 API and [Flux](/flux/latest/) |
| [/api/v2/write](#api-v2-write-http-endpoint) | Write data to InfluxDB 1.8.0+ using the InfluxDB 2.0 API _(compatible with InfluxDB 2.0 client libraries)_ |
| [/api/v2/query](#api-v2-query-http-endpoint) | Query data in InfluxDB 1.8.0+ using the InfluxDB v2 API and [Flux](/flux/latest/) |
| [/api/v2/write](#api-v2-write-http-endpoint) | Write data to InfluxDB 1.8.0+ using the InfluxDB v2 API _(compatible with InfluxDB v2 client libraries)_ |
| [/health](#health-http-endpoint) | Check the health of your InfluxDB instance |
### `/api/v2/query/` HTTP endpoint
The `/api/v2/query` endpoint accepts `POST` HTTP requests.
Use this endpoint to query data using [Flux](/influxdb/v1/flux/) and [InfluxDB 2.0 client libraries](/influxdb/v2/api-guide/client-libraries/).
Flux is the primary language for working with data in InfluxDB 2.0.
Use this endpoint to query data using [Flux](/influxdb/v1/flux/) and [InfluxDB v2 client libraries](/influxdb/v2/api-guide/client-libraries/).
Flux is the primary language for working with data in InfluxDB v2.
**Include the following HTTP headers:**
@ -90,11 +112,11 @@ curl -XPOST localhost:8086/api/v2/query -sS \
### `/api/v2/write/` HTTP endpoint
The `/api/v2/write` endpoint accepts `POST` HTTP requests.
Use this endpoint to write to an InfluxDB 1.8.0+ database using [InfluxDB 2.0 client libraries](/influxdb/v2/api-guide/client-libraries/).
Use this endpoint to write to an InfluxDB 1.8.0+ database using [InfluxDB v2 client libraries](/influxdb/v2/api-guide/client-libraries/).
Both InfluxDB 1.x and 2.0 APIs support the same line protocol format for raw time series data.
For the purposes of writing data, the APIs differ only in the URL parameters and request headers.
InfluxDB 2.0 uses [organizations](/influxdb/v2/reference/glossary/#organization)
InfluxDB v2 uses [organizations](/influxdb/v2/reference/glossary/#organization)
and [buckets](/influxdb/v2/reference/glossary/#bucket)
instead of databases and retention policies.
The `/api/v2/write` endpoint maps the supplied version 1.8 database and retention policy to a bucket.
@ -112,7 +134,7 @@ The `/api/v2/write` endpoint maps the supplied version 1.8 database and retentio
**Include the following HTTP header:**
- `Authorization`: In InfluxDB 2.0 uses [API Tokens](/influxdb/v2/admin/tokens/)
- `Authorization`: InfluxDB v2 uses [API Tokens](/influxdb/v2/admin/tokens/)
to access the platform and all its capabilities.
InfluxDB v1.x uses a username and password combination when accessing the HTTP APIs.
Use the Token schema to provide your InfluxDB 1.x username and password separated by a colon (`:`).

View File

@ -13,15 +13,16 @@ menu:
v2: /influxdb/v2/api-guide/client-libraries/
---
InfluxDB client libraries are language-specific packages that integrate with the InfluxDB 2.0 API and support both **InfluxDB 1.8+** and **InfluxDB 2.0**.
InfluxDB v2 client libraries are language-specific packages that integrate
with the InfluxDB v2 API and support both **InfluxDB 1.8+** and **InfluxDB 2.x**.
{{% note %}}
We recommend using the new client libraries on this page to leverage the new
read (via Flux) and write APIs and prepare for conversion to InfluxDB v2 and
InfluxDB Cloud.
For more information, see [InfluxDB 2.0 API compatibility endpoints](/influxdb/v1/tools/api/#influxdb-2-0-api-compatibility-endpoints).
Client libraries for [InfluxDB 1.7 and earlier](/influxdb/v1/tools/api_client_libraries/)
may continue to work, but are not maintained by InfluxData.
If you're getting started with InfluxDB v1, we recommend using the
InfluxDB v1 client libraries and InfluxQL for
[InfluxDB v3 compatibility](/influxdb/v1/tools/api/#influxdb-v3-compatibility).
For more information about API and client library compatibility, see the
[InfluxDB v1 API reference](/influxdb/v1/tools/api/).
{{% /note %}}
## Client libraries

View File

@ -23,7 +23,8 @@ To set up TLS over HTTPS, do the following:
- [Self-signed certificates](#self-signed-certificates)
- [Configure InfluxDB to use TLS](#configure-influxdb-to-use-tls)
- [Connect Telegraf to a secured InfluxDB instance](#connect-telegraf-to-a-secured-influxdb-instance)
- [Example configuration](#example-configuration)
- [Example Telegraf configuration](#example-telegraf-configuration)
- [Troubleshoot TLS](#troubleshoot-tls)
{{% warn %}}
InfluxData **strongly recommends** enabling HTTPS, especially if you plan on sending requests to InfluxDB over a network.
@ -58,86 +59,164 @@ You can generate a self-signed certificate on your own machine.
## Configure InfluxDB to use TLS
1. **Download or generate certificate files**
1. [Download or generate certificate files](#1-download-or-generate-certificate-files)
2. [Set certificate file permissions](#2-set-certificate-file-permissions)
3. [Verify certificate and key files](#3-verify-certificate-and-key-files)
4. [Run `influxd` with TLS flags](#4-run-influxd-with-tls-flags)
5. [Verify TLS connection](#5-verify-tls-connection)
If using a [certificate signed by a CA](#single-domain-certificates-signed-by-a-certificate-authority-ca), follow their instructions to download and install the certificate files.
Note the location where certificate files are installed, and then continue to [set certificate file permissions](#set-certificate-file-permissions).
### 1. Download or generate certificate files
{{% note %}}
#### Where are my certificates?
If using a [certificate signed by a CA](#single-domain-certificates-signed-by-a-certificate-authority-ca), follow their instructions to download and install the certificate files.
Note the location where certificate files are installed, and then continue to [set certificate file permissions](#set-certificate-file-permissions).
The location of your certificate files depends on your system, domain, and certificate authority.
{{% note %}}
#### Where are my certificates?
For example, if [Let's Encrypt](https://letsencrypt.org/) is your CA and you use [certbot](https://certbot.eff.org/) to install certificates, the default location is
`/etc/letsencrypt/live/$domain`. For more information about Let's Encrypt certificate paths, see [Where are my certificates?](https://eff-certbot.readthedocs.io/en/latest/using.html#where-are-my-certificates)
{{% /note %}}
The location of your certificate files depends on your system, domain, and certificate authority.
To generate [self-signed certificates](#self-signed-certificates), use the `openssl` command on your system.
For example, if [Let's Encrypt](https://letsencrypt.org/) is your CA and you use [certbot](https://certbot.eff.org/) to install certificates, the default location is
`/etc/letsencrypt/live/$domain`. For more information about Let's Encrypt certificate paths, see [Where are my certificates?](https://eff-certbot.readthedocs.io/en/latest/using.html#where-are-my-certificates)
{{% /note %}}
The following example shows how to generate certificates located in `/etc/ssl`.
Files remain valid for the specified `NUMBER_OF_DAYS`.
The `openssl` command prompts you for optional fields that you can fill out or leave blank; both actions generate valid certificate files.
To generate [self-signed certificates](#self-signed-certificates), use the `openssl` command on your system.
```bash
sudo openssl req -x509 -nodes -newkey rsa:2048 \
-keyout /etc/ssl/influxdb-selfsigned.key \
-out /etc/ssl/influxdb-selfsigned.crt \
-days <NUMBER_OF_DAYS>
```
The following example shows how to generate certificates located in `/etc/ssl`
on Unix-like systems and Windows.
_For example purposes only, the code creates an unencrypted private key._
1. **Set certificate file permissions**
<span id="set-certificate-file-permissions"></span>
{{% warn %}}
#### Encrypt private keys
The user running InfluxDB must have read permissions on the TLS certificate files.
Use encrypted keys to enhance security.
If you must use an unencrypted key, ensure it's stored securely and has appropriate file permissions.
{{% /warn %}}
{{% note %}}You may opt to set up multiple users, groups, and permissions.
Ultimately, make sure all users running InfluxDB have read permissions for the TLS certificate.
{{% /note %}}
```bash
# Create a temporary configuration file that defines properties for
# the Subject Alternative Name (SAN) extension
cat > san.cnf <<EOF
[req]
distinguished_name = req_distinguished_name
req_extensions = v3_req
prompt = no
In your terminal, run `chmod` to set permissions on your installed certificate files--for example:
[req_distinguished_name]
C = US
ST = California
L = San Francisco
O = Example Company
OU = IT Department
CN = example.com
```bash
sudo chmod 644 <path/to/crt>
sudo chmod 600 <path/to/key>
```
[v3_req]
keyUsage = keyEncipherment, dataEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names
The following example shows how to set read permissions on the self-signed certificate files saved in `/etc/ssl`:
[alt_names]
DNS.1 = example.com
DNS.2 = www.example.com
EOF
```bash
sudo chmod 644 /etc/ssl/influxdb-selfsigned.crt
sudo chmod 600 /etc/ssl/influxdb-selfsigned.key
```
# Generate a private key and certificate signing request (CSR)
# using the configuration file
openssl req -new -newkey rsa:2048 -nodes \
-keyout /etc/ssl/influxdb-selfsigned.key \
-out /etc/ssl/influxdb-selfsigned.csr \
-config san.cnf
2. **Run `influxd` with TLS flags**
# Generate the self-signed certificate
openssl x509 -req -in /etc/ssl/influxdb-selfsigned.csr \
-signkey /etc/ssl/influxdb-selfsigned.key \
-out /etc/ssl/influxdb-selfsigned.crt \
-days NUMBER_OF_DAYS \
-extensions v3_req -extfile san.cnf
Start InfluxDB with TLS command line flags:
# Remove the temporary configuration file
rm san.cnf
```
```bash
influxd \
--tls-cert="<path-to-crt>" \
--tls-key="<path-to-key>"
```
Replace the following with your own values:
3. **Verify TLS connection**
- {{% code-placeholder-key %}}`NUMBER_OF_DAYS`{{% /code-placeholder-key %}}: the number of days for files to remain valid
- {{% code-placeholder-key %}}`/etc/ssl`{{% /code-placeholder-key %}}: the SSL configurations directory for your system
- Configuration field values in `req_distinguished_name` and `alt_names`
To test your certificates, access InfluxDB using the `https://` protocol--for example, using cURL:
### 2. Set certificate file permissions
```bash
curl --verbose https://localhost:8086/api/v2/ping
```
The user running InfluxDB must have read permissions on the TLS certificate files.
If using a self-signed certificate, skip certificate verification--for example, in a cURL command,
pass the `-k, --insecure` flag:
{{% note %}}You may opt to set up multiple users, groups, and permissions.
Ultimately, make sure all users running InfluxDB have read permissions for the TLS certificate.
{{% /note %}}
```bash
curl --verbose --insecure https://localhost:8086/api/v2/ping
```
In your terminal, run `chmod` to set permissions on your installed certificate files--for example:
The following example shows how to set read permissions on the self-signed
certificate and key files generated in [the preceding step](#1-download-or-generate-certificate-files):
If successful, the `curl --verbose` output shows a TLS handshake--for example:
```bash
sudo chmod 644 /etc/ssl/influxdb-selfsigned.crt
sudo chmod 600 /etc/ssl/influxdb-selfsigned.key
```
```bash
* [CONN-0-0][CF-SSL] TLSv1.3 (IN), TLS handshake
```
### 3. Verify certificate and key files
To ensure that the certificate and key files are correct and match each other,
enter the following commands in your terminal:
```bash
openssl x509 -noout -modulus -in /etc/ssl/influxdb-selfsigned.crt | openssl md5
openssl rsa -noout -modulus -in /etc/ssl/influxdb-selfsigned.key | openssl md5
```
### 4. Run `influxd` with TLS flags
To start InfluxDB with TLS command line flags, enter the following command with
paths to your key and certificate files:
```bash
influxd \
--tls-cert="/etc/ssl/influxdb-selfsigned.crt" \
--tls-key="/etc/ssl/influxdb-selfsigned.key" > /var/log/influxdb.log 2>&1 &
```
If successful, InfluxDB runs in the background and logs to `influxdb.log`.
### 5. Verify TLS connection
To test your certificates, access InfluxDB using the `https://` protocol--for example, using cURL:
<!--pytest-codeblocks:cont-->
<!--test:nextblock
```bash
# Wait...
sleep 1 && curl --verbose --insecure https://localhost:8086/api/v2/ping
```
-->
<!--pytest.mark.skip-->
```bash
curl --verbose https://localhost:8086/api/v2/ping
```
If using a self-signed certificate, skip certificate verification--for example, in a cURL command,
pass the `-k, --insecure` flag:
<!--pytest.mark.skip-->
```bash
curl --verbose --insecure https://localhost:8086/api/v2/ping
```
If successful, the `curl --verbose` output shows a TLS handshake--for example:
<!--pytest.mark.skip-->
```bash
* [CONN-0-0][CF-SSL] TLSv1.3 (IN), TLS handshake
```
You can further configure TLS settings using
[`tls-min-version`](/influxdb/v2/reference/config-options/#tls-min-version)
@ -152,7 +231,7 @@ update the following `influxdb_v2` output settings in your Telegraf configuratio
- Update URLs to use HTTPS instead of HTTP.
- If using a self-signed certificate, uncomment and set `insecure_skip_verify` to `true`.
### Example configuration
### Example Telegraf configuration
```toml
###############################################################################
@ -176,3 +255,65 @@ update the following `influxdb_v2` output settings in your Telegraf configuratio
```
Restart Telegraf using the updated configuration file.
## Troubleshoot TLS
Identify and resolve issues after activating TLS.
- [Check InfluxDB logs](#check-influxdb-logs)
- [Verify certificate and key files](#verify-certificate-and-key-files)
- [Test with OpenSSL](#test-with-openssl)
- [Check file permissions](#check-file-permissions)
- [Verify TLS configuration](#verify-tls-configuration)
- [Update OpenSSL and InfluxDB](#update-openssl-and-influxdb)
### Check InfluxDB logs
Review the InfluxDB logs for any error messages or warnings about the issue.
#### Example TLS error
```text
msg="http: TLS handshake error from [::1]:50476:
remote error: tls: illegal parameter" log_id=0rqN8H_0000 service=http
```
### Verify certificate and key files
To ensure that the certificate and key files are correct and match each other,
enter the following commands in your terminal:
```bash
openssl x509 -noout -modulus -in /etc/ssl/influxdb-selfsigned.crt | openssl md5
openssl rsa -noout -modulus -in /etc/ssl/influxdb-selfsigned.key | openssl md5
```
### Test with OpenSSL
Use OpenSSL to test the server's certificate and key--for example, enter the
following command in your terminal:
```bash
openssl s_client -connect localhost:8086 -CAfile /etc/ssl/influxdb-selfsigned.crt
```
### Check file permissions
Ensure that the InfluxDB process has read access to the certificate and key
files--for example, enter the following command to set file permissions:
```bash
sudo chmod 644 /etc/ssl/influxdb-selfsigned.crt
sudo chmod 600 /etc/ssl/influxdb-selfsigned.key
```
### Verify TLS configuration
Ensure that the TLS configuration in InfluxDB is correct.
Check the paths to the certificate and key files in the InfluxDB configuration
or command line flags.
### Update OpenSSL and InfluxDB
Ensure that you are using the latest versions of OpenSSL and InfluxDB, as
updates may include fixes for TLS-related issues.

View File

@ -19,9 +19,4 @@ influxdb/v2/tags: [client libraries]
InfluxDB client libraries are language-specific packages that integrate with the InfluxDB v2 API.
The following **InfluxDB v2** client libraries are available:
{{% note %}}
These client libraries are in active development and may not be feature-complete.
This list will continue to grow as more client libraries are released.
{{% /note %}}
{{< children type="list" >}}

View File

@ -1,6 +1,6 @@
{{ $productPathData := findRE "[^/]+.*?" .RelPermalink }}
{{ $product := index $productPathData 0 }}
{{ $productName := (index .Site.Data.products $product).name }}
{{ $productName := cond (isset (index .Site.Data.products $product) "altname") (index .Site.Data.products $product).altname (index .Site.Data.products $product).name }}
{{ $currentVersion := index $productPathData 1 }}
{{ $latestV2 := index (.Site.Data.products.influxdb.versions) 0 }}

View File

@ -35,7 +35,7 @@ function substitute_placeholders {
yesterday_timestamp=$(date -u -d "$yesterday_datetime" +%s)
# Replace the extracted timestamp with `yesterday_timestamp`
sed -i "s|$specific_timestamp|$yesterday_timestamp|g;" $file
sed -i "s|$specific_timestamp|$yesterday_timestamp|g;" $file
fi
done
@ -66,8 +66,8 @@ function substitute_placeholders {
# Shell-specific replacements.
## In JSON Heredoc
sed -i 's|"orgID": "ORG_ID"|"orgID": "$INFLUX_ORG"|g;
s|"name": "BUCKET_NAME"|"name": "$INFLUX_DATABASE"|g;' \
$file
s|"name": "BUCKET_NAME"|"name": "$INFLUX_DATABASE"|g;
' $file
# Replace remaining placeholders with variables.
# If the placeholder is inside of a Python os.getenv() function, don't replace it.
@ -89,39 +89,41 @@ function substitute_placeholders {
/os.getenv("ORG_ID")/! s/ORG_ID/$INFLUX_ORG/g;
/os.getenv("PASSWORD")/! s/PASSWORD/$INFLUX_PASSWORD/g;
/os.getenv("ORG_ID")/! s/ORG_ID/$INFLUX_ORG/g;
/os.getenv("RETENTION_POLICY")/! s/RETENTION_POLICY_NAME\|RETENTION_POLICY/$INFLUX_RETENTION_POLICY/g;
/os.getenv("RETENTION_POLICY")/! s/RETENTION_POLICY_NAME/$INFLUX_RETENTION_POLICY/g;
/os.getenv("RETENTION_POLICY")/! s/RETENTION_POLICY/$INFLUX_RETENTION_POLICY/g;
/os.getenv("USERNAME")/! s/USERNAME/$INFLUX_USERNAME/g;
s/exampleuser@influxdata.com/$INFLUX_EMAIL_ADDRESS/g;
s/CONFIG_NAME/CONFIG_$(shuf -i 0-100 -n1)/g;
s/TEST_RUN/TEST_RUN_$(date +%s)/g;
s|@path/to/line-protocol.txt|data/home-sensor-data.lp/g;
s|/path/to/custom/assets-dir|/app/custom-assets|g;' \
$file
s|NUMBER_OF_DAYS|365|g;
s|@path/to/line-protocol.txt|data/home-sensor-data.lp|g;
s|/path/to/custom/assets-dir|/app/custom-assets|g;
' $file
# v2-specific replacements.
sed -i 's|https:\/\/us-west-2-1.aws.cloud2.influxdata.com|$INFLUX_HOST|g;
s|influxdb2-{{< latest-patch >}}|influxdb2-${influxdb_latest_patches_v2}|g;
s|{{< latest-patch cli=true >}}|${influxdb_latest_cli_v2}|g;
s|influxdb2-{{% latest-patch %}}|influxdb2-${influxdb_latest_patches_v2}|g;
s|{{% latest-patch cli=true %}}|${influxdb_latest_cli_v2}|g;' \
$file
s|{{% latest-patch cli=true %}}|${influxdb_latest_cli_v2}|g;
' $file
# Telegraf-specific replacements
sed -i 's|telegraf-{{< latest-patch >}}|telegraf-${telegraf_latest_patches_v1}|g;
s|telegraf-{{% latest-patch %}}|telegraf-${telegraf_latest_patches_v1}|g;
s/--input-filter <INPUT_PLUGIN_NAME>\[:<INPUT_PLUGIN_NAME>\]/--input-filter cpu:influxdb/g;
s/--output-filter <OUTPUT_PLUGIN_NAME>\[:<OUTPUT_PLUGIN_NAME>\]/--output-filter influxdb_v2:file/g;' \
$file
s/--output-filter <OUTPUT_PLUGIN_NAME>\[:<OUTPUT_PLUGIN_NAME>\]/--output-filter influxdb_v2:file/g;
' $file
# Skip package manager commands.
sed -i 's|sudo dpkg.*$||g;
s|sudo yum.*$||g;' \
$file
s|sudo yum.*$||g;
' $file
# Environment-specific replacements.
# You can't use sudo with Docker.
sed -i 's|sudo ||g;' \
$file
sed -i 's|sudo ||g;
' $file
fi
done
}