Merge branch 'master' into 5950-reference-influxdb3-processing-engine
commit
be4a88aca0
|
@ -120,9 +120,9 @@
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
&.beta {
|
&.new {
|
||||||
.product-info h3::after {
|
.product-info h3::after {
|
||||||
content: "beta";
|
content: "New";
|
||||||
margin-left: .5rem;
|
margin-left: .5rem;
|
||||||
font-size: 1rem;
|
font-size: 1rem;
|
||||||
padding: .25em .5em .25em .4em;
|
padding: .25em .5em .25em .4em;
|
||||||
|
|
|
@ -99,6 +99,26 @@
|
||||||
pre { background: rgba($r-basalt, .35); }
|
pre { background: rgba($r-basalt, .35); }
|
||||||
}
|
}
|
||||||
|
|
||||||
|
&.ga-announcement {
|
||||||
|
background-image: url('/svgs/influxdb3-ga-background.svg');
|
||||||
|
background-size: cover;
|
||||||
|
a:hover { color: $br-dark-blue; }
|
||||||
|
code { color: $gr-gypsy; background: rgba($gr-gypsy, .25); }
|
||||||
|
pre { background: rgba($gr-gypsy, .25); }
|
||||||
|
|
||||||
|
h3 {font-size: 1.4rem !important;}
|
||||||
|
.notification-slug { font-size: 1.15rem;
|
||||||
|
.btn {
|
||||||
|
display: inline-block;
|
||||||
|
background: $g20-white;
|
||||||
|
color: $br-dark-blue;
|
||||||
|
padding: .5rem 1rem;
|
||||||
|
border-radius: $radius * 2;
|
||||||
|
font-size: 1rem;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
//////////// Basic HTML element styles for notification content ////////////
|
//////////// Basic HTML element styles for notification content ////////////
|
||||||
|
|
||||||
h1,h2,h3,h4,h5,h6 {
|
h1,h2,h3,h4,h5,h6 {
|
||||||
|
@ -156,6 +176,16 @@
|
||||||
}
|
}
|
||||||
.show::before {content: "Show more"}
|
.show::before {content: "Show more"}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
.title-tag {
|
||||||
|
padding: .15rem .45rem;
|
||||||
|
text-transform: uppercase;
|
||||||
|
font-size: .85rem;
|
||||||
|
border-radius: $radius * 2;
|
||||||
|
font-family: $code;
|
||||||
|
background: $br-dark-blue;
|
||||||
|
}
|
||||||
|
.title-tag + h3 {margin-top: .75rem;}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -96,5 +96,4 @@ blockquote {
|
||||||
"blocks/tip",
|
"blocks/tip",
|
||||||
"blocks/important",
|
"blocks/important",
|
||||||
"blocks/warning",
|
"blocks/warning",
|
||||||
"blocks/caution",
|
"blocks/caution";
|
||||||
"blocks/beta";
|
|
||||||
|
|
|
@ -15,27 +15,48 @@
|
||||||
padding-right: 2rem;
|
padding-right: 2rem;
|
||||||
|
|
||||||
ul {
|
ul {
|
||||||
display: flex;
|
|
||||||
flex-wrap: wrap;
|
|
||||||
margin-bottom: 1.25rem;
|
margin-bottom: 1.25rem;
|
||||||
padding: 0;
|
padding: 0;
|
||||||
list-style: none;
|
list-style: none;
|
||||||
|
|
||||||
li {display: inline-block}
|
|
||||||
|
|
||||||
a {
|
a {
|
||||||
margin-right: 1.5rem;
|
|
||||||
color: $article-heading;
|
color: $article-heading;
|
||||||
|
font-weight: $medium;
|
||||||
|
position: relative;
|
||||||
|
|
||||||
|
&::after {
|
||||||
|
content: "\e90a";
|
||||||
|
font-family: 'icomoon-v4';
|
||||||
|
font-weight: bold;
|
||||||
|
font-size: 1.3rem;
|
||||||
|
display: inline-block;
|
||||||
|
position: absolute;
|
||||||
|
@include gradient($grad-burningDusk);
|
||||||
|
background-clip: text;
|
||||||
|
-webkit-text-fill-color: transparent;
|
||||||
|
right: 0;
|
||||||
|
transform: translateX(.25rem);
|
||||||
|
opacity: 0;
|
||||||
|
transition: transform .2s, opacity .2s;
|
||||||
|
}
|
||||||
|
|
||||||
&:hover {
|
&:hover {
|
||||||
color: $article-link;
|
&::after {transform: translateX(1.5rem); opacity: 1;}
|
||||||
border-radius: calc($radius * 1.5);
|
}
|
||||||
|
|
||||||
|
&.discord:before {
|
||||||
|
content: url('/svgs/discord.svg');
|
||||||
|
display: inline-block;
|
||||||
|
height: 1.1rem;
|
||||||
|
width: 1.25rem;
|
||||||
|
vertical-align: top;
|
||||||
|
margin: 2px .65rem 0 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
&.community:before {
|
&.community:before {
|
||||||
content: "\e900";
|
content: "\e900";
|
||||||
color: $article-heading;
|
color: $article-heading;
|
||||||
margin: 0 .5rem 0 -.25rem;
|
margin-right: .75rem;
|
||||||
font-size: 1.2rem;
|
font-size: 1.2rem;
|
||||||
font-family: 'icomoon-v2';
|
font-family: 'icomoon-v2';
|
||||||
vertical-align: middle;
|
vertical-align: middle;
|
||||||
|
@ -46,7 +67,16 @@
|
||||||
height: 1.1rem;
|
height: 1.1rem;
|
||||||
width: 1.1rem;
|
width: 1.1rem;
|
||||||
vertical-align: text-top;
|
vertical-align: text-top;
|
||||||
margin-right: .5rem;
|
margin-right: .8rem;
|
||||||
|
}
|
||||||
|
|
||||||
|
&.reddit:before {
|
||||||
|
content: url('/svgs/reddit.svg');
|
||||||
|
display: inline-block;
|
||||||
|
height: 1.1rem;
|
||||||
|
width: 1.2rem;
|
||||||
|
vertical-align: top;
|
||||||
|
margin: 2px .75rem 0 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,105 +0,0 @@
|
||||||
.block.beta {
|
|
||||||
@include gradient($grad-burningDusk);
|
|
||||||
padding: 4px;
|
|
||||||
border: none;
|
|
||||||
border-radius: 25px !important;
|
|
||||||
|
|
||||||
.beta-content {
|
|
||||||
background: $article-bg;
|
|
||||||
border-radius: 21px;
|
|
||||||
padding: calc(1.65rem - 4px) calc(2rem - 4px) calc(.1rem + 4px) calc(2rem - 4px);
|
|
||||||
|
|
||||||
h4 {
|
|
||||||
color: $article-heading;
|
|
||||||
}
|
|
||||||
|
|
||||||
p {margin-bottom: 1rem;}
|
|
||||||
|
|
||||||
.expand-wrapper {
|
|
||||||
border: none;
|
|
||||||
margin: .5rem 0 1.5rem;
|
|
||||||
}
|
|
||||||
.expand {
|
|
||||||
border: none;
|
|
||||||
padding: 0;
|
|
||||||
|
|
||||||
.expand-content p {
|
|
||||||
margin-left: 2rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
ul {
|
|
||||||
|
|
||||||
margin-top: -1rem;
|
|
||||||
|
|
||||||
&.feedback-channels {
|
|
||||||
|
|
||||||
padding: 0;
|
|
||||||
margin: -1rem 0 1.5rem 2rem;
|
|
||||||
list-style: none;
|
|
||||||
|
|
||||||
a {
|
|
||||||
color: $article-heading;
|
|
||||||
font-weight: $medium;
|
|
||||||
position: relative;
|
|
||||||
|
|
||||||
&.discord:before {
|
|
||||||
content: url('/svgs/discord.svg');
|
|
||||||
display: inline-block;
|
|
||||||
height: 1.1rem;
|
|
||||||
width: 1.25rem;
|
|
||||||
vertical-align: top;
|
|
||||||
margin: 2px .65rem 0 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
&.community:before {
|
|
||||||
content: "\e900";
|
|
||||||
color: $article-heading;
|
|
||||||
margin: 0 .65rem 0 0;
|
|
||||||
font-size: 1.2rem;
|
|
||||||
font-family: 'icomoon-v2';
|
|
||||||
vertical-align: middle;
|
|
||||||
}
|
|
||||||
|
|
||||||
&.slack:before {
|
|
||||||
content: url('/svgs/slack.svg');
|
|
||||||
display: inline-block;
|
|
||||||
height: 1.1rem;
|
|
||||||
width: 1.1rem;
|
|
||||||
vertical-align: text-top;
|
|
||||||
margin-right: .65rem;
|
|
||||||
}
|
|
||||||
|
|
||||||
&.reddit:before {
|
|
||||||
content: url('/svgs/reddit.svg');
|
|
||||||
display: inline-block;
|
|
||||||
height: 1.1rem;
|
|
||||||
width: 1.2rem;
|
|
||||||
vertical-align: top;
|
|
||||||
margin: 2px .65rem 0 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
&::after {
|
|
||||||
content: "\e90a";
|
|
||||||
font-family: 'icomoon-v4';
|
|
||||||
font-weight: bold;
|
|
||||||
font-size: 1.3rem;
|
|
||||||
display: inline-block;
|
|
||||||
position: absolute;
|
|
||||||
@include gradient($grad-burningDusk);
|
|
||||||
background-clip: text;
|
|
||||||
-webkit-text-fill-color: transparent;
|
|
||||||
right: 0;
|
|
||||||
transform: translateX(.25rem);
|
|
||||||
opacity: 0;
|
|
||||||
transition: transform .2s, opacity .2s;
|
|
||||||
}
|
|
||||||
|
|
||||||
&:hover {
|
|
||||||
&::after {transform: translateX(1.5rem); opacity: 1;}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -10,6 +10,16 @@ aliases:
|
||||||
- /chronograf/v1/about_the_project/release-notes-changelog/
|
- /chronograf/v1/about_the_project/release-notes-changelog/
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## v1.10.7 {date="2025-04-15"}
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- Fix Hosts page loading.
|
||||||
|
|
||||||
|
### Dependency updates
|
||||||
|
|
||||||
|
- Upgrade Go to 1.23.8.
|
||||||
|
|
||||||
## v1.10.6 {date="2024-12-16"}
|
## v1.10.6 {date="2024-12-16"}
|
||||||
|
|
||||||
### Bug Fixes
|
### Bug Fixes
|
||||||
|
|
|
@ -9,6 +9,60 @@ menu:
|
||||||
parent: About the project
|
parent: About the project
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## v1.12.0 {date="2025-04-15"}
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- Add additional log output when using
|
||||||
|
[`influx_inspect buildtsi`](/enterprise_influxdb/v1/tools/influx_inspect/#buildtsi) to
|
||||||
|
rebuild the TSI index.
|
||||||
|
- Use [`influx_inspect export`](/enterprise_influxdb/v1/tools/influx_inspect/#export) with
|
||||||
|
[`-tsmfile` option](/enterprise_influxdb/v1/tools/influx_inspect/#--tsmfile-tsm_file-) to
|
||||||
|
export a single TSM file.
|
||||||
|
- Add `-m` flag to the [`influxd-ctl show-shards` command](/enterprise_influxdb/v1/tools/influxd-ctl/show-shards/)
|
||||||
|
to output inconsistent shards.
|
||||||
|
- Allow the specification of a write window for retention policies.
|
||||||
|
- Add `fluxQueryRespBytes` metric to the `/debug/vars` metrics endpoint.
|
||||||
|
- Log whenever meta gossip times exceed expiration.
|
||||||
|
- Add [`query-log-path` configuration option](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#query-log-path)
|
||||||
|
to data nodes.
|
||||||
|
- Add [`aggressive-points-per-block` configuration option](/influxdb/v1/administration/config/#aggressive-points-per-block)
|
||||||
|
to help ensure TSM files get fully compacted.
|
||||||
|
- Log TLS configuration settings on startup.
|
||||||
|
- Check for TLS certificate and private key permissions.
|
||||||
|
- Add a warning if the TLS certificate is expired.
|
||||||
|
- Add authentication to the Raft portal and add the following related _data_
|
||||||
|
node configuration options:
|
||||||
|
- [`[meta].raft-portal-auth-required`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#raft-portal-auth-required)
|
||||||
|
- [`[meta].raft-dialer-auth-required`](/enterprise_influxdb/v1/administration/configure/config-data-nodes/#raft-dialer-auth-required)
|
||||||
|
- Improve error handling.
|
||||||
|
- InfluxQL updates:
|
||||||
|
- Delete series by retention policy.
|
||||||
|
- Allow retention policies to discard writes that fall within their range, but
|
||||||
|
outside of [`FUTURE LIMIT`](/enterprise_influxdb/v1/query_language/manage-database/#future-limit)
|
||||||
|
and [`PAST LIMIT`](/enterprise_influxdb/v1/query_language/manage-database/#past-limit).
|
||||||
|
|
||||||
|
### Bug fixes
|
||||||
|
|
||||||
|
- Log rejected writes to subscriptions.
|
||||||
|
- Update `xxhash` and avoid `stringtoslicebyte` in the cache.
|
||||||
|
- Prevent a panic when a shard group has no shards.
|
||||||
|
- Fix file handle leaks in `Compactor.write`.
|
||||||
|
- Ensure fields in memory match the fields on disk.
|
||||||
|
- Ensure temporary files are removed after failed compactions.
|
||||||
|
- Do not panic on invalid multiple subqueries.
|
||||||
|
- Update the `/shard-status` API to return the correct result and use a
|
||||||
|
consistent "idleness" definition for shards.
|
||||||
|
|
||||||
|
### Other
|
||||||
|
|
||||||
|
- Update Go to 1.23.5.
|
||||||
|
- Upgrade Flux to v0.196.1.
|
||||||
|
- Upgrade InfluxQL to v1.4.1.
|
||||||
|
- Various other dependency updates.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
{{% note %}}
|
{{% note %}}
|
||||||
#### InfluxDB Enterprise and FIPS-compliance
|
#### InfluxDB Enterprise and FIPS-compliance
|
||||||
|
|
||||||
|
@ -21,6 +75,10 @@ InfluxDB Enterprise builds are available. For more information, see
|
||||||
|
|
||||||
## v1.11.8 {date="2024-11-15"}
|
## v1.11.8 {date="2024-11-15"}
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- Add a startup logger to InfluxDB Enterprise data nodes.
|
||||||
|
|
||||||
### Bug Fixes
|
### Bug Fixes
|
||||||
|
|
||||||
- Strip double quotes from measurement names in the [`/api/v2/delete` compatibility
|
- Strip double quotes from measurement names in the [`/api/v2/delete` compatibility
|
||||||
|
@ -28,6 +86,8 @@ InfluxDB Enterprise builds are available. For more information, see
|
||||||
string comparisons (e.g. to allow special characters in measurement names).
|
string comparisons (e.g. to allow special characters in measurement names).
|
||||||
- Enable SHA256 for FIPS RPMs.
|
- Enable SHA256 for FIPS RPMs.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## v1.11.7 {date="2024-09-19"}
|
## v1.11.7 {date="2024-09-19"}
|
||||||
|
|
||||||
### Bug Fixes
|
### Bug Fixes
|
||||||
|
@ -581,7 +641,7 @@ in that there is no corresponding InfluxDB OSS release.
|
||||||
|
|
||||||
### Features
|
### Features
|
||||||
- Upgrade to Go 1.15.10.
|
- Upgrade to Go 1.15.10.
|
||||||
- Support user-defined *node labels*.
|
- Support user-defined _node labels_.
|
||||||
Node labels let you assign arbitrary key-value pairs to meta and data nodes in a cluster.
|
Node labels let you assign arbitrary key-value pairs to meta and data nodes in a cluster.
|
||||||
For instance, an operator might want to label nodes with the availability zone in which they're located.
|
For instance, an operator might want to label nodes with the availability zone in which they're located.
|
||||||
- Improve performance of `SHOW SERIES CARDINALITY` and `SHOW SERIES CARDINALITY from <measurement>` InfluxQL queries.
|
- Improve performance of `SHOW SERIES CARDINALITY` and `SHOW SERIES CARDINALITY from <measurement>` InfluxQL queries.
|
||||||
|
@ -756,11 +816,15 @@ For details on changes incorporated from the InfluxDB OSS release, see
|
||||||
|
|
||||||
### Features
|
### Features
|
||||||
|
|
||||||
#### **Back up meta data only**
|
#### Back up meta data only
|
||||||
|
|
||||||
- Add option to back up **meta data only** (users, roles, databases, continuous queries, and retention policies) using the new `-strategy` flag and `only meta` option: `influx ctl backup -strategy only meta </your-backup-directory>`.
|
- Add option to back up **meta data only** (users, roles, databases, continuous
|
||||||
|
queries, and retention policies) using the new `-strategy` flag and `only meta`
|
||||||
|
option: `influxd-ctl backup -strategy only meta </your-backup-directory>`.
|
||||||
|
|
||||||
> **Note:** To restore a meta data backup, use the `restore -full` command and specify your backup manifest: `influxd-ctl restore -full </backup-directory/backup.manifest>`.
|
> [!Note]
|
||||||
|
> To restore a meta data backup, use the `restore -full` command and specify
|
||||||
|
> your backup manifest: `influxd-ctl restore -full </backup-directory/backup.manifest>`.
|
||||||
|
|
||||||
For more information, see [Perform a metastore only backup](/enterprise_influxdb/v1/administration/backup-and-restore/#perform-a-metastore-only-backup).
|
For more information, see [Perform a metastore only backup](/enterprise_influxdb/v1/administration/backup-and-restore/#perform-a-metastore-only-backup).
|
||||||
|
|
||||||
|
@ -1007,7 +1071,10 @@ The following summarizes the expected settings for proper configuration of JWT a
|
||||||
`""`.
|
`""`.
|
||||||
- A long pass phrase is recommended for better security.
|
- A long pass phrase is recommended for better security.
|
||||||
|
|
||||||
>**Note:** To provide encrypted internode communication, you must enable HTTPS. Although the JWT signature is encrypted, the the payload of a JWT token is encoded, but is not encrypted.
|
> [!Note]
|
||||||
|
> To provide encrypted internode communication, you must enable HTTPS. Although
|
||||||
|
> the JWT signature is encrypted, the payload of a JWT token is encoded, but
|
||||||
|
> is not encrypted.
|
||||||
|
|
||||||
### Bug fixes
|
### Bug fixes
|
||||||
|
|
||||||
|
@ -1082,8 +1149,10 @@ Please see the [InfluxDB OSS release notes](/influxdb/v1/about_the_project/relea
|
||||||
|
|
||||||
## v1.5.0 {date="2018-03-06"}
|
## v1.5.0 {date="2018-03-06"}
|
||||||
|
|
||||||
> ***Note:*** This release builds off of the 1.5 release of InfluxDB OSS. Please see the [InfluxDB OSS release
|
> [!Note]
|
||||||
> notes](/influxdb/v1/about_the_project/release-notes/) for more information about the InfluxDB OSS release.
|
> This release builds off of the 1.5 release of InfluxDB OSS.
|
||||||
|
> Please see the [InfluxDB OSS release notes](/influxdb/v1/about_the_project/release-notes/)
|
||||||
|
> for more information about the InfluxDB OSS release.
|
||||||
|
|
||||||
For highlights of the InfluxDB 1.5 release, see [What's new in InfluxDB 1.5](/influxdb/v1/about_the_project/whats_new/).
|
For highlights of the InfluxDB 1.5 release, see [What's new in InfluxDB 1.5](/influxdb/v1/about_the_project/whats_new/).
|
||||||
|
|
||||||
|
|
|
@ -259,6 +259,29 @@ For detailed configuration information, see [`meta.ensure-fips`](/enterprise_inf
|
||||||
|
|
||||||
Environment variable: `INFLUXDB_META_ENSURE_FIPS`
|
Environment variable: `INFLUXDB_META_ENSURE_FIPS`
|
||||||
|
|
||||||
|
#### raft-portal-auth-required {metadata="v1.12.0+"}
|
||||||
|
|
||||||
|
Default is `false`.
|
||||||
|
|
||||||
|
Require Raft clients to authenticate with the server using the
|
||||||
|
[`meta-internal-shared-secret`](#meta-internal-shared-secret).
|
||||||
|
This requires that all meta nodes are running InfluxDB Enterprise v1.12.0+ and
|
||||||
|
are configured with the correct `meta-internal-shared-secret`.
|
||||||
|
|
||||||
|
Environment variable: `INFLUXDB_META_RAFT_PORTAL_AUTH_REQUIRED`
|
||||||
|
|
||||||
|
#### raft-dialer-auth-required {metadata="v1.12.0+"}
|
||||||
|
|
||||||
|
Default is `false`.
|
||||||
|
|
||||||
|
Require Raft servers to authenticate Raft clients using the
|
||||||
|
[`meta-internal-shared-secret`](#meta-internal-shared-secret).
|
||||||
|
This requires that all meta nodes are running InfluxDB Enterprise v1.12.0+, have
|
||||||
|
`raft-portal-auth-required=true`, and are configured with the correct
|
||||||
|
`meta-internal-shared-secret`.
|
||||||
|
|
||||||
|
Environment variable: `INFLUXDB_META_RAFT_DIALER_AUTH_REQUIRED`
|
||||||
|
|
||||||
-----
|
-----
|
||||||
|
|
||||||
## Data settings
|
## Data settings
|
||||||
|
@ -305,6 +328,8 @@ Environment variable: `INFLUXDB_DATA_QUERY_LOG_ENABLED`
|
||||||
|
|
||||||
#### query-log-path
|
#### query-log-path
|
||||||
|
|
||||||
|
Default is `""`.
|
||||||
|
|
||||||
An absolute path to the query log file.
|
An absolute path to the query log file.
|
||||||
The default is `""` (queries aren't logged to a file).
|
The default is `""` (queries aren't logged to a file).
|
||||||
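
As a sketch, the option can also be set through its environment variable (documented below); the log path shown is a hypothetical value.

```bash
# Hypothetical example: write the query log to an absolute path
export INFLUXDB_DATA_QUERY_LOG_PATH=/var/log/influxdb/queries.log
```
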
|
|
||||||
|
@ -326,6 +351,8 @@ The following is an example of a `logrotate` configuration:
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Environment variable: `INFLUXDB_DATA_QUERY_LOG_PATH`
|
||||||
|
|
||||||
#### wal-fsync-delay
|
#### wal-fsync-delay
|
||||||
|
|
||||||
Default is `"0s"`.
|
Default is `"0s"`.
|
||||||
|
@ -422,6 +449,16 @@ The duration at which to compact all TSM and TSI files in a shard if it has not
|
||||||
|
|
||||||
Environment variable: `INFLUXDB_DATA_COMPACT_FULL_WRITE_COLD_DURATION`
|
Environment variable: `INFLUXDB_DATA_COMPACT_FULL_WRITE_COLD_DURATION`
|
||||||
|
|
||||||
|
#### aggressive-points-per-block {metadata="v1.12.0+"}
|
||||||
|
|
||||||
|
Default is `10000`.
|
||||||
|
|
||||||
|
The number of points per block to use during aggressive compaction. There are
|
||||||
|
certain cases where TSM files do not get fully compacted. This adjusts an
|
||||||
|
internal parameter to help ensure these files do get fully compacted.
|
||||||
|
|
||||||
|
Environment variable: `INFLUXDB_DATA_AGGRESSIVE_POINTS_PER_BLOCK`
|
||||||
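
As a sketch, the option can be set through the environment variable above; `50000` is an arbitrary example value, not a recommendation.

```bash
# Hypothetical example: raise the points-per-block used during aggressive
# compaction from the default of 10000
export INFLUXDB_DATA_AGGRESSIVE_POINTS_PER_BLOCK=50000
```
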
|
|
||||||
#### index-version
|
#### index-version
|
||||||
|
|
||||||
Default is `"inmem"`.
|
Default is `"inmem"`.
|
||||||
|
|
|
@ -62,17 +62,22 @@ Creates a new database.
|
||||||
#### Syntax
|
#### Syntax
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE DATABASE <database_name> [WITH [DURATION <duration>] [REPLICATION <n>] [SHARD DURATION <duration>] [NAME <retention-policy-name>]]
|
CREATE DATABASE <database_name> [WITH [DURATION <duration>] [REPLICATION <n>] [SHARD DURATION <duration>] [PAST LIMIT <duration>] [FUTURE LIMIT <duration>] [NAME <retention-policy-name>]]
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Description of syntax
|
#### Description of syntax
|
||||||
|
|
||||||
`CREATE DATABASE` requires a database [name](/enterprise_influxdb/v1/troubleshooting/frequently-asked-questions/#what-words-and-characters-should-i-avoid-when-writing-data-to-influxdb).
|
`CREATE DATABASE` requires a database [name](/enterprise_influxdb/v1/troubleshooting/frequently-asked-questions/#what-words-and-characters-should-i-avoid-when-writing-data-to-influxdb).
|
||||||
|
|
||||||
The `WITH`, `DURATION`, `REPLICATION`, `SHARD DURATION`, and `NAME` clauses are optional and create a single [retention policy](/enterprise_influxdb/v1/concepts/glossary/#retention-policy-rp) associated with the created database.
|
The `WITH`, `DURATION`, `REPLICATION`, `SHARD DURATION`, `PAST LIMIT`,
|
||||||
If you do not specify one of the clauses after `WITH`, the relevant behavior defaults to the `autogen` retention policy settings.
|
`FUTURE LIMIT`, and `NAME` clauses are optional and create a single
|
||||||
|
[retention policy](/enterprise_influxdb/v1/concepts/glossary/#retention-policy-rp)
|
||||||
|
associated with the created database.
|
||||||
|
If you do not specify one of the clauses after `WITH`, the relevant behavior
|
||||||
|
defaults to the `autogen` retention policy settings.
|
||||||
The created retention policy automatically serves as the database's default retention policy.
|
The created retention policy automatically serves as the database's default retention policy.
|
||||||
For more information about those clauses, see [Retention Policy Management](/enterprise_influxdb/v1/query_language/manage-database/#retention-policy-management).
|
For more information about those clauses, see
|
||||||
|
[Retention Policy Management](/enterprise_influxdb/v1/query_language/manage-database/#retention-policy-management).
|
||||||
|
|
||||||
A successful `CREATE DATABASE` query returns an empty result.
|
A successful `CREATE DATABASE` query returns an empty result.
|
||||||
If you attempt to create a database that already exists, InfluxDB does nothing and does not return an error.
|
If you attempt to create a database that already exists, InfluxDB does nothing and does not return an error.
|
||||||
|
@ -122,21 +127,25 @@ The `DROP SERIES` query deletes all points from a [series](/enterprise_influxdb/
|
||||||
and it drops the series from the index.
|
and it drops the series from the index.
|
||||||
|
|
||||||
The query takes the following form, where you must specify either the `FROM` clause or the `WHERE` clause:
|
The query takes the following form, where you must specify either the `FROM` clause or the `WHERE` clause:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
DROP SERIES FROM <measurement_name[,measurement_name]> WHERE <tag_key>='<tag_value>'
|
DROP SERIES FROM <measurement_name[,measurement_name]> WHERE <tag_key>='<tag_value>'
|
||||||
```
|
```
|
||||||
|
|
||||||
Drop all series from a single measurement:
|
Drop all series from a single measurement:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
> DROP SERIES FROM "h2o_feet"
|
> DROP SERIES FROM "h2o_feet"
|
||||||
```
|
```
|
||||||
|
|
||||||
Drop series with a specific tag pair from a single measurement:
|
Drop series with a specific tag pair from a single measurement:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
> DROP SERIES FROM "h2o_feet" WHERE "location" = 'santa_monica'
|
> DROP SERIES FROM "h2o_feet" WHERE "location" = 'santa_monica'
|
||||||
```
|
```
|
||||||
|
|
||||||
Drop all points in the series that have a specific tag pair from all measurements in the database:
|
Drop all points in the series that have a specific tag pair from all measurements in the database:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
> DROP SERIES WHERE "location" = 'santa_monica'
|
> DROP SERIES WHERE "location" = 'santa_monica'
|
||||||
```
|
```
|
||||||
|
@ -152,35 +161,49 @@ Unlike
|
||||||
|
|
||||||
You must include either the `FROM` clause, the `WHERE` clause, or both:
|
You must include either the `FROM` clause, the `WHERE` clause, or both:
|
||||||
|
|
||||||
```
|
```sql
|
||||||
DELETE FROM <measurement_name> WHERE [<tag_key>='<tag_value>'] | [<time interval>]
|
DELETE FROM <measurement_name> WHERE [<tag_key>='<tag_value>'] | [<time interval>]
|
||||||
```
|
```
|
||||||
|
|
||||||
Delete all data associated with the measurement `h2o_feet`:
|
Delete all data associated with the measurement `h2o_feet`:
|
||||||
```
|
|
||||||
|
```sql
|
||||||
> DELETE FROM "h2o_feet"
|
> DELETE FROM "h2o_feet"
|
||||||
```
|
```
|
||||||
|
|
||||||
Delete all data associated with the measurement `h2o_quality` and where the tag `randtag` equals `3`:
|
Delete all data associated with the measurement `h2o_quality` and where the tag `randtag` equals `3`:
|
||||||
```
|
|
||||||
|
```sql
|
||||||
> DELETE FROM "h2o_quality" WHERE "randtag" = '3'
|
> DELETE FROM "h2o_quality" WHERE "randtag" = '3'
|
||||||
```
|
```
|
||||||
|
|
||||||
Delete all data in the database that occur before January 01, 2020:
|
Delete all data in the database that occur before January 01, 2020:
|
||||||
```
|
|
||||||
|
```sql
|
||||||
> DELETE WHERE time < '2020-01-01'
|
> DELETE WHERE time < '2020-01-01'
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Delete all data associated with the measurement `h2o_feet` in retention policy `one_day`:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
> DELETE FROM "one_day"."h2o_feet"
|
||||||
|
```
|
||||||
|
|
||||||
A successful `DELETE` query returns an empty result.
|
A successful `DELETE` query returns an empty result.
|
||||||
|
|
||||||
Things to note about `DELETE`:
|
Things to note about `DELETE`:
|
||||||
|
|
||||||
* `DELETE` supports
|
* `DELETE` supports
|
||||||
[regular expressions](/enterprise_influxdb/v1/query_language/explore-data/#regular-expressions)
|
[regular expressions](/enterprise_influxdb/v1/query_language/explore-data/#regular-expressions)
|
||||||
in the `FROM` clause when specifying measurement names and in the `WHERE` clause
|
in the `FROM` clause when specifying measurement names and in the `WHERE` clause
|
||||||
when specifying tag values.
|
when specifying tag values. It *does not* support regular expressions for the
|
||||||
* `DELETE` does not support [fields](/enterprise_influxdb/v1/concepts/glossary/#field) in the `WHERE` clause.
|
retention policy in the `FROM` clause.
|
||||||
* If you need to delete points in the future, you must specify that time period as `DELETE SERIES` runs for `time < now()` by default. [Syntax](https://github.com/influxdata/influxdb/issues/8007)
|
If deleting a series in a retention policy, `DELETE` requires that you define
|
||||||
|
*only one* retention policy in the `FROM` clause.
|
||||||
|
* `DELETE` does not support [fields](/enterprise_influxdb/v1/concepts/glossary/#field)
|
||||||
|
in the `WHERE` clause.
|
||||||
|
* If you need to delete points in the future, you must specify that time period
|
||||||
|
as `DELETE SERIES` runs for `time < now()` by default.
|
||||||
|
|
||||||
### Delete measurements with DROP MEASUREMENT
|
### Delete measurements with DROP MEASUREMENT
|
||||||
|
|
||||||
|
@ -234,8 +257,9 @@ You may disable its auto-creation in the [configuration file](/enterprise_influx
|
||||||
### Create retention policies with CREATE RETENTION POLICY
|
### Create retention policies with CREATE RETENTION POLICY
|
||||||
|
|
||||||
#### Syntax
|
#### Syntax
|
||||||
```
|
|
||||||
CREATE RETENTION POLICY <retention_policy_name> ON <database_name> DURATION <duration> REPLICATION <n> [SHARD DURATION <duration>] [DEFAULT]
|
```sql
|
||||||
|
CREATE RETENTION POLICY <retention_policy_name> ON <database_name> DURATION <duration> REPLICATION <n> [SHARD DURATION <duration>] [PAST LIMIT <duration>] [FUTURE LIMIT <duration>] [DEFAULT]
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Description of syntax
|
#### Description of syntax
|
||||||
|
@ -283,6 +307,28 @@ See
|
||||||
[Shard group duration management](/enterprise_influxdb/v1/concepts/schema_and_data_layout/#shard-group-duration-management)
|
[Shard group duration management](/enterprise_influxdb/v1/concepts/schema_and_data_layout/#shard-group-duration-management)
|
||||||
for recommended configurations.
|
for recommended configurations.
|
||||||
|
|
||||||
|
##### `PAST LIMIT`
|
||||||
|
|
||||||
|
The `PAST LIMIT` clause defines a time boundary before and relative to _now_
|
||||||
|
in which points written to the retention policy are accepted. If a point has a
|
||||||
|
timestamp before the specified boundary, the point is rejected and the write
|
||||||
|
request returns a partial write error.
|
||||||
|
|
||||||
|
For example, if a write request tries to write data to a retention policy with a
|
||||||
|
`PAST LIMIT 6h` and there are points in the request with timestamps older than
|
||||||
|
6 hours, those points are rejected.
|
||||||
|
|
||||||
|
##### `FUTURE LIMIT`
|
||||||
|
|
||||||
|
The `FUTURE LIMIT` clause defines a time boundary after and relative to _now_
|
||||||
|
in which points written to the retention policy are accepted. If a point has a
|
||||||
|
timestamp after the specified boundary, the point is rejected and the write
|
||||||
|
request returns a partial write error.
|
||||||
|
|
||||||
|
For example, if a write request tries to write data to a retention policy with a
|
||||||
|
`FUTURE LIMIT 6h` and there are points in the request with future timestamps
|
||||||
|
greater than 6 hours from now, those points are rejected.
|
||||||
|
|
||||||
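
For example, a minimal sketch combining both clauses using the `CREATE RETENTION POLICY` syntax shown above; the retention policy and database names are hypothetical.

```sql
-- Hypothetical example: accept only points within 6 hours of now (past or future)
CREATE RETENTION POLICY "recent" ON "mydb" DURATION 30d REPLICATION 1 PAST LIMIT 6h FUTURE LIMIT 6h
```
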
##### `DEFAULT`
|
##### `DEFAULT`
|
||||||
|
|
||||||
Sets the new retention policy as the default retention policy for the database.
|
Sets the new retention policy as the default retention policy for the database.
|
||||||
|
|
|
@ -122,15 +122,15 @@ ALL ALTER ANY AS ASC BEGIN
|
||||||
BY CREATE CONTINUOUS DATABASE DATABASES DEFAULT
|
BY CREATE CONTINUOUS DATABASE DATABASES DEFAULT
|
||||||
DELETE DESC DESTINATIONS DIAGNOSTICS DISTINCT DROP
|
DELETE DESC DESTINATIONS DIAGNOSTICS DISTINCT DROP
|
||||||
DURATION END EVERY EXPLAIN FIELD FOR
|
DURATION END EVERY EXPLAIN FIELD FOR
|
||||||
FROM GRANT GRANTS GROUP GROUPS IN
|
FROM FUTURE GRANT GRANTS GROUP GROUPS
|
||||||
INF INSERT INTO KEY KEYS KILL
|
IN INF INSERT INTO KEY KEYS
|
||||||
LIMIT SHOW MEASUREMENT MEASUREMENTS NAME OFFSET
|
KILL LIMIT SHOW MEASUREMENT MEASUREMENTS NAME
|
||||||
ON ORDER PASSWORD POLICY POLICIES PRIVILEGES
|
OFFSET ON ORDER PASSWORD PAST POLICY
|
||||||
QUERIES QUERY READ REPLICATION RESAMPLE RETENTION
|
POLICIES PRIVILEGES QUERIES QUERY READ REPLICATION
|
||||||
REVOKE SELECT SERIES SET SHARD SHARDS
|
RESAMPLE RETENTION REVOKE SELECT SERIES SET
|
||||||
SLIMIT SOFFSET STATS SUBSCRIPTION SUBSCRIPTIONS TAG
|
SHARD SHARDS SLIMIT SOFFSET STATS SUBSCRIPTION
|
||||||
TO USER USERS VALUES WHERE WITH
|
SUBSCRIPTIONS TAG TO USER USERS VALUES
|
||||||
WRITE
|
WHERE WITH WRITE
|
||||||
```
|
```
|
||||||
|
|
||||||
If you use an InfluxQL keywords as an
|
If you use an InfluxQL keyword as an
|
||||||
|
@ -380,12 +380,14 @@ create_database_stmt = "CREATE DATABASE" db_name
|
||||||
[ retention_policy_duration ]
|
[ retention_policy_duration ]
|
||||||
[ retention_policy_replication ]
|
[ retention_policy_replication ]
|
||||||
[ retention_policy_shard_group_duration ]
|
[ retention_policy_shard_group_duration ]
|
||||||
|
[ retention_past_limit ]
|
||||||
|
[ retention_future_limit ]
|
||||||
[ retention_policy_name ]
|
[ retention_policy_name ]
|
||||||
] .
|
] .
|
||||||
```
|
```
|
||||||
|
|
||||||
{{% warn %}} Replication factors do not serve a purpose with single node instances.
|
> [!Warning]
|
||||||
{{% /warn %}}
|
> Replication factors do not serve a purpose with single node instances.
|
||||||
|
|
||||||
#### Examples
|
#### Examples
|
||||||
|
|
||||||
|
@ -393,11 +395,17 @@ create_database_stmt = "CREATE DATABASE" db_name
|
||||||
-- Create a database called foo
|
-- Create a database called foo
|
||||||
CREATE DATABASE "foo"
|
CREATE DATABASE "foo"
|
||||||
|
|
||||||
-- Create a database called bar with a new DEFAULT retention policy and specify the duration, replication, shard group duration, and name of that retention policy
|
-- Create a database called bar with a new DEFAULT retention policy and specify
|
||||||
|
-- the duration, replication, shard group duration, and name of that retention policy
|
||||||
CREATE DATABASE "bar" WITH DURATION 1d REPLICATION 1 SHARD DURATION 30m NAME "myrp"
|
CREATE DATABASE "bar" WITH DURATION 1d REPLICATION 1 SHARD DURATION 30m NAME "myrp"
|
||||||
|
|
||||||
-- Create a database called mydb with a new DEFAULT retention policy and specify the name of that retention policy
|
-- Create a database called mydb with a new DEFAULT retention policy and specify
|
||||||
|
-- the name of that retention policy
|
||||||
CREATE DATABASE "mydb" WITH NAME "myrp"
|
CREATE DATABASE "mydb" WITH NAME "myrp"
|
||||||
|
|
||||||
|
-- Create a database called bar with a new retention policy named "myrp", and
|
||||||
|
-- specify the duration, past and future limits, and name of that retention policy
|
||||||
|
CREATE DATABASE "bar" WITH DURATION 1d PAST LIMIT 6h FUTURE LIMIT 6h NAME "myrp"
|
||||||
```
|
```
|
||||||
|
|
||||||
### CREATE RETENTION POLICY
|
### CREATE RETENTION POLICY
|
||||||
|
@ -407,11 +415,13 @@ create_retention_policy_stmt = "CREATE RETENTION POLICY" policy_name on_clause
|
||||||
retention_policy_duration
|
retention_policy_duration
|
||||||
retention_policy_replication
|
retention_policy_replication
|
||||||
[ retention_policy_shard_group_duration ]
|
[ retention_policy_shard_group_duration ]
|
||||||
|
[ retention_past_limit ]
|
||||||
|
[ retention_future_limit ]
|
||||||
[ "DEFAULT" ] .
|
[ "DEFAULT" ] .
|
||||||
```
|
```
|
||||||
|
|
||||||
{{% warn %}} Replication factors do not serve a purpose with single node instances.
|
> [!Warning]
|
||||||
{{% /warn %}}
|
> Replication factors do not serve a purpose with single node instances.
|
||||||
|
|
||||||
#### Examples
|
#### Examples
|
||||||
|
|
||||||
|
@ -424,6 +434,9 @@ CREATE RETENTION POLICY "10m.events" ON "somedb" DURATION 60m REPLICATION 2 DEFA
|
||||||
|
|
||||||
-- Create a retention policy and specify the shard group duration.
|
-- Create a retention policy and specify the shard group duration.
|
||||||
CREATE RETENTION POLICY "10m.events" ON "somedb" DURATION 60m REPLICATION 2 SHARD DURATION 30m
|
CREATE RETENTION POLICY "10m.events" ON "somedb" DURATION 60m REPLICATION 2 SHARD DURATION 30m
|
||||||
|
|
||||||
|
-- Create a retention policy and specify past and future limits.
|
||||||
|
CREATE RETENTION POLICY "10m.events" ON "somedb" DURATION 12h PAST LIMIT 6h FUTURE LIMIT 6h
|
||||||
```
|
```
|
||||||
|
|
||||||
### CREATE SUBSCRIPTION
|
### CREATE SUBSCRIPTION
|
||||||
|
|
|
@ -10,9 +10,10 @@ menu:
|
||||||
|
|
||||||
Influx Inspect is an InfluxDB disk utility that can be used to:
|
Influx Inspect is an InfluxDB disk utility that can be used to:
|
||||||
|
|
||||||
* View detailed information about disk shards.
|
- View detailed information about disk shards.
|
||||||
* Export data from a shard to [InfluxDB line protocol](/enterprise_influxdb/v1/concepts/glossary/#influxdb-line-protocol) that can be inserted back into the database.
|
- Export data from a shard to [InfluxDB line protocol](/enterprise_influxdb/v1/concepts/glossary/#influxdb-line-protocol)
|
||||||
* Convert TSM index shards to TSI index shards.
|
that can be inserted back into the database.
|
||||||
|
- Convert TSM index shards to TSI index shards.
|
||||||
|
|
||||||
## `influx_inspect` utility
|
## `influx_inspect` utility
|
||||||
|
|
||||||
|
@ -38,8 +39,8 @@ The `influx_inspect` commands are summarized here, with links to detailed inform
|
||||||
- [`merge-schema`](#merge-schema): Merges a set of schema files from the `check-schema` command.
|
- [`merge-schema`](#merge-schema): Merges a set of schema files from the `check-schema` command.
|
||||||
- [`report`](#report): Displays a shard level report.
|
- [`report`](#report): Displays a shard level report.
|
||||||
- [`report-db`](#report-db): Estimates InfluxDB Cloud (TSM) cardinality for a database.
|
- [`report-db`](#report-db): Estimates InfluxDB Cloud (TSM) cardinality for a database.
|
||||||
- [`report-disk`](#report-disk): Reports disk usage by shard and measurement.
|
- [`report-disk`](#report-disk): Reports disk usage by shards and measurements.
|
||||||
- [`reporttsi`](#reporttsi): Reports on cardinality for measurements and shards.
|
- [`reporttsi`](#reporttsi): Reports on cardinality for shards and measurements.
|
||||||
- [`verify`](#verify): Verifies the integrity of TSM files.
|
- [`verify`](#verify): Verifies the integrity of TSM files.
|
||||||
- [`verify-seriesfile`](#verify-seriesfile): Verifies the integrity of series files.
|
- [`verify-seriesfile`](#verify-seriesfile): Verifies the integrity of series files.
|
||||||
- [`verify-tombstone`](#verify-tombstone): Verifies the integrity of tombstones.
|
- [`verify-tombstone`](#verify-tombstone): Verifies the integrity of tombstones.
|
||||||
|
@ -50,7 +51,9 @@ Builds TSI (Time Series Index) disk-based shard index files and associated serie
|
||||||
The index is written to a temporary location until complete and then moved to a permanent location.
|
The index is written to a temporary location until complete and then moved to a permanent location.
|
||||||
If an error occurs, then this operation will fall back to the original in-memory index.
|
If an error occurs, then this operation will fall back to the original in-memory index.
|
||||||
|
|
||||||
> ***Note:*** **For offline conversion only.**
|
> [!Note]
|
||||||
|
> #### For offline conversion only
|
||||||
|
>
|
||||||
> When TSI is enabled, new shards use the TSI indexes.
|
> When TSI is enabled, new shards use the TSI indexes.
|
||||||
> Existing shards continue as TSM-based shards until
|
> Existing shards continue as TSM-based shards until
|
||||||
> converted offline.
|
> converted offline.
|
||||||
|
@ -60,7 +63,9 @@ If an error occurs, then this operation will fall back to the original in-memory
|
||||||
```
|
```
|
||||||
influx_inspect buildtsi -datadir <data_dir> -waldir <wal_dir> [ options ]
|
influx_inspect buildtsi -datadir <data_dir> -waldir <wal_dir> [ options ]
|
||||||
```
|
```
|
||||||
> **Note:** Use the `buildtsi` command with the user account that you are going to run the database as,
|
|
||||||
|
> [!Note]
|
||||||
|
> Use the `buildtsi` command with the user account that you are going to run the database as,
|
||||||
> or ensure that the permissions match after running the command.
|
> or ensure that the permissions match after running the command.
|
||||||
|
|
||||||
#### Options
|
#### Options
|
||||||
|
@ -71,9 +76,8 @@ Optional arguments are in brackets.
|
||||||
|
|
||||||
The size of the batches written to the index. Default value is `10000`.
|
The size of the batches written to the index. Default value is `10000`.
|
||||||
|
|
||||||
{{% warn %}}
|
> [!Warning]
|
||||||
**Warning:** Setting this value can have adverse effects on performance and heap size.
|
> Setting this value can have adverse effects on performance and heap size.
|
||||||
{{% /warn %}}
|
|
||||||
|
|
||||||
##### `[ -compact-series-file ]`
|
##### `[ -compact-series-file ]`
|
||||||
|
|
||||||
|
@ -90,10 +94,11 @@ The name of the database.
|
||||||
|
|
||||||
##### `-datadir <data_dir>`
|
##### `-datadir <data_dir>`
|
||||||
|
|
||||||
The path to the `data` directory.
|
The path to the [`data` directory](/enterprise_influxdb/v1/concepts/file-system-layout/#data-directory).
|
||||||
|
|
||||||
Default value is `$HOME/.influxdb/data`.
|
Default value is `$HOME/.influxdb/data`.
|
||||||
See the [file system layout](/enterprise_influxdb/v1/concepts/file-system-layout/) for InfluxDB on your system.
|
See the [file system layout](/enterprise_influxdb/v1/concepts/file-system-layout/#file-system-layout)
|
||||||
|
for InfluxDB on your system.
|
||||||
|
|
||||||
##### `[ -max-cache-size ]`
|
##### `[ -max-cache-size ]`
|
||||||
|
|
||||||
|
@ -120,31 +125,32 @@ Flag to enable output in verbose mode.
|
||||||
|
|
||||||
##### `-waldir <wal_dir>`
|
##### `-waldir <wal_dir>`
|
||||||
|
|
||||||
The directory for the WAL (Write Ahead Log) files.
|
The directory for the [WAL (Write Ahead Log)](/enterprise_influxdb/v1/concepts/file-system-layout/#wal-directory) files.
|
||||||
|
|
||||||
Default value is `$HOME/.influxdb/wal`.
|
Default value is `$HOME/.influxdb/wal`.
|
||||||
See the [file system layout](/enterprise_influxdb/v1/concepts/file-system-layout/) for InfluxDB on your system.
|
See the [file system layout](/enterprise_influxdb/v1/concepts/file-system-layout/#file-system-layout)
|
||||||
|
for InfluxDB on your system.
|
||||||
|
|
||||||
#### Examples
|
#### Examples
|
||||||
|
|
||||||
##### Converting all shards on a node
|
##### Converting all shards on a node
|
||||||
|
|
||||||
```
|
```
|
||||||
$ influx_inspect buildtsi -datadir /var/lib/influxdb/data -waldir /var/lib/influxdb/wal
|
influx_inspect buildtsi -datadir ~/.influxdb/data -waldir ~/.influxdb/wal
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Converting all shards for a database
|
##### Converting all shards for a database
|
||||||
|
|
||||||
```
|
```
|
||||||
$ influx_inspect buildtsi -database mydb datadir /var/lib/influxdb/data -waldir /var/lib/influxdb/wal
|
influx_inspect buildtsi -database mydb -datadir ~/.influxdb/data -waldir ~/.influxdb/wal
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Converting a specific shard
|
##### Converting a specific shard
|
||||||
|
|
||||||
```
|
```
|
||||||
$ influx_inspect buildtsi -database stress -shard 1 datadir /var/lib/influxdb/data -waldir /var/lib/influxdb/wal
|
influx_inspect buildtsi -database stress -shard 1 -datadir ~/.influxdb/data -waldir ~/.influxdb/wal
|
||||||
```
|
```
|
||||||
|
|
||||||
### `check-schema`
|
### `check-schema`
|
||||||
|
@ -161,7 +167,7 @@ influx_inspect check-schema [ options ]
|
||||||
|
|
||||||
##### [ `-conflicts-file <string>` ]
|
##### [ `-conflicts-file <string>` ]
|
||||||
|
|
||||||
Filename conflicts data should be written to. Default is `conflicts.json`.
|
The filename where conflicts data should be written. Default is `conflicts.json`.
|
||||||
|
|
||||||
##### [ `-path <string>` ]
|
##### [ `-path <string>` ]
|
||||||
|
|
||||||
|
@ -170,17 +176,16 @@ working directory `.`.
|
||||||
|
|
||||||
##### [ `-schema-file <string>` ]
|
##### [ `-schema-file <string>` ]
|
||||||
|
|
||||||
Filename schema data should be written to. Default is `schema.json`.
|
The filename where schema data should be written. Default is `schema.json`.
|
||||||
|
|
||||||
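
A minimal usage sketch, assuming the defaults described above (the current working directory is scanned); only flags documented for this command are used.

```bash
# Hypothetical example: scan the working directory, write schema data to
# schema.json and any field type conflicts to conflicts.json
influx_inspect check-schema -schema-file schema.json -conflicts-file conflicts.json
```
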
### `deletetsm`
|
### `deletetsm`
|
||||||
|
|
||||||
Use `deletetsm -measurement` to delete a measurement in a raw TSM file (from specified shards).
|
Use `deletetsm -measurement` to delete a measurement in a raw TSM file (from specified shards).
|
||||||
Use `deletetsm -sanitize` to remove all tag and field keys containing non-printable Unicode characters in a raw TSM file (from specified shards).
|
Use `deletetsm -sanitize` to remove all tag and field keys containing non-printable Unicode characters in a raw TSM file (from specified shards).
|
||||||
|
|
||||||
{{% warn %}}
|
> [!Warning]
|
||||||
**Warning:** Use the `deletetsm` command only when your InfluxDB instance is
|
> Use the `deletetsm` command only when your InfluxDB instance is
|
||||||
offline (`influxd` service is not running).
|
> offline (`influxd` service is not running).
|
||||||
{{% /warn %}}
|
|
||||||
|
|
||||||
#### Syntax
|
#### Syntax
|
||||||
|
|
||||||
|
@ -244,7 +249,7 @@ Optional arguments are in brackets.
|
||||||
|
|
||||||
##### `-series-file <series_path>`
|
##### `-series-file <series_path>`
|
||||||
|
|
||||||
Path to the `_series` directory under the database `data` directory. Required.
|
The path to the `_series` directory under the database `data` directory. Required.
|
||||||
|
|
||||||
##### [ `-series` ]
|
##### [ `-series` ]
|
||||||
|
|
||||||
|
@ -283,18 +288,18 @@ Filter data by tag value regular expression.
|
||||||
##### Specifying paths to the `_series` and `index` directories
|
##### Specifying paths to the `_series` and `index` directories
|
||||||
|
|
||||||
```
|
```
|
||||||
$ influx_inspect dumptsi -series-file /path/to/db/_series /path/to/index
|
influx_inspect dumptsi -series-file /path/to/db/_series /path/to/index
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Specifying paths to the `_series` directory and an `index` file
|
##### Specifying paths to the `_series` directory and an `index` file
|
||||||
|
|
||||||
```
|
```
|
||||||
$ influx_inspect dumptsi -series-file /path/to/db/_series /path/to/index/file0
|
influx_inspect dumptsi -series-file /path/to/db/_series /path/to/index/file0
|
||||||
```
|
```
|
||||||
##### Specifying paths to the `_series` directory and multiple `index` files
|
##### Specifying paths to the `_series` directory and multiple `index` files
|
||||||
|
|
||||||
```
|
```
|
||||||
$ influx_inspect dumptsi -series-file /path/to/db/_series /path/to/index/file0 /path/to/index/file1 ...
|
influx_inspect dumptsi -series-file /path/to/db/_series /path/to/index/file0 /path/to/index/file1 ...
|
||||||
```
|
```
|
||||||
|
|
||||||
### `dumptsm`
|
### `dumptsm`
|
||||||
|
@ -309,7 +314,7 @@ influx_inspect dumptsm [ options ] <path>
|
||||||
|
|
||||||
##### `<path>`
|
##### `<path>`
|
||||||
|
|
||||||
Path to the `.tsm` file, located by default in the `data` directory.
|
The path to the `.tsm` file, located by default in the `data` directory.
|
||||||
|
|
||||||
#### Options
|
#### Options
|
||||||
|
|
||||||
|
@ -317,17 +322,17 @@ Optional arguments are in brackets.
|
||||||
|
|
||||||
##### [ `-index` ]
|
##### [ `-index` ]
|
||||||
|
|
||||||
Flag to dump raw index data.
|
The flag to dump raw index data.
|
||||||
Default value is `false`.
|
Default value is `false`.
|
||||||
|
|
||||||
##### [ `-blocks` ]
|
##### [ `-blocks` ]
|
||||||
|
|
||||||
Flag to dump raw block data.
|
The flag to dump raw block data.
|
||||||
Default value is `false`.
|
Default value is `false`.
|
||||||
|
|
||||||
##### [ `-all` ]
|
##### [ `-all` ]
|
||||||
|
|
||||||
Flag to dump all data. Caution: This may print a lot of information.
|
The flag to dump all data. Caution: This may print a lot of information.
|
||||||
Default value is `false`.
|
Default value is `false`.
|
||||||
|
|
||||||
##### [ `-filter-key <key_name>` ]
|
##### [ `-filter-key <key_name>` ]
|
||||||
|
@ -351,14 +356,14 @@ Optional arguments are in brackets.
|
||||||
|
|
||||||
##### [ `-show-duplicates` ]
|
##### [ `-show-duplicates` ]
|
||||||
|
|
||||||
Flag to show keys which have duplicate or out-of-order timestamps.
|
The flag to show keys which have duplicate or out-of-order timestamps.
|
||||||
If a user writes points with timestamps set by the client, then multiple points with the same timestamp (or with time-descending timestamps) can be written.
|
If a user writes points with timestamps set by the client, then multiple points with the same timestamp (or with time-descending timestamps) can be written.
|
||||||
|
|
||||||
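
A usage sketch with a hypothetical shard path; the `-all` flag is described above.

```bash
# Hypothetical example: dump index and block data for a single TSM file
influx_inspect dumptsm -all ~/.influxdb/data/mydb/autogen/1/000000001-000000001.tsm
```
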
### `export`
|
### `export`
|
||||||
|
|
||||||
Exports all TSM files in InfluxDB line protocol data format.
|
Exports all TSM files or a single TSM file in InfluxDB line protocol data format.
|
||||||
This output file can be imported using the
|
The output file can be imported using the
|
||||||
[influx](/enterprise_influxdb/v1/tools/influx-cli/use-influx/#import-data-from-a-file-with-import) command.
|
[influx](/enterprise_influxdb/v1/tools/influx-cli/use-influx-cli) command.
|
||||||
|
|
||||||
#### Syntax
|
#### Syntax
|
||||||
|
|
||||||
|
@ -382,10 +387,11 @@ Default value is `""`.
|
||||||
|
|
||||||
##### `-datadir <data_dir>`
|
##### `-datadir <data_dir>`
|
||||||
|
|
||||||
The path to the `data` directory.
|
The path to the [`data` directory](/enterprise_influxdb/v1/concepts/file-system-layout/#data-directory).
|
||||||
|
|
||||||
Default value is `$HOME/.influxdb/data`.
|
Default value is `$HOME/.influxdb/data`.
|
||||||
See the [file system layout](/enterprise_influxdb/v1/concepts/file-system-layout/) for InfluxDB on your system.
|
See the [file system layout](/enterprise_influxdb/v1/concepts/file-system-layout/#file-system-layout)
|
||||||
|
for InfluxDB on your system.
|
||||||
|
|
||||||
##### [ `-end <timestamp>` ]
|
##### [ `-end <timestamp>` ]
|
||||||
|
|
||||||
|
@ -408,15 +414,20 @@ YYYY-MM-DDTHH:MM:SS-08:00
|
||||||
YYYY-MM-DDTHH:MM:SS+07:00
|
YYYY-MM-DDTHH:MM:SS+07:00
|
||||||
```
|
```
|
||||||
|
|
||||||
> **Note:** With offsets, avoid replacing the + or - sign with a Z. It may cause an error or print Z (ISO 8601 behavior) instead of the time zone offset.
|
> [!Note]
|
||||||
|
> With offsets, avoid replacing the + or - sign with a Z. It may cause an error
|
||||||
|
> or print Z (ISO 8601 behavior) instead of the time zone offset.
|
||||||
|
|
||||||
##### [ `-lponly` ]
|
##### [ `-lponly` ]
|
||||||
|
|
||||||
Output data in line protocol format only.
|
Output data in line protocol format only.
|
||||||
Does not output data definition language (DDL) statements (such as `CREATE DATABASE`) or DML context metadata (such as `# CONTEXT-DATABASE`).
|
Does not output data definition language (DDL) statements (such as `CREATE DATABASE`)
|
||||||
|
or DML context metadata (such as `# CONTEXT-DATABASE`).
|
||||||
|
|
||||||
##### [ `-out <export_dir>` ]
|
##### [ `-out <export_dir>` or `-out -`]
|
||||||
|
|
||||||
|
Location to export shard data. Specify a path for the export file, or add a hyphen after the flag (`-out -`) to export shard data to standard out (`stdout`) and send status messages to standard error (`stderr`).
|
||||||
|
|
||||||
The location for the export file.
|
|
||||||
Default value is `$HOME/.influxdb/export`.
|
Default value is `$HOME/.influxdb/export`.
|
||||||
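
A sketch of the stdout behavior described above; `DATABASE_NAME` is a placeholder as in the other examples, and status messages still print to `stderr`.

```bash
# Hypothetical example: stream exported line protocol to stdout and redirect it to a file
influx_inspect export -database DATABASE_NAME -out - > export.lp
```
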
|
|
||||||
##### [ `-retention <rp_name> ` ]
|
##### [ `-retention <rp_name> ` ]
|
||||||
|
@ -433,7 +444,13 @@ The timestamp string must be in [RFC3339 format](https://tools.ietf.org/html/rfc
|
||||||
Path to the [WAL](/enterprise_influxdb/v1/concepts/glossary/#wal-write-ahead-log) directory.
|
Path to the [WAL](/enterprise_influxdb/v1/concepts/glossary/#wal-write-ahead-log) directory.
|
||||||
|
|
||||||
Default value is `$HOME/.influxdb/wal`.
|
Default value is `$HOME/.influxdb/wal`.
|
||||||
See the [file system layout](/enterprise_influxdb/v1/concepts/file-system-layout/) for InfluxDB on your system.
|
See the [file system layout](/enterprise_influxdb/v1/concepts/file-system-layout/#file-system-layout)
|
||||||
|
for InfluxDB on your system.
|
||||||
|
|
||||||
|
##### [ `-tsmfile <tsm_file>` ]
|
||||||
|
|
||||||
|
Path to a single tsm file to export. This requires both `-database` and
|
||||||
|
`-retention` to be specified.
|
||||||
|
|
||||||
#### Examples
|
#### Examples
|
||||||
|
|
||||||
|
@ -449,6 +466,15 @@ influx_inspect export -compress
|
||||||
influx_inspect export -database DATABASE_NAME -retention RETENTION_POLICY
|
influx_inspect export -database DATABASE_NAME -retention RETENTION_POLICY
|
||||||
```
|
```
|
||||||
|
|
||||||
|
##### Export data from a single TSM file
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influx_inspect export \
|
||||||
|
-database DATABASE_NAME \
|
||||||
|
-retention RETENTION_POLICY \
|
||||||
|
-tsmfile TSM_FILE_NAME
|
||||||
|
```
|
||||||
|
|
||||||
##### Output file
|
##### Output file
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
@ -522,7 +548,7 @@ Note: This can use a lot of memory.
|
||||||
|
|
||||||
Use the `report-db` command to estimate the series cardinality of data in a
|
Use the `report-db` command to estimate the series cardinality of data in a
|
||||||
database when migrated to InfluxDB Cloud (TSM). InfluxDB Cloud (TSM) includes
|
database when migrated to InfluxDB Cloud (TSM). InfluxDB Cloud (TSM) includes
|
||||||
field keys in the series key so unique field keys affect the total cardinality.
|
field keys in the series key so unique field keys affect the total cardinality.
|
||||||
The total series cardinality of data in a InfluxDB 1.x database may differ from
|
The total series cardinality of data in an InfluxDB 1.x database may differ
|
||||||
from the series cardinality of that same data when migrated to InfluxDB Cloud (TSM).
|
from the series cardinality of that same data when migrated to InfluxDB Cloud (TSM).
|
||||||
|
|
||||||
|
@ -562,33 +588,87 @@ Specify the cardinality "rollup" level--the granularity of the cardinality repor
|
||||||
|
|
||||||
### `report-disk`
|
### `report-disk`
|
||||||
|
|
||||||
Use the `report-disk` command to review TSM file disk usage per shard and measurement in a specified directory. Useful for capacity planning and identifying which measurement or shard is using the most disk space. The default directory path `~/.influxdb/data/`.
|
Use the `report-disk` command to review disk usage by shards and measurements for TSM files in a specified directory. Useful for determining disk usage for capacity planning and identifying which measurements or shards are using the most space.
|
||||||
|
|
||||||
Calculates the total disk size by database (`db`), retention policy (`rp`), shard (`shard`), tsm file (`tsm_file`), and measurement (`measurement`).
|
Calculates the total disk size (`total_tsm_size`) in bytes, the number of shards (`shards`), and the number of tsm files (`tsm_files`) for the specified directory. Also calculates the disk size (`size`) and number of tsm files (`tsm_files`) for each shard. Use the `-detailed` flag to report disk usage (`size`) by database (`db`), retention policy (`rp`), and measurement (`measurement`).
|
||||||
|
|
||||||
#### Syntax
|
#### Syntax
|
||||||
|
|
||||||
```
|
```
|
||||||
influx_inspect report-disk [ options ] <data_dir>
|
influx_inspect report-disk [ options ] <path>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
##### `<path>`
|
||||||
|
|
||||||
|
Path to the directory with `.tsm` file(s) to report disk usage for. Default location is `$HOME/.influxdb/data`.
|
||||||
|
|
||||||
|
When specifying the path, wildcards (`*`) can replace one or more characters.
|
||||||
|
|
||||||
#### Options
|
#### Options
|
||||||
|
|
||||||
Optional arguments are in brackets.
|
Optional arguments are in brackets.
|
||||||
|
|
||||||
##### [ `-detailed` ]
|
##### [ `-detailed` ]
|
||||||
|
|
||||||
Report disk usage by measurement.
|
Include this flag to report disk usage by measurement.
|
||||||
|
|
||||||
|
#### Examples
|
||||||
|
|
||||||
|
##### Report on disk size by shard
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influx_inspect report-disk ~/.influxdb/data/
|
||||||
|
```
|
||||||
|
|
||||||
|
##### Output
|
||||||
|
|
||||||
|
```bash
|
||||||
|
{
|
||||||
|
"Summary": {"shards": 2, "tsm_files": 8, "total_tsm_size": 149834637 },
|
||||||
|
"Shard": [
|
||||||
|
{"db": "stress", "rp": "autogen", "shard": "3", "tsm_files": 7, "size": 147022321},
|
||||||
|
{"db": "telegraf", "rp": "autogen", "shard": "2", "tsm_files": 1, "size": 2812316}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
##### Report on disk size by measurement
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influx_inspect report-disk -detailed ~/.influxdb/data/
|
||||||
|
```
|
||||||
|
|
||||||
|
##### Output
|
||||||
|
|
||||||
|
```bash
|
||||||
|
{
|
||||||
|
"Summary": {"shards": 2, "tsm_files": 8, "total_tsm_size": 149834637 },
|
||||||
|
"Shard": [
|
||||||
|
{"db": "stress", "rp": "autogen", "shard": "3", "tsm_files": 7, "size": 147022321},
|
||||||
|
{"db": "telegraf", "rp": "autogen", "shard": "2", "tsm_files": 1, "size": 2812316}
|
||||||
|
],
|
||||||
|
"Measurement": [
|
||||||
|
{"db": "stress", "rp": "autogen", "measurement": "ctr", "size": 107900000},
|
||||||
|
{"db": "telegraf", "rp": "autogen", "measurement": "cpu", "size": 1784211},
|
||||||
|
{"db": "telegraf", "rp": "autogen", "measurement": "disk", "size": 374121},
|
||||||
|
{"db": "telegraf", "rp": "autogen", "measurement": "diskio", "size": 254453},
|
||||||
|
{"db": "telegraf", "rp": "autogen", "measurement": "mem", "size": 171120},
|
||||||
|
{"db": "telegraf", "rp": "autogen", "measurement": "processes", "size": 59691},
|
||||||
|
{"db": "telegraf", "rp": "autogen", "measurement": "swap", "size": 42310},
|
||||||
|
{"db": "telegraf", "rp": "autogen", "measurement": "system", "size": 59561}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
### `reporttsi`
|
### `reporttsi`
|
||||||
|
|
||||||
The report does the following:
|
The report does the following:
|
||||||
|
|
||||||
* Calculates the total exact series cardinality in the database.
|
- Calculates the total exact series cardinality in the database.
|
||||||
* Segments that cardinality by measurement, and emits those cardinality values.
|
- Segments that cardinality by measurement, and emits those cardinality values.
|
||||||
* Emits total exact cardinality for each shard in the database.
|
- Emits total exact cardinality for each shard in the database.
|
||||||
* Segments for each shard the exact cardinality for each measurement in the shard.
|
- Segments for each shard the exact cardinality for each measurement in the shard.
|
||||||
* Optionally limits the results in each shard to the "top n".
|
- Optionally limits the results in each shard to the "top n".
|
||||||
|
|
||||||
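
As a sketch of a typical invocation, assuming the `-db-path` and `-top` options and an example database path:

```bash
# Report exact series cardinality for one database, limiting each shard's
# per-measurement output to the 10 largest measurements
# (-db-path and -top are assumed option names; the path is an example)
influx_inspect reporttsi -db-path ~/.influxdb/data/telegraf -top 10
```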
The `reporttsi` command is primarily useful when there has been a change in cardinality
|
The `reporttsi` command is primarily useful when there has been a change in cardinality
|
||||||
and it's not clear which measurement is responsible for this change, and further, _when_
|
and it's not clear which measurement is responsible for this change, and further, _when_
|
||||||
|
@ -703,7 +783,8 @@ Enables very verbose logging. Displays progress for every series key and time ra
|
||||||
|
|
||||||
Enables very very verbose logging. Displays progress for every series key and time range in the tombstone files. Timestamps are displayed in [RFC3339 format](https://tools.ietf.org/html/rfc3339) with nanosecond precision.
|
Enables very very verbose logging. Displays progress for every series key and time range in the tombstone files. Timestamps are displayed in [RFC3339 format](https://tools.ietf.org/html/rfc3339) with nanosecond precision.
|
||||||
|
|
||||||
> **Note on verbose logging:** Higher verbosity levels override lower levels.
|
> [!Note]
|
||||||
|
> Higher verbosity levels override lower levels.
|
||||||
|
|
||||||
## Caveats
|
## Caveats
|
||||||
|
|
||||||
|
|
|
@ -44,14 +44,16 @@ ID Database Retention Policy Desired Replicas Shard Group Start
|
||||||
{{% /expand %}}
|
{{% /expand %}}
|
||||||
{{< /expand-wrapper >}}
|
{{< /expand-wrapper >}}
|
||||||
|
|
||||||
|
You can also use the `-m` flag to output "inconsistent" shards, which are shards
|
||||||
|
that are either in metadata but not on disk or on disk but not in metadata.
|
||||||
|
|
||||||
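
As a minimal sketch, assuming this page documents the `influxd-ctl show-shards` command, the flag is passed like any other option:

```bash
# List only shards that are inconsistent between the meta store and disk
# (command name assumed from the surrounding context)
influxd-ctl show-shards -m
```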
## Flags
|
## Flags
|
||||||
|
|
||||||
| Flag | Description |
|
| Flag | Description |
|
||||||
| :--- | :-------------------------------- |
|
| :--- | :-------------------------------- |
|
||||||
| `-v` | Return detailed shard information |
|
| `-v` | Return detailed shard information |
|
||||||
|
| `-m` | Return inconsistent shards |
|
||||||
|
|
||||||
{{% caption %}}
|
{{% caption %}}
|
||||||
_Also see [`influxd-ctl` global flags](/enterprise_influxdb/v1/tools/influxd-ctl/#influxd-ctl-global-flags)._
|
_Also see [`influxd-ctl` global flags](/enterprise_influxdb/v1/tools/influxd-ctl/#influxd-ctl-global-flags)._
|
||||||
{{% /caption %}}
|
{{% /caption %}}
|
||||||
|
|
||||||
## Examples
|
|
||||||
|
|
|
@ -12,6 +12,45 @@ alt_links:
|
||||||
v2: /influxdb/v2/reference/release-notes/influxdb/
|
v2: /influxdb/v2/reference/release-notes/influxdb/
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## v1.12.0 {date="2025-04-15"}
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- Add additional log output when using
|
||||||
|
[`influx_inspect buildtsi`](/influxdb/v1/tools/influx_inspect/#buildtsi) to
|
||||||
|
rebuild the TSI index.
|
||||||
|
- Use [`influx_inspect export`](/influxdb/v1/tools/influx_inspect/#export) with
|
||||||
|
[`-tsmfile` option](/influxdb/v1/tools/influx_inspect/#--tsmfile-tsm_file-) to
|
||||||
|
export a single TSM file.
|
||||||
|
- Add `fluxQueryRespBytes` metric to the `/debug/vars` metrics endpoint.
|
||||||
|
- Add [`aggressive-points-per-block` configuration option](/influxdb/v1/administration/config/#aggressive-points-per-block)
|
||||||
|
to prevent TSM files from being left only partially compacted.
|
||||||
|
- Improve error handling.
|
||||||
|
- InfluxQL updates:
|
||||||
|
- Delete series by retention policy.
|
||||||
|
- Allow retention policies to discard writes that fall within their range, but
|
||||||
|
outside of [`FUTURE LIMIT`](/influxdb/v1/query_language/manage-database/#future-limit)
|
||||||
|
and [`PAST LIMIT`](/influxdb/v1/query_language/manage-database/#past-limit).
|
||||||
|
|
||||||
|
## Bug fixes
|
||||||
|
|
||||||
|
- Log rejected writes to subscriptions.
|
||||||
|
- Update `xxhash` and avoid `stringtoslicebyte` in the cache.
|
||||||
|
- Prevent a panic when a shard group has no shards.
|
||||||
|
- Fix file handle leaks in `Compactor.write`.
|
||||||
|
- Ensure fields in memory match the fields on disk.
|
||||||
|
- Ensure temporary files are removed after failed compactions.
|
||||||
|
- Do not panic on invalid multiple subqueries.
|
||||||
|
|
||||||
|
## Other
|
||||||
|
|
||||||
|
- Update Go to 1.23.5.
|
||||||
|
- Upgrade Flux to v0.196.1.
|
||||||
|
- Upgrade InfluxQL to v1.4.1.
|
||||||
|
- Various other dependency updates.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## v1.11.8 {date="2024-11-15"}
|
## v1.11.8 {date="2024-11-15"}
|
||||||
|
|
||||||
### Bug Fixes
|
### Bug Fixes
|
||||||
|
@ -20,6 +59,8 @@ alt_links:
|
||||||
compatibility API](/influxdb/v1/tools/api/#apiv2delete-http-endpoint) before
|
compatibility API](/influxdb/v1/tools/api/#apiv2delete-http-endpoint) before
|
||||||
string comparisons (e.g. to allow special characters in measurement names).
|
string comparisons (e.g. to allow special characters in measurement names).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## v1.11.7 {date="2024-10-10"}
|
## v1.11.7 {date="2024-10-10"}
|
||||||
|
|
||||||
This release represents the first public release of InfluxDB OSS v1 since 2021
|
This release represents the first public release of InfluxDB OSS v1 since 2021
|
||||||
|
@ -72,17 +113,17 @@ All official build packages are for 64-bit architectures.
|
||||||
and [`influx_inspect merge-schema`](/influxdb/v1/tools/influx_inspect/#merge-schema)
|
and [`influx_inspect merge-schema`](/influxdb/v1/tools/influx_inspect/#merge-schema)
|
||||||
commands to check for type conflicts between shards.
|
commands to check for type conflicts between shards.
|
||||||
- **New configuration options:**
|
- **New configuration options:**
|
||||||
- Add [`total-buffer-bytes`](/influxdb/v1/administration/config/#total-buffer-bytes--0)
|
- Add [`total-buffer-bytes`](/influxdb/v1/administration/config/#total-buffer-bytes)
|
||||||
configuration option to set the total number of bytes to allocate to
|
configuration option to set the total number of bytes to allocate to
|
||||||
subscription buffers.
|
subscription buffers.
|
||||||
- Add [`termination-query-log`](/influxdb/v1/administration/config/#termination-query-log--false)
|
- Add [`termination-query-log`](/influxdb/v1/administration/config/#termination-query-log)
|
||||||
configuration option to enable dumping running queries to log on `SIGTERM`.
|
configuration option to enable dumping running queries to log on `SIGTERM`.
|
||||||
- Add [`max-concurrent-deletes`](/influxdb/v1/administration/config/#max-concurrent-deletes--1)
|
- Add [`max-concurrent-deletes`](/influxdb/v1/administration/config/#max-concurrent-deletes)
|
||||||
configuration option to set delete concurrency.
|
configuration option to set delete concurrency.
|
||||||
- Add [Flux query configuration settings](/influxdb/v1/administration/config/#flux-query-management-settings).
|
- Add [Flux query configuration settings](/influxdb/v1/administration/config/#flux-query-management-settings).
|
||||||
- Add [`compact-series-file`](/influxdb/v1/administration/config/#compact-series-file--false)
|
- Add [`compact-series-file`](/influxdb/v1/administration/config/#compact-series-file)
|
||||||
configuration option to enable or disable series file compaction on startup.
|
configuration option to enable or disable series file compaction on startup.
|
||||||
- Add [`prom-read-auth-enabled` configuration option](/influxdb/v1/administration/config/#prom-read-auth-enabled--false)
|
- Add [`prom-read-auth-enabled` configuration option](/influxdb/v1/administration/config/#prom-read-auth-enabled)
|
||||||
to authenticate Prometheus remote read.
|
to authenticate Prometheus remote read.
|
||||||
- **Flux improvements:**
|
- **Flux improvements:**
|
||||||
- Upgrade Flux to v0.194.5.
|
- Upgrade Flux to v0.194.5.
|
||||||
|
@ -243,7 +284,7 @@ This release is for InfluxDB Enterprise 1.8.6 customers only. No OSS-specific ch
|
||||||
|
|
||||||
### Bug fixes
|
### Bug fixes
|
||||||
|
|
||||||
- Update meta queries (for example, SHOW TAG VALUES, SHOW TAG KEYS, SHOW SERIES CARDINALITY, SHOW MEASUREMENT CARDINALITY, and SHOW MEASUREMENTS) to check the query context when possible to respect timeout values set in the [`query-timeout` configuration parameter](/influxdb/v1/administration/config/#query-timeout--0s). Note, meta queries will check the context less frequently than regular queries, which use iterators, because meta queries return data in batches.
|
- Update meta queries (for example, SHOW TAG VALUES, SHOW TAG KEYS, SHOW SERIES CARDINALITY, SHOW MEASUREMENT CARDINALITY, and SHOW MEASUREMENTS) to check the query context when possible to respect timeout values set in the [`query-timeout` configuration parameter](/influxdb/v1/administration/config/#query-timeout). Note, meta queries will check the context less frequently than regular queries, which use iterators, because meta queries return data in batches.
|
||||||
- Previously, successful writes were incorrectly incrementing the `WriteErr` statistics. Now, successful writes correctly increment the `writeOK` statistics.
|
- Previously, successful writes were incorrectly incrementing the `WriteErr` statistics. Now, successful writes correctly increment the `writeOK` statistics.
|
||||||
- Correct JSON marshalling error format.
|
- Correct JSON marshalling error format.
|
||||||
- Previously, a GROUP BY query with an offset that caused an interval to cross a daylight savings change inserted an extra output row off by one hour. Now, the correct GROUP BY interval start time is set before the time zone offset is calculated.
|
- Previously, a GROUP BY query with an offset that caused an interval to cross a daylight savings change inserted an extra output row off by one hour. Now, the correct GROUP BY interval start time is set before the time zone offset is calculated.
|
||||||
|
@ -326,19 +367,19 @@ features, performance improvements, and bug fixes below.
|
||||||
|
|
||||||
This release updates support for the Flux language and queries. To learn about Flux design principles and see how to get started with Flux, see [Introduction to Flux](/influxdb/v1/flux/).
|
This release updates support for the Flux language and queries. To learn about Flux design principles and see how to get started with Flux, see [Introduction to Flux](/influxdb/v1/flux/).
|
||||||
|
|
||||||
* Use the new [`influx -type=flux`](/influxdb/v1/tools/influx-cli/#flags) option to enable the Flux REPL shell for creating Flux queries.
|
- Use the new [`influx -type=flux`](/influxdb/v1/tools/influx-cli/#flags) option to enable the Flux REPL shell for creating Flux queries.
|
||||||
|
|
||||||
* Flux v0.65 includes the following capabilities:
|
- Flux v0.65 includes the following capabilities:
|
||||||
- Join data residing in multiple measurements, buckets, or data sources
|
- Join data residing in multiple measurements, buckets, or data sources
|
||||||
- Perform mathematical operations using data gathered across measurements/buckets
|
- Perform mathematical operations using data gathered across measurements/buckets
|
||||||
- Manipulate Strings through an extensive library of string related functions
|
- Manipulate Strings through an extensive library of string related functions
|
||||||
- Shape data through `pivot()` and other functions
|
- Shape data through `pivot()` and other functions
|
||||||
- Group based on any data column: tags, fields, etc.
|
- Group based on any data column: tags, fields, etc.
|
||||||
- Window and aggregate based on calendar months, years
|
- Window and aggregate based on calendar months, years
|
||||||
- Join data across Influx and non-Influx sources
|
- Join data across Influx and non-Influx sources
|
||||||
- Cast booleans to integers
|
- Cast booleans to integers
|
||||||
- Query geo-temporal data (experimental)
|
- Query geo-temporal data (experimental)
|
||||||
- Many additional functions for working with data
|
- Many additional functions for working with data
|
||||||
|
|
||||||
> We're evaluating the need for Flux query management controls equivalent to existing InfluxQL [query management controls](/influxdb/v1/troubleshooting/query_management/#configuration-settings-for-query-management) based on your feedback. Please join the discussion on [InfluxCommunity](https://community.influxdata.com/), [Slack](https://influxcommunity.slack.com/), or [GitHub](https://github.com/influxdata/flux). InfluxDB Enterprise customers, please contact <support@influxdata.com>.
|
> We're evaluating the need for Flux query management controls equivalent to existing InfluxQL [query management controls](/influxdb/v1/troubleshooting/query_management/#configuration-settings-for-query-management) based on your feedback. Please join the discussion on [InfluxCommunity](https://community.influxdata.com/), [Slack](https://influxcommunity.slack.com/), or [GitHub](https://github.com/influxdata/flux). InfluxDB Enterprise customers, please contact <support@influxdata.com>.
|
||||||
|
|
||||||
|
@ -564,7 +605,7 @@ Chunked query was added into the Go client v2 interface. If you compiled against
|
||||||
|
|
||||||
Support for the Flux language and queries has been added in this release. To begin exploring Flux 0.7 (technical preview):
|
Support for the Flux language and queries has been added in this release. To begin exploring Flux 0.7 (technical preview):
|
||||||
|
|
||||||
* Enable Flux using the new configuration setting [`[http] flux-enabled = true`](/influxdb/v1/administration/config/#flux-enabled-false).
|
* Enable Flux using the new configuration setting [`[http] flux-enabled = true`](/influxdb/v1/administration/config/#flux-enabled).
|
||||||
* Use the new [`influx -type=flux`](/influxdb/v1/tools/shell/#type) option to enable the Flux REPL shell for creating Flux queries.
|
* Use the new [`influx -type=flux`](/influxdb/v1/tools/shell/#type) option to enable the Flux REPL shell for creating Flux queries.
|
||||||
* Read about Flux and the Flux language, enabling Flux, or jump into the getting started and other guides.
|
* Read about Flux and the Flux language, enabling Flux, or jump into the getting started and other guides.
|
||||||
|
|
||||||
|
@ -1101,7 +1142,7 @@ With TSI, the number of series should be unbounded by the memory on the server h
|
||||||
See Paul Dix's blogpost [Path to 1 Billion Time Series: InfluxDB High Cardinality Indexing Ready for Testing](https://www.influxdata.com/path-1-billion-time-series-influxdb-high-cardinality-indexing-ready-testing/) for additional information.
|
See Paul Dix's blogpost [Path to 1 Billion Time Series: InfluxDB High Cardinality Indexing Ready for Testing](https://www.influxdata.com/path-1-billion-time-series-influxdb-high-cardinality-indexing-ready-testing/) for additional information.
|
||||||
|
|
||||||
TSI is disabled by default in version 1.3.
|
TSI is disabled by default in version 1.3.
|
||||||
To enable TSI, uncomment the [`index-version` setting](/influxdb/v1/administration/config#index-version-inmem) and set it to `tsi1`.
|
To enable TSI, uncomment the [`index-version` setting](/influxdb/v1/administration/config#index-version) and set it to `tsi1`.
|
||||||
The `index-version` setting is in the `[data]` section of the configuration file.
|
The `index-version` setting is in the `[data]` section of the configuration file.
|
||||||
Next, restart your InfluxDB instance.
|
Next, restart your InfluxDB instance.
|
||||||
|
|
||||||
|
@ -1250,14 +1291,14 @@ The following new configuration options are available.
|
||||||
|
|
||||||
#### `[http]` Section
|
#### `[http]` Section
|
||||||
|
|
||||||
* [`max-row-limit`](/influxdb/v1/administration/config#max-row-limit-0) now defaults to `0`.
|
* [`max-row-limit`](/influxdb/v1/administration/config#max-row-limit) now defaults to `0`.
|
||||||
In versions 1.0 and 1.1, the default setting was `10000`, but due to a bug, the value in use in versions 1.0 and 1.1 was effectively `0`.
|
In versions 1.0 and 1.1, the default setting was `10000`, but due to a bug, the value in use in versions 1.0 and 1.1 was effectively `0`.
|
||||||
In versions 1.2.0 through 1.2.1, we fixed that bug, but the fix caused a breaking change for Grafana and Kapacitor users; users who had not set `max-row-limit` to `0` experienced truncated/partial data due to the `10000` row limit.
|
In versions 1.2.0 through 1.2.1, we fixed that bug, but the fix caused a breaking change for Grafana and Kapacitor users; users who had not set `max-row-limit` to `0` experienced truncated/partial data due to the `10000` row limit.
|
||||||
In version 1.2.2, we've changed the default `max-row-limit` setting to `0` to match the behavior in versions 1.0 and 1.1.
|
In version 1.2.2, we've changed the default `max-row-limit` setting to `0` to match the behavior in versions 1.0 and 1.1.
|
||||||
|
|
||||||
### Bug fixes
|
### Bug fixes
|
||||||
|
|
||||||
- Change the default [`max-row-limit`](/influxdb/v1/administration/config#max-row-limit-0) setting from `10000` to `0` to prevent the absence of data in Grafana or Kapacitor.
|
- Change the default [`max-row-limit`](/influxdb/v1/administration/config#max-row-limit) setting from `10000` to `0` to prevent the absence of data in Grafana or Kapacitor.
|
||||||
|
|
||||||
## v1.2.1 {date="2017-03-08"}
|
## v1.2.1 {date="2017-03-08"}
|
||||||
|
|
||||||
|
|
|
@ -666,7 +666,7 @@ from(bucket: "example-tmp-db/autogen")
|
||||||
For more information, see
|
For more information, see
|
||||||
[How does InfluxDB handle duplicate points?](/influxdb/v1/troubleshooting/frequently-asked-questions/#how-does-influxdb-handle-duplicate-points)
|
[How does InfluxDB handle duplicate points?](/influxdb/v1/troubleshooting/frequently-asked-questions/#how-does-influxdb-handle-duplicate-points)
|
||||||
|
|
||||||
3. Use InfluxQL to delete the temporary database.
|
3. Use InfluxQL to delete the temporary database.
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
DROP DATABASE "example-tmp-db"
|
DROP DATABASE "example-tmp-db"
|
||||||
|
@ -683,7 +683,7 @@ are `127.0.0.1:8088`.
|
||||||
|
|
||||||
**To customize the TCP IP and port the backup and restore services use**,
|
**To customize the TCP IP and port the backup and restore services use**,
|
||||||
uncomment and update the
|
uncomment and update the
|
||||||
[`bind-address` configuration setting](/influxdb/v1/administration/config#bind-address-127-0-0-1-8088)
|
[`bind-address` configuration setting](/influxdb/v1/administration/config#bind-address)
|
||||||
at the root level of your InfluxDB configuration file (`influxdb.conf`).
|
at the root level of your InfluxDB configuration file (`influxdb.conf`).
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
|
|
File diff suppressed because it is too large
|
@ -12,14 +12,14 @@ menu:
|
||||||
|
|
||||||
### `8086`
|
### `8086`
|
||||||
The default port that runs the InfluxDB HTTP service.
|
The default port that runs the InfluxDB HTTP service.
|
||||||
[Configure this port](/influxdb/v1/administration/config#bind-address-8086)
|
[Configure this port](/influxdb/v1/administration/config#http-bind-address)
|
||||||
in the configuration file.
|
in the configuration file.
|
||||||
|
|
||||||
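
A minimal sketch of the relevant setting in `influxdb.conf`, using the default value:

```toml
[http]
  # Bind the HTTP service to port 8086 on all interfaces (default)
  bind-address = ":8086"
```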
**Resources** [API Reference](/influxdb/v1/tools/api/)
|
**Resources** [API Reference](/influxdb/v1/tools/api/)
|
||||||
|
|
||||||
### 8088
|
### 8088
|
||||||
The default port used by the RPC service for RPC calls made by the CLI for backup and restore operations (`influxd backup` and `influxd restore`).
|
The default port used by the RPC service for RPC calls made by the CLI for backup and restore operations (`influxd backup` and `influxd restore`).
|
||||||
[Configure this port](/influxdb/v1/administration/config#bind-address-127-0-0-1-8088)
|
[Configure this port](/influxdb/v1/administration/config#rpc-bind-address)
|
||||||
in the configuration file.
|
in the configuration file.
|
||||||
|
|
||||||
**Resources** [Backup and Restore](/influxdb/v1/administration/backup_and_restore/)
|
**Resources** [Backup and Restore](/influxdb/v1/administration/backup_and_restore/)
|
||||||
|
@ -29,7 +29,7 @@ in the configuration file.
|
||||||
### 2003
|
### 2003
|
||||||
|
|
||||||
The default port that runs the Graphite service.
|
The default port that runs the Graphite service.
|
||||||
[Enable and configure this port](/influxdb/v1/administration/config#bind-address-2003)
|
[Enable and configure this port](/influxdb/v1/administration/config#graphite-bind-address)
|
||||||
in the configuration file.
|
in the configuration file.
|
||||||
|
|
||||||
**Resources** [Graphite README](https://github.com/influxdata/influxdb/tree/1.8/services/graphite/README.md)
|
**Resources** [Graphite README](https://github.com/influxdata/influxdb/tree/1.8/services/graphite/README.md)
|
||||||
|
@ -37,7 +37,7 @@ in the configuration file.
|
||||||
### 4242
|
### 4242
|
||||||
|
|
||||||
The default port that runs the OpenTSDB service.
|
The default port that runs the OpenTSDB service.
|
||||||
[Enable and configure this port](/influxdb/v1/administration/config#bind-address-4242)
|
[Enable and configure this port](/influxdb/v1/administration/config#opentsdb-bind-address)
|
||||||
in the configuration file.
|
in the configuration file.
|
||||||
|
|
||||||
**Resources** [OpenTSDB README](https://github.com/influxdata/influxdb/tree/1.8/services/opentsdb/README.md)
|
**Resources** [OpenTSDB README](https://github.com/influxdata/influxdb/tree/1.8/services/opentsdb/README.md)
|
||||||
|
@ -45,7 +45,7 @@ in the configuration file.
|
||||||
### 8089
|
### 8089
|
||||||
|
|
||||||
The default port that runs the UDP service.
|
The default port that runs the UDP service.
|
||||||
[Enable and configure this port](/influxdb/v1/administration/config#bind-address-8089)
|
[Enable and configure this port](/influxdb/v1/administration/config#udp-bind-address)
|
||||||
in the configuration file.
|
in the configuration file.
|
||||||
|
|
||||||
**Resources** [UDP README](https://github.com/influxdata/influxdb/tree/1.8/services/udp/README.md)
|
**Resources** [UDP README](https://github.com/influxdata/influxdb/tree/1.8/services/udp/README.md)
|
||||||
|
@ -53,7 +53,7 @@ in the configuration file.
|
||||||
### 25826
|
### 25826
|
||||||
|
|
||||||
The default port that runs the Collectd service.
|
The default port that runs the Collectd service.
|
||||||
[Enable and configure this port](/influxdb/v1/administration/config#bind-address-25826)
|
[Enable and configure this port](/influxdb/v1/administration/config#collectd-bind-address)
|
||||||
in the configuration file.
|
in the configuration file.
|
||||||
|
|
||||||
**Resources** [Collectd README](https://github.com/influxdata/influxdb/tree/1.8/services/collectd/README.md)
|
**Resources** [Collectd README](https://github.com/influxdata/influxdb/tree/1.8/services/collectd/README.md)
|
||||||
|
|
|
@ -21,7 +21,7 @@ HTTP, HTTPS, or UDP in [line protocol](/influxdb/v1/write_protocols/line_protoco
|
||||||
the InfluxDB subscriber service creates multiple "writers" ([goroutines](https://golangbot.com/goroutines/))
|
the InfluxDB subscriber service creates multiple "writers" ([goroutines](https://golangbot.com/goroutines/))
|
||||||
which send writes to the subscription endpoints.
|
which send writes to the subscription endpoints.
|
||||||
|
|
||||||
_The number of writer goroutines is defined by the [`write-concurrency`](/influxdb/v1/administration/config#write-concurrency-40) configuration._
|
_The number of writer goroutines is defined by the [`write-concurrency`](/influxdb/v1/administration/config#write-concurrency) configuration._
|
||||||
|
|
||||||
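
A minimal sketch of the setting in `influxdb.conf`, assuming it lives in the `[subscriber]` section and using the default shown in the configuration reference:

```toml
[subscriber]
  # Number of writer goroutines that process the subscription write channel
  write-concurrency = 40
```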
As writes occur in InfluxDB, each subscription writer sends the written data to the
|
As writes occur in InfluxDB, each subscription writer sends the written data to the
|
||||||
specified subscription endpoints.
|
specified subscription endpoints.
|
||||||
|
|
|
@ -21,18 +21,18 @@ The InfluxDB file structure includes of the following:
|
||||||
|
|
||||||
### Data directory
|
### Data directory
|
||||||
Directory path where InfluxDB stores time series data (TSM files).
|
Directory path where InfluxDB stores time series data (TSM files).
|
||||||
To customize this path, use the [`[data].dir`](/influxdb/v1/administration/config/#dir--varlibinfluxdbdata)
|
To customize this path, use the [`[data].dir`](/influxdb/v1/administration/config/#dir-1)
|
||||||
configuration option.
|
configuration option.
|
||||||
|
|
||||||
### WAL directory
|
### WAL directory
|
||||||
Directory path where InfluxDB stores Write Ahead Log (WAL) files.
|
Directory path where InfluxDB stores Write Ahead Log (WAL) files.
|
||||||
To customize this path, use the [`[data].wal-dir`](/influxdb/v1/administration/config/#wal-dir--varlibinfluxdbwal)
|
To customize this path, use the [`[data].wal-dir`](/influxdb/v1/administration/config/#wal-dir)
|
||||||
configuration option.
|
configuration option.
|
||||||
|
|
||||||
### Metastore directory
|
### Metastore directory
|
||||||
Directory path of the InfluxDB metastore, which stores information about users,
|
Directory path of the InfluxDB metastore, which stores information about users,
|
||||||
databases, retention policies, shards, and continuous queries.
|
databases, retention policies, shards, and continuous queries.
|
||||||
To customize this path, use the [`[meta].dir`](/influxdb/v1/administration/config/#dir--varlibinfluxdbmeta)
|
To customize this path, use the [`[meta].dir`](/influxdb/v1/administration/config/#dir)
|
||||||
configuration option.
|
configuration option.
|
||||||
|
|
||||||
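
A minimal `influxdb.conf` sketch that sets all three paths, using the default package-install locations:

```toml
[meta]
  # Metastore directory
  dir = "/var/lib/influxdb/meta"

[data]
  # Time series data (TSM) directory
  dir = "/var/lib/influxdb/data"
  # Write Ahead Log (WAL) directory
  wal-dir = "/var/lib/influxdb/wal"
```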
## InfluxDB configuration files
|
## InfluxDB configuration files
|
||||||
|
|
|
@ -66,13 +66,13 @@ Deletes sent to the Cache will clear out the given key or the specific time rang
|
||||||
|
|
||||||
The Cache exposes a few controls for snapshotting behavior.
|
The Cache exposes a few controls for snapshotting behavior.
|
||||||
The two most important controls are the memory limits.
|
The two most important controls are the memory limits.
|
||||||
There is a lower bound, [`cache-snapshot-memory-size`](/influxdb/v1/administration/config#cache-snapshot-memory-size-25m), which when exceeded will trigger a snapshot to TSM files and remove the corresponding WAL segments.
|
There is a lower bound, [`cache-snapshot-memory-size`](/influxdb/v1/administration/config#cache-snapshot-memory-size), which when exceeded will trigger a snapshot to TSM files and remove the corresponding WAL segments.
|
||||||
There is also an upper bound, [`cache-max-memory-size`](/influxdb/v1/administration/config#cache-max-memory-size-1g), which when exceeded will cause the Cache to reject new writes.
|
There is also an upper bound, [`cache-max-memory-size`](/influxdb/v1/administration/config#cache-max-memory-size), which when exceeded will cause the Cache to reject new writes.
|
||||||
These configurations are useful to prevent out of memory situations and to apply back pressure to clients writing data faster than the instance can persist it.
|
These configurations are useful to prevent out of memory situations and to apply back pressure to clients writing data faster than the instance can persist it.
|
||||||
The checks for memory thresholds occur on every write.
|
The checks for memory thresholds occur on every write.
|
||||||
|
|
||||||
The other snapshot controls are time based.
|
The other snapshot controls are time based.
|
||||||
The idle threshold, [`cache-snapshot-write-cold-duration`](/influxdb/v1/administration/config#cache-snapshot-write-cold-duration-10m), forces the Cache to snapshot to TSM files if it hasn't received a write within the specified interval.
|
The idle threshold, [`cache-snapshot-write-cold-duration`](/influxdb/v1/administration/config#cache-snapshot-write-cold-duration), forces the Cache to snapshot to TSM files if it hasn't received a write within the specified interval.
|
||||||
|
|
||||||
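
A minimal sketch of these controls in the `[data]` section of `influxdb.conf`; the values shown are the documented defaults:

```toml
[data]
  # Snapshot the Cache to TSM files and free WAL segments above this size
  cache-snapshot-memory-size = "25m"
  # Reject new writes once the Cache exceeds this size
  cache-max-memory-size = "1g"
  # Snapshot the Cache if it receives no writes for this long
  cache-snapshot-write-cold-duration = "10m"
```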
The in-memory Cache is recreated on restart by re-reading the WAL files on disk.
|
The in-memory Cache is recreated on restart by re-reading the WAL files on disk.
|
||||||
|
|
||||||
|
|
|
@ -215,7 +215,7 @@ data that reside in an RP other than the `DEFAULT` RP.
|
||||||
Between checks, `orders` may have data that are older than two hours.
|
Between checks, `orders` may have data that are older than two hours.
|
||||||
The rate at which InfluxDB checks to enforce an RP is a configurable setting,
|
The rate at which InfluxDB checks to enforce an RP is a configurable setting,
|
||||||
see
|
see
|
||||||
[Database Configuration](/influxdb/v1/administration/config#check-interval-30m0s).
|
[Database Configuration](/influxdb/v1/administration/config#check-interval).
|
||||||
|
|
||||||
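
For reference, a sketch of that setting in the `[retention]` section of `influxdb.conf`, using its documented default of 30 minutes:

```toml
[retention]
  # How often InfluxDB checks whether retention policies need enforcing
  check-interval = "30m"
```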
Using a combination of RPs and CQs, we've successfully set up our database to
|
Using a combination of RPs and CQs, we've successfully set up our database to
|
||||||
automatically keep the high precision raw data for a limited time, create lower
|
automatically keep the high precision raw data for a limited time, create lower
|
||||||
|
|
|
@ -62,17 +62,22 @@ Creates a new database.
|
||||||
#### Syntax
|
#### Syntax
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE DATABASE <database_name> [WITH [DURATION <duration>] [REPLICATION <n>] [SHARD DURATION <duration>] [NAME <retention-policy-name>]]
|
CREATE DATABASE <database_name> [WITH [DURATION <duration>] [REPLICATION <n>] [SHARD DURATION <duration>] [PAST LIMIT <duration>] [FUTURE LIMIT <duration>] [NAME <retention-policy-name>]]
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Description of syntax
|
#### Description of syntax
|
||||||
|
|
||||||
`CREATE DATABASE` requires a database [name](/influxdb/v1/troubleshooting/frequently-asked-questions/#what-words-and-characters-should-i-avoid-when-writing-data-to-influxdb).
|
`CREATE DATABASE` requires a database [name](/influxdb/v1/troubleshooting/frequently-asked-questions/#what-words-and-characters-should-i-avoid-when-writing-data-to-influxdb).
|
||||||
|
|
||||||
The `WITH`, `DURATION`, `REPLICATION`, `SHARD DURATION`, and `NAME` clauses are optional and create a single [retention policy](/influxdb/v1/concepts/glossary/#retention-policy-rp) associated with the created database.
|
The `WITH`, `DURATION`, `REPLICATION`, `SHARD DURATION`, `PAST LIMIT`,
|
||||||
If you do not specify one of the clauses after `WITH`, the relevant behavior defaults to the `autogen` retention policy settings.
|
`FUTURE LIMIT`, and `NAME` clauses are optional and create a single
|
||||||
|
[retention policy](/influxdb/v1/concepts/glossary/#retention-policy-rp)
|
||||||
|
associated with the created database.
|
||||||
|
If you do not specify one of the clauses after `WITH`, the relevant behavior
|
||||||
|
defaults to the `autogen` retention policy settings.
|
||||||
The created retention policy automatically serves as the database's default retention policy.
|
The created retention policy automatically serves as the database's default retention policy.
|
||||||
For more information about those clauses, see [Retention Policy Management](/influxdb/v1/query_language/manage-database/#retention-policy-management).
|
For more information about those clauses, see
|
||||||
|
[Retention Policy Management](/influxdb/v1/query_language/manage-database/#retention-policy-management).
|
||||||
|
|
||||||
A successful `CREATE DATABASE` query returns an empty result.
|
A successful `CREATE DATABASE` query returns an empty result.
|
||||||
If you attempt to create a database that already exists, InfluxDB does nothing and does not return an error.
|
If you attempt to create a database that already exists, InfluxDB does nothing and does not return an error.
|
||||||
|
@ -87,7 +92,7 @@ If you attempt to create a database that already exists, InfluxDB does nothing a
|
||||||
```
|
```
|
||||||
|
|
||||||
The query creates a database called `NOAA_water_database`.
|
The query creates a database called `NOAA_water_database`.
|
||||||
[By default](/influxdb/v1/administration/config/#retention-autocreate-true), InfluxDB also creates the `autogen` retention policy and associates it with the `NOAA_water_database`.
|
[By default](/influxdb/v1/administration/config/#retention-autocreate), InfluxDB also creates the `autogen` retention policy and associates it with the `NOAA_water_database`.
|
||||||
|
|
||||||
##### Create a database with a specific retention policy
|
##### Create a database with a specific retention policy
|
||||||
|
|
||||||
|
@ -122,21 +127,25 @@ The `DROP SERIES` query deletes all points from a [series](/influxdb/v1/concepts
|
||||||
and it drops the series from the index.
|
and it drops the series from the index.
|
||||||
|
|
||||||
The query takes the following form, where you must specify either the `FROM` clause or the `WHERE` clause:
|
The query takes the following form, where you must specify either the `FROM` clause or the `WHERE` clause:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
DROP SERIES FROM <measurement_name[,measurement_name]> WHERE <tag_key>='<tag_value>'
|
DROP SERIES FROM <measurement_name[,measurement_name]> WHERE <tag_key>='<tag_value>'
|
||||||
```
|
```
|
||||||
|
|
||||||
Drop all series from a single measurement:
|
Drop all series from a single measurement:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
> DROP SERIES FROM "h2o_feet"
|
> DROP SERIES FROM "h2o_feet"
|
||||||
```
|
```
|
||||||
|
|
||||||
Drop series with a specific tag pair from a single measurement:
|
Drop series with a specific tag pair from a single measurement:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
> DROP SERIES FROM "h2o_feet" WHERE "location" = 'santa_monica'
|
> DROP SERIES FROM "h2o_feet" WHERE "location" = 'santa_monica'
|
||||||
```
|
```
|
||||||
|
|
||||||
Drop all points in the series that have a specific tag pair from all measurements in the database:
|
Drop all points in the series that have a specific tag pair from all measurements in the database:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
> DROP SERIES WHERE "location" = 'santa_monica'
|
> DROP SERIES WHERE "location" = 'santa_monica'
|
||||||
```
|
```
|
||||||
|
@ -152,27 +161,31 @@ Unlike
|
||||||
|
|
||||||
You must include either the `FROM` clause, the `WHERE` clause, or both:
|
You must include either the `FROM` clause, the `WHERE` clause, or both:
|
||||||
|
|
||||||
```
|
```sql
|
||||||
DELETE FROM <measurement_name> WHERE [<tag_key>='<tag_value>'] | [<time interval>]
|
DELETE FROM <measurement_name> WHERE [<tag_key>='<tag_value>'] | [<time interval>]
|
||||||
```
|
```
|
||||||
|
|
||||||
Delete all data associated with the measurement `h2o_feet`:
|
Delete all data associated with the measurement `h2o_feet`:
|
||||||
```
|
|
||||||
|
```sql
|
||||||
> DELETE FROM "h2o_feet"
|
> DELETE FROM "h2o_feet"
|
||||||
```
|
```
|
||||||
|
|
||||||
Delete all data associated with the measurement `h2o_quality` and where the tag `randtag` equals `3`:
|
Delete all data associated with the measurement `h2o_quality` and where the tag `randtag` equals `3`:
|
||||||
```
|
|
||||||
|
```sql
|
||||||
> DELETE FROM "h2o_quality" WHERE "randtag" = '3'
|
> DELETE FROM "h2o_quality" WHERE "randtag" = '3'
|
||||||
```
|
```
|
||||||
|
|
||||||
Delete all data in the database that occur before January 01, 2020:
|
Delete all data in the database that occur before January 01, 2020:
|
||||||
```
|
|
||||||
|
```sql
|
||||||
> DELETE WHERE time < '2020-01-01'
|
> DELETE WHERE time < '2020-01-01'
|
||||||
```
|
```
|
||||||
|
|
||||||
Delete all data associated with the measurement `h2o_feet` in retention policy `one_day`:
|
Delete all data associated with the measurement `h2o_feet` in retention policy `one_day`:
|
||||||
```
|
|
||||||
|
```sql
|
||||||
> DELETE FROM "one_day"."h2o_feet"
|
> DELETE FROM "one_day"."h2o_feet"
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -181,12 +194,16 @@ A successful `DELETE` query returns an empty result.
|
||||||
Things to note about `DELETE`:
|
Things to note about `DELETE`:
|
||||||
|
|
||||||
* `DELETE` supports
|
* `DELETE` supports
|
||||||
[regular expressions](/influxdb/v1/query_language/explore-data/#regular-expressions)
|
[regular expressions](/enterprise_influxdb/v1/query_language/explore-data/#regular-expressions)
|
||||||
in the `FROM` clause when specifying measurement names and in the `WHERE` clause
|
in the `FROM` clause when specifying measurement names and in the `WHERE` clause
|
||||||
when specifying tag values. It *does not* support regular expressions for the retention policy in the `FROM` clause.
|
when specifying tag values. It *does not* support regular expressions for the
|
||||||
`DELETE` requires that you define *one* retention policy in the `FROM` clause.
|
retention policy in the `FROM` clause.
|
||||||
* `DELETE` does not support [fields](/influxdb/v1/concepts/glossary/#field) in the `WHERE` clause.
|
If deleting a series in a retention policy, `DELETE` requires that you define
|
||||||
* If you need to delete points in the future, you must specify that time period as `DELETE SERIES` runs for `time < now()` by default. [Syntax](https://github.com/influxdata/influxdb/issues/8007)
|
*only one* retention policy in the `FROM` clause.
|
||||||
|
* `DELETE` does not support [fields](/influxdb/v1/concepts/glossary/#field) in
|
||||||
|
the `WHERE` clause.
|
||||||
|
* If you need to delete points in the future, you must specify that time period
|
||||||
|
as `DELETE SERIES` runs for `time < now()` by default.
|
||||||
|
|
||||||
### Delete measurements with DROP MEASUREMENT
|
### Delete measurements with DROP MEASUREMENT
|
||||||
|
|
||||||
|
@ -240,8 +257,9 @@ You may disable its auto-creation in the [configuration file](/influxdb/v1/admin
|
||||||
### Create retention policies with CREATE RETENTION POLICY
|
### Create retention policies with CREATE RETENTION POLICY
|
||||||
|
|
||||||
#### Syntax
|
#### Syntax
|
||||||
```
|
|
||||||
CREATE RETENTION POLICY <retention_policy_name> ON <database_name> DURATION <duration> REPLICATION <n> [SHARD DURATION <duration>] [DEFAULT]
|
```sql
|
||||||
|
CREATE RETENTION POLICY <retention_policy_name> ON <database_name> DURATION <duration> REPLICATION <n> [SHARD DURATION <duration>] [PAST LIMIT <duration>] [FUTURE LIMIT <duration>] [DEFAULT]
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Description of syntax
|
#### Description of syntax
|
||||||
|
@ -289,6 +307,28 @@ See
|
||||||
[Shard group duration management](/influxdb/v1/concepts/schema_and_data_layout/#shard-group-duration-management)
|
[Shard group duration management](/influxdb/v1/concepts/schema_and_data_layout/#shard-group-duration-management)
|
||||||
for recommended configurations.
|
for recommended configurations.
|
||||||
|
|
||||||
|
##### `PAST LIMIT`
|
||||||
|
|
||||||
|
The `PAST LIMIT` clause defines a time boundary before and relative to _now_
|
||||||
|
in which points written to the retention policy are accepted. If a point has a
|
||||||
|
timestamp before the specified boundary, the point is rejected and the write
|
||||||
|
request returns a partial write error.
|
||||||
|
|
||||||
|
For example, if a write request tries to write data to a retention policy with a
|
||||||
|
`PAST LIMIT 6h` and there are points in the request with timestamps older than
|
||||||
|
6 hours, those points are rejected.
|
||||||
|
|
||||||
|
##### `FUTURE LIMIT`
|
||||||
|
|
||||||
|
The `FUTURE LIMIT` clause defines a time boundary after and relative to _now_
|
||||||
|
in which points written to the retention policy are accepted. If a point has a
|
||||||
|
timestamp after the specified boundary, the point is rejected and the write
|
||||||
|
request returns a partial write error.
|
||||||
|
|
||||||
|
For example, if a write request tries to write data to a retention policy with a
|
||||||
|
`FUTURE LIMIT 6h` and there are points in the request with future timestamps
|
||||||
|
greater than 6 hours from now, those points are rejected.
|
||||||
|
|
||||||
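
A sketch combining both clauses, following the `CREATE RETENTION POLICY` syntax above (the database and policy names are examples):

```sql
-- Accept only points with timestamps between 6 hours in the past and
-- 6 hours in the future, relative to the time of the write
CREATE RETENTION POLICY "bounded_writes" ON "mydb" DURATION 7d REPLICATION 1 PAST LIMIT 6h FUTURE LIMIT 6h
```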
##### `DEFAULT`
|
##### `DEFAULT`
|
||||||
|
|
||||||
Sets the new retention policy as the default retention policy for the database.
|
Sets the new retention policy as the default retention policy for the database.
|
||||||
|
|
|
@ -8,11 +8,6 @@ menu:
|
||||||
parent: InfluxQL
|
parent: InfluxQL
|
||||||
aliases:
|
aliases:
|
||||||
- /influxdb/v2/query_language/spec/
|
- /influxdb/v2/query_language/spec/
|
||||||
- /influxdb/v2/query_language/spec/
|
|
||||||
- /influxdb/v2/query_language/spec/
|
|
||||||
- /influxdb/v2/query_language/spec/
|
|
||||||
- /influxdb/v2/query_language/spec/
|
|
||||||
- /influxdb/v2/query_language/spec/
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Introduction
|
## Introduction
|
||||||
|
@ -123,15 +118,15 @@ ALL ALTER ANY AS ASC BEGIN
|
||||||
BY CREATE CONTINUOUS DATABASE DATABASES DEFAULT
|
BY CREATE CONTINUOUS DATABASE DATABASES DEFAULT
|
||||||
DELETE DESC DESTINATIONS DIAGNOSTICS DISTINCT DROP
|
DELETE DESC DESTINATIONS DIAGNOSTICS DISTINCT DROP
|
||||||
DURATION END EVERY EXPLAIN FIELD FOR
|
DURATION END EVERY EXPLAIN FIELD FOR
|
||||||
FROM GRANT GRANTS GROUP GROUPS IN
|
FROM FUTURE GRANT GRANTS GROUP GROUPS
|
||||||
INF INSERT INTO KEY KEYS KILL
|
IN INF INSERT INTO KEY KEYS
|
||||||
LIMIT SHOW MEASUREMENT MEASUREMENTS NAME OFFSET
|
KILL LIMIT SHOW MEASUREMENT MEASUREMENTS NAME
|
||||||
ON ORDER PASSWORD POLICY POLICIES PRIVILEGES
|
OFFSET ON ORDER PASSWORD PAST POLICY
|
||||||
QUERIES QUERY READ REPLICATION RESAMPLE RETENTION
|
POLICIES PRIVILEGES QUERIES QUERY READ REPLICATION
|
||||||
REVOKE SELECT SERIES SET SHARD SHARDS
|
RESAMPLE RETENTION REVOKE SELECT SERIES SET
|
||||||
SLIMIT SOFFSET STATS SUBSCRIPTION SUBSCRIPTIONS TAG
|
SHARD SHARDS SLIMIT SOFFSET STATS SUBSCRIPTION
|
||||||
TO USER USERS VALUES WHERE WITH
|
SUBSCRIPTIONS TAG TO USER USERS VALUES
|
||||||
WRITE
|
WHERE WITH WRITE
|
||||||
```
|
```
|
||||||
|
|
||||||
If you use an InfluxQL keyword as an
|
If you use an InfluxQL keyword as an
|
||||||
|
@ -383,12 +378,14 @@ create_database_stmt = "CREATE DATABASE" db_name
|
||||||
[ retention_policy_duration ]
|
[ retention_policy_duration ]
|
||||||
[ retention_policy_replication ]
|
[ retention_policy_replication ]
|
||||||
[ retention_policy_shard_group_duration ]
|
[ retention_policy_shard_group_duration ]
|
||||||
|
[ retention_past_limit ]
|
||||||
|
[ retention_future_limit ]
|
||||||
[ retention_policy_name ]
|
[ retention_policy_name ]
|
||||||
] .
|
] .
|
||||||
```
|
```
|
||||||
|
|
||||||
{{% warn %}} Replication factors do not serve a purpose with single node instances.
|
> [!Warning]
|
||||||
{{% /warn %}}
|
> Replication factors do not serve a purpose with single node instances.
|
||||||
|
|
||||||
#### Examples
|
#### Examples
|
||||||
|
|
||||||
|
@ -396,11 +393,17 @@ create_database_stmt = "CREATE DATABASE" db_name
|
||||||
-- Create a database called foo
|
-- Create a database called foo
|
||||||
CREATE DATABASE "foo"
|
CREATE DATABASE "foo"
|
||||||
|
|
||||||
-- Create a database called bar with a new DEFAULT retention policy and specify the duration, replication, shard group duration, and name of that retention policy
|
-- Create a database called bar with a new DEFAULT retention policy and specify
|
||||||
|
-- the duration, replication, shard group duration, and name of that retention policy
|
||||||
CREATE DATABASE "bar" WITH DURATION 1d REPLICATION 1 SHARD DURATION 30m NAME "myrp"
|
CREATE DATABASE "bar" WITH DURATION 1d REPLICATION 1 SHARD DURATION 30m NAME "myrp"
|
||||||
|
|
||||||
-- Create a database called mydb with a new DEFAULT retention policy and specify the name of that retention policy
|
-- Create a database called mydb with a new DEFAULT retention policy and specify
|
||||||
|
-- the name of that retention policy
|
||||||
CREATE DATABASE "mydb" WITH NAME "myrp"
|
CREATE DATABASE "mydb" WITH NAME "myrp"
|
||||||
|
|
||||||
|
-- Create a database called bar with a new retention policy named "myrp", and
|
||||||
|
-- specify the duration, past and future limits, and name of that retention policy
|
||||||
|
CREATE DATABASE "bar" WITH DURATION 1d PAST LIMIT 6h FUTURE LIMIT 6h NAME "myrp"
|
||||||
```
|
```
|
||||||
|
|
||||||
### CREATE RETENTION POLICY
|
### CREATE RETENTION POLICY
|
||||||
|
@ -410,11 +413,13 @@ create_retention_policy_stmt = "CREATE RETENTION POLICY" policy_name on_clause
|
||||||
retention_policy_duration
|
retention_policy_duration
|
||||||
retention_policy_replication
|
retention_policy_replication
|
||||||
[ retention_policy_shard_group_duration ]
|
[ retention_policy_shard_group_duration ]
|
||||||
|
[ retention_past_limit ]
|
||||||
|
[ retention_future_limit ]
|
||||||
[ "DEFAULT" ] .
|
[ "DEFAULT" ] .
|
||||||
```
|
```
|
||||||
|
|
||||||
{{% warn %}} Replication factors do not serve a purpose with single node instances.
|
> [!Warning]
|
||||||
{{% /warn %}}
|
> Replication factors do not serve a purpose with single node instances.
|
||||||
|
|
||||||
#### Examples
|
#### Examples
|
||||||
|
|
||||||
|
@ -427,6 +432,9 @@ CREATE RETENTION POLICY "10m.events" ON "somedb" DURATION 60m REPLICATION 2 DEFA
|
||||||
|
|
||||||
-- Create a retention policy and specify the shard group duration.
|
-- Create a retention policy and specify the shard group duration.
|
||||||
CREATE RETENTION POLICY "10m.events" ON "somedb" DURATION 60m REPLICATION 2 SHARD DURATION 30m
|
CREATE RETENTION POLICY "10m.events" ON "somedb" DURATION 60m REPLICATION 2 SHARD DURATION 30m
|
||||||
|
|
||||||
|
-- Create a retention policy and specify past and future limits.
|
||||||
|
CREATE RETENTION POLICY "10m.events" ON "somedb" DURATION 12h PAST LIMIT 6h FUTURE LIMIT 6h
|
||||||
```
|
```
|
||||||
|
|
||||||
### CREATE SUBSCRIPTION
|
### CREATE SUBSCRIPTION
|
||||||
|
|
|
@ -89,7 +89,7 @@ made to match the InfluxDB data structure:
|
||||||
* Prometheus labels become InfluxDB tags.
|
* Prometheus labels become InfluxDB tags.
|
||||||
* All `# HELP` and `# TYPE` lines are ignored.
|
* All `# HELP` and `# TYPE` lines are ignored.
|
||||||
* [v1.8.6 and later] Prometheus remote write endpoint drops unsupported Prometheus values (`NaN`,`-Inf`, and `+Inf`) rather than reject the entire batch.
|
* [v1.8.6 and later] Prometheus remote write endpoint drops unsupported Prometheus values (`NaN`,`-Inf`, and `+Inf`) rather than reject the entire batch.
|
||||||
* If [write trace logging is enabled (`[http] write-tracing = true`)](/influxdb/v1/administration/config/#write-tracing-false), then summaries of dropped values are logged.
|
* If [write trace logging is enabled (`[http] write-tracing = true`)](/influxdb/v1/administration/config/#write-tracing), then summaries of dropped values are logged.
|
||||||
* If a batch of values contains values that are subsequently dropped, HTTP status code `204` is returned.
|
* If a batch of values contains values that are subsequently dropped, HTTP status code `204` is returned.
|
||||||
|
|
||||||
### Example: Parse Prometheus to InfluxDB
|
### Example: Parse Prometheus to InfluxDB
|
||||||
|
|
|
@ -554,7 +554,7 @@ A successful [`CREATE DATABASE` query](/influxdb/v1/query_language/manage-databa
|
||||||
| u=\<username> | Optional if you haven't [enabled authentication](/influxdb/v1/administration/authentication_and_authorization/#set-up-authentication). Required if you've enabled authentication.* | Sets the username for authentication if you've enabled authentication. The user must have read access to the database. Use with the query string parameter `p`. |
|
| u=\<username> | Optional if you haven't [enabled authentication](/influxdb/v1/administration/authentication_and_authorization/#set-up-authentication). Required if you've enabled authentication.* | Sets the username for authentication if you've enabled authentication. The user must have read access to the database. Use with the query string parameter `p`. |
|
||||||
|
|
||||||
\* InfluxDB does not truncate the number of rows returned for requests without the `chunked` parameter.
|
\* InfluxDB does not truncate the number of rows returned for requests without the `chunked` parameter.
|
||||||
That behavior is configurable; see the [`max-row-limit`](/influxdb/v1/administration/config/#max-row-limit-0) configuration option for more information.
|
That behavior is configurable; see the [`max-row-limit`](/influxdb/v1/administration/config/#max-row-limit) configuration option for more information.
|
||||||
|
|
||||||
\** The InfluxDB API also supports basic authentication.
|
\** The InfluxDB API also supports basic authentication.
|
||||||
Use basic authentication if you've [enabled authentication](/influxdb/v1/administration/authentication_and_authorization/#set-up-authentication)
|
Use basic authentication if you've [enabled authentication](/influxdb/v1/administration/authentication_and_authorization/#set-up-authentication)
|
||||||
|
@ -1077,7 +1077,7 @@ Errors are returned in JSON.
|
||||||
| 400 Bad Request | Unacceptable request. Can occur with an InfluxDB line protocol syntax error or if a user attempts to write values to a field that previously accepted a different value type. The returned JSON offers further information. |
|
| 400 Bad Request | Unacceptable request. Can occur with an InfluxDB line protocol syntax error or if a user attempts to write values to a field that previously accepted a different value type. The returned JSON offers further information. |
|
||||||
| 401 Unauthorized | Unacceptable request. Can occur with invalid authentication credentials. |
|
| 401 Unauthorized | Unacceptable request. Can occur with invalid authentication credentials. |
|
||||||
| 404 Not Found | Unacceptable request. Can occur if a user attempts to write to a database that does not exist. The returned JSON offers further information. |
|
| 404 Not Found | Unacceptable request. Can occur if a user attempts to write to a database that does not exist. The returned JSON offers further information. |
|
||||||
| 413 Request Entity Too Large | Unaccetable request. It will occur if the payload of the POST request is bigger than the maximum size allowed. See [`max-body-size`](/influxdb/v1/administration/config/#max-body-size-25000000) parameter for more details.
|
| 413 Request Entity Too Large | Unacceptable request. It will occur if the payload of the POST request is bigger than the maximum size allowed. See [`max-body-size`](/influxdb/v1/administration/config/#max-body-size) parameter for more details.
|
||||||
| 500 Internal Server Error | The system is overloaded or significantly impaired. Can occur if a user attempts to write to a retention policy that does not exist. The returned JSON offers further information. |
|
| 500 Internal Server Error | The system is overloaded or significantly impaired. Can occur if a user attempts to write to a retention policy that does not exist. The returned JSON offers further information. |
|
||||||
|
|
||||||
#### Examples
|
#### Examples
|
||||||
|
|
|
@ -12,9 +12,10 @@ alt_links:
|
||||||
|
|
||||||
Influx Inspect is an InfluxDB disk utility that can be used to:
|
Influx Inspect is an InfluxDB disk utility that can be used to:
|
||||||
|
|
||||||
* View detailed information about disk shards.
|
- View detailed information about disk shards.
|
||||||
* Export data from a shard to [InfluxDB line protocol](/influxdb/v1/concepts/glossary/#influxdb-line-protocol) that can be inserted back into the database.
|
- Export data from a shard to [InfluxDB line protocol](/influxdb/v1/concepts/glossary/#influxdb-line-protocol)
|
||||||
* Convert TSM index shards to TSI index shards.
|
that can be inserted back into the database.
|
||||||
|
- Convert TSM index shards to TSI index shards.
|
||||||
|
|
||||||
## `influx_inspect` utility
|
## `influx_inspect` utility
|
||||||
|
|
||||||
|
@ -52,7 +53,9 @@ Builds TSI (Time Series Index) disk-based shard index files and associated serie
|
||||||
The index is written to a temporary location until complete and then moved to a permanent location.
|
The index is written to a temporary location until complete and then moved to a permanent location.
|
||||||
If an error occurs, then this operation will fall back to the original in-memory index.
|
If an error occurs, then this operation will fall back to the original in-memory index.
|
||||||
|
|
||||||
> ***Note:*** **For offline conversion only.**
|
> [!Note]
|
||||||
|
> #### For offline conversion only
|
||||||
|
>
|
||||||
> When TSI is enabled, new shards use the TSI indexes.
|
> When TSI is enabled, new shards use the TSI indexes.
|
||||||
> Existing shards continue as TSM-based shards until
|
> Existing shards continue as TSM-based shards until
|
||||||
> converted offline.
|
> converted offline.
|
||||||
|
@ -62,7 +65,9 @@ If an error occurs, then this operation will fall back to the original in-memory
|
||||||
```
|
```
|
||||||
influx_inspect buildtsi -datadir <data_dir> -waldir <wal_dir> [ options ]
|
influx_inspect buildtsi -datadir <data_dir> -waldir <wal_dir> [ options ]
|
||||||
```
|
```
|
||||||
> **Note:** Use the `buildtsi` command with the user account that you are going to run the database as,
|
|
||||||
|
> [!Note]
|
||||||
|
> Use the `buildtsi` command with the user account that you are going to run the database as,
|
||||||
> or ensure that the permissions match after running the command.
|
> or ensure that the permissions match after running the command.
|
||||||
|
|
||||||
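
For example, assuming a package install where `influxd` runs as the `influxdb` user and data lives in the default `/var/lib/influxdb` directories:

```bash
# Rebuild TSI index files as the same user that runs the database
sudo -u influxdb influx_inspect buildtsi \
  -datadir /var/lib/influxdb/data \
  -waldir /var/lib/influxdb/wal
```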
#### Options
|
#### Options
|
||||||
|
@ -73,9 +78,8 @@ Optional arguments are in brackets.
|
||||||
|
|
||||||
The size of the batches written to the index. Default value is `10000`.
|
The size of the batches written to the index. Default value is `10000`.
|
||||||
|
|
||||||
{{% warn %}}
|
> [!Warning]
|
||||||
**Warning:** Setting this value can have adverse effects on performance and heap size.
|
> Setting this value can have adverse effects on performance and heap size.
|
||||||
{{% /warn %}}
|
|
||||||
|
|
||||||
##### `[ -compact-series-file ]`
|
##### `[ -compact-series-file ]`
|
||||||
|
|
||||||
|
@ -123,7 +127,7 @@ Flag to enable output in verbose mode.
|
||||||
|
|
||||||
##### `-waldir <wal_dir>`
|
##### `-waldir <wal_dir>`
|
||||||
|
|
||||||
The directory for the (WAL (Write Ahead Log)](/influxdb/v1/concepts/file-system-layout/#wal-directory) files.
|
The directory for the [WAL (Write Ahead Log)](/influxdb/v1/concepts/file-system-layout/#wal-directory) files.
|
||||||
|
|
||||||
Default value is `$HOME/.influxdb/wal`.
|
Default value is `$HOME/.influxdb/wal`.
|
||||||
See the [file system layout](/influxdb/v1/concepts/file-system-layout/#file-system-layout)
|
See the [file system layout](/influxdb/v1/concepts/file-system-layout/#file-system-layout)
|
||||||
|
@ -181,10 +185,9 @@ The filename where schema data should be written. Default is `schema.json`.
|
||||||
Use `deletetsm -measurement` to delete a measurement in a raw TSM file (from specified shards).
|
Use `deletetsm -measurement` to delete a measurement in a raw TSM file (from specified shards).
|
||||||
Use `deletetsm -sanitize` to remove all tag and field keys containing non-printable Unicode characters in a raw TSM file (from specified shards).
|
Use `deletetsm -sanitize` to remove all tag and field keys containing non-printable Unicode characters in a raw TSM file (from specified shards).
|
||||||
|
|
||||||
{{% warn %}}
|
> [!Warning]
|
||||||
**Warning:** Use the `deletetsm` command only when your InfluxDB instance is
|
> Use the `deletetsm` command only when your InfluxDB instance is
|
||||||
offline (`influxd` service is not running).
|
> offline (`influxd` service is not running).
|
||||||
{{% /warn %}}
|
|
||||||
|
|
||||||
#### Syntax
|
#### Syntax
|
||||||
|
|
||||||
|
@ -287,18 +290,18 @@ Filter data by tag value regular expression.
|
||||||
##### Specifying paths to the `_series` and `index` directories
|
##### Specifying paths to the `_series` and `index` directories
|
||||||
|
|
||||||
```
|
```
|
||||||
$ influx_inspect dumptsi -series-file /path/to/db/_series /path/to/index
|
influx_inspect dumptsi -series-file /path/to/db/_series /path/to/index
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Specifying paths to the `_series` directory and an `index` file
|
##### Specifying paths to the `_series` directory and an `index` file
|
||||||
|
|
||||||
```
|
```
|
||||||
$ influx_inspect dumptsi -series-file /path/to/db/_series /path/to/index/file0
|
influx_inspect dumptsi -series-file /path/to/db/_series /path/to/index/file0
|
||||||
```
|
```
|
||||||
##### Specifying paths to the `_series` directory and multiple `index` files
|
##### Specifying paths to the `_series` directory and multiple `index` files
|
||||||
|
|
||||||
```
|
```
|
||||||
$ influx_inspect dumptsi -series-file /path/to/db/_series /path/to/index/file0 /path/to/index/file1 ...
|
influx_inspect dumptsi -series-file /path/to/db/_series /path/to/index/file0 /path/to/index/file1 ...
|
||||||
```
|
```
|
||||||
|
|
||||||
### `dumptsm`
|
### `dumptsm`
|
||||||
|
@ -360,8 +363,8 @@ If a user writes points with timestamps set by the client, then multiple points
|
||||||
|
|
||||||
### `export`
|
### `export`
|
||||||
|
|
||||||
Exports all TSM files in InfluxDB line protocol data format.
|
Exports all TSM files or a single TSM file in InfluxDB line protocol data format.
|
||||||
This output file can be imported using the
|
The output file can be imported using the
|
||||||
[influx](/influxdb/v1/tools/shell/#import-data-from-a-file-with-import) command.
|
[influx](/influxdb/v1/tools/shell/#import-data-from-a-file-with-import) command.
|
||||||
|
|
||||||
#### Syntax
|
#### Syntax
|
||||||
|
@ -413,9 +416,12 @@ YYYY-MM-DDTHH:MM:SS-08:00
|
||||||
YYYY-MM-DDTHH:MM:SS+07:00
|
YYYY-MM-DDTHH:MM:SS+07:00
|
||||||
```
|
```
|
||||||
|
|
||||||
> **Note:** With offsets, avoid replacing the + or - sign with a Z. It may cause an error or print Z (ISO 8601 behavior) instead of the time zone offset.
|
> [!Note]
|
||||||
|
> With offsets, avoid replacing the + or - sign with a Z. It may cause an error
|
||||||
|
> or print Z (ISO 8601 behavior) instead of the time zone offset.
|
||||||
|
|
||||||
##### [ `-lponly` ]
|
##### [ `-lponly` ]
|
||||||
|
|
||||||
Output data in line protocol format only.
|
Output data in line protocol format only.
|
||||||
Does not output data definition language (DDL) statements (such as `CREATE DATABASE`)
|
Does not output data definition language (DDL) statements (such as `CREATE DATABASE`)
|
||||||
or DML context metadata (such as `# CONTEXT-DATABASE`).
|
or DML context metadata (such as `# CONTEXT-DATABASE`).
|
||||||
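A hedged example (the output path is hypothetical):

```bash
# Export only line protocol, omitting DDL statements and DML context comments
influx_inspect export -lponly -out /tmp/export.lp
```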
|
@ -443,6 +449,11 @@ Default value is `$HOME/.influxdb/wal`.
|
||||||
See the [file system layout](/influxdb/v1/concepts/file-system-layout/#file-system-layout)
|
See the [file system layout](/influxdb/v1/concepts/file-system-layout/#file-system-layout)
|
||||||
for InfluxDB on your system.
|
for InfluxDB on your system.
|
||||||
|
|
||||||
|
##### [ `-tsmfile <tsm_file>` ]
|
||||||
|
|
||||||
|
Path to a single tsm file to export. This requires both `-database` and
|
||||||
|
`-retention` to be specified.
|
||||||
|
|
||||||
#### Examples
|
#### Examples
|
||||||
|
|
||||||
##### Export all databases and compress the output
|
##### Export all databases and compress the output
|
||||||
|
@ -457,6 +468,15 @@ influx_inspect export -compress
|
||||||
influx_inspect export -database DATABASE_NAME -retention RETENTION_POLICY
|
influx_inspect export -database DATABASE_NAME -retention RETENTION_POLICY
|
||||||
```
|
```
|
||||||
|
|
||||||
|
##### Export data from a single TSM file
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influx_inspect export \
|
||||||
|
-database DATABASE_NAME \
|
||||||
|
-retention RETENTION_POLICY \
|
||||||
|
-tsmfile TSM_FILE_NAME
|
||||||
|
```
|
||||||
|
|
||||||
##### Output file
|
##### Output file
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
@ -650,11 +670,11 @@ influx_inspect report-disk -detailed ~/.influxdb/data/
|
||||||
|
|
||||||
The report does the following:
|
The report does the following:
|
||||||
|
|
||||||
* Calculates the total exact series cardinality in the database.
|
- Calculates the total exact series cardinality in the database.
|
||||||
* Segments that cardinality by measurement, and emits those cardinality values.
|
- Segments that cardinality by measurement, and emits those cardinality values.
|
||||||
* Emits total exact cardinality for each shard in the database.
|
- Emits total exact cardinality for each shard in the database.
|
||||||
* Segments for each shard the exact cardinality for each measurement in the shard.
|
- Segments for each shard the exact cardinality for each measurement in the shard.
|
||||||
* Optionally limits the results in each shard to the "top n".
|
- Optionally limits the results in each shard to the "top n".
|
||||||
|
|
||||||
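A hedged usage sketch of the report described above; the database path is hypothetical, and `-top` limits the per-shard output:

```bash
# Report exact series cardinality per measurement and per shard,
# keeping only the top 5 measurements for each shard
influx_inspect reporttsi -db-path ~/.influxdb/data/mydb -top 5
```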
The `reporttsi` command is primarily useful when there has been a change in cardinality
|
The `reporttsi` command is primarily useful when there has been a change in cardinality
|
||||||
and it's not clear which measurement is responsible for this change, and further, _when_
|
and it's not clear which measurement is responsible for this change, and further, _when_
|
||||||
|
@ -769,7 +789,8 @@ Enables very verbose logging. Displays progress for every series key and time ra
|
||||||
|
|
||||||
Enables very very verbose logging. Displays progress for every series key and time range in the tombstone files. Timestamps are displayed in [RFC3339 format](https://tools.ietf.org/html/rfc3339) with nanosecond precision.
|
Enables very very verbose logging. Displays progress for every series key and time range in the tombstone files. Timestamps are displayed in [RFC3339 format](https://tools.ietf.org/html/rfc3339) with nanosecond precision.
|
||||||
|
|
||||||
> **Note on verbose logging:** Higher verbosity levels override lower levels.
|
> [!Note]
|
||||||
|
> Higher verbosity levels override lower levels.
|
||||||
|
|
||||||
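A hedged example, assuming these verbosity flags belong to the `influx_inspect verify-tombstone` command described above:

```bash
# Check tombstone files with maximum verbosity
# (prints every series key and time range with nanosecond timestamps)
influx_inspect verify-tombstone -vvv
```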
## Caveats
|
## Caveats
|
||||||
|
|
||||||
|
|
|
@ -47,7 +47,7 @@ By default `max-series-per-database` is set to one million.
|
||||||
Changing the setting to `0` allows an unlimited number of series per database.
|
Changing the setting to `0` allows an unlimited number of series per database.
|
||||||
|
|
||||||
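A minimal sketch of removing the limit, assuming the standard InfluxDB 1.x environment-variable override pattern (equivalent to setting `max-series-per-database = 0` under `[data]` in the configuration file):

```bash
# Allow an unlimited number of series per database, then restart influxd
export INFLUXDB_DATA_MAX_SERIES_PER_DATABASE=0
```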
**Resources:**
|
**Resources:**
|
||||||
[Database Configuration](/influxdb/v1/administration/config/#max-series-per-database-1000000)
|
[Database Configuration](/influxdb/v1/administration/config/#max-series-per-database)
|
||||||
|
|
||||||
## `error parsing query: found < >, expected identifier at line < >, char < >`
|
## `error parsing query: found < >, expected identifier at line < >, char < >`
|
||||||
|
|
||||||
|
@ -326,7 +326,7 @@ The maximum valid timestamp is `9223372036854775806` or `2262-04-11T23:47:16.854
|
||||||
|
|
||||||
The `cache maximum memory size exceeded` error occurs when the cached
|
The `cache maximum memory size exceeded` error occurs when the cached
|
||||||
memory size increases beyond the
|
memory size increases beyond the
|
||||||
[`cache-max-memory-size` setting](/influxdb/v1/administration/config/#cache-max-memory-size-1g)
|
[`cache-max-memory-size` setting](/influxdb/v1/administration/config/#cache-max-memory-size)
|
||||||
in the configuration file.
|
in the configuration file.
|
||||||
|
|
||||||
By default, `cache-max-memory-size` is set to 512mb.
|
By default, `cache-max-memory-size` is set to 512mb.
|
||||||
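A minimal sketch of raising the limit, assuming the standard environment-variable override for `[data]` settings; the `1g` value is only an example and should be sized for your available memory:

```bash
# Raise the in-memory cache limit above the 512mb default, then restart influxd
export INFLUXDB_DATA_CACHE_MAX_MEMORY_SIZE=1g
```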
|
@ -398,11 +398,15 @@ This error occurs when the Docker container cannot read files on the host machin
|
||||||
|
|
||||||
#### Make host machine files readable to Docker
|
#### Make host machine files readable to Docker
|
||||||
|
|
||||||
1. Create a directory, and then copy the files you want to import into InfluxDB into this directory.
|
1. Create a directory, and then copy the files you want to import into InfluxDB into this directory.
|
||||||
2. When you launch the Docker container, mount the new directory on the InfluxDB container by running the following command:
|
2. When you launch the Docker container, mount the new directory on the InfluxDB container by running the following command:
|
||||||
|
|
||||||
docker run -v /dir/path/on/host:/dir/path/in/container
|
```bash
|
||||||
|
docker run -v /dir/path/on/host:/dir/path/in/container
|
||||||
|
```
|
||||||
|
|
||||||
3. Verify the Docker container can read host machine files by running the following command:
|
3. Verify the Docker container can read host machine files by running the following command:
|
||||||
|
|
||||||
influx -import -path=/path/in/container
|
```bash
|
||||||
|
influx -import -path=/path/in/container
|
||||||
|
```
|
||||||
|
|
|
@ -164,7 +164,7 @@ an RP every 30 minutes.
|
||||||
You may need to wait for the next RP check for InfluxDB to drop data that are
|
You may need to wait for the next RP check for InfluxDB to drop data that are
|
||||||
outside the RP's new `DURATION` setting.
|
outside the RP's new `DURATION` setting.
|
||||||
The 30 minute interval is
|
The 30 minute interval is
|
||||||
[configurable](/influxdb/v1/administration/config/#check-interval-30m0s).
|
[configurable](/influxdb/v1/administration/config/#check-interval).
|
||||||
|
|
||||||
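A minimal sketch of shortening that interval, assuming the standard environment-variable override for the `[retention]` section; the `5m` value is only an example:

```bash
# Check retention policy enforcement every 5 minutes instead of the default 30m
export INFLUXDB_RETENTION_CHECK_INTERVAL=5m
```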
Second, altering both the `DURATION` and `SHARD DURATION` of an RP can result in
|
Second, altering both the `DURATION` and `SHARD DURATION` of an RP can result in
|
||||||
unexpected data retention.
|
unexpected data retention.
|
||||||
|
@ -623,9 +623,9 @@ Avoid using the same name for a tag and field key. If you inadvertently add the
|
||||||
|
|
||||||
#### Example
|
#### Example
|
||||||
|
|
||||||
1. [Launch `influx`](/influxdb/v1/tools/shell/#launch-influx).
|
1. [Launch `influx`](/influxdb/v1/tools/shell/#launch-influx).
|
||||||
|
|
||||||
2. Write the following points to create both a field and tag key with the same name `leaves`:
|
2. Write the following points to create both a field and tag key with the same name `leaves`:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# create the `leaves` tag key
|
# create the `leaves` tag key
|
||||||
|
@ -635,7 +635,7 @@ Avoid using the same name for a tag and field key. If you inadvertently add the
|
||||||
INSERT grape leaves=5
|
INSERT grape leaves=5
|
||||||
```
|
```
|
||||||
|
|
||||||
3. If you view both keys, you'll notice that neither key includes `_1`:
|
3. If you view both keys, you'll notice that neither key includes `_1`:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# show the `leaves` tag key
|
# show the `leaves` tag key
|
||||||
|
@ -655,7 +655,7 @@ Avoid using the same name for a tag and field key. If you inadvertently add the
|
||||||
leaves float
|
leaves float
|
||||||
```
|
```
|
||||||
|
|
||||||
4. If you query the `grape` measurement, you'll see the `leaves` tag key has an appended `_1`:
|
4. If you query the `grape` measurement, you'll see the `leaves` tag key has an appended `_1`:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# query the `grape` measurement
|
# query the `grape` measurement
|
||||||
|
@ -668,7 +668,7 @@ Avoid using the same name for a tag and field key. If you inadvertently add the
|
||||||
1574128238044155000 5.00
|
1574128238044155000 5.00
|
||||||
```
|
```
|
||||||
|
|
||||||
5. To query a duplicate key name, you **must drop** `_1` **and include** `::tag` or `::field` after the key:
|
5. To query a duplicate key name, you **must drop** `_1` **and include** `::tag` or `::field` after the key:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# query duplicate keys using the correct syntax
|
# query duplicate keys using the correct syntax
|
||||||
|
@ -693,9 +693,9 @@ the allotted memory.
|
||||||
|
|
||||||
#### Remove a duplicate key
|
#### Remove a duplicate key
|
||||||
|
|
||||||
1. [Launch `influx`](/influxdb/v1/tools/shell/#launch-influx).
|
1. [Launch `influx`](/influxdb/v1/tools/shell/#launch-influx).
|
||||||
|
|
||||||
2. Use the following queries to remove a duplicate key.
|
2. Use the following queries to remove a duplicate key.
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
|
|
||||||
|
@ -1093,39 +1093,39 @@ time az hostname val_1 val_2
|
||||||
|
|
||||||
To store both points:
|
To store both points:
|
||||||
|
|
||||||
* Introduce an arbitrary new tag to enforce uniqueness.
|
- Introduce an arbitrary new tag to enforce uniqueness.
|
||||||
|
|
||||||
Old point: `cpu_load,hostname=server02,az=us_west,uniq=1 val_1=24.5,val_2=7 1234567890000000`
|
Old point: `cpu_load,hostname=server02,az=us_west,uniq=1 val_1=24.5,val_2=7 1234567890000000`
|
||||||
|
|
||||||
New point: `cpu_load,hostname=server02,az=us_west,uniq=2 val_1=5.24 1234567890000000`
|
New point: `cpu_load,hostname=server02,az=us_west,uniq=2 val_1=5.24 1234567890000000`
|
||||||
|
|
||||||
After writing the new point to InfluxDB:
|
After writing the new point to InfluxDB:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
> SELECT * FROM "cpu_load" WHERE time = 1234567890000000
|
> SELECT * FROM "cpu_load" WHERE time = 1234567890000000
|
||||||
name: cpu_load
|
name: cpu_load
|
||||||
--------------
|
--------------
|
||||||
time az hostname uniq val_1 val_2
|
time az hostname uniq val_1 val_2
|
||||||
1970-01-15T06:56:07.89Z us_west server02 1 24.5 7
|
1970-01-15T06:56:07.89Z us_west server02 1 24.5 7
|
||||||
1970-01-15T06:56:07.89Z us_west server02 2 5.24
|
1970-01-15T06:56:07.89Z us_west server02 2 5.24
|
||||||
```
|
```
|
||||||
|
|
||||||
* Increment the timestamp by a nanosecond.
|
- Increment the timestamp by a nanosecond.
|
||||||
|
|
||||||
Old point: `cpu_load,hostname=server02,az=us_west val_1=24.5,val_2=7 1234567890000000`
|
Old point: `cpu_load,hostname=server02,az=us_west val_1=24.5,val_2=7 1234567890000000`
|
||||||
|
|
||||||
New point: `cpu_load,hostname=server02,az=us_west val_1=5.24 1234567890000001`
|
New point: `cpu_load,hostname=server02,az=us_west val_1=5.24 1234567890000001`
|
||||||
|
|
||||||
After writing the new point to InfluxDB:
|
After writing the new point to InfluxDB:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
> SELECT * FROM "cpu_load" WHERE time >= 1234567890000000 and time <= 1234567890000001
|
> SELECT * FROM "cpu_load" WHERE time >= 1234567890000000 and time <= 1234567890000001
|
||||||
name: cpu_load
|
name: cpu_load
|
||||||
--------------
|
--------------
|
||||||
time az hostname val_1 val_2
|
time az hostname val_1 val_2
|
||||||
1970-01-15T06:56:07.89Z us_west server02 24.5 7
|
1970-01-15T06:56:07.89Z us_west server02 24.5 7
|
||||||
1970-01-15T06:56:07.890000001Z us_west server02 5.24
|
1970-01-15T06:56:07.890000001Z us_west server02 5.24
|
||||||
```
|
```
|
||||||
|
|
||||||
## What newline character does the InfluxDB API require?
|
## What newline character does the InfluxDB API require?
|
||||||
|
|
||||||
|
@ -1207,27 +1207,29 @@ To keep regular expressions and quoting simple, avoid using the following charac
|
||||||
|
|
||||||
## When should I single quote and when should I double quote when writing data?
|
## When should I single quote and when should I double quote when writing data?
|
||||||
|
|
||||||
* Avoid single quoting and double quoting identifiers when writing data via the line protocol; see the examples below for how writing identifiers with quotes can complicate queries.
|
- Avoid single quoting and double quoting identifiers when writing data via the
|
||||||
Identifiers are database names, retention policy names, user names, measurement names, tag keys, and field keys.
|
line protocol; see the examples below for how writing identifiers with quotes
|
||||||
|
can complicate queries. Identifiers are database names, retention policy
|
||||||
|
names, user names, measurement names, tag keys, and field keys.
|
||||||
|
|
||||||
Write with a double-quoted measurement: `INSERT "bikes" bikes_available=3`
|
Write with a double-quoted measurement: `INSERT "bikes" bikes_available=3`
|
||||||
Applicable query: `SELECT * FROM "\"bikes\""`
|
Applicable query: `SELECT * FROM "\"bikes\""`
|
||||||
|
|
||||||
Write with a single-quoted measurement: `INSERT 'bikes' bikes_available=3`
|
Write with a single-quoted measurement: `INSERT 'bikes' bikes_available=3`
|
||||||
Applicable query: `SELECT * FROM "\'bikes\'"`
|
Applicable query: `SELECT * FROM "\'bikes\'"`
|
||||||
|
|
||||||
Write with an unquoted measurement: `INSERT bikes bikes_available=3`
|
Write with an unquoted measurement: `INSERT bikes bikes_available=3`
|
||||||
Applicable query: `SELECT * FROM "bikes"`
|
Applicable query: `SELECT * FROM "bikes"`
|
||||||
|
|
||||||
* Double quote field values that are strings.
|
- Double quote field values that are strings.
|
||||||
|
|
||||||
Write: `INSERT bikes happiness="level 2"`
|
Write: `INSERT bikes happiness="level 2"`
|
||||||
Applicable query: `SELECT * FROM "bikes" WHERE "happiness"='level 2'`
|
Applicable query: `SELECT * FROM "bikes" WHERE "happiness"='level 2'`
|
||||||
|
|
||||||
* Special characters should be escaped with a backslash and not placed in quotes.
|
- Special characters should be escaped with a backslash and not placed in quotes.
|
||||||
|
|
||||||
Write: `INSERT wacky va\"ue=4`
|
Write: `INSERT wacky va\"ue=4`
|
||||||
Applicable query: `SELECT "va\"ue" FROM "wacky"`
|
Applicable query: `SELECT "va\"ue" FROM "wacky"`
|
||||||
|
|
||||||
For more information, see [Line protocol](/influxdb/v1/write_protocols/).
|
For more information, see [Line protocol](/influxdb/v1/write_protocols/).
|
||||||
|
|
||||||
|
@ -1255,6 +1257,6 @@ The default shard group duration is one week and if your data cover several hund
|
||||||
Having an extremely high number of shards is inefficient for InfluxDB.
|
Having an extremely high number of shards is inefficient for InfluxDB.
|
||||||
Increase the shard group duration for your data’s retention policy with the [`ALTER RETENTION POLICY` query](/influxdb/v1/query_language/manage-database/#modify-retention-policies-with-alter-retention-policy).
|
Increase the shard group duration for your data’s retention policy with the [`ALTER RETENTION POLICY` query](/influxdb/v1/query_language/manage-database/#modify-retention-policies-with-alter-retention-policy).
|
||||||
|
|
||||||
Second, temporarily lower the [`cache-snapshot-write-cold-duration` configuration setting](/influxdb/v1/administration/config/#cache-snapshot-write-cold-duration-10m).
|
Second, temporarily lower the [`cache-snapshot-write-cold-duration` configuration setting](/influxdb/v1/administration/config/#cache-snapshot-write-cold-duration).
|
||||||
If you’re writing a lot of historical data, the default setting (`10m`) can cause the system to hold all of your data in cache for every shard.
|
If you’re writing a lot of historical data, the default setting (`10m`) can cause the system to hold all of your data in cache for every shard.
|
||||||
Temporarily lowering the `cache-snapshot-write-cold-duration` setting to `10s` while you write the historical data makes the process more efficient.
|
Temporarily lowering the `cache-snapshot-write-cold-duration` setting to `10s` while you write the historical data makes the process more efficient.
|
||||||
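A minimal sketch, assuming the standard environment-variable override for `[data]` settings; revert to the default once the backfill finishes:

```bash
# Snapshot cold shards after 10s while writing historical data
export INFLUXDB_DATA_CACHE_SNAPSHOT_WRITE_COLD_DURATION=10s
```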
|
|
|
@ -3,7 +3,7 @@ title: Create a database token
|
||||||
description: >
|
description: >
|
||||||
Use the [`influxctl token create` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/token/create/)
|
Use the [`influxctl token create` command](/influxdb3/cloud-dedicated/reference/cli/influxctl/token/create/)
|
||||||
or the [Management HTTP API](/influxdb3/cloud-dedicated/api/management/)
|
or the [Management HTTP API](/influxdb3/cloud-dedicated/api/management/)
|
||||||
to [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) for reading and writing data in your InfluxDB Cloud Dedicated cluster.
|
to create a [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) for reading and writing data in your InfluxDB Cloud Dedicated cluster.
|
||||||
Provide a token description and permissions for databases.
|
Provide a token description and permissions for databases.
|
||||||
menu:
|
menu:
|
||||||
influxdb3_cloud_dedicated:
|
influxdb3_cloud_dedicated:
|
||||||
|
|
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -2,7 +2,7 @@
|
||||||
title: Create a database token
|
title: Create a database token
|
||||||
description: >
|
description: >
|
||||||
Use the [`influxctl token create` command](/influxdb3/clustered/reference/cli/influxctl/token/create/)
|
Use the [`influxctl token create` command](/influxdb3/clustered/reference/cli/influxctl/token/create/)
|
||||||
to create a database token for reading and writing data in your InfluxDB cluster.
|
to create a [database token](/influxdb3/clustered/admin/tokens/database/) for reading and writing data in your InfluxDB cluster.
|
||||||
Provide a token description and permissions for databases.
|
Provide a token description and permissions for databases.
|
||||||
menu:
|
menu:
|
||||||
influxdb3_clustered:
|
influxdb3_clustered:
|
||||||
|
|
File diff suppressed because it is too large
|
@ -0,0 +1,19 @@
|
||||||
|
---
|
||||||
|
title: Manage the Distinct Value Cache
|
||||||
|
seotitle: Manage the Distinct Value Cache in {{< product-name >}}
|
||||||
|
description: >
|
||||||
|
The {{< product-name >}} Distinct Value Cache (DVC) lets you cache distinct
|
||||||
|
values of one or more columns in a table, improving the performance of
|
||||||
|
queries that return distinct tag and field values.
|
||||||
|
menu:
|
||||||
|
influxdb3_core:
|
||||||
|
parent: Administer InfluxDB
|
||||||
|
weight: 105
|
||||||
|
influxdb3/core/tags: [cache]
|
||||||
|
related:
|
||||||
|
- /influxdb3/core/reference/sql/functions/cache/#distinct_cache, distinct_cache SQL function
|
||||||
|
source: /shared/influxdb3-admin/distinct-value-cache/_index.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is located at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/distinct-value-cache/_index.md -->
|
|
@ -0,0 +1,48 @@
|
||||||
|
---
|
||||||
|
title: Create a Distinct Value Cache
|
||||||
|
description: |
|
||||||
|
Use the [`influxdb3 create distinct_cache` command](/influxdb3/core/reference/cli/influxdb3/create/distinct_cache/)
|
||||||
|
to create a Distinct Value Cache.
|
||||||
|
menu:
|
||||||
|
influxdb3_core:
|
||||||
|
parent: Manage the Distinct Value Cache
|
||||||
|
weight: 201
|
||||||
|
influxdb3/core/tags: [cache]
|
||||||
|
related:
|
||||||
|
- /influxdb3/core/reference/cli/influxdb3/create/distinct_cache/
|
||||||
|
list_code_example: |
|
||||||
|
{{% show-in "core" %}}
|
||||||
|
<!--pytest.mark.skip-->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 create distinct_cache \
|
||||||
|
--database example-db \
|
||||||
|
--token 00xoXX0xXXx0000XxxxXx0Xx0xx0 \
|
||||||
|
--table wind_data \
|
||||||
|
--columns country,county,city \
|
||||||
|
--max-cardinality 10000 \
|
||||||
|
--max-age 24h \
|
||||||
|
windDistinctCache
|
||||||
|
```
|
||||||
|
{{% /show-in %}}
|
||||||
|
|
||||||
|
{{% show-in "enterprise" %}}
|
||||||
|
<!--pytest.mark.skip-->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 create distinct_cache \
|
||||||
|
--database example-db \
|
||||||
|
--token 00xoXX0xXXx0000XxxxXx0Xx0xx0 \
|
||||||
|
--table home \
|
||||||
|
--node-spec node-01,node-02 \
|
||||||
|
--columns country,county,city \
|
||||||
|
--max-cardinality 10000 \
|
||||||
|
--max-age 24h \
|
||||||
|
windDistinctCache
|
||||||
|
```
|
||||||
|
{{% /show-in %}}
|
||||||
|
source: /shared/influxdb3-admin/distinct-value-cache/create.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is located at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/distinct-value-cache/create.md -->
|
|
@ -0,0 +1,27 @@
|
||||||
|
---
|
||||||
|
title: Delete a Distinct Value Cache
|
||||||
|
description: |
|
||||||
|
Use the [`influxdb3 delete distinct_cache` command](/influxdb3/core/reference/cli/influxdb3/delete/distinct_cache/)
|
||||||
|
to delete a Distinct Value Cache.
|
||||||
|
menu:
|
||||||
|
influxdb3_core:
|
||||||
|
parent: Manage the Distinct Value Cache
|
||||||
|
weight: 204
|
||||||
|
influxdb3/core/tags: [cache]
|
||||||
|
list_code_example: |
|
||||||
|
<!--pytest.mark.skip-->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 delete distinct_cache \
|
||||||
|
--database example-db \
|
||||||
|
--token 00xoXX0xXXx0000XxxxXx0Xx0xx0 \
|
||||||
|
--table wind_data \
|
||||||
|
windDistinctCache
|
||||||
|
```
|
||||||
|
related:
|
||||||
|
- /influxdb3/core/reference/cli/influxdb3/delete/distinct_cache/
|
||||||
|
source: /shared/influxdb3-admin/distinct-value-cache/delete.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is located at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/distinct-value-cache/delete.md -->
|
|
@ -0,0 +1,26 @@
|
||||||
|
---
|
||||||
|
title: Query a Distinct Value Cache
|
||||||
|
description: |
|
||||||
|
Use the [`distinct_cache()` SQL function](/influxdb3/core/reference/sql/functions/cache/#distinct_cache)
|
||||||
|
in the `FROM` clause of an SQL `SELECT` statement to query data from the
|
||||||
|
Distinct Value Cache.
|
||||||
|
menu:
|
||||||
|
influxdb3_core:
|
||||||
|
parent: Manage the Distinct Value Cache
|
||||||
|
weight: 202
|
||||||
|
influxdb3/core/tags: [cache]
|
||||||
|
list_code_example: |
|
||||||
|
```sql
|
||||||
|
SELECT * FROM distinct_cache('table-name', 'cache-name')
|
||||||
|
```
|
||||||
|
|
||||||
|
> [!Important]
|
||||||
|
> You must use SQL to query the DVC.
|
||||||
|
> InfluxQL does not support the `distinct_cache()` function.
|
||||||
|
related:
|
||||||
|
- /influxdb3/core/reference/sql/functions/cache/#distinct_cache, distinct_cache SQL function
|
||||||
|
source: /shared/influxdb3-admin/distinct-value-cache/query.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is located at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/distinct-value-cache/query.md -->
|
|
@ -0,0 +1,25 @@
|
||||||
|
---
|
||||||
|
title: Show information about Distinct Value Caches
|
||||||
|
description: |
|
||||||
|
Use the `influxdb3 show system table` command to query and output Distinct Value
|
||||||
|
Cache information from the `distinct_caches` system table.
|
||||||
|
menu:
|
||||||
|
influxdb3_core:
|
||||||
|
parent: Manage the Distinct Value Cache
|
||||||
|
name: Show Distinct Value Caches
|
||||||
|
weight: 203
|
||||||
|
influxdb3/core/tags: [cache]
|
||||||
|
list_code_example: |
|
||||||
|
<!-- pytest.mark.skip -->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 show system \
|
||||||
|
--database example-db \
|
||||||
|
--token 00xoXX0xXXx0000XxxxXx0Xx0xx0 \
|
||||||
|
table distinct_caches
|
||||||
|
```
|
||||||
|
source: /shared/influxdb3-admin/distinct-value-cache/show.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is located at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/distinct-value-cache/show.md -->
|
|
@ -0,0 +1,20 @@
|
||||||
|
---
|
||||||
|
title: Manage the Last Value Cache
|
||||||
|
seotitle: Manage the Last Value Cache in {{< product-name >}}
|
||||||
|
description: >
|
||||||
|
The {{< product-name >}} Last Value Cache (LVC) lets you cache the most
|
||||||
|
recent values for specific fields in a table, improving the performance of
|
||||||
|
queries that return the most recent value of a field for specific time series
|
||||||
|
or the last N values of a field.
|
||||||
|
menu:
|
||||||
|
influxdb3_core:
|
||||||
|
parent: Administer InfluxDB
|
||||||
|
weight: 104
|
||||||
|
influxdb3/core/tags: [cache]
|
||||||
|
related:
|
||||||
|
- /influxdb3/core/reference/sql/functions/cache/#last_cache, last_cache SQL function
|
||||||
|
source: /shared/influxdb3-admin/last-value-cache/_index.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is located at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/last-value-cache/_index.md -->
|
|
@ -0,0 +1,50 @@
|
||||||
|
---
|
||||||
|
title: Create a Last Value Cache
|
||||||
|
description: |
|
||||||
|
Use the [`influxdb3 create last_cache` command](/influxdb3/core/reference/cli/influxdb3/create/last_cache/)
|
||||||
|
to create a Last Value Cache.
|
||||||
|
menu:
|
||||||
|
influxdb3_core:
|
||||||
|
parent: Manage the Last Value Cache
|
||||||
|
weight: 201
|
||||||
|
influxdb3/core/tags: [cache]
|
||||||
|
related:
|
||||||
|
- /influxdb3/core/reference/cli/influxdb3/create/last_cache/
|
||||||
|
list_code_example: |
|
||||||
|
{{% show-in "core" %}}
|
||||||
|
<!--pytest.mark.skip-->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 create last_cache \
|
||||||
|
--database example-db \
|
||||||
|
--token 00xoXX0xXXx0000XxxxXx0Xx0xx0 \
|
||||||
|
--table home \
|
||||||
|
--key-columns room,wall \
|
||||||
|
--value-columns temp,hum,co \
|
||||||
|
--count 5 \
|
||||||
|
--ttl 30mins \
|
||||||
|
homeLastCache
|
||||||
|
```
|
||||||
|
{{% /show-in %}}
|
||||||
|
|
||||||
|
{{% show-in "enterprise" %}}
|
||||||
|
<!--pytest.mark.skip-->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 create last_cache \
|
||||||
|
--database example-db \
|
||||||
|
--token 00xoXX0xXXx0000XxxxXx0Xx0xx0 \
|
||||||
|
--table home \
|
||||||
|
--node-spec node-01,node-02 \
|
||||||
|
--key-columns room,wall \
|
||||||
|
--value-columns temp,hum,co \
|
||||||
|
--count 5 \
|
||||||
|
--ttl 30mins \
|
||||||
|
homeLastCache
|
||||||
|
```
|
||||||
|
{{% /show-in %}}
|
||||||
|
source: /shared/influxdb3-admin/last-value-cache/create.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is located at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/last-value-cache/create.md -->
|
|
@ -0,0 +1,27 @@
|
||||||
|
---
|
||||||
|
title: Delete a Last Value Cache
|
||||||
|
description: |
|
||||||
|
Use the [`influxdb3 delete last_cache` command](/influxdb3/core/reference/cli/influxdb3/delete/last_cache/)
|
||||||
|
to delete a Last Value Cache.
|
||||||
|
menu:
|
||||||
|
influxdb3_core:
|
||||||
|
parent: Manage the Last Value Cache
|
||||||
|
weight: 204
|
||||||
|
influxdb3/core/tags: [cache]
|
||||||
|
list_code_example: |
|
||||||
|
<!--pytest.mark.skip-->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 delete last_cache \
|
||||||
|
--database example-db \
|
||||||
|
--token 00xoXX0xXXx0000XxxxXx0Xx0xx0 \
|
||||||
|
--table home \
|
||||||
|
homeLastCache
|
||||||
|
```
|
||||||
|
related:
|
||||||
|
- /influxdb3/core/reference/cli/influxdb3/delete/last_cache/
|
||||||
|
source: /shared/influxdb3-admin/last-value-cache/delete.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is located at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/last-value-cache/delete.md -->
|
|
@ -0,0 +1,26 @@
|
||||||
|
---
|
||||||
|
title: Query a Last Value Cache
|
||||||
|
description: |
|
||||||
|
Use the [`last_cache()` SQL function](/influxdb3/core/reference/sql/functions/cache/#last_cache)
|
||||||
|
in the `FROM` clause of an SQL `SELECT` statement to query data from the
|
||||||
|
Last Value Cache.
|
||||||
|
menu:
|
||||||
|
influxdb3_core:
|
||||||
|
parent: Manage the Last Value Cache
|
||||||
|
weight: 202
|
||||||
|
influxdb3/core/tags: [cache]
|
||||||
|
list_code_example: |
|
||||||
|
```sql
|
||||||
|
SELECT * FROM last_cache('table-name', 'cache-name')
|
||||||
|
```
|
||||||
|
|
||||||
|
> [!Important]
|
||||||
|
> You must use SQL to query the LVC.
|
||||||
|
> InfluxQL does not support the `last_cache()` function.
|
||||||
|
related:
|
||||||
|
- /influxdb3/core/reference/sql/functions/cache/#last_cache, last_cache SQL function
|
||||||
|
source: /shared/influxdb3-admin/last-value-cache/query.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is located at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/last-value-cache/query.md -->
|
|
@ -0,0 +1,25 @@
|
||||||
|
---
|
||||||
|
title: Show information about Last Value Caches
|
||||||
|
description: |
|
||||||
|
Use the `influxdb3 show system table` command to query and output Last Value
|
||||||
|
Cache information from the `last_caches` system table.
|
||||||
|
menu:
|
||||||
|
influxdb3_core:
|
||||||
|
parent: Manage the Last Value Cache
|
||||||
|
name: Show Last Value Caches
|
||||||
|
weight: 203
|
||||||
|
influxdb3/core/tags: [cache]
|
||||||
|
list_code_example: |
|
||||||
|
<!-- pytest.mark.skip -->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 show system \
|
||||||
|
--database example-db \
|
||||||
|
--token 00xoXX0xXXx0000XxxxXx0Xx0xx0 \
|
||||||
|
table last_caches
|
||||||
|
```
|
||||||
|
source: /shared/influxdb3-admin/last-value-cache/show.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is located at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/last-value-cache/show.md -->
|
|
@ -9,7 +9,7 @@ menu:
|
||||||
influxdb3_core:
|
influxdb3_core:
|
||||||
name: Query system data
|
name: Query system data
|
||||||
parent: Administer InfluxDB
|
parent: Administer InfluxDB
|
||||||
weight: 3
|
weight: 110
|
||||||
influxdb3/core/tags: [query, api, system information, schemas]
|
influxdb3/core/tags: [query, api, system information, schemas]
|
||||||
related:
|
related:
|
||||||
- /influxdb3/core/query-data/sql/
|
- /influxdb3/core/query-data/sql/
|
||||||
|
|
|
@ -0,0 +1,17 @@
|
||||||
|
---
|
||||||
|
title: Manage tokens
|
||||||
|
description: >
|
||||||
|
InfluxDB 3 uses tokens to authenticate and authorize access to resources and data stored in {{< product-name >}}.
|
||||||
|
Use the `influxdb3` CLI or `/api/v3` HTTP API to manage tokens
|
||||||
|
for your {{% product-name %}} instance.
|
||||||
|
menu:
|
||||||
|
influxdb3_core:
|
||||||
|
parent: Administer InfluxDB
|
||||||
|
weight: 202
|
||||||
|
---
|
||||||
|
|
||||||
|
InfluxDB 3 uses tokens to authenticate and authorize access to resources and data stored in {{< product-name >}}.
|
||||||
|
Use the `influxdb3` CLI or `/api/v3` HTTP API to manage tokens
|
||||||
|
for your {{% product-name %}} instance.
|
||||||
|
|
||||||
|
{{< children hlevel="h2" readmore=true hr=true >}}
|
|
@ -0,0 +1,19 @@
|
||||||
|
---
|
||||||
|
title: Manage admin tokens
|
||||||
|
seotitle: Manage admin tokens in {{< product-name >}}
|
||||||
|
description: >
|
||||||
|
Manage admin tokens in your {{< product-name >}} instance.
|
||||||
|
An admin token grants
|
||||||
|
access to all actions (CLI commands and API endpoints) for the server.
|
||||||
|
menu:
|
||||||
|
influxdb3_core:
|
||||||
|
parent: Manage tokens
|
||||||
|
name: Admin tokens
|
||||||
|
weight: 101
|
||||||
|
influxdb3/core/tags: [tokens]
|
||||||
|
source: /shared/influxdb3-admin/tokens/_index.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/tokens/_index.md
|
||||||
|
-->
|
|
@ -0,0 +1,25 @@
|
||||||
|
---
|
||||||
|
title: Create an admin token
|
||||||
|
description: >
|
||||||
|
Use the [`influxdb3 create token --admin` command](/influxdb3/core/reference/cli/influxdb3/create/token/)
|
||||||
|
or the [HTTP API](/influxdb3/core/api/v3/)
|
||||||
|
to create an [admin token](/influxdb3/core/admin/tokens/admin/) for your {{< product-name omit="Clustered" >}} instance.
|
||||||
|
An admin token grants access to all actions on the server.
|
||||||
|
menu:
|
||||||
|
influxdb3_core:
|
||||||
|
parent: Admin tokens
|
||||||
|
weight: 201
|
||||||
|
list_code_example: |
|
||||||
|
##### CLI
|
||||||
|
```bash
|
||||||
|
influxdb3 create token --admin
|
||||||
|
```
|
||||||
|
alt_links:
|
||||||
|
cloud-dedicated: /influxdb3/cloud-dedicated/admin/tokens/create-token/
|
||||||
|
cloud-serverless: /influxdb3/cloud-serverless/admin/tokens/create-token/
|
||||||
|
source: /shared/influxdb3-admin/tokens/admin/create.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/tokens/admin/create.md
|
||||||
|
-->
|
|
@ -0,0 +1,21 @@
|
||||||
|
---
|
||||||
|
title: List an admin token
|
||||||
|
description: >
|
||||||
|
Use the `influxdb3 show tokens` command
|
||||||
|
to list the [admin token](/influxdb3/core/admin/tokens/admin/) for your {{< product-name omit="Clustered" >}} instance.
|
||||||
|
An admin token grants access to all actions on the server.
|
||||||
|
menu:
|
||||||
|
influxdb3_core:
|
||||||
|
parent: Admin tokens
|
||||||
|
weight: 201
|
||||||
|
list_code_example: |
|
||||||
|
##### CLI
|
||||||
|
```bash
|
||||||
|
influxdb3 show tokens
|
||||||
|
```
|
||||||
|
source: /shared/influxdb3-admin/tokens/admin/list.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/tokens/admin/list.md
|
||||||
|
-->
|
|
@ -0,0 +1,25 @@
|
||||||
|
---
|
||||||
|
title: Regenerate an admin token
|
||||||
|
description: >
|
||||||
|
Use the [`influxdb3 create token --admin` command](/influxdb3/core/reference/cli/influxdb3/create/token/)
|
||||||
|
or the [HTTP API](/influxdb3/core/api/v3/)
|
||||||
|
to regenerate an [admin token](/influxdb3/core/admin/tokens/admin/) for your {{< product-name omit="Clustered" >}} instance.
|
||||||
|
An admin token grants access to all actions on the server.
|
||||||
|
Regenerating an admin token deactivates the previous token.
|
||||||
|
menu:
|
||||||
|
influxdb3_core:
|
||||||
|
parent: Admin tokens
|
||||||
|
weight: 201
|
||||||
|
list_code_example: |
|
||||||
|
##### CLI
|
||||||
|
```bash
|
||||||
|
influxdb3 create token --admin \
|
||||||
|
--token ADMIN_TOKEN \
|
||||||
|
--regenerate
|
||||||
|
```
|
||||||
|
source: /shared/influxdb3-admin/tokens/admin/regenerate.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/tokens/admin/regenerate.md
|
||||||
|
-->
|
|
@ -91,13 +91,13 @@ source ~/.zshrc
|
||||||
|
|
||||||
<!-------------------------------- BEGIN LINUX -------------------------------->
|
<!-------------------------------- BEGIN LINUX -------------------------------->
|
||||||
|
|
||||||
- [{{< product-name >}} • Linux (x86) • GNU](https://download.influxdata.com/influxdb/snapshots/influxdb3-{{< product-key >}}_x86_64-unknown-linux-gnu.tar.gz)
|
- [{{< product-name >}} • Linux (AMD64, x86_64) • GNU](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_amd64.tar.gz)
|
||||||
•
|
•
|
||||||
[sha256](https://dl.influxdata.com/influxdb/snapshots/influxdb3-{{< product-key >}}_x86_64-unknown-linux-gnu.tar.gz.sha256)
|
[sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_amd64.tar.gz.sha256)
|
||||||
|
|
||||||
- [{{< product-name >}} • Linux (ARM) • GNU](https://download.influxdata.com/influxdb/snapshots/influxdb3-{{< product-key >}}_aarch64-unknown-linux-gnu.tar.gz)
|
- [{{< product-name >}} • Linux (ARM64, AArch64) • GNU](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_arm64.tar.gz)
|
||||||
•
|
•
|
||||||
[sha256](https://dl.influxdata.com/influxdb/snapshots/influxdb3-{{< product-key >}}_aarch64-unknown-linux-gnu.tar.gz.sha256)
|
[sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_arm64.tar.gz.sha256)
|
||||||
|
|
||||||
<!--------------------------------- END LINUX --------------------------------->
|
<!--------------------------------- END LINUX --------------------------------->
|
||||||
|
|
||||||
|
@ -106,9 +106,9 @@ source ~/.zshrc
|
||||||
|
|
||||||
<!-------------------------------- BEGIN MACOS -------------------------------->
|
<!-------------------------------- BEGIN MACOS -------------------------------->
|
||||||
|
|
||||||
- [{{< product-name >}} • macOS (Silicon)](https://download.influxdata.com/influxdb/snapshots/influxdb3-{{< product-key >}}_aarch64-apple-darwin.tar.gz)
|
- [{{< product-name >}} • macOS (Silicon, ARM64)](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_darwin_arm64.tar.gz)
|
||||||
•
|
•
|
||||||
[sha256](https://dl.influxdata.com/influxdb/snapshots/influxdb3-{{< product-key >}}_aarch64-apple-darwin.tar.gz.sha256)
|
[sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_darwin_arm64.tar.gz.sha256)
|
||||||
|
|
||||||
> [!Note]
|
> [!Note]
|
||||||
> macOS Intel builds are coming soon.
|
> macOS Intel builds are coming soon.
|
||||||
|
@ -120,9 +120,9 @@ source ~/.zshrc
|
||||||
|
|
||||||
<!------------------------------- BEGIN WINDOWS ------------------------------->
|
<!------------------------------- BEGIN WINDOWS ------------------------------->
|
||||||
|
|
||||||
- [{{< product-name >}} • Windows (x86)](https://dl.influxdata.com/influxdb/snapshots/influxdb3-{{< product-key >}}_x86_64-pc-windows-gnu.tar.gz)
|
- [{{< product-name >}} • Windows (AMD64, x86_64)](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}-windows_amd64.zip)
|
||||||
•
|
•
|
||||||
[sha256](https://dl.influxdata.com/influxdb/snapshots/influxdb3-{{< product-key >}}_x86_64-pc-windows-gnu.tar.gz.sha256)
|
[sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}-windows_amd64.zip.sha256)
|
||||||
|
|
||||||
<!-------------------------------- END WINDOWS -------------------------------->
|
<!-------------------------------- END WINDOWS -------------------------------->
|
||||||
|
|
||||||
|
|
|
@ -39,7 +39,7 @@ influxdb3 [GLOBAL-OPTIONS] [COMMAND]
|
||||||
| Option | | Description |
|
| Option | | Description |
|
||||||
| :----- | :------------------------------------ | :------------------------------------------------------------------------------------------------ |
|
| :----- | :------------------------------------ | :------------------------------------------------------------------------------------------------ |
|
||||||
| | `--num-threads` | Maximum number of IO runtime threads to use |
|
| | `--num-threads` | Maximum number of IO runtime threads to use |
|
||||||
| | `--io-runtime-type` | IO tokio runtime type (`current-thread`, `multi-thread` _(default)_, or `multi-thread-alt`) |
|
| | `--io-runtime-type` | IO tokio runtime type (`current-thread`, `multi-thread` _(default)_, or `multi-thread-alt`) |
|
||||||
| | `--io-runtime-disable-lifo-slot` | Disable LIFO slot of IO runtime |
|
| | `--io-runtime-disable-lifo-slot` | Disable LIFO slot of IO runtime |
|
||||||
| | `--io-runtime-event-interval` | Number of scheduler ticks after which the IO tokio runtime scheduler will poll for external events |
|
| | `--io-runtime-event-interval` | Number of scheduler ticks after which the IO tokio runtime scheduler will poll for external events |
|
||||||
| | `--io-runtime-global-queue-interval` | Number of scheduler ticks after which the IO runtime scheduler will poll the global task queue |
|
| | `--io-runtime-global-queue-interval` | Number of scheduler ticks after which the IO runtime scheduler will poll the global task queue |
|
||||||
|
@ -48,6 +48,7 @@ influxdb3 [GLOBAL-OPTIONS] [COMMAND]
|
||||||
| | `--io-runtime-thread-keep-alive` | Custom timeout for a thread in the blocking pool of the tokio IO runtime |
|
| | `--io-runtime-thread-keep-alive` | Custom timeout for a thread in the blocking pool of the tokio IO runtime |
|
||||||
| | `--io-runtime-thread-priority` | Set the thread priority for tokio IO runtime workers |
|
| | `--io-runtime-thread-priority` | Set the thread priority for tokio IO runtime workers |
|
||||||
| `-h` | `--help` | Print help information |
|
| `-h` | `--help` | Print help information |
|
||||||
|
| | `--help-all` | Print detailed help information |
|
||||||
| `-V` | `--version` | Print version |
|
| `-V` | `--version` | Print version |
|
||||||
|
|
||||||
### Option environment variables
|
### Option environment variables
|
||||||
|
|
|
@ -55,6 +55,7 @@ influxdb3 serve [OPTIONS] --node-id <HOST_IDENTIFIER_PREFIX>
|
||||||
| | `--object-store-retry-timeout` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-retry-timeout)_ |
|
| | `--object-store-retry-timeout` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-retry-timeout)_ |
|
||||||
| | `--object-store-cache-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-cache-endpoint)_ |
|
| | `--object-store-cache-endpoint` | _See [configuration options](/influxdb3/core/reference/config-options/#object-store-cache-endpoint)_ |
|
||||||
| `-h` | `--help` | Print help information |
|
| `-h` | `--help` | Print help information |
|
||||||
|
| | `--help-all` | Print detailed help information |
|
||||||
| | `--log-filter` | _See [configuration options](/influxdb3/core/reference/config-options/#log-filter)_ |
|
| | `--log-filter` | _See [configuration options](/influxdb3/core/reference/config-options/#log-filter)_ |
|
||||||
| `-v` | `--verbose` | Enable verbose output |
|
| `-v` | `--verbose` | Enable verbose output |
|
||||||
| | `--log-destination` | _See [configuration options](/influxdb3/core/reference/config-options/#log-destination)_ |
|
| | `--log-destination` | _See [configuration options](/influxdb3/core/reference/config-options/#log-destination)_ |
|
||||||
|
@ -90,7 +91,7 @@ influxdb3 serve [OPTIONS] --node-id <HOST_IDENTIFIER_PREFIX>
|
||||||
| | `--wal-max-write-buffer-size` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-max-write-buffer-size)_ |
|
| | `--wal-max-write-buffer-size` | _See [configuration options](/influxdb3/core/reference/config-options/#wal-max-write-buffer-size)_ |
|
||||||
| | `--snapshotted-wal-files-to-keep` | _See [configuration options](/influxdb3/core/reference/config-options/#snapshotted-wal-files-to-keep)_ |
|
| | `--snapshotted-wal-files-to-keep` | _See [configuration options](/influxdb3/core/reference/config-options/#snapshotted-wal-files-to-keep)_ |
|
||||||
| | `--query-log-size` | _See [configuration options](/influxdb3/core/reference/config-options/#query-log-size)_ |
|
| | `--query-log-size` | _See [configuration options](/influxdb3/core/reference/config-options/#query-log-size)_ |
|
||||||
| | `--parquet-mem-cache-size` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-size)_ |
|
| | `--parquet-mem-cache-size` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-size)_ |
|
||||||
| | `--parquet-mem-cache-prune-percentage` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-prune-percentage)_ |
|
| | `--parquet-mem-cache-prune-percentage` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-prune-percentage)_ |
|
||||||
| | `--parquet-mem-cache-prune-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-prune-interval)_ |
|
| | `--parquet-mem-cache-prune-interval` | _See [configuration options](/influxdb3/core/reference/config-options/#parquet-mem-cache-prune-interval)_ |
|
||||||
| | `--disable-parquet-mem-cache` | _See [configuration options](/influxdb3/core/reference/config-options/#disable-parquet-mem-cache)_ |
|
| | `--disable-parquet-mem-cache` | _See [configuration options](/influxdb3/core/reference/config-options/#disable-parquet-mem-cache)_ |
|
||||||
|
|
|
@ -0,0 +1,15 @@
|
||||||
|
---
|
||||||
|
title: Glossary
|
||||||
|
description: >
|
||||||
|
Terms related to InfluxData products and platforms.
|
||||||
|
weight: 109
|
||||||
|
menu:
|
||||||
|
influxdb3_core:
|
||||||
|
parent: Reference
|
||||||
|
influxdb3/core/tags: [glossary]
|
||||||
|
source: /shared/influxdb3-reference/glossary.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is at
|
||||||
|
// SOURCE content/shared/influxdb3-reference/glossary.md
|
||||||
|
-->
|
|
@ -0,0 +1,19 @@
|
||||||
|
---
|
||||||
|
title: Manage the Distinct Value Cache
|
||||||
|
seotitle: Manage the Distinct Value Cache in {{< product-name >}}
|
||||||
|
description: >
|
||||||
|
The {{< product-name >}} Distinct Value Cache (DVC) lets you cache distinct
|
||||||
|
values of one or more columns in a table, improving the performance of
|
||||||
|
queries that return distinct tag and field values.
|
||||||
|
menu:
|
||||||
|
influxdb3_enterprise:
|
||||||
|
parent: Administer InfluxDB
|
||||||
|
weight: 105
|
||||||
|
influxdb3/enterprise/tags: [cache]
|
||||||
|
related:
|
||||||
|
- /influxdb3/enterprise/reference/sql/functions/cache/#distinct_cache, distinct_cache SQL function
|
||||||
|
source: /shared/influxdb3-admin/distinct-value-cache/_index.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is located at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/distinct-value-cache/_index.md -->
|
|
@ -0,0 +1,48 @@
|
||||||
|
---
|
||||||
|
title: Create a Distinct Value Cache
|
||||||
|
description: |
|
||||||
|
Use the [`influxdb3 create distinct_cache` command](/influxdb3/enterprise/reference/cli/influxdb3/create/distinct_cache/)
|
||||||
|
to create a Distinct Value Cache.
|
||||||
|
menu:
|
||||||
|
influxdb3_enterprise:
|
||||||
|
parent: Manage the Distinct Value Cache
|
||||||
|
weight: 201
|
||||||
|
influxdb3/enterprise/tags: [cache]
|
||||||
|
related:
|
||||||
|
- /influxdb3/enterprise/reference/cli/influxdb3/create/distinct_cache/
|
||||||
|
list_code_example: |
|
||||||
|
{{% show-in "core" %}}
|
||||||
|
<!--pytest.mark.skip-->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 create distinct_cache \
|
||||||
|
--database example-db \
|
||||||
|
--token 00xoXX0xXXx0000XxxxXx0Xx0xx0 \
|
||||||
|
--table wind_data \
|
||||||
|
--columns country,county,city \
|
||||||
|
--max-cardinality 10000 \
|
||||||
|
--max-age 24h \
|
||||||
|
windDistinctCache
|
||||||
|
```
|
||||||
|
{{% /show-in %}}
|
||||||
|
|
||||||
|
{{% show-in "enterprise" %}}
|
||||||
|
<!--pytest.mark.skip-->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 create distinct_cache \
|
||||||
|
--database example-db \
|
||||||
|
--token 00xoXX0xXXx0000XxxxXx0Xx0xx0 \
|
||||||
|
--table home \
|
||||||
|
--node-spec node-01,node-02 \
|
||||||
|
--columns country,county,city \
|
||||||
|
--max-cardinality 10000 \
|
||||||
|
--max-age 24h \
|
||||||
|
windDistinctCache
|
||||||
|
```
|
||||||
|
{{% /show-in %}}
|
||||||
|
source: /shared/influxdb3-admin/distinct-value-cache/create.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is located at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/distinct-value-cache/create.md -->
|
|
@ -0,0 +1,27 @@
|
||||||
|
---
|
||||||
|
title: Delete a Distinct Value Cache
|
||||||
|
description: |
|
||||||
|
Use the [`influxdb3 delete distinct_cache` command](/influxdb3/enterprise/reference/cli/influxdb3/delete/distinct_cache/)
|
||||||
|
to delete a Distinct Value Cache.
|
||||||
|
menu:
|
||||||
|
influxdb3_enterprise:
|
||||||
|
parent: Manage the Distinct Value Cache
|
||||||
|
weight: 204
|
||||||
|
influxdb3/enterprise/tags: [cache]
|
||||||
|
list_code_example: |
|
||||||
|
<!--pytest.mark.skip-->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 delete distinct_cache \
|
||||||
|
--database example-db \
|
||||||
|
--token 00xoXX0xXXx0000XxxxXx0Xx0xx0 \
|
||||||
|
--table wind_data \
|
||||||
|
windDistinctCache
|
||||||
|
```
|
||||||
|
related:
|
||||||
|
- /influxdb3/enterprise/reference/cli/influxdb3/delete/distinct_cache/
|
||||||
|
source: /shared/influxdb3-admin/distinct-value-cache/delete.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is located at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/distinct-value-cache/delete.md -->
|
|
@ -0,0 +1,26 @@
|
||||||
|
---
|
||||||
|
title: Query a Distinct Value Cache
|
||||||
|
description: |
|
||||||
|
Use the [`distinct_cache()` SQL function](/influxdb3/enterprise/reference/sql/functions/cache/#distinct_cache)
|
||||||
|
in the `FROM` clause of an SQL `SELECT` statement to query data from the
|
||||||
|
Distinct Value Cache.
|
||||||
|
menu:
|
||||||
|
influxdb3_enterprise:
|
||||||
|
parent: Manage the Distinct Value Cache
|
||||||
|
weight: 202
|
||||||
|
influxdb3/enterprise/tags: [cache]
|
||||||
|
list_code_example: |
|
||||||
|
```sql
|
||||||
|
SELECT * FROM distinct_cache('table-name', 'cache-name')
|
||||||
|
```
|
||||||
|
|
||||||
|
> [!Important]
|
||||||
|
> You must use SQL to query the DVC.
|
||||||
|
> InfluxQL does not support the `distinct_cache()` function.
|
||||||
|
related:
|
||||||
|
- /influxdb3/enterprise/reference/sql/functions/cache/#distinct_cache, distinct_cache SQL function
|
||||||
|
source: /shared/influxdb3-admin/distinct-value-cache/query.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is located at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/distinct-value-cache/query.md -->
|
|
@ -0,0 +1,25 @@
|
||||||
|
---
|
||||||
|
title: Show information about Distinct Value Caches
|
||||||
|
description: |
|
||||||
|
Use the `influxdb3 show system table` command to query and output Distinct Value
|
||||||
|
Cache information from the `distinct_caches` system table.
|
||||||
|
menu:
|
||||||
|
influxdb3_enterprise:
|
||||||
|
parent: Manage the Distinct Value Cache
|
||||||
|
name: Show Distinct Value Caches
|
||||||
|
weight: 203
|
||||||
|
influxdb3/enterprise/tags: [cache]
|
||||||
|
list_code_example: |
|
||||||
|
<!-- pytest.mark.skip -->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 show system \
|
||||||
|
--database example-db \
|
||||||
|
--token 00xoXX0xXXx0000XxxxXx0Xx0xx0 \
|
||||||
|
table distinct_caches
|
||||||
|
```
|
||||||
|
source: /shared/influxdb3-admin/distinct-value-cache/show.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is located at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/distinct-value-cache/show.md -->
|
|
@ -0,0 +1,20 @@
|
||||||
|
---
|
||||||
|
title: Manage the Last Value Cache
|
||||||
|
seotitle: Manage the Last Value Cache in {{< product-name >}}
|
||||||
|
description: >
|
||||||
|
The {{< product-name >}} Last Value Cache (LVC) lets you cache the most
|
||||||
|
recent values for specific fields in a table, improving the performance of
|
||||||
|
queries that return the most recent value of a field for specific time series
|
||||||
|
or the last N values of a field.
|
||||||
|
menu:
|
||||||
|
influxdb3_enterprise:
|
||||||
|
parent: Administer InfluxDB
|
||||||
|
weight: 104
|
||||||
|
influxdb3/enterprise/tags: [cache]
|
||||||
|
related:
|
||||||
|
- /influxdb3/enterprise/reference/sql/functions/cache/#last_cache, last_cache SQL function
|
||||||
|
source: /shared/influxdb3-admin/last-value-cache/_index.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is located at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/last-value-cache/_index.md -->
|
|
@ -0,0 +1,50 @@
|
||||||
|
---
|
||||||
|
title: Create a Last Value Cache
|
||||||
|
description: |
|
||||||
|
Use the [`influxdb3 create last_cache` command](/influxdb3/enterprise/reference/cli/influxdb3/create/last_cache/)
|
||||||
|
to create a Last Value Cache.
|
||||||
|
menu:
|
||||||
|
influxdb3_enterprise:
|
||||||
|
parent: Manage the Last Value Cache
|
||||||
|
weight: 201
|
||||||
|
influxdb3/enterprise/tags: [cache]
|
||||||
|
related:
|
||||||
|
- /influxdb3/enterprise/reference/cli/influxdb3/create/last_cache/
|
||||||
|
list_code_example: |
|
||||||
|
{{% show-in "core" %}}
|
||||||
|
<!--pytest.mark.skip-->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 create last_cache \
|
||||||
|
--database example-db \
|
||||||
|
--token 00xoXX0xXXx0000XxxxXx0Xx0xx0 \
|
||||||
|
--table home \
|
||||||
|
--key-columns room,wall \
|
||||||
|
--value-columns temp,hum,co \
|
||||||
|
--count 5 \
|
||||||
|
--ttl 30mins \
|
||||||
|
homeLastCache
|
||||||
|
```
|
||||||
|
{{% /show-in %}}
|
||||||
|
|
||||||
|
{{% show-in "enterprise" %}}
|
||||||
|
<!--pytest.mark.skip-->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 create last_cache \
|
||||||
|
--database example-db \
|
||||||
|
--token 00xoXX0xXXx0000XxxxXx0Xx0xx0 \
|
||||||
|
--table home \
|
||||||
|
--node-spec node-01,node-02 \
|
||||||
|
--key-columns room,wall \
|
||||||
|
--value-columns temp,hum,co \
|
||||||
|
--count 5 \
|
||||||
|
--ttl 30mins \
|
||||||
|
homeLastCache
|
||||||
|
```
|
||||||
|
{{% /show-in %}}
|
||||||
|
source: /shared/influxdb3-admin/last-value-cache/create.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is located at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/last-value-cache/create.md -->
|
|
@ -0,0 +1,27 @@
|
||||||
|
---
|
||||||
|
title: Delete a Last Value Cache
|
||||||
|
description: |
|
||||||
|
Use the [`influxdb3 delete last_cache` command](/influxdb3/enterprise/reference/cli/influxdb3/delete/last_cache/)
|
||||||
|
to delete a Last Value Cache.
|
||||||
|
menu:
|
||||||
|
influxdb3_enterprise:
|
||||||
|
parent: Manage the Last Value Cache
|
||||||
|
weight: 204
|
||||||
|
influxdb3/enterprise/tags: [cache]
|
||||||
|
list_code_example: |
|
||||||
|
<!--pytest.mark.skip-->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 delete last_cache \
|
||||||
|
--database example-db \
|
||||||
|
--token 00xoXX0xXXx0000XxxxXx0Xx0xx0 \
|
||||||
|
--table home \
|
||||||
|
homeLastCache
|
||||||
|
```
|
||||||
|
related:
|
||||||
|
- /influxdb3/enterprise/reference/cli/influxdb3/delete/last_cache/
|
||||||
|
source: /shared/influxdb3-admin/last-value-cache/delete.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is located at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/last-value-cache/delete.md -->
|
|
@ -0,0 +1,26 @@
|
||||||
|
---
|
||||||
|
title: Query a Last Value Cache
|
||||||
|
description: |
|
||||||
|
Use the [`last_cache()` SQL function](/influxdb3/enterprise/reference/sql/functions/cache/#last_cache)
|
||||||
|
in the `FROM` clause of an SQL `SELECT` statement to query data from the
|
||||||
|
Last Value Cache.
|
||||||
|
menu:
|
||||||
|
influxdb3_enterprise:
|
||||||
|
parent: Manage the Last Value Cache
|
||||||
|
weight: 202
|
||||||
|
influxdb3/enterprise/tags: [cache]
|
||||||
|
list_code_example: |
|
||||||
|
```sql
|
||||||
|
SELECT * FROM last_cache('table-name', 'cache-name')
|
||||||
|
```
|
||||||
|
|
||||||
|
> [!Important]
|
||||||
|
> You must use SQL to query the LVC.
|
||||||
|
> InfluxQL does not support the `last_cache()` function.
|
||||||
|
related:
|
||||||
|
- /influxdb3/enterprise/reference/sql/functions/cache/#last_cache, last_cache SQL function
|
||||||
|
source: /shared/influxdb3-admin/last-value-cache/query.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is located at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/last-value-cache/query.md -->
|
|
@ -0,0 +1,25 @@
|
||||||
|
---
|
||||||
|
title: Show information about Last Value Caches
|
||||||
|
description: |
|
||||||
|
Use the `influxdb3 show system table` command to query and output Last Value
|
||||||
|
Cache information from the `last_caches` system table.
|
||||||
|
menu:
|
||||||
|
influxdb3_enterprise:
|
||||||
|
parent: Manage the Last Value Cache
|
||||||
|
name: Show Last Value Caches
|
||||||
|
weight: 203
|
||||||
|
influxdb3/enterprise/tags: [cache]
|
||||||
|
list_code_example: |
|
||||||
|
<!-- pytest.mark.skip -->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 show system \
|
||||||
|
--database example-db \
|
||||||
|
--token 00xoXX0xXXx0000XxxxXx0Xx0xx0 \
|
||||||
|
table last_caches
|
||||||
|
```
|
||||||
|
source: /shared/influxdb3-admin/last-value-cache/show.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is located at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/last-value-cache/show.md -->
|
|
@ -0,0 +1,148 @@
|
||||||
|
---
|
||||||
|
title: Manage your InfluxDB 3 Enterprise license
|
||||||
|
description: >
|
||||||
|
{{< product-name >}} licenses authorize the use of the {{< product-name >}}
|
||||||
|
software. Learn how licenses work, how to activate and renew licenses, and more.
|
||||||
|
menu:
|
||||||
|
influxdb3_enterprise:
|
||||||
|
name: Manage your license
|
||||||
|
parent: Administer InfluxDB
|
||||||
|
weight: 101
|
||||||
|
---
|
||||||
|
|
||||||
|
{{< product-name >}} licenses authorize the use of the {{< product-name >}}
|
||||||
|
software and apply to a single cluster. Licenses are primarily based on the
|
||||||
|
number of CPUs InfluxDB can use, but there are other limitations depending on
|
||||||
|
the license type. The following {{< product-name >}} license types are available:
|
||||||
|
|
||||||
|
- **Trial**: 30-day trial license with full access to {{< product-name >}} capabilities.
|
||||||
|
- **At-Home**: For at-home hobbyist use with limited access to {{< product-name >}} capabilities.
|
||||||
|
- **Commercial**: Commercial license with full access to {{< product-name >}} capabilities.
|
||||||
|
|
||||||
|
#### License feature comparison
|
||||||
|
|
||||||
|
| Features | Trial | At-Home | Commercial |
|
||||||
|
| :------------- | :-----------------------: | :-----: | :-----------------------: |
|
||||||
|
| CPU Core Limit | 256 | 2 | _Per contract_ |
|
||||||
|
| Expiration | 30 days | _Never_ | _Per contract_ |
|
||||||
|
| Multi-node | {{% icon "check" "v2" %}} | | {{% icon "check" "v2" %}} |
|
||||||
|
| Commercial use | {{% icon "check" "v2" %}} | | {{% icon "check" "v2" %}} |
|
||||||
|
|
||||||
|
{{% caption %}}
|
||||||
|
All other {{< product-name >}} features are available to all licenses.
|
||||||
|
{{% /caption %}}
|
||||||
|
|
||||||
|
## CPU limit
|
||||||
|
|
||||||
|
Each {{< product-name >}} license limits the number of CPUs InfluxDB can use.
|
||||||
|
The CPU limit is per cluster, not per machine. A cluster may consist of
|
||||||
|
multiple nodes that share the available CPU limit.
|
||||||
|
|
||||||
|
For example, you can purchase a 32-CPU Commercial license and set up an
|
||||||
|
{{< product-name >}} cluster with the following:
|
||||||
|
|
||||||
|
- 3 × writer nodes, each with 4 CPUs (12 total)
|
||||||
|
- 1 × compactor node with 8 CPUs
|
||||||
|
- 3 × query nodes, each with 4 CPUs (12 total)

In this example, the cluster uses 12 + 8 + 12 = 32 CPUs in total, exactly the
number authorized by the 32-CPU license.
|
||||||
|
|
||||||
|
With the {{< product-name >}} Commercial license, CPU cores are purchased in
|
||||||
|
batches of 8, 16, 32, 64, or 128 cores.
|
||||||
|
|
||||||
|
### CPU accounting
|
||||||
|
|
||||||
|
CPU cores are determined by whatever the operating system of the host machine
|
||||||
|
reports as its core count. {{< product-name >}} does not differentiate between
|
||||||
|
physical and virtual CPU cores.
|
||||||
|
|
||||||
|
> [!Note]
|
||||||
|
> If using Linux, InfluxDB uses whatever cgroup CPU accounting is active--for
|
||||||
|
> example: `cpuset` or `cpu.shares`.
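
As a quick check, you can see the core count that the host operating system
reports--this is a general Linux command used as a sketch here, not part of the
`influxdb3` CLI:

```bash
# Core count reported by the host OS--this is what counts toward the CPU limit
nproc
```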
|
||||||
|
|
||||||
|
## Activate a license
|
||||||
|
|
||||||
|
Each {{< product-name >}} license must be activated, but the process of activating
|
||||||
|
the license depends on the license type:
|
||||||
|
|
||||||
|
- [Activate a Trial or At-Home license](#activate-a-trial-or-at-home-license)
|
||||||
|
- [Activate a Commercial license](#activate-a-commercial-license)
|
||||||
|
|
||||||
|
### Activate a Trial or At-Home license
|
||||||
|
|
||||||
|
When you start the {{< product-name >}} server, it asks what type of
|
||||||
|
license you would like to use. Select `trial` or `home` and provide your
|
||||||
|
email address. The server auto-generates and stores your license.
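
In practice, this happens the first time you run `influxdb3 serve` without an
existing license--for example, a sketch that omits all other serve options:

```bash
# On first start without a license, the server prompts for the license type
# (trial or home) and an email address, then generates and stores the license.
influxdb3 serve \
  --cluster-id cluster01 \
  --node-id node01
```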
|
||||||
|
|
||||||
|
### Activate a Commercial license
|
||||||
|
|
||||||
|
1. [Contact InfluxData Sales](https://influxdata.com/contact-sales/) to obtain
|
||||||
|
an {{< product-name >}} Commercial license. Provide the following:
|
||||||
|
|
||||||
|
- Cluster UUID
|
||||||
|
- Object Store Info
|
||||||
|
|
||||||
|
> [!Note]
|
||||||
|
> This information is provided in the output of the {{< product-name >}}
|
||||||
|
> server if you try to start the server without a valid license.
|
||||||
|
|
||||||
|
InfluxData will provide you with a Commercial license file.
|
||||||
|
|
||||||
|
2. Provide the following when starting the {{< product-name >}} server:
|
||||||
|
|
||||||
|
- **License email**: The email address associated with your Commercial license.
|
||||||
|
|
||||||
|
Use either the `--license-email` option or set the
|
||||||
|
`INFLUXDB3_ENTERPRISE_LICENSE_EMAIL` environment variable.
|
||||||
|
|
||||||
|
- **License file**: The file path of the provided Commercial license file.
|
||||||
|
|
||||||
|
Use either the `--license-file` option or set the
|
||||||
|
`INFLUXDB3_ENTERPRISE_LICENSE_FILE` environment variable.
|
||||||
|
|
||||||
|
{{< code-tabs-wrapper >}}
|
||||||
|
{{% code-tabs %}}
|
||||||
|
[influxdb3 options](#)
|
||||||
|
[Environment variables](#)
|
||||||
|
{{% /code-tabs %}}
|
||||||
|
{{% code-tab-content %}}
|
||||||
|
<!------------------------ BEGIN INFLUXDB3 CLI OPTIONS ------------------------>
|
||||||
|
<!-- pytest.mark.skip -->
|
||||||
|
```bash
|
||||||
|
influxdb3 serve \
|
||||||
|
--cluster-id cluster01 \
|
||||||
|
--node-id node01 \
|
||||||
|
--license-email example@email.com \
|
||||||
|
--license-file /path/to/license-file.jwt \
|
||||||
|
# ...
|
||||||
|
```
|
||||||
|
<!------------------------- END INFLUXDB3 CLI OPTIONS ------------------------->
|
||||||
|
{{% /code-tab-content %}}
|
||||||
|
{{% code-tab-content %}}
|
||||||
|
<!------------------------ BEGIN ENVIRONMENT VARIABLES ------------------------>
|
||||||
|
<!-- pytest.mark.skip -->
|
||||||
|
```bash
|
||||||
|
INFLUXDB3_ENTERPRISE_LICENSE_EMAIL=example@email.com
|
||||||
|
INFLUXDB3_ENTERPRISE_LICENSE_FILE=/path/to/license-file.jwt
|
||||||
|
|
||||||
|
influxdb3 serve \
|
||||||
|
--cluster-id cluster01 \
|
||||||
|
--node-id node01 \
|
||||||
|
# ...
|
||||||
|
```
|
||||||
|
<!------------------------- END ENVIRONMENT VARIABLES ------------------------->
|
||||||
|
{{% /code-tab-content %}}
|
||||||
|
{{< /code-tabs-wrapper >}}
|
||||||
|
|
||||||
|
## Renew a license
|
||||||
|
|
||||||
|
To renew an {{< product-name >}} Commercial license, contact
|
||||||
|
[InfluxData Sales](https://influxdata.com/contact-sales/).
|
||||||
|
|
||||||
|
## Expiration behavior
|
||||||
|
|
||||||
|
When your {{< product-name >}} license expires, the following occurs:
|
||||||
|
|
||||||
|
- Write requests continue to be accepted and processed.
|
||||||
|
- Compactions continue to optimize persisted data.
|
||||||
|
- Query requests return an error.
|
||||||
|
- If the {{< product-name >}} server stops, it will not restart without a valid,
|
||||||
|
non-expired license.
|
|
@ -9,7 +9,7 @@ menu:
|
||||||
influxdb3_enterprise:
|
influxdb3_enterprise:
|
||||||
name: Query system data
|
name: Query system data
|
||||||
parent: Administer InfluxDB
|
parent: Administer InfluxDB
|
||||||
weight: 3
|
weight: 110
|
||||||
influxdb3/enterprise/tags: [query, api, system information, schemas]
|
influxdb3/enterprise/tags: [query, api, system information, schemas]
|
||||||
related:
|
related:
|
||||||
- /influxdb3/enterprise/query-data/sql/
|
- /influxdb3/enterprise/query-data/sql/
|
||||||
|
|
|
@ -0,0 +1,17 @@
|
||||||
|
---
|
||||||
|
title: Manage tokens
|
||||||
|
description: >
|
||||||
|
InfluxDB 3 uses tokens to authenticate and authorize access to resources and data stored in your {{< product-name >}} instance.
|
||||||
|
Use the `influxdb3` CLI or `/api/v3` HTTP API to manage tokens
|
||||||
|
for your {{% product-name %}} instance.
|
||||||
|
menu:
|
||||||
|
influxdb3_enterprise:
|
||||||
|
parent: Administer InfluxDB
|
||||||
|
weight: 202
|
||||||
|
---
|
||||||
|
|
||||||
|
InfluxDB 3 uses tokens to authenticate and authorize access to resources and data stored in your {{< product-name >}} instance.
|
||||||
|
Use the `influxdb3` CLI or `/api/v3` HTTP API to manage tokens
|
||||||
|
for your {{% product-name %}} instance.
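
For example, a typical first step (covered in detail in the pages below) is
creating an admin token with the CLI:

```bash
influxdb3 create token --admin
```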
|
||||||
|
|
||||||
|
{{< children hlevel="h2" readmore=true hr=true >}}
|
|
@ -0,0 +1,19 @@
|
||||||
|
---
|
||||||
|
title: Manage admin tokens
|
||||||
|
seotitle: Manage admin tokens in {{< product-name >}}
|
||||||
|
description: >
|
||||||
|
Manage admin tokens in your {{< product-name >}} instance.
|
||||||
|
An admin token grants
|
||||||
|
access to all actions (CLI commands and API endpoints) for the server.
|
||||||
|
menu:
|
||||||
|
influxdb3_enterprise:
|
||||||
|
parent: Manage tokens
|
||||||
|
name: Admin tokens
|
||||||
|
weight: 101
|
||||||
|
influxdb3/enterprise/tags: [tokens]
|
||||||
|
source: /shared/influxdb3-admin/tokens/admin/_index.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/tokens/admin/_index.md
|
||||||
|
-->
|
|
@ -0,0 +1,25 @@
|
||||||
|
---
|
||||||
|
title: Create an admin token
|
||||||
|
description: >
|
||||||
|
Use the [`influxdb3 create token --admin` command](/influxdb3/enterprise/reference/cli/influxdb3/create/token/)
|
||||||
|
or the [HTTP API](/influxdb3/enterprise/api/v3/)
|
||||||
|
to create an [admin token](/influxdb3/enterprise/admin/tokens/admin/) for your {{< product-name omit="Clustered" >}} instance.
|
||||||
|
An admin token grants access to all actions on the server.
|
||||||
|
menu:
|
||||||
|
influxdb3_enterprise:
|
||||||
|
parent: Admin tokens
|
||||||
|
weight: 201
|
||||||
|
list_code_example: |
|
||||||
|
##### CLI
|
||||||
|
```bash
|
||||||
|
influxdb3 create token --admin
|
||||||
|
```
|
||||||
|
alt_links:
|
||||||
|
cloud-dedicated: /influxdb3/cloud-dedicated/admin/tokens/create-token/
|
||||||
|
cloud-serverless: /influxdb3/cloud-serverless/admin/tokens/create-token/
|
||||||
|
source: /shared/influxdb3-admin/tokens/admin/create.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/tokens/admin/create.md
|
||||||
|
-->
|
|
@ -0,0 +1,21 @@
|
||||||
|
---
|
||||||
|
title: List an admin token
|
||||||
|
description: >
|
||||||
|
Use the `influxdb3 show tokens` command
|
||||||
|
to list the [admin token](/influxdb3/enterprise/admin/tokens/admin/) for your {{< product-name omit="Clustered" >}} instance.
|
||||||
|
An admin token grants access to all actions on the server.
|
||||||
|
menu:
|
||||||
|
influxdb3_enterprise:
|
||||||
|
parent: Admin tokens
|
||||||
|
weight: 201
|
||||||
|
list_code_example: |
|
||||||
|
##### CLI
|
||||||
|
```bash
|
||||||
|
influxdb3 show tokens
|
||||||
|
```
|
||||||
|
source: /shared/influxdb3-admin/tokens/admin/list.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/tokens/admin/list.md
|
||||||
|
-->
|
|
@ -0,0 +1,25 @@
|
||||||
|
---
|
||||||
|
title: Regenerate an admin token
|
||||||
|
description: >
|
||||||
|
Use the [`influxdb3 create token --admin` command](/influxdb3/enterprise/reference/cli/influxdb3/create/token/)
|
||||||
|
or the [HTTP API](/influxdb3/enterprise/api/v3/)
|
||||||
|
to regenerate an [admin token](/influxdb3/enterprise/admin/tokens/admin/) for your {{< product-name omit="Clustered" >}} instance.
|
||||||
|
An admin token grants access to all actions on the server.
|
||||||
|
Regenerating an admin token deactivates the previous token.
|
||||||
|
menu:
|
||||||
|
influxdb3_enterprise:
|
||||||
|
parent: Admin tokens
|
||||||
|
weight: 201
|
||||||
|
list_code_example: |
|
||||||
|
##### CLI
|
||||||
|
```bash
|
||||||
|
influxdb3 create token --admin \
|
||||||
|
--token ADMIN_TOKEN \
|
||||||
|
--regenerate
|
||||||
|
```
|
||||||
|
source: /shared/influxdb3-admin/tokens/admin/regenerate.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/tokens/admin/regenerate.md
|
||||||
|
-->
|
|
@ -0,0 +1,21 @@
|
||||||
|
---
|
||||||
|
title: Manage resource tokens
|
||||||
|
seotitle: Manage resource tokens in {{< product-name >}}
|
||||||
|
description: >
|
||||||
|
Manage resource tokens in your {{< product-name >}} instance.
|
||||||
|
Resource tokens grant read and write permissions to resources, such as databases
|
||||||
|
and system information endpoints in your {{< product-name >}} instance.
|
||||||
|
Database resource tokens allow for actions like writing and querying data.
|
||||||
|
menu:
|
||||||
|
influxdb3_enterprise:
|
||||||
|
parent: Manage tokens
|
||||||
|
name: Resource tokens
|
||||||
|
weight: 101
|
||||||
|
influxdb3/enterprise/tags: [tokens]
|
||||||
|
---
|
||||||
|
|
||||||
|
{{< children depth="1" >}}
|
||||||
|
|
||||||
|
<!-- The content for this page is at
|
||||||
|
// SOURCE - content/shared/influxdb3-admin/tokens/database/_index.md
|
||||||
|
-->
|
|
@ -0,0 +1,520 @@
|
||||||
|
---
|
||||||
|
title: Create a resource token
|
||||||
|
description: >
|
||||||
|
Use the [`influxdb3 create token --permission` command](/influxdb3/enterprise/reference/cli/influxdb3/create/token/)
|
||||||
|
or the [HTTP API](/influxdb3/enterprise/api/v3/)
|
||||||
|
to create tokens that grant access to resources such as databases and system information.
|
||||||
|
Database tokens allow for reading and writing data in your {{< product-name omit="Clustered" >}} instance.
|
||||||
|
System tokens allow for reading system information and metrics for your server.
|
||||||
|
menu:
|
||||||
|
influxdb3_enterprise:
|
||||||
|
parent: Resource tokens
|
||||||
|
weight: 201
|
||||||
|
list_code_example: |
|
||||||
|
##### CLI
|
||||||
|
```bash
|
||||||
|
influxdb3 create token --permission \
|
||||||
|
--token ADMIN_TOKEN \
|
||||||
|
--expiry 1y \
|
||||||
|
--name "Read-write on DATABASE1, DATABASE2" \
|
||||||
|
db:DATABASE1,DATABASE2:read,write
|
||||||
|
```
|
||||||
|
|
||||||
|
##### HTTP API
|
||||||
|
```bash
|
||||||
|
"http://{{< influxdb/host >}}/api/v3/enterprise/configure/token" \
|
||||||
|
--header 'Accept: application/json' \
|
||||||
|
--header 'Content-Type: application/json' \
|
||||||
|
--header "Authorization: Bearer ADMIN_TOKEN" \
|
||||||
|
--data '{
|
||||||
|
"token_name": "Read-write for DATABASE1, DATABASE2",
|
||||||
|
"permissions": [{
|
||||||
|
"resource_type": "db",
|
||||||
|
"resource_identifier": ["DATABASE1","DATABASE2"],
|
||||||
|
"actions": ["read","write"]
|
||||||
|
}],
|
||||||
|
"expiry_secs": 300000
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
alt_links:
|
||||||
|
cloud-dedicated: /influxdb3/cloud-dedicated/admin/tokens/create-token/
|
||||||
|
cloud-serverless: /influxdb3/cloud-serverless/admin/tokens/create-token/
|
||||||
|
---
|
||||||
|
|
||||||
|
Use the [`influxdb3 create token --permission` command](/influxdb3/enterprise/reference/cli/influxdb3/create/token/)
|
||||||
|
or the [`/api/v3/configure/token` HTTP API endpoint](/influxdb3/enterprise/api/v3/)
|
||||||
|
to create tokens that grant access to resources such as databases and system information.
|
||||||
|
Database tokens allow for reading and writing data in your {{< product-name omit="Clustered" >}} instance.
|
||||||
|
System tokens allow for reading system information and metrics for your server.
|
||||||
|
|
||||||
|
After you
|
||||||
|
[create an _admin token_](/influxdb3/enterprise/admin/tokens/admin/create/), you
|
||||||
|
can use the token string to authenticate `influxdb3` commands and HTTP API requests
|
||||||
|
for managing database and system tokens.
|
||||||
|
|
||||||
|
The HTTP API examples in this guide use [cURL](https://curl.se/) to send an API request, but you can use any HTTP client.
|
||||||
|
|
||||||
|
> [!Note]
|
||||||
|
> #### Store secure tokens in a secret store
|
||||||
|
>
|
||||||
|
> Token strings are returned _only_ on token creation.
|
||||||
|
> We recommend storing database tokens in a **secure secret store**.
|
||||||
|
> If you lose a resource token string, revoke the token and create a new one.
|
||||||
|
|
||||||
|
## Create a database token
|
||||||
|
|
||||||
|
{{< tabs-wrapper >}}
|
||||||
|
{{% tabs %}}
|
||||||
|
[influxdb3](#)
|
||||||
|
[HTTP API](#)
|
||||||
|
{{% /tabs %}}
|
||||||
|
{{% tab-content %}}
|
||||||
|
|
||||||
|
<!------------------------------- BEGIN INFLUXDB3 ----------------------------->
|
||||||
|
|
||||||
|
Use the [`influxdb3 create token` command](/influxdb3/enterprise/reference/cli/influxdb3/create/token/)
|
||||||
|
to create a database token with permissions for reading and writing data in
|
||||||
|
your {{% product-name %}} instance.
|
||||||
|
|
||||||
|
In your terminal, run the `influxdb3 create token` command and provide the following:
|
||||||
|
|
||||||
|
- `--permission` flag to create a token with permissions
|
||||||
|
- `--name` flag with a unique description of the token
|
||||||
|
- _Options_, for example:
|
||||||
|
- `--expiry` option with the token expiration time as a duration.
|
||||||
|
If an expiration isn't set, the token does not expire until revoked.
|
||||||
|
- Token permissions (read and write) in the `RESOURCE_TYPE:RESOURCE_NAMES:ACTIONS` format--for example:
|
||||||
|
|
||||||
|
```
|
||||||
|
db:DATABASE1,DATABASE2:read,write
|
||||||
|
```
|
||||||
|
|
||||||
|
- `db:`: The `db` resource type, which specifies the token is for a database.
|
||||||
|
- `DATABASE1,DATABASE2`: The names of the databases to grant permissions to.
|
||||||
|
The resource names part supports the `*` wildcard, which grants read or write permissions to all databases.
|
||||||
|
- `read,write`: The permissions to grant to the token.
|
||||||
|
|
||||||
|
{{% code-placeholders "DATABASE1|DATABASE2|1y" %}}
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 create token \
|
||||||
|
--permission \
|
||||||
|
--expiry 1y \
|
||||||
|
--name "Read-write on DATABASE1, DATABASE2" \
|
||||||
|
"db:DATABASE1,DATABASE2:read,write"
|
||||||
|
```
|
||||||
|
|
||||||
|
{{% /code-placeholders %}}
|
||||||
|
|
||||||
|
Replace the following:
|
||||||
|
|
||||||
|
- {{% code-placeholder-key %}}`DATABASE1`{{% /code-placeholder-key %}}, {{% code-placeholder-key %}}`DATABASE2`{{% /code-placeholder-key %}}:
|
||||||
|
your {{% product-name %}} [database](/influxdb3/enterprise/admin/databases/)
|
||||||
|
- {{% code-placeholder-key %}}`1y`{{% /code-placeholder-key %}}:
|
||||||
|
the token expiration time as a
|
||||||
|
duration.
|
||||||
|
|
||||||
|
The output is the token string in plain text.
|
||||||
|
|
||||||
|
<!-------------------------------- END INFLUXDB3 ------------------------------>
|
||||||
|
{{% /tab-content %}}
|
||||||
|
{{% tab-content %}}
|
||||||
|
<!------------------------------- BEGIN cURL ---------------------------------->
|
||||||
|
|
||||||
|
Send a request to the following {{% product-name %}} endpoint:
|
||||||
|
|
||||||
|
{{% api-endpoint endpoint="http://{{< influxdb/host >}}/api/v3/enterprise/configure/token" method="post" %}}
|
||||||
|
|
||||||
|
Provide the following request headers:
|
||||||
|
|
||||||
|
- `Accept: application/json` to ensure the response body is JSON content
|
||||||
|
- `Content-Type: application/json` to indicate the request body is JSON content
|
||||||
|
- `Authorization: Bearer` and the [admin token](/influxdb3/enterprise/admin/tokens/admin/)
|
||||||
|
for your instance to authorize the request
|
||||||
|
|
||||||
|
In the request body, provide the following parameters:
|
||||||
|
|
||||||
|
- `token_name`: a description of the token, unique within the instance
|
||||||
|
- `resource_type`: the resource type for the token, which is always `db`
|
||||||
|
- `resource_identifier`: an array of database names to grant permissions to
|
||||||
|
- The resource identifier field supports the `*` wildcard, which grants read or write
|
||||||
|
permissions to all databases.
|
||||||
|
- `permissions`: an array of token permission actions (`"read"`, `"write"`) for the database
|
||||||
|
- `expiry_secs`: Specify the token expiration time in seconds.
|
||||||
|
|
||||||
|
The following example shows how to use the HTTP API to create a database token:
|
||||||
|
|
||||||
|
{{% code-placeholders "DATABASE1|DATABASE2|300000" %}}
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl \
|
||||||
|
"http://{{< influxdb/host >}}/api/v3/enterprise/configure/token" \
|
||||||
|
--header 'Accept: application/json' \
|
||||||
|
--header 'Content-Type: application/json' \
|
||||||
|
--data '{
|
||||||
|
"token_name": "Read-write for DATABASE1, DATABASE2",
|
||||||
|
"permissions": [{
|
||||||
|
"resource_type": "db",
|
||||||
|
"resource_identifier": ["DATABASE1","DATABASE2"],
|
||||||
|
"actions": ["read","write"]
|
||||||
|
}],
|
||||||
|
"expiry_secs": 300000
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
{{% /code-placeholders %}}
|
||||||
|
|
||||||
|
Replace the following in your request:
|
||||||
|
|
||||||
|
- {{% code-placeholder-key %}}`DATABASE1`{{% /code-placeholder-key %}}, {{% code-placeholder-key %}}`DATABASE2`{{% /code-placeholder-key %}}:
|
||||||
|
your {{% product-name %}} [database](/influxdb3/enterprise/admin/databases/)
|
||||||
|
- {{% code-placeholder-key %}}`300000`{{% /code-placeholder-key %}}:
|
||||||
|
the token expiration time in seconds.
|
||||||
|
|
||||||
|
The response body contains token details, including the `token` field with the
|
||||||
|
token string in plain text.
|
||||||
|
|
||||||
|
<!------------------------------- END cURL ------------------------------------>
|
||||||
|
{{% /tab-content %}}
|
||||||
|
{{< /tabs-wrapper >}}
|
||||||
|
|
||||||
|
### Examples
|
||||||
|
|
||||||
|
- [Create a token with read and write access to a database](#create-a-token-with-read-and-write-access-to-a-database)
|
||||||
|
- [Create a token with read and write access to all databases](#create-a-token-with-read-and-write-access-to-all-databases)
|
||||||
|
- [Create a token with read-only access to a database](#create-a-token-with-read-only-access-to-a-database)
|
||||||
|
- [Create a token with read-only access to multiple databases](#create-a-token-with-read-only-access-to-multiple-databases)
|
||||||
|
- [Create a token with mixed permissions to multiple databases](#create-a-token-with-mixed-permissions-to-multiple-databases)
|
||||||
|
- [Create a token that expires in seven days](#create-a-token-that-expires-in-seven-days)
|
||||||
|
|
||||||
|
In the examples below, replace the following:
|
||||||
|
|
||||||
|
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}: your {{< product-name >}} [database](/influxdb3/enterprise/admin/databases/)
|
||||||
|
- {{% code-placeholder-key %}}`DATABASE2_NAME`{{% /code-placeholder-key %}}: your {{< product-name >}} [database](/influxdb3/enterprise/admin/databases/)
|
||||||
|
- {{% code-placeholder-key %}}`ADMIN_TOKEN`{{% /code-placeholder-key %}}: the [admin token](/influxdb3/enterprise/admin/tokens/admin/) for your {{% product-name %}} instance
|
||||||
|
{{% code-placeholders "DATABASE_NAME|DATABASE2_NAME|ADMIN_TOKEN" %}}
|
||||||
|
|
||||||
|
#### Create a token with read and write access to a database
|
||||||
|
|
||||||
|
{{< code-tabs-wrapper >}}
|
||||||
|
{{% code-tabs %}}
|
||||||
|
[influxdb3](#)
|
||||||
|
[HTTP API](#)
|
||||||
|
{{% /code-tabs %}}
|
||||||
|
{{% code-tab-content %}}
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 create token \
|
||||||
|
--permission \
|
||||||
|
--name "Read/write token for DATABASE_NAME" \
|
||||||
|
db:DATABASE_NAME:read,write
|
||||||
|
```
|
||||||
|
|
||||||
|
{{% /code-tab-content %}}
|
||||||
|
{{% code-tab-content %}}
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl \
|
||||||
|
"http://{{< influxdb/host >}}/api/v3/enterprise/configure/token" \
|
||||||
|
--header 'Accept: application/json' \
|
||||||
|
--header 'Content-Type: application/json' \
|
||||||
|
--header "Authorization: Bearer ADMIN_TOKEN" \
|
||||||
|
--data '{
|
||||||
|
"token_name": "Read/write token for DATABASE_NAME",
|
||||||
|
"permissions": [{
|
||||||
|
"resource_type": "db",
|
||||||
|
"resource_identifier": ["DATABASE_NAME"],
|
||||||
|
"actions": ["read","write"]
|
||||||
|
}]
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
{{% /code-tab-content %}}
|
||||||
|
{{< /code-tabs-wrapper >}}
|
||||||
|
|
||||||
|
#### Create a token with read and write access to all databases
|
||||||
|
|
||||||
|
{{< code-tabs-wrapper >}}
|
||||||
|
{{% code-tabs %}}
|
||||||
|
[influxdb3](#)
|
||||||
|
[HTTP API](#)
|
||||||
|
{{% /code-tabs %}}
|
||||||
|
{{% code-tab-content %}}
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 create token \
|
||||||
|
--permission \
|
||||||
|
--name "Read/write token for all databases" \
|
||||||
|
db:*:read,write
|
||||||
|
```
|
||||||
|
|
||||||
|
{{% /code-tab-content %}}
|
||||||
|
{{% code-tab-content %}}
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl \
|
||||||
|
"http://{{< influxdb/host >}}/api/v3/enterprise/configure/token" \
|
||||||
|
--header 'Accept: application/json' \
|
||||||
|
--header 'Content-Type: application/json' \
|
||||||
|
--header "Authorization: Bearer ADMIN_TOKEN" \
|
||||||
|
--data '{
|
||||||
|
"token_name": "Read/write token for all databases",
|
||||||
|
"permissions": [{
|
||||||
|
"resource_type": "db",
|
||||||
|
"resource_identifier": ["*"],
|
||||||
|
"actions": ["read","write"]
|
||||||
|
}]
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
{{% /code-tab-content %}}
|
||||||
|
{{< /code-tabs-wrapper >}}
|
||||||
|
|
||||||
|
#### Create a token with read-only access to a database
|
||||||
|
|
||||||
|
{{< code-tabs-wrapper >}}
|
||||||
|
{{% code-tabs %}}
|
||||||
|
[influxdb3](#)
|
||||||
|
[HTTP API](#)
|
||||||
|
{{% /code-tabs %}}
|
||||||
|
{{% code-tab-content %}}
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 create token \
|
||||||
|
--permission \
|
||||||
|
--name "Read-only token for DATABASE_NAME" \
|
||||||
|
db:DATABASE_NAME:read
|
||||||
|
```
|
||||||
|
|
||||||
|
{{% /code-tab-content %}}
|
||||||
|
{{% code-tab-content %}}
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl \
|
||||||
|
"http://{{< influxdb/host >}}/api/v3/enterprise/configure/token" \
|
||||||
|
--header 'Accept: application/json' \
|
||||||
|
--header 'Content-Type: application/json' \
|
||||||
|
--header "Authorization: Bearer ADMIN_TOKEN" \
|
||||||
|
--data '{
|
||||||
|
"token_name": "Read-only token for DATABASE_NAME",
|
||||||
|
"permissions": [{
|
||||||
|
"resource_type": "db",
|
||||||
|
"resource_identifier": ["DATABASE_NAME"],
|
||||||
|
"actions": ["read"]
|
||||||
|
}]
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
{{% /code-tab-content %}}
|
||||||
|
{{< /code-tabs-wrapper >}}
|
||||||
|
|
||||||
|
#### Create a token with read-only access to multiple databases
|
||||||
|
|
||||||
|
{{< code-tabs-wrapper >}}
|
||||||
|
{{% code-tabs %}}
|
||||||
|
[influxdb3](#)
|
||||||
|
[HTTP API](#)
|
||||||
|
{{% /code-tabs %}}
|
||||||
|
{{% code-tab-content %}}
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 create token \
|
||||||
|
--permission \
|
||||||
|
--name "Read-only token for DATABASE_NAME and DATABASE2_NAME" \
|
||||||
|
db:DATABASE_NAME,DATABASE2_NAME:read
|
||||||
|
```
|
||||||
|
|
||||||
|
{{% /code-tab-content %}}
|
||||||
|
{{% code-tab-content %}}
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl \
|
||||||
|
"http://{{< influxdb/host >}}/api/v3/enterprise/configure/token" \
|
||||||
|
--header 'Accept: application/json' \
|
||||||
|
--header 'Content-Type: application/json' \
|
||||||
|
--header "Authorization: Bearer ADMIN_TOKEN" \
|
||||||
|
--data '{
|
||||||
|
"token_name": "Read-only token for DATABASE_NAME and DATABASE2_NAME",
|
||||||
|
"permissions": [{
|
||||||
|
"resource_type": "db",
|
||||||
|
"resource_identifier": ["DATABASE_NAME","DATABASE2_NAME"],
|
||||||
|
"actions": ["read"]
|
||||||
|
}]
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
{{% /code-tab-content %}}
|
||||||
|
{{< /code-tabs-wrapper >}}
|
||||||
|
|
||||||
|
#### Create a token that expires in seven days
|
||||||
|
|
||||||
|
{{< code-tabs-wrapper >}}
|
||||||
|
{{% code-tabs %}}
|
||||||
|
[influxdb3](#)
|
||||||
|
[HTTP API](#)
|
||||||
|
{{% /code-tabs %}}
|
||||||
|
{{% code-tab-content %}}
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 create token \
|
||||||
|
--permission \
|
||||||
|
--expiry 7d \
|
||||||
|
--name "Read/write token for DATABASE_NAME with 7d expiration" \
|
||||||
|
db:DATABASE_NAME:read,write
|
||||||
|
```
|
||||||
|
|
||||||
|
{{% /code-tab-content %}}
|
||||||
|
{{% code-tab-content %}}
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl \
|
||||||
|
"http://{{< influxdb/host >}}/api/v3/enterprise/configure/token" \
|
||||||
|
--header 'Accept: application/json' \
|
||||||
|
--header 'Content-Type: application/json' \
|
||||||
|
--header "Authorization: Bearer ADMIN_TOKEN" \
|
||||||
|
--data '{
|
||||||
|
"token_name": "Read/write token for DATABASE_NAME with 7d expiration",
|
||||||
|
"permissions": [{
|
||||||
|
"resource_type": "db",
|
||||||
|
"resource_identifier": ["DATABASE_NAME"],
|
||||||
|
"actions": ["read","write"]
|
||||||
|
}],
|
||||||
|
"expiry_secs": 604800
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
{{% /code-tab-content %}}
|
||||||
|
{{< /code-tabs-wrapper >}}
|
||||||
|
|
||||||
|
{{% /code-placeholders %}}
|
||||||
|
|
||||||
|
## Create a system token
|
||||||
|
|
||||||
|
System tokens have the `system` resource type and allow for read-only access
|
||||||
|
to system information and metrics from your server.
|
||||||
|
|
||||||
|
You can create system tokens for the following system resources:
|
||||||
|
|
||||||
|
- `health`: system health information from the `/health` HTTP API endpoint
|
||||||
|
- `metrics`: system metrics information from the `/metrics` HTTP API endpoint
|
||||||
|
- `ping`: system ping information from the `/ping` HTTP API endpoint
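
After you create a system token, you pass it like any other token. The
following sketch assumes a hypothetical token string in place of `SYSTEM_TOKEN`
and uses the `/health` endpoint listed above:

```bash
curl "http://{{< influxdb/host >}}/health" \
  --header "Authorization: Bearer SYSTEM_TOKEN"
```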
|
||||||
|
|
||||||
|
{{< tabs-wrapper >}}
|
||||||
|
{{% tabs %}}
|
||||||
|
[influxdb3](#)
|
||||||
|
[HTTP API](#)
|
||||||
|
{{% /tabs %}}
|
||||||
|
{{% tab-content %}}
|
||||||
|
|
||||||
|
<!------------------------------- BEGIN INFLUXDB3 ----------------------------->
|
||||||
|
|
||||||
|
Use the [`influxdb3 create token` command](/influxdb3/enterprise/reference/cli/influxdb3/create/token/)
|
||||||
|
to create a system token with permissions for reading system information from
|
||||||
|
your {{% product-name %}} instance.
|
||||||
|
|
||||||
|
In your terminal, run the `influxdb3 create token` command and provide the following:
|
||||||
|
|
||||||
|
- `--permission` flag to create a token with permissions
|
||||||
|
- `--name` flag with a unique description of the token
|
||||||
|
- _Options_, for example:
|
||||||
|
- `--expiry` option with the token expiration time as a duration.
|
||||||
|
If an expiration isn't set, the token does not expire until revoked.
|
||||||
|
- Token permissions in the `RESOURCE_TYPE:RESOURCE_NAMES:ACTIONS` format--for example:
|
||||||
|
|
||||||
|
```
|
||||||
|
system:health:read
|
||||||
|
```
|
||||||
|
|
||||||
|
- `system:`: The `system` resource type, which specifies the token is for system information.
|
||||||
|
- `health`: The specific system resource to grant permissions to.
|
||||||
|
- `read`: The permission to grant to the token (system tokens are always read-only).
|
||||||
|
|
||||||
|
{{% code-placeholders "1y" %}}
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 create token \
|
||||||
|
--permission \
|
||||||
|
--expiry 1y \
|
||||||
|
--name "System health token" \
|
||||||
|
"system:health:read"
|
||||||
|
```
|
||||||
|
|
||||||
|
{{% /code-placeholders %}}
|
||||||
|
|
||||||
|
Replace the following:
|
||||||
|
|
||||||
|
- {{% code-placeholder-key %}}`1y`{{% /code-placeholder-key %}}:
|
||||||
|
the token expiration time as a
|
||||||
|
duration.
|
||||||
|
|
||||||
|
The output is the token string in plain text.
|
||||||
|
|
||||||
|
<!-------------------------------- END INFLUXDB3 ------------------------------>
|
||||||
|
{{% /tab-content %}}
|
||||||
|
{{% tab-content %}}
|
||||||
|
<!------------------------------- BEGIN cURL ---------------------------------->
|
||||||
|
Send a request to the following {{% product-name %}} endpoint:
|
||||||
|
|
||||||
|
{{% api-endpoint endpoint="http://{{< influxdb/host >}}/api/v3/enterprise/configure/token" method="post" %}}
|
||||||
|
|
||||||
|
Provide the following request headers:
|
||||||
|
|
||||||
|
- `Accept: application/json` to ensure the response body is JSON content
|
||||||
|
- `Content-Type: application/json` to indicate the request body is JSON content
|
||||||
|
- `Authorization: Bearer` and the [admin token](/influxdb3/enterprise/admin/tokens/admin/)
|
||||||
|
for your instance to authorize the request
|
||||||
|
|
||||||
|
In the request body, provide the following parameters:
|
||||||
|
|
||||||
|
- `token_name`: a description of the token, unique within the instance
|
||||||
|
- `resource_type`: the resource type for the token, which is `system` for system tokens
|
||||||
|
- `resource_identifier`: an array of system resource names to grant permissions to
|
||||||
|
- The resource identifier field supports the `*` wildcard, which grants read
|
||||||
|
permissions to all system information resources.
|
||||||
|
- `permissions`: an array of token permission actions (only `"read"` for system tokens)
|
||||||
|
- `expiry_secs`: Specify the token expiration time in seconds.
|
||||||
|
|
||||||
|
The following example shows how to use the HTTP API to create a system token:
|
||||||
|
|
||||||
|
{{% code-placeholders "300000" %}}
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl \
|
||||||
|
"http://{{< influxdb/host >}}/api/v3/enterprise/configure/token" \
|
||||||
|
--header 'Accept: application/json' \
|
||||||
|
--header 'Content-Type: application/json' \
|
||||||
|
--header "Authorization: Bearer ADMIN_TOKEN" \
|
||||||
|
--data '{
|
||||||
|
"token_name": "System health token",
|
||||||
|
"permissions": [{
|
||||||
|
"resource_type": "system",
|
||||||
|
"resource_identifier": ["health"],
|
||||||
|
"actions": ["read"]
|
||||||
|
}],
|
||||||
|
"expiry_secs": 300000
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
{{% /code-placeholders %}}
|
||||||
|
|
||||||
|
Replace the following in your request:
|
||||||
|
|
||||||
|
- {{% code-placeholder-key %}}`300000`{{% /code-placeholder-key %}}:
|
||||||
|
the token expiration time in seconds.
|
||||||
|
|
||||||
|
The response body contains token details, including the `token` field with the
|
||||||
|
token string in plain text.
|
||||||
|
|
||||||
|
<!------------------------------- END cURL ------------------------------------>
|
||||||
|
{{% /tab-content %}}
|
||||||
|
{{< /tabs-wrapper >}}
|
||||||
|
|
||||||
|
|
||||||
|
## Output format
|
||||||
|
|
||||||
|
The `influxdb3 create token` command supports the `--format json` option.
|
||||||
|
By default, the command outputs the token string.
|
||||||
|
For easier programmatic access to the command output, include `--format json`
|
||||||
|
with your command to format the output as JSON.
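
For example, the following sketch returns the new token as JSON (exact field
names may vary by version):

```bash
influxdb3 create token --admin --format json
```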
|
||||||
|
|
||||||
|
The `/api/v3/configure/token` endpoint outputs JSON format in the response body.
|
|
@ -0,0 +1,27 @@
|
||||||
|
---
|
||||||
|
title: List resource tokens
|
||||||
|
description: >
|
||||||
|
Use the `influxdb3 show tokens` command
|
||||||
|
to list resource tokens in your InfluxDB 3 Enterprise instance.
|
||||||
|
menu:
|
||||||
|
influxdb3_enterprise:
|
||||||
|
parent: Resource tokens
|
||||||
|
weight: 202
|
||||||
|
list_code_example: |
|
||||||
|
##### CLI
|
||||||
|
```bash
|
||||||
|
influxdb3 show tokens \
|
||||||
|
--token ADMIN_TOKEN \
|
||||||
|
--host http://{{< influxdb/host >}}
|
||||||
|
```
|
||||||
|
aliases:
|
||||||
|
- /influxdb3/enterprise/admin/tokens/list/
|
||||||
|
related:
|
||||||
|
- /influxdb3/enterprise/reference/cli/influxdb3/token/list/
|
||||||
|
- /influxdb3/enterprise/reference/api/
|
||||||
|
source: /shared/influxdb3-admin/tokens/admin/list.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is at
|
||||||
|
// SOURCE content/shared/influxdb3-admin/tokens/admin/list.md
|
||||||
|
-->
|
|
@ -91,13 +91,13 @@ source ~/.zshrc
|
||||||
|
|
||||||
<!-------------------------------- BEGIN LINUX -------------------------------->
|
<!-------------------------------- BEGIN LINUX -------------------------------->
|
||||||
|
|
||||||
- [{{< product-name >}} • Linux (x86) • GNU](https://download.influxdata.com/influxdb/snapshots/influxdb3-{{< product-key >}}_x86_64-unknown-linux-gnu.tar.gz)
|
- [{{< product-name >}} • Linux (AMD64, x86_64) • GNU](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_amd64.tar.gz)
|
||||||
•
|
•
|
||||||
[sha256](https://dl.influxdata.com/influxdb/snapshots/influxdb3-{{< product-key >}}_x86_64-unknown-linux-gnu.tar.gz.sha256)
|
[sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_amd64.tar.gz.sha256)
|
||||||
|
|
||||||
- [{{< product-name >}} • Linux (ARM) • GNU](https://download.influxdata.com/influxdb/snapshots/influxdb3-{{< product-key >}}_aarch64-unknown-linux-gnu.tar.gz)
|
- [{{< product-name >}} • Linux (ARM64, AArch64) • GNU](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_arm64.tar.gz)
|
||||||
•
|
•
|
||||||
[sha256](https://dl.influxdata.com/influxdb/snapshots/influxdb3-{{< product-key >}}_aarch64-unknown-linux-gnu.tar.gz.sha256)
|
[sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_linux_arm64.tar.gz.sha256)
|
||||||
|
|
||||||
<!--------------------------------- END LINUX --------------------------------->
|
<!--------------------------------- END LINUX --------------------------------->
|
||||||
|
|
||||||
|
@ -106,9 +106,9 @@ source ~/.zshrc
|
||||||
|
|
||||||
<!-------------------------------- BEGIN MACOS -------------------------------->
|
<!-------------------------------- BEGIN MACOS -------------------------------->
|
||||||
|
|
||||||
- [{{< product-name >}} • macOS (Silicon)](https://download.influxdata.com/influxdb/snapshots/influxdb3-{{< product-key >}}_aarch64-apple-darwin.tar.gz)
|
- [{{< product-name >}} • macOS (Silicon, ARM64)](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_darwin_arm64.tar.gz)
|
||||||
•
|
•
|
||||||
[sha256](https://dl.influxdata.com/influxdb/snapshots/influxdb3-{{< product-key >}}_aarch64-apple-darwin.tar.gz.sha256)
|
[sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}_darwin_arm64.tar.gz.sha256)
|
||||||
|
|
||||||
> [!Note]
|
> [!Note]
|
||||||
> macOS Intel builds are coming soon.
|
> macOS Intel builds are coming soon.
|
||||||
|
@ -120,9 +120,9 @@ source ~/.zshrc
|
||||||
|
|
||||||
<!------------------------------- BEGIN WINDOWS ------------------------------->
|
<!------------------------------- BEGIN WINDOWS ------------------------------->
|
||||||
|
|
||||||
- [{{< product-name >}} • Windows (x86)](https://dl.influxdata.com/influxdb/snapshots/influxdb3-{{< product-key >}}_x86_64-pc-windows-gnu.tar.gz)
|
- [{{< product-name >}} • Windows (AMD64, x86_64)](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}-windows_amd64.zip)
|
||||||
•
|
•
|
||||||
[sha256](https://dl.influxdata.com/influxdb/snapshots/influxdb3-{{< product-key >}}_x86_64-pc-windows-gnu.tar.gz.sha256)
|
[sha256](https://dl.influxdata.com/influxdb/releases/influxdb3-{{< product-key >}}-{{< latest-patch >}}-windows_amd64.zip.sha256)
|
||||||
|
|
||||||
<!-------------------------------- END WINDOWS -------------------------------->
|
<!-------------------------------- END WINDOWS -------------------------------->
|
||||||
|
|
||||||
|
|
|
@ -39,7 +39,7 @@ influxdb3 [GLOBAL-OPTIONS] [COMMAND]
|
||||||
| Option | | Description |
|
| Option | | Description |
|
||||||
| :----- | :------------------------------------ | :------------------------------------------------------------------------------------------------ |
|
| :----- | :------------------------------------ | :------------------------------------------------------------------------------------------------ |
|
||||||
| | `--num-threads` | Maximum number of IO runtime threads to use |
|
| | `--num-threads` | Maximum number of IO runtime threads to use |
|
||||||
| | `--io-runtime-type` | IO tokio runtime type (`current-thread`, `multi-thread` _(default)_, or `multi-thread-alt`) |
|
| | `--io-runtime-type` | IO tokio runtime type (`current-thread`, `multi-thread` _(default)_, or `multi-thread-alt`) |
|
||||||
| | `--io-runtime-disable-lifo-slot` | Disable LIFO slot of IO runtime |
|
| | `--io-runtime-disable-lifo-slot` | Disable LIFO slot of IO runtime |
|
||||||
| | `--io-runtime-event-interval` | Number of scheduler ticks after which the IOtokio runtime scheduler will poll for external events |
|
| | `--io-runtime-event-interval` | Number of scheduler ticks after which the IOtokio runtime scheduler will poll for external events |
|
||||||
| | `--io-runtime-global-queue-interval` | Number of scheduler ticks after which the IO runtime scheduler will poll the global task queue |
|
| | `--io-runtime-global-queue-interval` | Number of scheduler ticks after which the IO runtime scheduler will poll the global task queue |
|
||||||
|
@ -48,6 +48,7 @@ influxdb3 [GLOBAL-OPTIONS] [COMMAND]
|
||||||
| | `--io-runtime-thread-keep-alive` | Custom timeout for a thread in the blocking pool of the tokio IO runtime |
|
| | `--io-runtime-thread-keep-alive` | Custom timeout for a thread in the blocking pool of the tokio IO runtime |
|
||||||
| | `--io-runtime-thread-priority` | Set thread priority tokio IO runtime workers |
|
| | `--io-runtime-thread-priority` | Set thread priority tokio IO runtime workers |
|
||||||
| `-h` | `--help` | Print help information |
|
| `-h` | `--help` | Print help information |
|
||||||
|
| | `--help-all` | Print detailed help information |
|
||||||
| `-V` | `--version` | Print version |
|
| `-V` | `--version` | Print version |
|
||||||
|
|
||||||
### Option environment variables
|
### Option environment variables
|
||||||
|
|
|
@ -59,6 +59,7 @@ influxdb3 serve [OPTIONS] \
|
||||||
| | `--object-store-max-retries` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#object-store-max-retries)_ |
|
| | `--object-store-max-retries` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#object-store-max-retries)_ |
|
||||||
| | `--object-store-retry-timeout` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#object-store-retry-timeout)_ |
|
| | `--object-store-retry-timeout` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#object-store-retry-timeout)_ |
|
||||||
| `-h` | `--help` | Print help information |
|
| `-h` | `--help` | Print help information |
|
||||||
|
| | `--help-all` | Print detailed help information |
|
||||||
| | `--object-store-cache-endpoint` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#object-store-cache-endpoint)_ |
|
| | `--object-store-cache-endpoint` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#object-store-cache-endpoint)_ |
|
||||||
| | `--log-filter` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#log-filter)_ |
|
| | `--log-filter` | _See [configuration options](/influxdb3/enterprise/reference/config-options/#log-filter)_ |
|
||||||
| `-v` | `--verbose` | Enable verbose output |
|
| `-v` | `--verbose` | Enable verbose output |
|
||||||
|
|
|
@ -0,0 +1,15 @@
|
||||||
|
---
|
||||||
|
title: Glossary
|
||||||
|
description: >
|
||||||
|
Terms related to InfluxData products and platforms.
|
||||||
|
weight: 109
|
||||||
|
menu:
|
||||||
|
influxdb3_enterprise:
|
||||||
|
parent: Reference
|
||||||
|
influxdb3/enterprise/tags: [glossary]
|
||||||
|
source: /shared/influxdb3-reference/glossary.md
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- The content for this page is at
|
||||||
|
// SOURCE content/shared/influxdb3-reference/glossary.md
|
||||||
|
-->
|
|
@ -541,6 +541,9 @@ The number of Flux query requests served.
|
||||||
#### fluxQueryReqDurationNs
|
#### fluxQueryReqDurationNs
|
||||||
The duration (wall-time), in nanoseconds, spent executing Flux query requests.
|
The duration (wall-time), in nanoseconds, spent executing Flux query requests.
|
||||||
|
|
||||||
|
#### fluxQueryRespBytes
|
||||||
|
The sum of all bytes returned in Flux query responses.
|
||||||
|
|
||||||
#### pingReq
|
#### pingReq
|
||||||
The number of times InfluxDB HTTP server served the `/ping` HTTP endpoint.
|
The number of times InfluxDB HTTP server served the `/ping` HTTP endpoint.
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,106 @@
|
||||||
|
|
||||||
|
The {{< product-name >}} Distinct Value Cache (DVC) lets you cache distinct
|
||||||
|
values of one or more columns in a table, improving the performance of
|
||||||
|
queries that return distinct tag and field values.
|
||||||
|
|
||||||
|
The DVC is an in-memory cache that stores distinct values for specific columns
|
||||||
|
in a table. When you create a DVC, you can specify which columns' distinct
|
||||||
|
values to cache, the maximum number of distinct value combinations to cache, and
|
||||||
|
the maximum age of cached values. A DVC is associated with a table, which can
|
||||||
|
have multiple DVCs.
|
||||||
|
|
||||||
|
{{< children type="anchored-list" >}}
|
||||||
|
- [Important things to know about the Distinct Value Cache](#important-things-to-know-about-the-distinct-value-cache)
|
||||||
|
- [High cardinality limits](#high-cardinality-limits)
|
||||||
|
{{% show-in "core" %}}
|
||||||
|
- [Distinct Value Caches are flushed when the server stops](#distinct-value-caches-are-flushed-when-the-server-stops)
|
||||||
|
{{% /show-in %}}
|
||||||
|
{{% show-in "enterprise" %}}
|
||||||
|
- [Distinct Value Caches are rebuilt on restart](#distinct-value-caches-are-rebuilt-on-restart)
|
||||||
|
{{% /show-in %}}
|
||||||
|
|
||||||
|
Consider a dataset with the following schema:
|
||||||
|
|
||||||
|
- wind_data (table)
|
||||||
|
- tags:
|
||||||
|
- country
|
||||||
|
- _multiple European countries_
|
||||||
|
- county
|
||||||
|
- _multiple European counties_
|
||||||
|
- city
|
||||||
|
- _multiple European cities_
|
||||||
|
- fields:
|
||||||
|
- wind_speed (float)
|
||||||
|
- wind_direction (integer)
|
||||||
|
|
||||||
|
If you cache distinct values for `country`, `county`, and `city`, the DVC looks
|
||||||
|
similar to this:
|
||||||
|
|
||||||
|
| country | county | city |
|
||||||
|
| :------------- | :---------------- | :----------- |
|
||||||
|
| Austria | Salzburg | Salzburg |
|
||||||
|
| Austria | Vienna | Vienna |
|
||||||
|
| Belgium | Antwerp | Antwerp |
|
||||||
|
| Belgium | West Flanders | Bruges |
|
||||||
|
| Czech Republic | Liberec Region | Liberec |
|
||||||
|
| Czech Republic | Prague | Prague |
|
||||||
|
| Denmark | Capital Region | Copenhagen |
|
||||||
|
| Denmark | Southern Denmark | Odense |
|
||||||
|
| Estonia | Ida-Viru County | Kohtla-Järve |
|
||||||
|
| Estonia | Ida-Viru County | Narva |
|
||||||
|
| ... | ... | ... |
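
When you query the cache, these rows are returned by the `distinct_cache()`
SQL function--shown here as a sketch with a hypothetical cache name,
`windDistinctCache`:

```sql
SELECT country, county, city
FROM distinct_cache('wind_data', 'windDistinctCache')
```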
|
||||||
|
|
||||||
|
> [!Important]
|
||||||
|
> #### Repeated values in DVC results
|
||||||
|
>
|
||||||
|
> Distinct values may appear multiple times in a column when querying the DVC,
|
||||||
|
> but only when associated with distinct values in other columns.
|
||||||
|
> If you query a single column in the DVC, no values are repeated in the results.
|
||||||
|
|
||||||
|
> [!Note]
|
||||||
|
> #### Null column values
|
||||||
|
>
|
||||||
|
> _Null_ column values are still considered values and are cached in the DVC.
|
||||||
|
> If you write data to a table and don't provide a value for an existing column,
|
||||||
|
> the column value is cached as _null_ and treated as a distinct value.
|
||||||
|
|
||||||
|
{{< children hlevel="h2" >}}
|
||||||
|
|
||||||
|
## Important things to know about the Distinct Value Cache
|
||||||
|
|
||||||
|
DVCs are stored in memory; the larger the cache, the more memory your InfluxDB 3
|
||||||
|
node requires to maintain it. Consider the following:
|
||||||
|
|
||||||
|
- [High cardinality limits](#high-cardinality-limits)
|
||||||
|
{{% show-in "core" %}}
|
||||||
|
- [Distinct Value Caches are flushed when the server stops](#distinct-value-caches-are-flushed-when-the-server-stops)
|
||||||
|
{{% /show-in %}}
|
||||||
|
{{% show-in "enterprise" %}}
|
||||||
|
- [Distinct Value Caches are rebuilt on restart](#distinct-value-caches-are-rebuilt-on-restart)
|
||||||
|
{{% /show-in %}}
|
||||||
|
|
||||||
|
### High cardinality limits
|
||||||
|
|
||||||
|
“Cardinality” refers to the number of unique key column combinations in your
|
||||||
|
cached data and essentially defines the maximum number of rows to store in your
|
||||||
|
DVC. While the InfluxDB 3 storage engine is not limited by cardinality,
|
||||||
|
it does affect the DVC. You can define a custom maximum cardinality limit for
|
||||||
|
a DVC, but higher cardinality increases memory requirements for
|
||||||
|
storing the DVC and can affect DVC query performance.
|
||||||
|
|
||||||
|
{{% show-in "core" %}}
|
||||||
|
### Distinct Value Caches are flushed when the server stops
|
||||||
|
|
||||||
|
Because the DVC is an in-memory cache, the cache is flushed any time the server
|
||||||
|
stops. After a server restart, {{% product-name %}} only writes new values to
|
||||||
|
the DVC when you write data, so there may be a period of time when some values are
|
||||||
|
unavailable in the DVC.
|
||||||
|
{{% /show-in %}}
|
||||||
|
|
||||||
|
{{% show-in "enterprise" %}}
|
||||||
|
### Distinct Value Caches are rebuilt on restart
|
||||||
|
|
||||||
|
Because the DVC is an in-memory cache, the cache is flushed any time the server
|
||||||
|
stops. After a server restarts, {{< product-name >}} uses persisted data to
|
||||||
|
rebuild the DVC.
|
||||||
|
{{% /show-in %}}
|
|
@ -0,0 +1,108 @@
|
||||||
|
|
||||||
|
Use the [`influxdb3 create distinct_cache` command](/influxdb3/version/reference/cli/influxdb3/create/distinct_cache/)
|
||||||
|
to create a Distinct Value Cache (DVC). Provide the following:
|
||||||
|
|
||||||
|
- **Database** (`-d`, `--database`): _({{< req >}})_ The name of the database to
|
||||||
|
associate the DVC with. You can also use the `INFLUXDB3_DATABASE_NAME`
|
||||||
|
environment variable to specify the database.
|
||||||
|
- **Token** (`--token`): _({{< req >}})_ Your {{< product-name >}}
|
||||||
|
{{% show-in "enterprise" %}}admin {{% /show-in %}}authentication token.
|
||||||
|
You can also use the `INFLUXDB3_AUTH_TOKEN` environment variable to specify
|
||||||
|
the token.
|
||||||
|
- **Table** (`-t`, `--table`): _({{< req >}})_ The name of the table to
|
||||||
|
associate the DVC with.
|
||||||
|
{{% show-in "enterprise" %}}
|
||||||
|
- **Node specification** (`-n`, `--node-spec`): Specify which nodes the DVC
|
||||||
|
should be configured on.
|
||||||
|
{{% /show-in %}}
|
||||||
|
- **Columns** (`--columns`): _({{< req >}})_ Specify which columns to cache
|
||||||
|
distinct values for. These are typically tag columns but can also be
|
||||||
|
string fields.
|
||||||
|
- **Maximum cardinality** (`--max-cardinality`): Specify the maximum number of
|
||||||
|
distinct value combinations to store in the cache. The default maximum
|
||||||
|
cardinality is `100000`.
|
||||||
|
- **Maximum age** (`--max-age`): Specify the maximum age of distinct values to
|
||||||
|
keep in the DVC in
|
||||||
|
[humantime](https://docs.rs/humantime/latest/humantime/fn.parse_duration.html)
|
||||||
|
form. The default maximum age is `24 hours`.
|
||||||
|
- **Cache name**: A unique name for the cache. If you don’t provide one,
|
||||||
|
InfluxDB automatically generates a cache name for you.
|
||||||
|
|
||||||
|
{{% show-in "core" %}}
|
||||||
|
<!----------------------------- BEGIN CORE EXAMPLE ---------------------------->
|
||||||
|
{{% code-placeholders "(DATABASE|TABLE|DVC)_NAME|AUTH_TOKEN|NODE_SPEC|COLUMNS|MAX_(CARDINALITY|AGE)" %}}
|
||||||
|
|
||||||
|
<!--pytest.mark.skip-->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 create distinct_cache \
|
||||||
|
--database DATABASE_NAME \
|
||||||
|
--token AUTH_TOKEN \
|
||||||
|
--table TABLE_NAME \
|
||||||
|
--columns COLUMNS \
|
||||||
|
--max-cardinality MAX_CARDINALITY \
|
||||||
|
--max-age MAX_AGE \
|
||||||
|
DVC_NAME
|
||||||
|
```
|
||||||
|
{{% /code-placeholders %}}
|
||||||
|
<!------------------------------ END CORE EXAMPLE ----------------------------->
|
||||||
|
{{% /show-in %}}
|
||||||
|
|
||||||
|
{{% show-in "enterprise" %}}
|
||||||
|
<!-------------------------- BEGIN ENTERPRISE EXAMPLE ------------------------->
|
||||||
|
{{% code-placeholders "(DATABASE|TABLE|DVC)_NAME|AUTH_TOKEN|NODE_SPEC|COLUMNS|MAX_(CARDINALITY|AGE)" %}}
|
||||||
|
|
||||||
|
<!--pytest.mark.skip-->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 create distinct_cache \
|
||||||
|
--database DATABASE_NAME \
|
||||||
|
--token AUTH_TOKEN \
|
||||||
|
--table TABLE_NAME \
|
||||||
|
--node-spec NODE_SPEC \
|
||||||
|
--columns COLUMNS \
|
||||||
|
--max-cardinality MAX_CARDINALITY \
|
||||||
|
--max-age MAX_AGE \
|
||||||
|
DVC_NAME
|
||||||
|
```
|
||||||
|
{{% /code-placeholders %}}
|
||||||
|
<!--------------------------- END ENTERPRISE EXAMPLE -------------------------->
|
||||||
|
{{% /show-in %}}
|
||||||
|
|
||||||
|
Replace the following:
|
||||||
|
|
||||||
|
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
|
||||||
|
the name of the database to associate the DVC with
|
||||||
|
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
|
||||||
|
your {{< product-name >}} {{% show-in "enterprise" %}}admin {{% /show-in %}}
|
||||||
|
authentication token
|
||||||
|
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}:
|
||||||
|
the name of the table to associate the DVC with
|
||||||
|
{{% show-in "enterprise" %}}
|
||||||
|
- {{% code-placeholder-key %}}`NODE_SPEC`{{% /code-placeholder-key %}}:
|
||||||
|
a comma-delimited list of node IDs to configure the DVC on--for example:
|
||||||
|
`node-01,node-02`.
|
||||||
|
{{% /show-in %}}
|
||||||
|
- {{% code-placeholder-key %}}`COLUMNS`{{% /code-placeholder-key %}}:
|
||||||
|
a comma-delimited list of columns to cache distinct values for--for example:
|
||||||
|
`country,county,city`
|
||||||
|
- {{% code-placeholder-key %}}`MAX_CARDINALITY`{{% /code-placeholder-key %}}:
|
||||||
|
the maximum number of distinct value combinations to cache--for example: `10000`
|
||||||
|
- {{% code-placeholder-key %}}`MAX_AGE`{{% /code-placeholder-key %}}:
|
||||||
|
the maximum age of distinct values to keep in the cache in
|
||||||
|
[humantime](https://docs.rs/humantime/latest/humantime/fn.parse_duration.html)
|
||||||
|
form--for example: `6h`, `1 day`, `1 week`
|
||||||
|
- {{% code-placeholder-key %}}`DVC_NAME`{{% /code-placeholder-key %}}:
|
||||||
|
a unique name for the DVC
|
||||||
|
|
||||||
|
> [!Note]
|
||||||
|
> #### Values are cached on write
|
||||||
|
>
|
||||||
|
> Values are cached on write. When you create a cache, it will not cache
|
||||||
|
> previously written points, only newly written points.
|
||||||
|
>
|
||||||
|
> #### DVC size and persistence
|
||||||
|
>
|
||||||
|
> The DVC is stored in memory, so it's important to consider the size and
|
||||||
|
> persistence of the cache. For more information, see
|
||||||
|
> [Important things to know about the Distinct Value Cache](/influxdb3/version/admin/distinct-value-cache/#important-things-to-know-about-the-distinct-value-cache).
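
The following sketch is illustration only (the database name, token, and cache name are hypothetical; the table and columns mirror the query example later in this documentation). It pre-sets the environment variables mentioned above and then creates a DVC:

```bash
# Hypothetical values -- substitute your own database, token, and cache name
export INFLUXDB3_DATABASE_NAME=example_db
export INFLUXDB3_AUTH_TOKEN=your_admin_token

# --database and --token are read from the environment variables above
influxdb3 create distinct_cache \
  --table wind_data \
  --columns country,county,city \
  --max-cardinality 100000 \
  --max-age 24h \
  windDistinctCache
```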
|
|
@ -0,0 +1,39 @@
|
||||||
|
|
||||||
|
Use the [`influxdb3 delete distinct_cache` command](/influxdb3/version/reference/cli/influxdb3/delete/distinct_cache/)
|
||||||
|
to delete a Distinct Value Cache (DVC). Provide the following:
|
||||||
|
|
||||||
|
- **Database** (`-d`, `--database`): _({{< req >}})_ The name of the database
|
||||||
|
that the DVC you want to delete is associated with. You can also use the
|
||||||
|
`INFLUXDB3_DATABASE_NAME` environment variable to specify the database.
|
||||||
|
- **Token** (`--token`): _({{< req >}})_ Your {{< product-name >}}
|
||||||
|
{{% show-in "enterprise" %}}admin {{% /show-in %}}authentication token.
|
||||||
|
You can also use the `INFLUXDB3_AUTH_TOKEN` environment variable to specify
|
||||||
|
the token.
|
||||||
|
- **Table** (`-t`, `--table`): _({{< req >}})_ The name of the table that the
|
||||||
|
DVC you want to delete is associated with.
|
||||||
|
- **Cache name**: The name of the DVC to delete.
|
||||||
|
|
||||||
|
{{% code-placeholders "(DATABASE|TABLE|DVC)_NAME|AUTH_TOKEN" %}}
|
||||||
|
```bash
|
||||||
|
influxdb3 delete distinct_cache \
|
||||||
|
--database DATABASE_NAME \
|
||||||
|
--token AUTH_TOKEN \
|
||||||
|
--table TABLE_NAME \
|
||||||
|
DVC_NAME
|
||||||
|
```
|
||||||
|
{{% /code-placeholders %}}
|
||||||
|
|
||||||
|
Replace the following:
|
||||||
|
|
||||||
|
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
|
||||||
|
the name of the database that the DVC you want to delete is associated with
|
||||||
|
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
|
||||||
|
your {{< product-name >}} admin authentication token
|
||||||
|
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}:
|
||||||
|
the name of the table associated with the DVC you want to delete
|
||||||
|
- {{% code-placeholder-key %}}`DVC_NAME`{{% /code-placeholder-key %}}:
|
||||||
|
the name of the DVC to delete
|
||||||
|
|
||||||
|
> [!Caution]
|
||||||
|
> This is a destructive action that cannot be undone. Once deleted, any queries
|
||||||
|
> against the deleted DVC will return an error.
|
|
@ -0,0 +1,33 @@
|
||||||
|
|
||||||
|
Use the [`distinct_cache()` SQL function](/influxdb3/version/reference/sql/functions/cache/#distinct_cache)
|
||||||
|
in the `FROM` clause of an SQL `SELECT` statement to query data from the
|
||||||
|
Distinct Value Cache (DVC).
|
||||||
|
|
||||||
|
> [!Important]
|
||||||
|
> You must use SQL to query the DVC.
|
||||||
|
> InfluxQL does not support the `distinct_cache()` function.
|
||||||
|
|
||||||
|
`distinct_cache()` supports the following arguments:
|
||||||
|
|
||||||
|
- **table_name**: _({{< req >}})_ The name of the table the DVC is associated with
|
||||||
|
formatted as a string literal.
|
||||||
|
- **cache_name**: The name of the DVC to query formatted as a string literal.
|
||||||
|
This argument is only required if there is more than one DVC associated with
|
||||||
|
the specified table.
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT * FROM distinct_cache('table_name', 'cache_name')
|
||||||
|
```
|
||||||
|
|
||||||
|
You can use other [SQL clauses](/influxdb3/version/reference/sql/#statements-and-clauses)
|
||||||
|
to modify query results. For example, you can use the `WHERE` clause to return
|
||||||
|
the distinct tag values associated with another distinct tag value:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
city
|
||||||
|
FROM
|
||||||
|
distinct_cache('wind_data', 'windDistinctCache')
|
||||||
|
WHERE
|
||||||
|
country = 'Spain'
|
||||||
|
```
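
Other standard clauses work the same way. For example, an `ORDER BY` clause (a sketch that reuses the same hypothetical cache as above) sorts the cached combinations:

```sql
SELECT country, city
FROM distinct_cache('wind_data', 'windDistinctCache')
ORDER BY country, city
```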
|
|
@ -0,0 +1,69 @@
|
||||||
|
|
||||||
|
Use the [`influxdb3 show system table` command](/influxdb3/version/reference/cli/influxdb3/show/system/table/)
|
||||||
|
to query and output Distinct Value Cache information from the `distinct_caches`
|
||||||
|
system table.
|
||||||
|
|
||||||
|
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
|
||||||
|
<!-- pytest.mark.skip -->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 show system \
|
||||||
|
--database DATABASE_NAME \
|
||||||
|
--token AUTH_TOKEN \
|
||||||
|
table distinct_caches
|
||||||
|
```
|
||||||
|
{{% /code-placeholders %}}
|
||||||
|
|
||||||
|
This returns a table similar to the following:
|
||||||
|
|
||||||
|
| table | name | column_ids | column_names | max_cardinality | max_age_seconds |
|
||||||
|
| :-------- | :--------------- | :--------- | :---------------------- | --------------: | --------------: |
|
||||||
|
| wind_data | wind_distinct | [0, 1, 2] | [country, county, city] | 100000 | 86400 |
|
||||||
|
| weather | weather_distinct | [0] | [location] | 100 | 604800 |
|
||||||
|
| bitcoin | bitcoin_dis | [0, 1] | [code, crypto] | 5000 | 86400 |
|
||||||
|
| home | home_distinct | [0, 1] | [room, wall] | 12000 | 15770000 |
|
||||||
|
|
||||||
|
## Query specific columns from the distinct_caches system table
|
||||||
|
|
||||||
|
Use the `--select` option to query specific columns from the `distinct_caches`
|
||||||
|
system table. Provide a comma-delimited list of columns to return:
|
||||||
|
|
||||||
|
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
|
||||||
|
<!-- pytest.mark.skip -->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 show system \
|
||||||
|
--database DATABASE_NAME \
|
||||||
|
--token AUTH_TOKEN \
|
||||||
|
table distinct_caches \
|
||||||
|
--select name,column_names,max_age_seconds
|
||||||
|
```
|
||||||
|
{{% /code-placeholders %}}
|
||||||
|
|
||||||
|
## Sort distinct_caches system table output
|
||||||
|
|
||||||
|
Use the `--order-by` option to sort data from the `distinct_caches` system table by
|
||||||
|
specific columns. Provide a comma-delimited list of columns to sort by:
|
||||||
|
|
||||||
|
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
|
||||||
|
<!-- pytest.mark.skip -->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 show system \
|
||||||
|
--database DATABASE_NAME \
|
||||||
|
--token AUTH_TOKEN \
|
||||||
|
table distinct_caches \
|
||||||
|
--order-by max_cardinality,max_age_seconds
|
||||||
|
```
|
||||||
|
{{% /code-placeholders %}}
|
||||||
|
|
||||||
|
> [!Note]
|
||||||
|
> Results are sorted in ascending order based on the provided columns.
|
||||||
|
|
||||||
|
In the examples above, replace the following:
|
||||||
|
|
||||||
|
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
|
||||||
|
the name of the database to query system data from
|
||||||
|
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
|
||||||
|
your {{< product-name >}} {{% show-in "enterprise" %}}admin {{% /show-in %}}
|
||||||
|
authentication token
|
|
@ -0,0 +1,160 @@
|
||||||
|
|
||||||
|
The {{< product-name >}} Last Value Cache (LVC) lets you cache the most recent
|
||||||
|
values for specific fields in a table, improving the performance of queries that
|
||||||
|
return the most recent value of a field for specific series or the last N values
|
||||||
|
of a field.
|
||||||
|
|
||||||
|
The LVC is an in-memory cache that stores the last N values for
|
||||||
|
specific fields of series in a table. When you create an LVC, you can specify
|
||||||
|
what fields to cache, what tags to use to identify each series, and the
|
||||||
|
number of values to cache for each unique series.
|
||||||
|
An LVC is associated with a single table, and a table can have multiple LVCs.
|
||||||
|
|
||||||
|
{{< children type="anchored-list" >}}
|
||||||
|
- [Important things to know about the Last Value Cache](#important-things-to-know-about-the-last-value-cache)
|
||||||
|
- [High cardinality key columns](#high-cardinality-key-columns)
|
||||||
|
- [Value count](#value-count)
|
||||||
|
{{% show-in "core" %}}
|
||||||
|
- [Last Value Caches are flushed when the server stops](#last-value-caches-are-flushed-when-the-server-stops)
|
||||||
|
{{% /show-in %}}
|
||||||
|
{{% show-in "enterprise" %}}
|
||||||
|
- [Last Value Caches are rebuilt on restart](#last-value-caches-are-rebuilt-on-restart)
|
||||||
|
{{% /show-in %}}
|
||||||
|
- [Defining value columns](#defining-value-columns)
|
||||||
|
|
||||||
|
Consider a dataset with the following schema (similar to the
|
||||||
|
[home sensor sample dataset](/influxdb3/version/reference/sample-data/#home-sensor-data)):
|
||||||
|
|
||||||
|
- home (table)
|
||||||
|
- tags:
|
||||||
|
- room
|
||||||
|
- kitchen
|
||||||
|
- living room
|
||||||
|
- wall
|
||||||
|
- north
|
||||||
|
- east
|
||||||
|
- south
|
||||||
|
- fields:
|
||||||
|
- co (integer)
|
||||||
|
- temp (float)
|
||||||
|
- hum (float)
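
For reference, a point written to this schema would use line protocol similar to the following sketch (hypothetical values; `co` is an integer field, `temp` and `hum` are floats):

```
home,room=Kitchen,wall=east co=26i,temp=22.7,hum=36.5
```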
|
||||||
|
|
||||||
|
If you cache the last value for each field per room and wall, the LVC looks
|
||||||
|
similar to this:
|
||||||
|
|
||||||
|
{{% influxdb/custom-timestamps %}}
|
||||||
|
|
||||||
|
| room | wall | co | hum | temp | time |
|
||||||
|
| :---------- | :---- | --: | ---: | ---: | :------------------- |
|
||||||
|
| Kitchen | east | 26 | 36.5 | 22.7 | 2022-01-01T20:00:00Z |
|
||||||
|
| Living Room | north | 17 | 36.4 | 22.2 | 2022-01-01T20:00:00Z |
|
||||||
|
| Living Room | south | 16 | 36.3 | 22.1 | 2022-01-01T20:00:00Z |
|
||||||
|
|
||||||
|
If you cache the last four values of each field per room and wall, the LVC looks
|
||||||
|
similar to the following:
|
||||||
|
|
||||||
|
| room | wall | co | hum | temp | time |
|
||||||
|
| :---------- | :---- | --: | ---: | ---: | :------------------ |
|
||||||
|
| Kitchen | east | 26 | 36.5 | 22.7 | 2022-01-01T20:00:00Z |
|
||||||
|
| Kitchen | east | 9 | 36.0 | 22.7 | 2022-01-01T17:00:00Z |
|
||||||
|
| Kitchen | east | 3 | 36.2 | 22.7 | 2022-01-01T15:00:00Z |
|
||||||
|
| Kitchen | east | 0 | 36.1 | 22.7 | 2022-01-01T10:00:00Z |
|
||||||
|
| Living Room | north | 17 | 36.4 | 22.2 | 2022-01-01T20:00:00Z |
|
||||||
|
| Living Room | north | 5 | 35.9 | 22.6 | 2022-01-01T17:00:00Z |
|
||||||
|
| Living Room | north | 1 | 36.1 | 22.3 | 2022-01-01T15:00:00Z |
|
||||||
|
| Living Room | north | 0 | 36.0 | 21.8 | 2022-01-01T10:00:00Z |
|
||||||
|
| Living Room | south | 16 | 36.3 | 22.1 | 2022-01-01T20:00:00Z |
|
||||||
|
| Living Room | south | 4 | 35.8 | 22.5 | 2022-01-01T17:00:00Z |
|
||||||
|
| Living Room | south | 0 | 36.0 | 22.3 | 2022-01-01T15:00:00Z |
|
||||||
|
| Living Room | south | 0 | 35.9 | 21.8 | 2022-01-01T10:00:00Z |
|
||||||
|
|
||||||
|
{{% /influxdb/custom-timestamps %}}
|
||||||
|
|
||||||
|
> [!Note]
|
||||||
|
> #### Null column values
|
||||||
|
>
|
||||||
|
> _Null_ column values are still considered values and are cached in the LVC.
|
||||||
|
> If you write data to a table and don't provide a value for an existing column,
|
||||||
|
> the column value is cached as _null_.
|
||||||
|
|
||||||
|
{{< children hlevel="h2" >}}
|
||||||
|
|
||||||
|
## Important things to know about the Last Value Cache
|
||||||
|
|
||||||
|
LVCs are stored in memory; the larger the cache, the more memory your InfluxDB 3 node requires to
|
||||||
|
maintain it. Consider the following:
|
||||||
|
|
||||||
|
- [High cardinality key columns](#high-cardinality-key-columns)
|
||||||
|
- [Value count](#value-count)
|
||||||
|
{{% show-in "core" %}}
|
||||||
|
- [Last Value Caches are flushed when the server stops](#last-value-caches-are-flushed-when-the-server-stops)
|
||||||
|
{{% /show-in %}}
|
||||||
|
{{% show-in "enterprise" %}}
|
||||||
|
- [Last Value Caches are rebuilt on restart](#last-value-caches-are-rebuilt-on-restart)
|
||||||
|
{{% /show-in %}}
|
||||||
|
- [Defining value columns](#defining-value-columns)
|
||||||
|
|
||||||
|
### High cardinality key columns
|
||||||
|
|
||||||
|
“Cardinality” refers to the number of unique key column combinations in your
|
||||||
|
cached data. While the InfluxDB 3 storage engine is not limited by cardinality,
|
||||||
|
it does affect the LVC. Higher cardinality increases memory requirements for
|
||||||
|
storing the LVC and can affect LVC query performance. We recommend the
|
||||||
|
following:
|
||||||
|
|
||||||
|
- Only use tags important to your query workload as key columns in the LVC.
|
||||||
|
Caching unnecessary tags or fields as key columns results in higher
|
||||||
|
cardinality without any benefit.
|
||||||
|
- Avoid including high-cardinality key columns in your LVC.
|
||||||
|
- Don’t include multiple high-cardinality key columns in your LVC.
|
||||||
|
|
||||||
|
To estimate total key column cardinality in an LVC, use the
following equation:

```txt
num_uniq_col_val_N [× num_uniq_col_val_N …] = key_column_cardinality
```
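
As a quick illustration of the equation (hypothetical numbers): caching two tag columns, one with 4 unique values and one with 25, gives an estimated key column cardinality of:

```txt
4 × 25 = 100
```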
|
||||||
|
|
||||||
|
### Value count
|
||||||
|
|
||||||
|
By increasing the number of values to store in the LVC, you increase the number
|
||||||
|
of rows stored in the cache and the amount of memory required to store them. Be
|
||||||
|
judicious with the number of values to store. This count is per unique key
|
||||||
|
column combination. If you include two tags as key columns, one with three
|
||||||
|
unique values and the other with 10, you could have up to 30 unique key column
|
||||||
|
combinations. If you keep the last 10 values for each combination, the cache
could hold up to 300 rows.
|
||||||
|
|
||||||
|
To get an idea of the number of rows required to cache the specified number of
values, use the following equation:

```txt
key_column_cardinality × count = number_of_rows
```
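
Using the example above, two key columns with 3 and 10 unique values and a count of 10 yield:

```txt
(3 × 10) × 10 = 300 rows
```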
|
||||||
|
|
||||||
|
{{% show-in "core" %}}
|
||||||
|
### Last Value Caches are flushed when the server stops
|
||||||
|
|
||||||
|
Because the LVC is an in-memory cache, the cache is flushed any time the server
|
||||||
|
stops. After a server restart, {{% product-name %}} only writes new values to the LVC when
|
||||||
|
you write data, so there may be a period of time when some values are
|
||||||
|
unavailable in the LVC.
|
||||||
|
{{% /show-in %}}
|
||||||
|
|
||||||
|
{{% show-in "enterprise" %}}
|
||||||
|
### Last Value Caches are rebuilt on restart
|
||||||
|
|
||||||
|
Because the LVC is an in-memory cache, the cache is flushed any time the server
|
||||||
|
stops. After a server restarts, {{< product-name >}} uses persisted data to
|
||||||
|
rebuild the LVC.
|
||||||
|
{{% /show-in %}}
|
||||||
|
|
||||||
|
### Defining value columns
|
||||||
|
|
||||||
|
When creating an LVC, if you include the `--value-columns` option to specify
which fields to cache as value columns, any new fields added in the future will
not be added to the cache.
|
||||||
|
|
||||||
|
However, if you omit the `--value-columns` option, all columns other than those
|
||||||
|
specified as `--key-columns` are cached as value columns, including columns that
|
||||||
|
are added later.
|
|
@ -0,0 +1,122 @@
|
||||||
|
|
||||||
|
Use the [`influxdb3 create last_cache` command](/influxdb3/version/reference/cli/influxdb3/create/last_cache/)
|
||||||
|
to create a Last Value Cache (LVC). Provide the following:
|
||||||
|
|
||||||
|
- **Database** (`-d`, `--database`): _({{< req >}})_ The name of the database to
|
||||||
|
associate the LVC with. You can also use the `INFLUXDB3_DATABASE_NAME`
|
||||||
|
environment variable to specify the database.
|
||||||
|
- **Token** (`--token`): _({{< req >}})_ Your {{< product-name >}}
|
||||||
|
{{% show-in "enterprise" %}}admin {{% /show-in %}}authentication token.
|
||||||
|
You can also use the `INFLUXDB3_AUTH_TOKEN` environment variable to specify
|
||||||
|
the token.
|
||||||
|
- **Table** (`-t`, `--table`): _({{< req >}})_ The name of the table to
|
||||||
|
associate the LVC with.
|
||||||
|
{{% show-in "enterprise" %}}
|
||||||
|
- **Node specification** (`-n`, `--node-spec`): Specify which nodes the LVC
|
||||||
|
should be configured on.
|
||||||
|
{{% /show-in %}}
|
||||||
|
- **Key columns** (`--key-columns`): Specify which columns to include in the
|
||||||
|
primary key of the cache. Rows in the LVC are uniquely identified by their
|
||||||
|
timestamp and key columns, so include all the columns you need to identify
|
||||||
|
each row. These are typically tags, but you can use any columns with the
|
||||||
|
following types:
|
||||||
|
|
||||||
|
- String
|
||||||
|
- Integer
|
||||||
|
- Unsigned integer
|
||||||
|
- Boolean
|
||||||
|
|
||||||
|
- **Value columns** (`--value-columns`): Specify which columns to cache as value
|
||||||
|
columns. These are typically fields but can also be tags. By default, `time` and
|
||||||
|
columns other than those specified as `--key-columns` are cached as value columns.
|
||||||
|
- **Count** (`--count`): The number of values to cache per unique key column combination.
|
||||||
|
The supported range is `[1-10]`. The default count is `1`.
|
||||||
|
- **Time-to-Live (TTL)** (`--ttl`): The time-to-live for cached values in
|
||||||
|
[humantime](https://docs.rs/humantime/latest/humantime/fn.parse_duration.html)
|
||||||
|
form. The default TTL is four hours.
|
||||||
|
- **Cache name**: A unique name for the cache. If you don’t provide one,
|
||||||
|
InfluxDB automatically generates a cache name for you.
|
||||||
|
|
||||||
|
{{% show-in "core" %}}
|
||||||
|
<!----------------------------- BEGIN CORE EXAMPLE ---------------------------->
|
||||||
|
{{% code-placeholders "(DATABASE|TABLE|LVC)_NAME|AUTH_TOKEN|(KEY|VALUE)_COLUMNS|COUNT|TTL" %}}
|
||||||
|
|
||||||
|
<!--pytest.mark.skip-->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 create last_cache \
|
||||||
|
--database DATABASE_NAME \
|
||||||
|
--token AUTH_TOKEN \
|
||||||
|
--table TABLE_NAME \
|
||||||
|
--key-columns KEY_COLUMNS \
|
||||||
|
--value-columns VALUE_COLUMNS \
|
||||||
|
--count COUNT \
|
||||||
|
  --ttl TTL \
|
||||||
|
LVC_NAME
|
||||||
|
```
|
||||||
|
{{% /code-placeholders %}}
|
||||||
|
<!------------------------------ END CORE EXAMPLE ----------------------------->
|
||||||
|
{{% /show-in %}}
|
||||||
|
|
||||||
|
{{% show-in "enterprise" %}}
|
||||||
|
<!-------------------------- BEGIN ENTERPRISE EXAMPLE ------------------------->
|
||||||
|
{{% code-placeholders "(DATABASE|TABLE|LVC)_NAME|AUTH_TOKEN|NODE_SPEC|(KEY|VALUE)_COLUMNS|COUNT|TTL" %}}
|
||||||
|
|
||||||
|
<!--pytest.mark.skip-->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 create last_cache \
|
||||||
|
--database DATABASE_NAME \
|
||||||
|
--token AUTH_TOKEN \
|
||||||
|
--table TABLE_NAME \
|
||||||
|
  --node-spec NODE_SPEC \
|
||||||
|
--key-columns KEY_COLUMNS \
|
||||||
|
--value-columns VALUE_COLUMNS \
|
||||||
|
--count COUNT \
|
||||||
|
  --ttl TTL \
|
||||||
|
LVC_NAME
|
||||||
|
```
|
||||||
|
{{% /code-placeholders %}}
|
||||||
|
<!--------------------------- END ENTERPRISE EXAMPLE -------------------------->
|
||||||
|
{{% /show-in %}}
|
||||||
|
|
||||||
|
Replace the following:
|
||||||
|
|
||||||
|
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
|
||||||
|
the name of the database to associate the LVC with
|
||||||
|
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
|
||||||
|
your {{< product-name >}} {{% show-in "enterprise" %}}admin {{% /show-in %}}
|
||||||
|
authentication token
|
||||||
|
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}:
|
||||||
|
the name of the table to associate the LVC with
|
||||||
|
{{% show-in "enterprise" %}}
|
||||||
|
- {{% code-placeholder-key %}}`NODE_SPEC`{{% /code-placeholder-key %}}:
|
||||||
|
a comma-delimited list of node IDs to configure the LVC on--for example:
|
||||||
|
`node-01,node-02`.
|
||||||
|
{{% /show-in %}}
|
||||||
|
- {{% code-placeholder-key %}}`KEY_COLUMNS`{{% /code-placeholder-key %}}:
|
||||||
|
  a comma-delimited list of columns used to uniquely identify each series--for
|
||||||
|
example: `room,wall`
|
||||||
|
- {{% code-placeholder-key %}}`VALUE_COLUMNS`{{% /code-placeholder-key %}}:
|
||||||
|
a comma-delimited list of columns to cache as value columns--for
|
||||||
|
example: `temp,hum,co`
|
||||||
|
- {{% code-placeholder-key %}}`COUNT`{{% /code-placeholder-key %}}:
|
||||||
|
the number of last values to cache per series--for example: `5`
|
||||||
|
- {{% code-placeholder-key %}}`TTL`{{% /code-placeholder-key %}}:
|
||||||
|
the TTL of cached values in
|
||||||
|
[humantime](https://docs.rs/humantime/latest/humantime/fn.parse_duration.html)
|
||||||
|
form--for example: `10s`, `1min 30sec`, `3 hours`
|
||||||
|
- {{% code-placeholder-key %}}`LVC_NAME`{{% /code-placeholder-key %}}:
|
||||||
|
a unique name for the LVC
|
||||||
|
|
||||||
|
> [!Note]
|
||||||
|
> #### Values are cached on write
|
||||||
|
>
|
||||||
|
> Values are cached on write. When you create a cache, it will not cache
|
||||||
|
> previously written points, only newly written points.
|
||||||
|
>
|
||||||
|
> #### LVC size and persistence
|
||||||
|
>
|
||||||
|
> The LVC is stored in memory, so it's important to consider the size and persistence
|
||||||
|
> of the cache. For more information, see
|
||||||
|
> [Important things to know about the Last Value Cache](/influxdb3/version/admin/last-value-cache/#important-things-to-know-about-the-last-value-cache).
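
As an illustration only, the following sketch creates an LVC for the `home` table described earlier, keeping the last four values of `temp`, `hum`, and `co` per `room` and `wall` (the database, token, and cache name are hypothetical):

```bash
influxdb3 create last_cache \
  --database example_db \
  --token "$INFLUXDB3_AUTH_TOKEN" \
  --table home \
  --key-columns room,wall \
  --value-columns temp,hum,co \
  --count 4 \
  --ttl "3 hours" \
  homeLastCache
```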
|
|
@ -0,0 +1,40 @@
|
||||||
|
|
||||||
|
Use the [`influxdb3 delete last_cache` command](/influxdb3/version/reference/cli/influxdb3/delete/last_cache/)
|
||||||
|
to delete a Last Value Cache (LVC). Provide the following:
|
||||||
|
|
||||||
|
- **Database** (`-d`, `--database`): _({{< req >}})_ The name of the database
|
||||||
|
that the LVC you want to delete is associated with. You can also use the
|
||||||
|
`INFLUXDB3_DATABASE_NAME` environment variable to specify the database.
|
||||||
|
- **Token** (`--token`): _({{< req >}})_ Your {{< product-name >}}
|
||||||
|
{{% show-in "enterprise" %}}admin {{% /show-in %}}authentication token.
|
||||||
|
You can also use the `INFLUXDB3_AUTH_TOKEN` environment variable to specify
|
||||||
|
  the token.
|
||||||
|
- **Table** (`-t`, `--table`): _({{< req >}})_ The name of the table that the
|
||||||
|
LVC you want to delete is associated with.
|
||||||
|
- **Cache name**: The name of the LVC to delete.
|
||||||
|
|
||||||
|
{{% code-placeholders "(DATABASE|TABLE|LVC)_NAME|AUTH_TOKEN" %}}
|
||||||
|
```bash
|
||||||
|
influxdb3 delete last_cache \
|
||||||
|
--database DATABASE_NAME \
|
||||||
|
--token AUTH_TOKEN \
|
||||||
|
--table TABLE_NAME \
|
||||||
|
LVC_NAME
|
||||||
|
```
|
||||||
|
{{% /code-placeholders %}}
|
||||||
|
|
||||||
|
Replace the following:
|
||||||
|
|
||||||
|
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
|
||||||
|
the name of the database that the LVC you want to delete is associated with
|
||||||
|
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
|
||||||
|
your {{< product-name >}} {{% show-in "enterprise" %}}admin {{% /show-in %}}
|
||||||
|
authentication token
|
||||||
|
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}:
|
||||||
|
the name of the table that the LVC you want to delete is associated with
|
||||||
|
- {{% code-placeholder-key %}}`LVC_NAME`{{% /code-placeholder-key %}}:
|
||||||
|
the name of the LVC to delete
|
||||||
|
|
||||||
|
> [!Caution]
|
||||||
|
> This is a destructive action that cannot be undone. Once deleted, any queries
|
||||||
|
> against the deleted LVC will return an error.
|
|
@ -0,0 +1,34 @@
|
||||||
|
|
||||||
|
Use the [`last_cache()` SQL function](/influxdb3/version/reference/sql/functions/cache/#last_cache)
|
||||||
|
in the `FROM` clause of an SQL `SELECT` statement to query data from the
|
||||||
|
Last Value Cache (LVC).
|
||||||
|
|
||||||
|
> [!Important]
|
||||||
|
> You must use SQL to query the LVC.
|
||||||
|
> InfluxQL does not support the `last_cache()` function.
|
||||||
|
|
||||||
|
`last_cache()` supports the following arguments:
|
||||||
|
|
||||||
|
- **table_name**: _({{< req >}})_ The name of the table the LVC is associated with
|
||||||
|
formatted as a string literal.
|
||||||
|
- **cache_name**: The name of the LVC to query formatted as a string literal.
|
||||||
|
This argument is only required if there is more than one LVC associated with the specified
|
||||||
|
table.
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT * FROM last_cache('table_name', 'cache_name')
|
||||||
|
```
|
||||||
|
|
||||||
|
You can use other [SQL clauses](/influxdb3/version/reference/sql/#statements-and-clauses)
|
||||||
|
to modify query results. For example, you can use the `WHERE` clause to return
|
||||||
|
the last value for a specific tag set:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
room,
|
||||||
|
temp
|
||||||
|
FROM
|
||||||
|
last_cache('home', 'homeCache')
|
||||||
|
WHERE
|
||||||
|
room = 'Kitchen'
|
||||||
|
```
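
When the cache stores more than one value per series, other standard clauses apply as well. For example, this sketch (using the same example cache) returns the cached values for the kitchen ordered from newest to oldest:

```sql
SELECT room, temp, time
FROM last_cache('home', 'homeCache')
WHERE room = 'Kitchen'
ORDER BY time DESC
```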
|
|
@ -0,0 +1,68 @@
|
||||||
|
|
||||||
|
Use the [`influxdb3 show system table` command](/influxdb3/version/reference/cli/influxdb3/show/system/table/)
|
||||||
|
to query and output Last Value Cache information from the `last_caches` system table.
|
||||||
|
|
||||||
|
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
|
||||||
|
<!-- pytest.mark.skip -->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 show system \
|
||||||
|
--database DATABASE_NAME \
|
||||||
|
--token AUTH_TOKEN \
|
||||||
|
table last_caches
|
||||||
|
```
|
||||||
|
{{% /code-placeholders %}}
|
||||||
|
|
||||||
|
This returns a table similar to the following:
|
||||||
|
|
||||||
|
| table | name | key_column_ids | key_column_names | value_column_ids | value_column_names | count | ttl |
|
||||||
|
| ------- | ------------ | -------------- | ---------------- | ---------------- | ------------------------------------------------ | ----- | ----- |
|
||||||
|
| weather | weather_last | [0] | [location] | [2, 3, 4, 5, 1] | [precip, temp_avg, temp_max, temp_min, wind_avg] | 1 | 86400 |
|
||||||
|
| bitcoin | bitcoin_last | [0, 1] | [code, crypto] | [4] | [price] | 1 | 14400 |
|
||||||
|
| numbers | numbers_last | [] | [] | [0, 1] | [a, b] | 5 | 14400 |
|
||||||
|
| home | home_last | [0] | [room] | [1, 2, 3] | [temp, hum, co] | 5 | 60 |
|
||||||
|
|
||||||
|
## Query specific columns from the last_caches system table
|
||||||
|
|
||||||
|
Use the `--select` option to query specific columns from the `last_caches`
|
||||||
|
system table. Provide a comma-delimited list of columns to return:
|
||||||
|
|
||||||
|
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
|
||||||
|
<!-- pytest.mark.skip -->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 show system \
|
||||||
|
--database DATABASE_NAME \
|
||||||
|
--token AUTH_TOKEN \
|
||||||
|
table last_caches \
|
||||||
|
--select name,key_column_names,value_column_names
|
||||||
|
```
|
||||||
|
{{% /code-placeholders %}}
|
||||||
|
|
||||||
|
## Sort last_caches system table output
|
||||||
|
|
||||||
|
Use the `--order-by` option to sort data from the `last_caches` system table by
|
||||||
|
specific columns. Provide a comma-delimited list of columns to sort by:
|
||||||
|
|
||||||
|
{{% code-placeholders "DATABASE_NAME|AUTH_TOKEN" %}}
|
||||||
|
<!-- pytest.mark.skip -->
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 show system \
|
||||||
|
--database DATABASE_NAME \
|
||||||
|
--token AUTH_TOKEN \
|
||||||
|
table last_caches \
|
||||||
|
--order-by table,ttl
|
||||||
|
```
|
||||||
|
{{% /code-placeholders %}}
|
||||||
|
|
||||||
|
> [!Note]
|
||||||
|
> Results are sorted in ascending order based on the provided columns.
|
||||||
|
|
||||||
|
In the examples above, replace the following:
|
||||||
|
|
||||||
|
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
|
||||||
|
the name of the database to query system data from
|
||||||
|
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
|
||||||
|
your {{< product-name >}} {{% show-in "enterprise" %}}admin {{% /show-in %}}
|
||||||
|
authentication token
|
|
@ -0,0 +1,10 @@
|
||||||
|
Manage tokens to authenticate and authorize access to resources and data in your
|
||||||
|
{{< product-name >}} instance.
|
||||||
|
|
||||||
|
> [!Note]
|
||||||
|
> #### Store secure tokens in a secret store
|
||||||
|
>
|
||||||
|
> Token strings are returned _only_ on token creation.
|
||||||
|
> We recommend storing database tokens in a **secure secret store**.
|
||||||
|
|
||||||
|
{{< children hlevel="h2" readmore=true hr=true >}}
|
|
@ -0,0 +1,6 @@
|
||||||
|
|
||||||
|
Manage admin tokens in your {{< product-name >}} instance.
|
||||||
|
An admin token grants
|
||||||
|
access to all actions (CLI commands and API endpoints) for the server.
|
||||||
|
|
||||||
|
{{< children hlevel="h2" readmore=true hr=true >}}
|
|
@ -0,0 +1,33 @@
|
||||||
|
|
||||||
|
Use the [`influxdb3 create token --admin` subcommand](/influxdb3/version/reference/cli/influxdb3/create/token/)
|
||||||
|
or the [HTTP API](/influxdb3/version/api/v3/)
|
||||||
|
to create an [admin token](/influxdb3/version/admin/tokens/admin/) for your {{< product-name omit="Clustered" >}} instance.
|
||||||
|
An admin token grants full access to all actions for your InfluxDB 3 instance.
|
||||||
|
|
||||||
|
> [!Note]
|
||||||
|
> #### Store secure tokens in a secret store
|
||||||
|
>
|
||||||
|
> Token strings are returned _only_ on token creation.
|
||||||
|
> We recommend storing database tokens in a **secure secret store**.
|
||||||
|
> If you lose the admin token string, you must regenerate the token.
|
||||||
|
|
||||||
|
## Create an admin token
|
||||||
|
|
||||||
|
- [Use the influxdb3 CLI](#use-the-influxdb3-cli)
|
||||||
|
- [Use the HTTP API](#use-the-http-api)
|
||||||
|
|
||||||
|
### Use the influxdb3 CLI

Use the `influxdb3 create token --admin` command:

```bash
influxdb3 create token --admin
```

The command returns the token string in plain text.

To use the token as the default for later commands, and to persist the token
across sessions, assign the token string to the `INFLUXDB3_AUTH_TOKEN` environment variable.
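
For example, in a Bash-style shell (a sketch; the token string shown is a placeholder for whatever the command returned):

```bash
# Current session only
export INFLUXDB3_AUTH_TOKEN=your_admin_token_string

# Persist across sessions by adding the export to your shell profile
echo 'export INFLUXDB3_AUTH_TOKEN=your_admin_token_string' >> ~/.bashrc
```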

> [!Caution]
> Protect your admin token. Anyone with access to the admin token has full control over your {{< product-name >}} instance.
|
|
@ -0,0 +1,29 @@
|
||||||
|
Use the `influxdb3` CLI to list tokens, including admin tokens.
|
||||||
|
|
||||||
|
## Use the CLI
|
||||||
|
|
||||||
|
```bash
|
||||||
|
influxdb3 show tokens
|
||||||
|
```
|
||||||
|
|
||||||
|
The command lists metadata for all tokens in your InfluxDB 3 instance, including
|
||||||
|
the `_admin` token.
|
||||||
|
The token metadata includes the hash of the token string.
|
||||||
|
InfluxDB 3 does not store the token string.
|
||||||
|
|
||||||
|
### Output formats
|
||||||
|
|
||||||
|
The `influxdb3 show tokens` command supports the following output formats:
|
||||||
|
|
||||||
|
- `pretty` _(default)_
|
||||||
|
- `json`
|
||||||
|
- `jsonl`
|
||||||
|
- `csv`
|
||||||
|
<!-- - `parquet` _(must [output to a file](#output-to-a-parquet-file))_ -->
|
||||||
|
|
||||||
|
Use the `--format` flag to specify the output format:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
influxdb3 show tokens \
|
||||||
|
--format json
|
||||||
|
```
|
|
@ -0,0 +1,15 @@
|
||||||
|
|
||||||
|
## Use the CLI to regenerate an admin token
|
||||||
|
|
||||||
|
To regenerate an admin token, use the `influxdb3 create token --admin` subcommand
with the `--regenerate` flag. Regenerating revokes the existing admin token and
issues a new token string.

```bash
influxdb3 create token --admin \
  --token ADMIN_TOKEN \
  --host {{< influxdb/host >}} \
  --regenerate
```

In your command, replace `ADMIN_TOKEN` with the current token string.

By default, `influxdb3` asks for confirmation before regenerating the token.
|
|
@ -12,8 +12,8 @@ influxdb3 create <SUBCOMMAND>
|
||||||
|
|
||||||
## Subcommands
|
## Subcommands
|
||||||
|
|
||||||
| Subcommand | Description |
|
| Subcommand | Description |
|
||||||
| :------------------------------------------------------------------------------------- | :---------------------------------------------- |
|
| :---------------------------------------------------------------------------------- | :---------------------------------------------- |
|
||||||
| [database](/influxdb3/version/reference/cli/influxdb3/create/database/) | Create a new database |
|
| [database](/influxdb3/version/reference/cli/influxdb3/create/database/) | Create a new database |
|
||||||
| [file_index](/influxdb3/version/reference/cli/influxdb3/create/file_index/) | Create a new file index for a database or table |
|
| [file_index](/influxdb3/version/reference/cli/influxdb3/create/file_index/) | Create a new file index for a database or table |
|
||||||
| [last_cache](/influxdb3/version/reference/cli/influxdb3/create/last_cache/) | Create a new last value cache |
|
| [last_cache](/influxdb3/version/reference/cli/influxdb3/create/last_cache/) | Create a new last value cache |
|
||||||
|
@ -22,10 +22,11 @@ influxdb3 create <SUBCOMMAND>
|
||||||
| [table](/influxdb3/version/reference/cli/influxdb3/create/table/) | Create a new table in a database |
|
| [table](/influxdb3/version/reference/cli/influxdb3/create/table/) | Create a new table in a database |
|
||||||
| [token](/influxdb3/version/reference/cli/influxdb3/create/token/) | Create a new authentication token |
|
| [token](/influxdb3/version/reference/cli/influxdb3/create/token/) | Create a new authentication token |
|
||||||
| [trigger](/influxdb3/version/reference/cli/influxdb3/create/trigger/) | Create a new trigger for the processing engine |
|
| [trigger](/influxdb3/version/reference/cli/influxdb3/create/trigger/) | Create a new trigger for the processing engine |
|
||||||
| help | Print command help or the help of a subcommand |
|
| help | Print command help or the help of a subcommand |
|
||||||
|
|
||||||
## Options
|
## Options
|
||||||
|
|
||||||
| Option | | Description |
|
| Option | | Description |
|
||||||
| :----- | :------- | :--------------------- |
|
| :----- | :----------- | :------------------------------ |
|
||||||
| `-h` | `--help` | Print help information |
|
| `-h` | `--help` | Print help information |
|
||||||
|
| | `--help-all` | Print detailed help information |
|
||||||
|
|
|
@ -23,7 +23,9 @@ influxdb3 create database [OPTIONS] <DATABASE_NAME>
|
||||||
| :----- | :----------- | :--------------------------------------------------------------------------------------- |
|
| :----- | :----------- | :--------------------------------------------------------------------------------------- |
|
||||||
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
|
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
|
||||||
| | `--token` | Authentication token |
|
| | `--token` | Authentication token |
|
||||||
|
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
|
||||||
| `-h` | `--help` | Print help information |
|
| `-h` | `--help` | Print help information |
|
||||||
|
| | `--help-all` | Print detailed help information |
|
||||||
|
|
||||||
### Option environment variables
|
### Option environment variables
|
||||||
|
|
||||||
|
|
|
@ -8,6 +8,7 @@ The `influxdb3 create distinct_cache` command creates a new distinct value cache
|
||||||
```bash
|
```bash
|
||||||
influxdb3 create distinct_cache [OPTIONS] \
|
influxdb3 create distinct_cache [OPTIONS] \
|
||||||
--database <DATABASE_NAME> \
|
--database <DATABASE_NAME> \
|
||||||
|
--token <AUTH_TOKEN>
|
||||||
--table <TABLE> \
|
--table <TABLE> \
|
||||||
--columns <COLUMNS> \
|
--columns <COLUMNS> \
|
||||||
[CACHE_NAME]
|
[CACHE_NAME]
|
||||||
|
@ -24,12 +25,14 @@ influxdb3 create distinct_cache [OPTIONS] \
|
||||||
| :----- | :------------------ | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
| :----- | :------------------ | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
|
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
|
||||||
| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
|
| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
|
||||||
| | `--token` | Authentication token |
|
| | `--token` | _({{< req >}})_ Authentication token |
|
||||||
| `-t` | `--table` | _({{< req >}})_ Table to create the cache for |
|
| `-t` | `--table` | _({{< req >}})_ Table to create the cache for |
|
||||||
| | `--columns` | _({{< req >}})_ Comma-separated list of columns to cache distinct values for--for example: `col1,col2,col3` (see [Metadata cache hierarchy](#metadata-cache-hierarchy)) |
|
| | `--columns` | _({{< req >}})_ Comma-separated list of columns to cache distinct values for--for example: `col1,col2,col3` (see [Metadata cache hierarchy](#metadata-cache-hierarchy)) |
|
||||||
| | `--max-cardinality` | Maximum number of distinct value combinations to hold in the cache |
|
| | `--max-cardinality` | Maximum number of distinct value combinations to hold in the cache |
|
||||||
| | `--max-age` | Maximum age of an entry in the cache entered as a human-readable duration--for example: `30d`, `24h` |
|
| | `--max-age` | Maximum age of an entry in the cache entered as a human-readable duration--for example: `30d`, `24h` |
|
||||||
|
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
|
||||||
| `-h` | `--help` | Print help information |
|
| `-h` | `--help` | Print help information |
|
||||||
|
| | `--help-all` | Print detailed help information |
|
||||||
|
|
||||||
> [!Important]
|
> [!Important]
|
||||||
>
|
>
|
||||||
|
|
|
@ -7,7 +7,10 @@ database or table.
|
||||||
<!--pytest.mark.skip-->
|
<!--pytest.mark.skip-->
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
influxdb3 create file_index [OPTIONS] --database <DATABASE_NAME> <COLUMNS>...
|
influxdb3 create file_index [OPTIONS] \
|
||||||
|
--database <DATABASE_NAME> \
|
||||||
|
--token <AUTH_TOKEN> \
|
||||||
|
<COLUMNS>...
|
||||||
```
|
```
|
||||||
|
|
||||||
## Arguments
|
## Arguments
|
||||||
|
@ -20,9 +23,11 @@ influxdb3 create file_index [OPTIONS] --database <DATABASE_NAME> <COLUMNS>...
|
||||||
| :----- | :----------- | :--------------------------------------------------------------------------------------- |
|
| :----- | :----------- | :--------------------------------------------------------------------------------------- |
|
||||||
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
|
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
|
||||||
| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
|
| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
|
||||||
| | `--token` | Authentication token |
|
| | `--token` | _({{< req >}})_ Authentication token |
|
||||||
| `-t` | `--table` | Table to apply the file index too |
|
| `-t` | `--table` | Table to apply the file index too |
|
||||||
|
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
|
||||||
| `-h` | `--help` | Print help information |
|
| `-h` | `--help` | Print help information |
|
||||||
|
| | `--help-all` | Print detailed help information |
|
||||||
|
|
||||||
### Option environment variables
|
### Option environment variables
|
||||||
|
|
||||||
|
@ -43,10 +48,12 @@ In the examples below, replace the following:
|
||||||
|
|
||||||
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
|
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
|
||||||
Database name
|
Database name
|
||||||
|
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
|
||||||
|
Authentication token
|
||||||
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}:
|
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}:
|
||||||
Table name
|
Table name
|
||||||
|
|
||||||
{{% code-placeholders "(DATABASE|TABLE)_NAME" %}}
|
{{% code-placeholders "(DATABASE|TABLE)_NAME|AUTH_TOKEN" %}}
|
||||||
|
|
||||||
### Create a new file index for a database
|
### Create a new file index for a database
|
||||||
|
|
||||||
|
@ -55,6 +62,7 @@ In the examples below, replace the following:
|
||||||
```bash
|
```bash
|
||||||
influxdb3 create file_index \
|
influxdb3 create file_index \
|
||||||
--database DATABASE_NAME \
|
--database DATABASE_NAME \
|
||||||
|
--token AUTH_TOKEN \
|
||||||
column1 column2 column3
|
column1 column2 column3
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -65,6 +73,7 @@ influxdb3 create file_index \
|
||||||
```bash
|
```bash
|
||||||
influxdb3 create file_index \
|
influxdb3 create file_index \
|
||||||
--database DATABASE_NAME \
|
--database DATABASE_NAME \
|
||||||
|
--token AUTH_TOKEN \
|
||||||
--table TABLE_NAME \
|
--table TABLE_NAME \
|
||||||
column1 column2 column3
|
column1 column2 column3
|
||||||
```
|
```
|
||||||
|
|
|
@ -20,13 +20,15 @@ influxdb3 create last_cache [OPTIONS] --database <DATABASE_NAME> --table <TABLE>
|
||||||
| :----- | :---------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
| :----- | :---------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
|
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
|
||||||
| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
|
| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
|
||||||
| | `--token` | Authentication token |
|
| | `--token` | _({{< req >}})_ Authentication token |
|
||||||
| `-t` | `--table` | _({{< req >}})_ Table to create the cache for |
|
| `-t` | `--table` | _({{< req >}})_ Table to create the cache for |
|
||||||
| | `--key-columns` | Comma-separated list of columns to use as keys in the cache--for example: `foo,bar,baz` |
|
| | `--key-columns` | Comma-separated list of columns to use as keys in the cache--for example: `foo,bar,baz` |
|
||||||
| | `--value-columns` | Comma-separated list of columns to store as values in the cache--for example: `foo,bar,baz` |
|
| | `--value-columns` | Comma-separated list of columns to store as values in the cache--for example: `foo,bar,baz` |
|
||||||
| | `--count` | Number of entries per unique key column combination to store in the cache |
|
| | `--count` | Number of entries per unique key column combination to store in the cache |
|
||||||
| | `--ttl` | Cache entries' time-to-live (TTL) in [Humantime form](https://docs.rs/humantime/latest/humantime/fn.parse_duration.html)--for example: `10s`, `1min 30sec`, `3 hours` |
|
| | `--ttl` | Cache entries' time-to-live (TTL) in [Humantime form](https://docs.rs/humantime/latest/humantime/fn.parse_duration.html)--for example: `10s`, `1min 30sec`, `3 hours` |
|
||||||
|
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
|
||||||
| `-h` | `--help` | Print help information |
|
| `-h` | `--help` | Print help information |
|
||||||
|
| | `--help-all` | Print detailed help information |
|
||||||
|
|
||||||
### Option environment variables
|
### Option environment variables
|
||||||
|
|
||||||
|
|
|
@ -8,6 +8,7 @@ The `influxdb3 create plugin` command creates a new processing engine plugin.
|
||||||
```bash
|
```bash
|
||||||
influxdb3 create plugin [OPTIONS] \
|
influxdb3 create plugin [OPTIONS] \
|
||||||
--database <DATABASE_NAME> \
|
--database <DATABASE_NAME> \
|
||||||
|
--token <AUTH_TOKEN> \
|
||||||
--filename <PLUGIN_FILENAME> \
|
--filename <PLUGIN_FILENAME> \
|
||||||
--entry-point <FUNCTION_NAME> \
|
--entry-point <FUNCTION_NAME> \
|
||||||
<PLUGIN_NAME>
|
<PLUGIN_NAME>
|
||||||
|
@ -23,11 +24,13 @@ influxdb3 create plugin [OPTIONS] \
|
||||||
| :----- | :-------------- | :--------------------------------------------------------------------------------------- |
|
| :----- | :-------------- | :--------------------------------------------------------------------------------------- |
|
||||||
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
|
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
|
||||||
| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
|
| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
|
||||||
| | `--token` | Authentication token |
|
| | `--token` | _({{< req >}})_ Authentication token |
|
||||||
| | `--filename` | _({{< req >}})_ Name of the plugin Python file in the plugin directory |
|
| | `--filename` | _({{< req >}})_ Name of the plugin Python file in the plugin directory |
|
||||||
| | `--entry-point` | _({{< req >}})_ Entry point function name for the plugin |
|
| | `--entry-point` | _({{< req >}})_ Entry point function name for the plugin |
|
||||||
| | `--plugin-type` | Type of trigger the plugin processes (default is `wal_rows`) |
|
| | `--plugin-type` | Type of trigger the plugin processes (default is `wal_rows`) |
|
||||||
|
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
|
||||||
| `-h` | `--help` | Print help information |
|
| `-h` | `--help` | Print help information |
|
||||||
|
| | `--help-all` | Print detailed help information |
|
||||||
|
|
||||||
### Option environment variables
|
### Option environment variables
|
||||||
|
|
||||||
|
|
|
@ -9,6 +9,7 @@ The `influxdb3 create table` command creates a table in a database.
|
||||||
influxdb3 create table [OPTIONS] \
|
influxdb3 create table [OPTIONS] \
|
||||||
--tags [<TAGS>...] \
|
--tags [<TAGS>...] \
|
||||||
--database <DATABASE_NAME> \
|
--database <DATABASE_NAME> \
|
||||||
|
--token <AUTH_TOKEN> \
|
||||||
<TABLE_NAME>
|
<TABLE_NAME>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -22,10 +23,12 @@ influxdb3 create table [OPTIONS] \
|
||||||
| :----- | :----------- | :--------------------------------------------------------------------------------------- |
|
| :----- | :----------- | :--------------------------------------------------------------------------------------- |
|
||||||
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
|
| `-H` | `--host` | Host URL of the running {{< product-name >}} server (default is `http://127.0.0.1:8181`) |
|
||||||
| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
|
| `-d` | `--database` | _({{< req >}})_ Name of the database to operate on |
|
||||||
| | `--token` | Authentication token |
|
| | `--token` | _({{< req >}})_ Authentication token |
|
||||||
| | `--tags` | _({{< req >}})_ Comma-separated list of tag columns to include in the table |
|
| | `--tags` | _({{< req >}})_ Comma-separated list of tag columns to include in the table |
|
||||||
| | `--fields` | Comma-separated list of field columns and their types to include in the table |
|
| | `--fields` | Comma-separated list of field columns and their types to include in the table |
|
||||||
|
| | `--tls-ca` | Path to a custom TLS certificate authority (for testing or self-signed certificates) |
|
||||||
| `-h` | `--help` | Print help information |
|
| `-h` | `--help` | Print help information |
|
||||||
|
| | `--help-all` | Print detailed help information |
|
||||||
|
|
||||||
> [!Important]
|
> [!Important]
|
||||||
>
|
>
|
||||||
|
@ -53,6 +56,8 @@ In the examples below, replace the following:
|
||||||
|
|
||||||
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
|
- {{% code-placeholder-key %}}`DATABASE_NAME`{{% /code-placeholder-key %}}:
|
||||||
Database name
|
Database name
|
||||||
|
- {{% code-placeholder-key %}}`AUTH_TOKEN`{{% /code-placeholder-key %}}:
|
||||||
|
Authentication token
|
||||||
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}:
|
- {{% code-placeholder-key %}}`TABLE_NAME`{{% /code-placeholder-key %}}:
|
||||||
Table name
|
Table name
|
||||||
|
|
||||||
|
@ -64,6 +69,7 @@ In the examples below, replace the following:
|
||||||
influxdb3 create table \
|
influxdb3 create table \
|
||||||
--tags tag1,tag2,tag3 \
|
--tags tag1,tag2,tag3 \
|
||||||
--database DATABASE_NAME \
|
--database DATABASE_NAME \
|
||||||
|
--token AUTH_TOKEN \
|
||||||
TABLE_NAME
|
TABLE_NAME
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -76,6 +82,7 @@ influxdb3 create table \
|
||||||
--tags room,sensor_id \
|
--tags room,sensor_id \
|
||||||
--fields temp:float64,hum:float64,co:int64 \
|
--fields temp:float64,hum:float64,co:int64 \
|
||||||
--database DATABASE_NAME \
|
--database DATABASE_NAME \
|
||||||
|
--token AUTH_TOKEN \
|
||||||
TABLE_NAME
|
TABLE_NAME
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
|
@ -11,6 +11,7 @@ influxdb3 create token
|
||||||
|
|
||||||
## Options
|
## Options
|
||||||
|
|
||||||
| Option | | Description |
|
| Option | | Description |
|
||||||
| :----- | :------- | :--------------------- |
|
| :----- | :----------- | :------------------------------ |
|
||||||
| `-h` | `--help` | Print help information |
|
| `-h` | `--help` | Print help information |
|
||||||
|
| | `--help-all` | Print detailed help information |
|
||||||
|
|